Diffstat (limited to 'contrib/llvm-project/clang')
-rw-r--r--contrib/llvm-project/clang/include/clang-c/BuildSystem.h2
-rw-r--r--contrib/llvm-project/clang/include/clang-c/Index.h1065
-rw-r--r--contrib/llvm-project/clang/include/clang-c/Platform.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/APValue.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTConcept.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTContext.h235
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTDumper.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTDumperUtils.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTFwd.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporter.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h43
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h48
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Attr.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CanonicalType.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Comment.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentCommands.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentSema.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h194
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DataCollection.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Decl.h91
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclBase.h109
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclCXX.h162
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclGroup.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjC.h97
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h55
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h284
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Expr.h958
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprCXX.h440
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprObjC.h168
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h351
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h32
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h58
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LocInfoType.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Mangle.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ODRHash.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h1748
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h144
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RawCommentList.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h408
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Stmt.h118
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h343
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateBase.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateName.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h35
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Type.h709
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeLoc.h69
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeLocVisitor.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeProperties.td81
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h37
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h497
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h405
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersMacros.h176
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/GtestMatchers.h45
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/LiveVariables.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/UninitializedValues.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h383
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFG.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h74
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h94
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def88
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Attr.td413
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td314
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Builtins.def20
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def40
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsBPF.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def1916
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def1721
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def206
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsMips.def6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def13
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def61
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def20
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def79
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def16
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def24
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def40
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h42
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Cuda.h44
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h59
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td28
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td15
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td46
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td19
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td79
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td195
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td855
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ExpressionTraits.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FPOptions.def26
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Features.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileManager.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FixedPoint.h105
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.def50
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.h247
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandard.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandards.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Module.h113
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def1004
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h88
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SanitizerBlacklist.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SanitizerSpecialCaseList.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceManager.h69
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Specifiers.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h123
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h107
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def86
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h138
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/X86Target.def240
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/XRayInstr.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/XRayLists.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td14
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_cde.td232
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_mve.td671
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_mve_defs.td109
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon.td500
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td23
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sve.td2083
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h64
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitFuture.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h83
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Action.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/CC1Options.td931
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/CLCompatOptions.td466
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Distro.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Driver.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Job.h88
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Multilib.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.td2025
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Phases.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Tool.h56
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/ToolChain.h57
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Types.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Format/Format.h258
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h30
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/LogDiagnosticPrinter.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h25
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Index/IndexingAction.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Pragma.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h67
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/Parser.h333
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h89
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Initialization.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Lookup.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Overload.h30
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Ownership.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h102
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Scope.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Sema.h833
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Template.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h266
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h32
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td29
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td291
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def70
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def38
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h71
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerRegistryData.h226
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h257
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h53
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h190
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h205
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def10
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h178
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h149
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h30
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistry.h212
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h85
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Core/Diagnostic.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h73
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h89
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h29
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h470
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Tooling.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h89
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/module.modulemap10
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Internals.h4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransProtectedScope.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/APValue.cpp84
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTContext.cpp800
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDumper.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp2129
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrImpl.cpp151
-rw-r--r--contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentSema.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp796
-rw-r--r--contrib/llvm-project/clang/lib/AST/DataCollection.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp146
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclBase.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclCXX.cpp226
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclObjC.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp131
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclarationName.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/AST/Expr.cpp1168
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprCXX.cpp409
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprClassification.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp847
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprObjC.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/AST/FormatString.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Boolean.h7
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Integral.h33
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.h2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp (renamed from contrib/llvm-project/clang/lib/AST/Interp/Block.cpp)2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h (renamed from contrib/llvm-project/clang/lib/AST/Interp/Block.h)2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Source.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp371
-rw-r--r--contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/AST/Linkage.h1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Mangle.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp170
-rw-r--r--contrib/llvm-project/clang/lib/AST/NSAPI.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRHash.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/OSLog.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp655
-rw-r--r--contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp321
-rw-r--r--contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/RawCommentList.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/AST/Stmt.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp146
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtProfile.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateBase.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateName.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp402
-rw-r--r--contrib/llvm-project/clang/lib/AST/Type.cpp453
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypeLoc.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypePrinter.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp128
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp274
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp172
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h264
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp117
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/PostOrderCFGView.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ProgramPoint.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp142
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Attributes.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Cuda.cpp356
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/ExpressionTraits.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/Basic/FileManager.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Basic/FixedPoint.cpp136
-rw-r--r--contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangOptions.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Module.cpp92
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp728
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SanitizerBlacklist.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceManager.cpp165
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp183
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h28
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp84
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h51
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARC.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.h8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h17
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.h1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.h66
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h15
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.h170
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h21
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp840
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h58
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/XCore.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Version.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Warnings.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/XRayInstr.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Basic/XRayLists.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp266
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp223
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h32
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp3298
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp148
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h39
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h57
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp853
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h34
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp287
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h22
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp194
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGException.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp322
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp445
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp3650
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h311
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp1726
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp367
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.h40
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGValue.h38
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp357
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h359
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp571
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h102
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp197
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h8
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp151
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h18
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp367
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp1655
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h47
-rw-r--r--contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp283
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h2
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Action.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Compilation.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Distro.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Driver.cpp291
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Job.cpp63
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Multilib.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp290
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Tool.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChain.cpp226
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp483
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h41
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp198
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.h10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h33
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp722
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp344
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h28
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp195
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h15
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp445
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h44
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp230
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h46
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp389
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h38
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h18
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/InterfaceStubs.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp170
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h12
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp251
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h228
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp119
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h66
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Types.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp298
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp174
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h44
-rw-r--r--contrib/llvm-project/clang/lib/Format/Format.cpp271
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.h104
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp291
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h20
-rw-r--r--contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp658
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp382
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h9
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp187
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.h30
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp63
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp736
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp81
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp119
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp105
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_cmath.h41
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h268
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h333
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h347
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_math_forward_declares.h41
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h17
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h326
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h1185
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h64
-rw-r--r--contrib/llvm-project/clang/lib/Headers/altivec.h402
-rw-r--r--contrib/llvm-project/clang/lib/Headers/amxintrin.h225
-rw-r--r--contrib/llvm-project/clang/lib/Headers/arm_acle.h16
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx2intrin.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h19
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fintrin.h42
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h18
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h53
-rw-r--r--contrib/llvm-project/clang/lib/Headers/bmiintrin.h50
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cet.h66
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cldemoteintrin.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cpuid.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/emmintrin.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/immintrin.h207
-rw-r--r--contrib/llvm-project/clang/lib/Headers/intrin.h3
-rw-r--r--contrib/llvm-project/clang/lib/Headers/module.modulemap6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/msa.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c.h698
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h (renamed from contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h)25
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h35
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath69
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex25
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h25
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/math.h46
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new70
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/time.h32
-rw-r--r--contrib/llvm-project/clang/lib/Headers/serializeintrin.h30
-rw-r--r--contrib/llvm-project/clang/lib/Headers/tsxldtrkintrin.h56
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vecintrin.h8962
-rw-r--r--contrib/llvm-project/clang/lib/Headers/wasm_simd128.h1133
-rw-r--r--contrib/llvm-project/clang/lib/Headers/x86intrin.h27
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xmmintrin.h50
-rw-r--r--contrib/llvm-project/clang/lib/Index/CommentToXML.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexBody.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexDecl.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexingAction.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexingContext.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Index/USRGeneration.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Lexer.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp236
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Pragma.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp282
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp606
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp155
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp400
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp401
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseInit.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp1503
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp330
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp267
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp273
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td331
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp338
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp250
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp964
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCast.cpp178
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp2536
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp782
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp805
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp1404
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp1120
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp117
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp1778
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp746
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp236
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaInit.cpp156
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp221
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp617
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp3883
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp535
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp107
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp483
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp279
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp127
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp231
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaType.cpp490
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TreeTransform.h839
-rw-r--r--contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h102
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp2029
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp301
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp336
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp163
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp81
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp623
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp357
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp299
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp1083
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp150
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp241
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp344
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h22
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp1410
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp138
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp1406
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp149
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp181
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp562
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h32
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp180
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp189
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2223
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp1048
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp298
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp93
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h84
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h36
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp155
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp172
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h59
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp167
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp195
-rwxr-xr-xcontrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp363
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp241
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp206
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp354
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp296
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp842
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SubEngine.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp156
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp175
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp (renamed from contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp)62
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp494
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Core/Diagnostic.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Core/Lookup.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp131
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Execution.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/FileMatchTrie.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/RefactoringCallbacks.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp1108
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp281
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp289
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Tooling.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp279
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp370
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp44
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1_main.cpp7
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1as_main.cpp41
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp1
-rw-r--r--contrib/llvm-project/clang/tools/driver/driver.cpp17
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp6
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp850
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp2
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp4
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp5
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp69
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp28
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp12
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp27
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp835
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp411
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp1436
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGen.cpp65
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h13
999 files changed, 98647 insertions, 46714 deletions
diff --git a/contrib/llvm-project/clang/include/clang-c/BuildSystem.h b/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
index 4e9f6dee0279..296e61247cef 100644
--- a/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
+++ b/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
@@ -117,7 +117,7 @@ clang_ModuleMapDescriptor_setFrameworkModuleName(CXModuleMapDescriptor,
const char *name);
/**
- * Sets the umbrealla header name that the module.map describes.
+ * Sets the umbrella header name that the module.map describes.
* \returns 0 for success, non-zero to indicate an error.
*/
CINDEX_LINKAGE enum CXErrorCode
diff --git a/contrib/llvm-project/clang/include/clang-c/Index.h b/contrib/llvm-project/clang/include/clang-c/Index.h
index b653995ebbd0..5fa728d6d66c 100644
--- a/contrib/llvm-project/clang/include/clang-c/Index.h
+++ b/contrib/llvm-project/clang/include/clang-c/Index.h
@@ -33,24 +33,19 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 59
+#define CINDEX_VERSION_MINOR 60
-#define CINDEX_VERSION_ENCODE(major, minor) ( \
- ((major) * 10000) \
- + ((minor) * 1))
+#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
-#define CINDEX_VERSION CINDEX_VERSION_ENCODE( \
- CINDEX_VERSION_MAJOR, \
- CINDEX_VERSION_MINOR )
+#define CINDEX_VERSION \
+ CINDEX_VERSION_ENCODE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)
-#define CINDEX_VERSION_STRINGIZE_(major, minor) \
- #major"."#minor
-#define CINDEX_VERSION_STRINGIZE(major, minor) \
- CINDEX_VERSION_STRINGIZE_(major, minor)
+#define CINDEX_VERSION_STRINGIZE_(major, minor) #major "." #minor
+#define CINDEX_VERSION_STRINGIZE(major, minor) \
+ CINDEX_VERSION_STRINGIZE_(major, minor)
-#define CINDEX_VERSION_STRING CINDEX_VERSION_STRINGIZE( \
- CINDEX_VERSION_MAJOR, \
- CINDEX_VERSION_MINOR)
+#define CINDEX_VERSION_STRING \
+ CINDEX_VERSION_STRINGIZE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)
LLVM_CLANG_C_EXTERN_C_BEGIN
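[Editor's note, not part of the patch] Apart from the MINOR bump from 59 to 60, the hunk above only reflows the version macros; the encoding scheme is unchanged. A minimal sketch of what they expand to after this change:

#include <stdio.h>
#include <clang-c/Index.h>

int main(void) {
  /* CINDEX_VERSION_ENCODE(0, 60) == 0 * 10000 + 60 * 1 == 60 */
  printf("CINDEX_VERSION        = %d\n", CINDEX_VERSION);
  /* CINDEX_VERSION_STRINGIZE(0, 60) pastes the literals into "0.60" */
  printf("CINDEX_VERSION_STRING = %s\n", CINDEX_VERSION_STRING);
  return 0;
}
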
@@ -382,7 +377,7 @@ typedef struct {
* \param outID stores the returned CXFileUniqueID.
* \returns If there was a failure getting the unique ID, returns non-zero,
* otherwise returns 0.
-*/
+ */
CINDEX_LINKAGE int clang_getFileUniqueID(CXFile file, CXFileUniqueID *outID);
/**
@@ -390,8 +385,8 @@ CINDEX_LINKAGE int clang_getFileUniqueID(CXFile file, CXFileUniqueID *outID);
* multiple inclusions, either with the conventional
* \#ifndef/\#define/\#endif macro guards or with \#pragma once.
*/
-CINDEX_LINKAGE unsigned
-clang_isFileMultipleIncludeGuarded(CXTranslationUnit tu, CXFile file);
+CINDEX_LINKAGE unsigned clang_isFileMultipleIncludeGuarded(CXTranslationUnit tu,
+ CXFile file);
/**
* Retrieve a file handle within the given translation unit.
@@ -496,8 +491,7 @@ CINDEX_LINKAGE unsigned clang_equalLocations(CXSourceLocation loc1,
* in a particular translation unit.
*/
CINDEX_LINKAGE CXSourceLocation clang_getLocation(CXTranslationUnit tu,
- CXFile file,
- unsigned line,
+ CXFile file, unsigned line,
unsigned column);
/**
* Retrieves the source location associated with a given character offset
@@ -566,8 +560,7 @@ CINDEX_LINKAGE int clang_Range_isNull(CXSourceRange range);
* buffer to which the given source location points.
*/
CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
- CXFile *file,
- unsigned *line,
+ CXFile *file, unsigned *line,
unsigned *column,
unsigned *offset);
@@ -613,8 +606,7 @@ CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
*/
CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
CXString *filename,
- unsigned *line,
- unsigned *column);
+ unsigned *line, unsigned *column);
/**
* Legacy API to retrieve the file, line, column, and offset represented
@@ -625,8 +617,7 @@ CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
* details.
*/
CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
- CXFile *file,
- unsigned *line,
+ CXFile *file, unsigned *line,
unsigned *column,
unsigned *offset);
@@ -653,8 +644,7 @@ CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
* buffer to which the given source location points.
*/
CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
- CXFile *file,
- unsigned *line,
+ CXFile *file, unsigned *line,
unsigned *column,
unsigned *offset);
@@ -682,10 +672,8 @@ CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
* buffer to which the given source location points.
*/
CINDEX_LINKAGE void clang_getFileLocation(CXSourceLocation location,
- CXFile *file,
- unsigned *line,
- unsigned *column,
- unsigned *offset);
+ CXFile *file, unsigned *line,
+ unsigned *column, unsigned *offset);
/**
* Retrieve a source location representing the first character within a
@@ -727,7 +715,8 @@ CINDEX_LINKAGE CXSourceRangeList *clang_getSkippedRanges(CXTranslationUnit tu,
* The preprocessor will skip lines when they are surrounded by an
* if/ifdef/ifndef directive whose condition does not evaluate to true.
*/
-CINDEX_LINKAGE CXSourceRangeList *clang_getAllSkippedRanges(CXTranslationUnit tu);
+CINDEX_LINKAGE CXSourceRangeList *
+clang_getAllSkippedRanges(CXTranslationUnit tu);
/**
* Destroy the given \c CXSourceRangeList.
@@ -758,7 +747,7 @@ enum CXDiagnosticSeverity {
* This diagnostic is a note that should be attached to the
* previous (non-note) diagnostic.
*/
- CXDiagnostic_Note = 1,
+ CXDiagnostic_Note = 1,
/**
* This diagnostic indicates suspicious code that may not be
@@ -769,14 +758,14 @@ enum CXDiagnosticSeverity {
/**
* This diagnostic indicates that the code is ill-formed.
*/
- CXDiagnostic_Error = 3,
+ CXDiagnostic_Error = 3,
/**
* This diagnostic indicates that the code is ill-formed such
* that future parser recovery is unlikely to produce useful
* results.
*/
- CXDiagnostic_Fatal = 4
+ CXDiagnostic_Fatal = 4
};
/**
@@ -849,9 +838,8 @@ enum CXLoadDiag_Error {
* \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
* diagnostics should be released using clang_disposeDiagnosticSet().
*/
-CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(const char *file,
- enum CXLoadDiag_Error *error,
- CXString *errorString);
+CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(
+ const char *file, enum CXLoadDiag_Error *error, CXString *errorString);
/**
* Release a CXDiagnosticSet and all of its contained diagnostics.
@@ -891,7 +879,7 @@ CINDEX_LINKAGE CXDiagnostic clang_getDiagnostic(CXTranslationUnit Unit,
* \param Unit the translation unit to query.
*/
CINDEX_LINKAGE CXDiagnosticSet
- clang_getDiagnosticSetFromTU(CXTranslationUnit Unit);
+clang_getDiagnosticSetFromTU(CXTranslationUnit Unit);
/**
* Destroy a diagnostic.
@@ -997,7 +985,7 @@ CINDEX_LINKAGE unsigned clang_defaultDiagnosticDisplayOptions(void);
* Determine the severity of the given diagnostic.
*/
CINDEX_LINKAGE enum CXDiagnosticSeverity
-clang_getDiagnosticSeverity(CXDiagnostic);
+ clang_getDiagnosticSeverity(CXDiagnostic);
/**
* Retrieve the source location of the given diagnostic.
@@ -1049,8 +1037,8 @@ CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
*
* \returns The name of the given diagnostic category.
*/
-CINDEX_DEPRECATED CINDEX_LINKAGE
-CXString clang_getDiagnosticCategoryName(unsigned Category);
+CINDEX_DEPRECATED CINDEX_LINKAGE CXString
+clang_getDiagnosticCategoryName(unsigned Category);
/**
* Retrieve the diagnostic category text for a given diagnostic.
@@ -1112,9 +1100,8 @@ CINDEX_LINKAGE unsigned clang_getDiagnosticNumFixIts(CXDiagnostic Diagnostic);
* \returns A string containing text that should be replace the source
* code indicated by the \c ReplacementRange.
*/
-CINDEX_LINKAGE CXString clang_getDiagnosticFixIt(CXDiagnostic Diagnostic,
- unsigned FixIt,
- CXSourceRange *ReplacementRange);
+CINDEX_LINKAGE CXString clang_getDiagnosticFixIt(
+ CXDiagnostic Diagnostic, unsigned FixIt, CXSourceRange *ReplacementRange);
/**
* @}
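[Editor's note, not part of the patch] The diagnostic-related hunks above only re-wrap clang_getDiagnosticSeverity, clang_getDiagnosticCategoryName, and clang_getDiagnosticFixIt; the calling convention is untouched. As a reminder of how this part of the API is typically driven, a minimal sketch (tu is assumed to be an already-parsed CXTranslationUnit):

#include <stdio.h>
#include <clang-c/Index.h>

static void dump_diagnostics(CXTranslationUnit tu) {
  unsigned n = clang_getNumDiagnostics(tu);
  for (unsigned i = 0; i < n; ++i) {
    CXDiagnostic d = clang_getDiagnostic(tu, i);
    /* Render the diagnostic the same way the clang driver would. */
    CXString text =
        clang_formatDiagnostic(d, clang_defaultDiagnosticDisplayOptions());
    fprintf(stderr, "%s\n", clang_getCString(text));
    clang_disposeString(text);
    clang_disposeDiagnostic(d);
  }
}
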
@@ -1177,12 +1164,9 @@ clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
* guarantee their validity until the call to this function returns.
*/
CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnitFromSourceFile(
- CXIndex CIdx,
- const char *source_filename,
- int num_clang_command_line_args,
- const char * const *clang_command_line_args,
- unsigned num_unsaved_files,
- struct CXUnsavedFile *unsaved_files);
+ CXIndex CIdx, const char *source_filename, int num_clang_command_line_args,
+ const char *const *clang_command_line_args, unsigned num_unsaved_files,
+ struct CXUnsavedFile *unsaved_files);
/**
* Same as \c clang_createTranslationUnit2, but returns
@@ -1190,9 +1174,8 @@ CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnitFromSourceFile(
* routine returns a \c NULL \c CXTranslationUnit, without further detailed
* error codes.
*/
-CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnit(
- CXIndex CIdx,
- const char *ast_filename);
+CINDEX_LINKAGE CXTranslationUnit
+clang_createTranslationUnit(CXIndex CIdx, const char *ast_filename);
/**
* Create a translation unit from an AST file (\c -emit-ast).
@@ -1202,10 +1185,9 @@ CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnit(
*
* \returns Zero on success, otherwise returns an error code.
*/
-CINDEX_LINKAGE enum CXErrorCode clang_createTranslationUnit2(
- CXIndex CIdx,
- const char *ast_filename,
- CXTranslationUnit *out_TU);
+CINDEX_LINKAGE enum CXErrorCode
+clang_createTranslationUnit2(CXIndex CIdx, const char *ast_filename,
+ CXTranslationUnit *out_TU);
/**
* Flags that control the creation of translation units.
@@ -1383,14 +1365,11 @@ CINDEX_LINKAGE unsigned clang_defaultEditingTranslationUnitOptions(void);
* routine returns a \c NULL \c CXTranslationUnit, without further detailed
* error codes.
*/
-CINDEX_LINKAGE CXTranslationUnit
-clang_parseTranslationUnit(CXIndex CIdx,
- const char *source_filename,
- const char *const *command_line_args,
- int num_command_line_args,
- struct CXUnsavedFile *unsaved_files,
- unsigned num_unsaved_files,
- unsigned options);
+CINDEX_LINKAGE CXTranslationUnit clang_parseTranslationUnit(
+ CXIndex CIdx, const char *source_filename,
+ const char *const *command_line_args, int num_command_line_args,
+ struct CXUnsavedFile *unsaved_files, unsigned num_unsaved_files,
+ unsigned options);
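[Editor's note, not part of the patch] The reflowed declaration above keeps the same parameters in the same order. A minimal end-to-end sketch of calling it; the file name, the include path, the -std flag, and the DetailedPreprocessingRecord option are illustrative assumptions, not taken from the patch:

#include <stdio.h>
#include <clang-c/Index.h>

int main(void) {
  CXIndex idx = clang_createIndex(/*excludeDeclarationsFromPCH=*/0,
                                  /*displayDiagnostics=*/1);
  const char *args[] = {"-I/usr/include", "-std=c11"};
  CXTranslationUnit tu = clang_parseTranslationUnit(
      idx, "example.c", args, 2, /*unsaved_files=*/NULL,
      /*num_unsaved_files=*/0, CXTranslationUnit_DetailedPreprocessingRecord);
  if (!tu) {
    fprintf(stderr, "parse failed\n");
    clang_disposeIndex(idx);
    return 1;
  }
  /* ... inspect the translation unit here ... */
  clang_disposeTranslationUnit(tu);
  clang_disposeIndex(idx);
  return 0;
}
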
/**
* Parse the given source file and the translation unit corresponding
@@ -1436,15 +1415,11 @@ clang_parseTranslationUnit(CXIndex CIdx,
*
* \returns Zero on success, otherwise returns an error code.
*/
-CINDEX_LINKAGE enum CXErrorCode
-clang_parseTranslationUnit2(CXIndex CIdx,
- const char *source_filename,
- const char *const *command_line_args,
- int num_command_line_args,
- struct CXUnsavedFile *unsaved_files,
- unsigned num_unsaved_files,
- unsigned options,
- CXTranslationUnit *out_TU);
+CINDEX_LINKAGE enum CXErrorCode clang_parseTranslationUnit2(
+ CXIndex CIdx, const char *source_filename,
+ const char *const *command_line_args, int num_command_line_args,
+ struct CXUnsavedFile *unsaved_files, unsigned num_unsaved_files,
+ unsigned options, CXTranslationUnit *out_TU);
/**
* Same as clang_parseTranslationUnit2 but requires a full command line
@@ -1623,14 +1598,14 @@ CINDEX_LINKAGE unsigned clang_defaultReparseOptions(CXTranslationUnit TU);
* \c clang_disposeTranslationUnit(TU). The error codes returned by this
* routine are described by the \c CXErrorCode enum.
*/
-CINDEX_LINKAGE int clang_reparseTranslationUnit(CXTranslationUnit TU,
- unsigned num_unsaved_files,
- struct CXUnsavedFile *unsaved_files,
- unsigned options);
+CINDEX_LINKAGE int
+clang_reparseTranslationUnit(CXTranslationUnit TU, unsigned num_unsaved_files,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned options);
/**
- * Categorizes how memory is being used by a translation unit.
- */
+ * Categorizes how memory is being used by a translation unit.
+ */
enum CXTUResourceUsageKind {
CXTUResourceUsage_AST = 1,
CXTUResourceUsage_Identifiers = 2,
@@ -1648,16 +1623,16 @@ enum CXTUResourceUsageKind {
CXTUResourceUsage_Preprocessor_HeaderSearch = 14,
CXTUResourceUsage_MEMORY_IN_BYTES_BEGIN = CXTUResourceUsage_AST,
CXTUResourceUsage_MEMORY_IN_BYTES_END =
- CXTUResourceUsage_Preprocessor_HeaderSearch,
+ CXTUResourceUsage_Preprocessor_HeaderSearch,
CXTUResourceUsage_First = CXTUResourceUsage_AST,
CXTUResourceUsage_Last = CXTUResourceUsage_Preprocessor_HeaderSearch
};
/**
- * Returns the human-readable null-terminated C string that represents
- * the name of the memory category. This string should never be freed.
- */
+ * Returns the human-readable null-terminated C string that represents
+ * the name of the memory category. This string should never be freed.
+ */
CINDEX_LINKAGE
const char *clang_getTUResourceUsageName(enum CXTUResourceUsageKind kind);
@@ -1670,8 +1645,8 @@ typedef struct CXTUResourceUsageEntry {
} CXTUResourceUsageEntry;
/**
- * The memory usage of a CXTranslationUnit, broken into categories.
- */
+ * The memory usage of a CXTranslationUnit, broken into categories.
+ */
typedef struct CXTUResourceUsage {
/* Private data member, used for queries. */
void *data;
@@ -1686,10 +1661,11 @@ typedef struct CXTUResourceUsage {
} CXTUResourceUsage;
/**
- * Return the memory usage of a translation unit. This object
- * should be released with clang_disposeCXTUResourceUsage().
- */
-CINDEX_LINKAGE CXTUResourceUsage clang_getCXTUResourceUsage(CXTranslationUnit TU);
+ * Return the memory usage of a translation unit. This object
+ * should be released with clang_disposeCXTUResourceUsage().
+ */
+CINDEX_LINKAGE CXTUResourceUsage
+clang_getCXTUResourceUsage(CXTranslationUnit TU);
CINDEX_LINKAGE void clang_disposeCXTUResourceUsage(CXTUResourceUsage usage);
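[Editor's note, not part of the patch] The reflowed clang_getCXTUResourceUsage declaration above behaves as before, and its result still has to be released with clang_disposeCXTUResourceUsage. A minimal sketch of walking the per-category entries, again assuming an already-parsed tu:

#include <stdio.h>
#include <clang-c/Index.h>

static void print_memory_usage(CXTranslationUnit tu) {
  CXTUResourceUsage usage = clang_getCXTUResourceUsage(tu);
  for (unsigned i = 0; i < usage.numEntries; ++i) {
    CXTUResourceUsageEntry e = usage.entries[i];
    /* All current categories report their amount in bytes. */
    printf("%-40s %lu bytes\n",
           clang_getTUResourceUsageName(e.kind), e.amount);
  }
  clang_disposeCXTUResourceUsage(usage);
}
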
@@ -1704,24 +1680,21 @@ clang_getTranslationUnitTargetInfo(CXTranslationUnit CTUnit);
/**
* Destroy the CXTargetInfo object.
*/
-CINDEX_LINKAGE void
-clang_TargetInfo_dispose(CXTargetInfo Info);
+CINDEX_LINKAGE void clang_TargetInfo_dispose(CXTargetInfo Info);
/**
* Get the normalized target triple as a string.
*
* Returns the empty string in case of any error.
*/
-CINDEX_LINKAGE CXString
-clang_TargetInfo_getTriple(CXTargetInfo Info);
+CINDEX_LINKAGE CXString clang_TargetInfo_getTriple(CXTargetInfo Info);
/**
* Get the pointer width of the target in bits.
*
* Returns -1 in case of error.
*/
-CINDEX_LINKAGE int
-clang_TargetInfo_getPointerWidth(CXTargetInfo Info);
+CINDEX_LINKAGE int clang_TargetInfo_getPointerWidth(CXTargetInfo Info);
/**
* @}
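[Editor's note, not part of the patch] The remaining hunks renumber and realign the CXCursorKind enumerators: CXCursor_CXXAddrspaceCastExpr is inserted at 129, the OpenMP array-shaping and iterator expressions and the depobj/scan directives are appended, and the later expression kinds shift by one. Source that switches on the symbolic names is unaffected after a recompile; only code (or an old binary) that hard-coded the numeric values would see shifted kinds. A minimal, value-independent sketch of consuming cursor kinds, assuming a parsed tu; the helper names are mine:

#include <stdio.h>
#include <clang-c/Index.h>

static enum CXChildVisitResult print_kind(CXCursor cursor, CXCursor parent,
                                          CXClientData data) {
  (void)parent;
  (void)data;
  enum CXCursorKind kind = clang_getCursorKind(cursor);
  CXString spelling = clang_getCursorKindSpelling(kind);
  printf("%s\n", clang_getCString(spelling));
  clang_disposeString(spelling);
  return CXChildVisit_Recurse; /* walk the whole tree */
}

static void dump_cursor_kinds(CXTranslationUnit tu) {
  clang_visitChildren(clang_getTranslationUnitCursor(tu), print_kind, NULL);
}
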
@@ -1741,95 +1714,95 @@ enum CXCursorKind {
* spelling, find their definitions, etc. However, the specific kind
* of the declaration is not reported.
*/
- CXCursor_UnexposedDecl = 1,
+ CXCursor_UnexposedDecl = 1,
/** A C or C++ struct. */
- CXCursor_StructDecl = 2,
+ CXCursor_StructDecl = 2,
/** A C or C++ union. */
- CXCursor_UnionDecl = 3,
+ CXCursor_UnionDecl = 3,
/** A C++ class. */
- CXCursor_ClassDecl = 4,
+ CXCursor_ClassDecl = 4,
/** An enumeration. */
- CXCursor_EnumDecl = 5,
+ CXCursor_EnumDecl = 5,
/**
* A field (in C) or non-static data member (in C++) in a
* struct, union, or C++ class.
*/
- CXCursor_FieldDecl = 6,
+ CXCursor_FieldDecl = 6,
/** An enumerator constant. */
- CXCursor_EnumConstantDecl = 7,
+ CXCursor_EnumConstantDecl = 7,
/** A function. */
- CXCursor_FunctionDecl = 8,
+ CXCursor_FunctionDecl = 8,
/** A variable. */
- CXCursor_VarDecl = 9,
+ CXCursor_VarDecl = 9,
/** A function or method parameter. */
- CXCursor_ParmDecl = 10,
+ CXCursor_ParmDecl = 10,
/** An Objective-C \@interface. */
- CXCursor_ObjCInterfaceDecl = 11,
+ CXCursor_ObjCInterfaceDecl = 11,
/** An Objective-C \@interface for a category. */
- CXCursor_ObjCCategoryDecl = 12,
+ CXCursor_ObjCCategoryDecl = 12,
/** An Objective-C \@protocol declaration. */
- CXCursor_ObjCProtocolDecl = 13,
+ CXCursor_ObjCProtocolDecl = 13,
/** An Objective-C \@property declaration. */
- CXCursor_ObjCPropertyDecl = 14,
+ CXCursor_ObjCPropertyDecl = 14,
/** An Objective-C instance variable. */
- CXCursor_ObjCIvarDecl = 15,
+ CXCursor_ObjCIvarDecl = 15,
/** An Objective-C instance method. */
- CXCursor_ObjCInstanceMethodDecl = 16,
+ CXCursor_ObjCInstanceMethodDecl = 16,
/** An Objective-C class method. */
- CXCursor_ObjCClassMethodDecl = 17,
+ CXCursor_ObjCClassMethodDecl = 17,
/** An Objective-C \@implementation. */
- CXCursor_ObjCImplementationDecl = 18,
+ CXCursor_ObjCImplementationDecl = 18,
/** An Objective-C \@implementation for a category. */
- CXCursor_ObjCCategoryImplDecl = 19,
+ CXCursor_ObjCCategoryImplDecl = 19,
/** A typedef. */
- CXCursor_TypedefDecl = 20,
+ CXCursor_TypedefDecl = 20,
/** A C++ class method. */
- CXCursor_CXXMethod = 21,
+ CXCursor_CXXMethod = 21,
/** A C++ namespace. */
- CXCursor_Namespace = 22,
+ CXCursor_Namespace = 22,
/** A linkage specification, e.g. 'extern "C"'. */
- CXCursor_LinkageSpec = 23,
+ CXCursor_LinkageSpec = 23,
/** A C++ constructor. */
- CXCursor_Constructor = 24,
+ CXCursor_Constructor = 24,
/** A C++ destructor. */
- CXCursor_Destructor = 25,
+ CXCursor_Destructor = 25,
/** A C++ conversion function. */
- CXCursor_ConversionFunction = 26,
+ CXCursor_ConversionFunction = 26,
/** A C++ template type parameter. */
- CXCursor_TemplateTypeParameter = 27,
+ CXCursor_TemplateTypeParameter = 27,
/** A C++ non-type template parameter. */
- CXCursor_NonTypeTemplateParameter = 28,
+ CXCursor_NonTypeTemplateParameter = 28,
/** A C++ template template parameter. */
- CXCursor_TemplateTemplateParameter = 29,
+ CXCursor_TemplateTemplateParameter = 29,
/** A C++ function template. */
- CXCursor_FunctionTemplate = 30,
+ CXCursor_FunctionTemplate = 30,
/** A C++ class template. */
- CXCursor_ClassTemplate = 31,
+ CXCursor_ClassTemplate = 31,
/** A C++ class template partial specialization. */
CXCursor_ClassTemplatePartialSpecialization = 32,
/** A C++ namespace alias declaration. */
- CXCursor_NamespaceAlias = 33,
+ CXCursor_NamespaceAlias = 33,
/** A C++ using directive. */
- CXCursor_UsingDirective = 34,
+ CXCursor_UsingDirective = 34,
/** A C++ using declaration. */
- CXCursor_UsingDeclaration = 35,
+ CXCursor_UsingDeclaration = 35,
/** A C++ alias declaration */
- CXCursor_TypeAliasDecl = 36,
+ CXCursor_TypeAliasDecl = 36,
/** An Objective-C \@synthesize definition. */
- CXCursor_ObjCSynthesizeDecl = 37,
+ CXCursor_ObjCSynthesizeDecl = 37,
/** An Objective-C \@dynamic definition. */
- CXCursor_ObjCDynamicDecl = 38,
+ CXCursor_ObjCDynamicDecl = 38,
/** An access specifier. */
- CXCursor_CXXAccessSpecifier = 39,
+ CXCursor_CXXAccessSpecifier = 39,
- CXCursor_FirstDecl = CXCursor_UnexposedDecl,
- CXCursor_LastDecl = CXCursor_CXXAccessSpecifier,
+ CXCursor_FirstDecl = CXCursor_UnexposedDecl,
+ CXCursor_LastDecl = CXCursor_CXXAccessSpecifier,
/* References */
- CXCursor_FirstRef = 40, /* Decl references */
- CXCursor_ObjCSuperClassRef = 40,
- CXCursor_ObjCProtocolRef = 41,
- CXCursor_ObjCClassRef = 42,
+ CXCursor_FirstRef = 40, /* Decl references */
+ CXCursor_ObjCSuperClassRef = 40,
+ CXCursor_ObjCProtocolRef = 41,
+ CXCursor_ObjCClassRef = 42,
/**
* A reference to a type declaration.
*
@@ -1845,22 +1818,22 @@ enum CXCursorKind {
* while the type of the variable "size" is referenced. The cursor
* referenced by the type of size is the typedef for size_type.
*/
- CXCursor_TypeRef = 43,
- CXCursor_CXXBaseSpecifier = 44,
+ CXCursor_TypeRef = 43,
+ CXCursor_CXXBaseSpecifier = 44,
/**
* A reference to a class template, function template, template
* template parameter, or class template partial specialization.
*/
- CXCursor_TemplateRef = 45,
+ CXCursor_TemplateRef = 45,
/**
* A reference to a namespace or namespace alias.
*/
- CXCursor_NamespaceRef = 46,
+ CXCursor_NamespaceRef = 46,
/**
* A reference to a member of a struct, union, or class that occurs in
* some non-expression context, e.g., a designated initializer.
*/
- CXCursor_MemberRef = 47,
+ CXCursor_MemberRef = 47,
/**
* A reference to a labeled statement.
*
@@ -1876,7 +1849,7 @@ enum CXCursorKind {
*
* A label reference cursor refers to a label statement.
*/
- CXCursor_LabelRef = 48,
+ CXCursor_LabelRef = 48,
/**
* A reference to a set of overloaded functions or function templates
@@ -1914,26 +1887,26 @@ enum CXCursorKind {
* \c clang_getOverloadedDecl() can be used to retrieve the definitions
* referenced by this cursor.
*/
- CXCursor_OverloadedDeclRef = 49,
+ CXCursor_OverloadedDeclRef = 49,
/**
* A reference to a variable that occurs in some non-expression
* context, e.g., a C++ lambda capture list.
*/
- CXCursor_VariableRef = 50,
+ CXCursor_VariableRef = 50,
- CXCursor_LastRef = CXCursor_VariableRef,
+ CXCursor_LastRef = CXCursor_VariableRef,
/* Error conditions */
- CXCursor_FirstInvalid = 70,
- CXCursor_InvalidFile = 70,
- CXCursor_NoDeclFound = 71,
- CXCursor_NotImplemented = 72,
- CXCursor_InvalidCode = 73,
- CXCursor_LastInvalid = CXCursor_InvalidCode,
+ CXCursor_FirstInvalid = 70,
+ CXCursor_InvalidFile = 70,
+ CXCursor_NoDeclFound = 71,
+ CXCursor_NotImplemented = 72,
+ CXCursor_InvalidCode = 73,
+ CXCursor_LastInvalid = CXCursor_InvalidCode,
/* Expressions */
- CXCursor_FirstExpr = 100,
+ CXCursor_FirstExpr = 100,
/**
* An expression whose specific kind is not exposed via this
@@ -1944,104 +1917,104 @@ enum CXCursorKind {
* spelling, children, etc. However, the specific kind of the
* expression is not reported.
*/
- CXCursor_UnexposedExpr = 100,
+ CXCursor_UnexposedExpr = 100,
/**
* An expression that refers to some value declaration, such
* as a function, variable, or enumerator.
*/
- CXCursor_DeclRefExpr = 101,
+ CXCursor_DeclRefExpr = 101,
/**
* An expression that refers to a member of a struct, union,
* class, Objective-C class, etc.
*/
- CXCursor_MemberRefExpr = 102,
+ CXCursor_MemberRefExpr = 102,
/** An expression that calls a function. */
- CXCursor_CallExpr = 103,
+ CXCursor_CallExpr = 103,
/** An expression that sends a message to an Objective-C
object or class. */
- CXCursor_ObjCMessageExpr = 104,
+ CXCursor_ObjCMessageExpr = 104,
/** An expression that represents a block literal. */
- CXCursor_BlockExpr = 105,
+ CXCursor_BlockExpr = 105,
/** An integer literal.
*/
- CXCursor_IntegerLiteral = 106,
+ CXCursor_IntegerLiteral = 106,
/** A floating point number literal.
*/
- CXCursor_FloatingLiteral = 107,
+ CXCursor_FloatingLiteral = 107,
/** An imaginary number literal.
*/
- CXCursor_ImaginaryLiteral = 108,
+ CXCursor_ImaginaryLiteral = 108,
/** A string literal.
*/
- CXCursor_StringLiteral = 109,
+ CXCursor_StringLiteral = 109,
/** A character literal.
*/
- CXCursor_CharacterLiteral = 110,
+ CXCursor_CharacterLiteral = 110,
/** A parenthesized expression, e.g. "(1)".
*
* This AST node is only formed if full location information is requested.
*/
- CXCursor_ParenExpr = 111,
+ CXCursor_ParenExpr = 111,
/** This represents the unary-expression's (except sizeof and
* alignof).
*/
- CXCursor_UnaryOperator = 112,
+ CXCursor_UnaryOperator = 112,
/** [C99 6.5.2.1] Array Subscripting.
*/
- CXCursor_ArraySubscriptExpr = 113,
+ CXCursor_ArraySubscriptExpr = 113,
/** A builtin binary operation expression such as "x + y" or
* "x <= y".
*/
- CXCursor_BinaryOperator = 114,
+ CXCursor_BinaryOperator = 114,
/** Compound assignment such as "+=".
*/
- CXCursor_CompoundAssignOperator = 115,
+ CXCursor_CompoundAssignOperator = 115,
/** The ?: ternary operator.
*/
- CXCursor_ConditionalOperator = 116,
+ CXCursor_ConditionalOperator = 116,
/** An explicit cast in C (C99 6.5.4) or a C-style cast in C++
* (C++ [expr.cast]), which uses the syntax (Type)expr.
*
* For example: (int)f.
*/
- CXCursor_CStyleCastExpr = 117,
+ CXCursor_CStyleCastExpr = 117,
/** [C99 6.5.2.5]
*/
- CXCursor_CompoundLiteralExpr = 118,
+ CXCursor_CompoundLiteralExpr = 118,
/** Describes an C or C++ initializer list.
*/
- CXCursor_InitListExpr = 119,
+ CXCursor_InitListExpr = 119,
/** The GNU address of label extension, representing &&label.
*/
- CXCursor_AddrLabelExpr = 120,
+ CXCursor_AddrLabelExpr = 120,
/** This is the GNU Statement Expression extension: ({int X=4; X;})
*/
- CXCursor_StmtExpr = 121,
+ CXCursor_StmtExpr = 121,
/** Represents a C11 generic selection.
*/
- CXCursor_GenericSelectionExpr = 122,
+ CXCursor_GenericSelectionExpr = 122,
/** Implements the GNU __null extension, which is a name for a null
* pointer constant that has integral type (e.g., int or long) and is the same
@@ -2051,23 +2024,23 @@ enum CXCursorKind {
* NULL as __null in C++ rather than using 0 (which is an integer that may not
* match the size of a pointer).
*/
- CXCursor_GNUNullExpr = 123,
+ CXCursor_GNUNullExpr = 123,
/** C++'s static_cast<> expression.
*/
- CXCursor_CXXStaticCastExpr = 124,
+ CXCursor_CXXStaticCastExpr = 124,
/** C++'s dynamic_cast<> expression.
*/
- CXCursor_CXXDynamicCastExpr = 125,
+ CXCursor_CXXDynamicCastExpr = 125,
/** C++'s reinterpret_cast<> expression.
*/
- CXCursor_CXXReinterpretCastExpr = 126,
+ CXCursor_CXXReinterpretCastExpr = 126,
/** C++'s const_cast<> expression.
*/
- CXCursor_CXXConstCastExpr = 127,
+ CXCursor_CXXConstCastExpr = 127,
/** Represents an explicit C++ type conversion that uses "functional"
* notion (C++ [expr.type.conv]).
@@ -2077,60 +2050,64 @@ enum CXCursorKind {
* x = int(0.5);
* \endcode
*/
- CXCursor_CXXFunctionalCastExpr = 128,
+ CXCursor_CXXFunctionalCastExpr = 128,
+
+ /** OpenCL's addrspace_cast<> expression.
+ */
+ CXCursor_CXXAddrspaceCastExpr = 129,
/** A C++ typeid expression (C++ [expr.typeid]).
*/
- CXCursor_CXXTypeidExpr = 129,
+ CXCursor_CXXTypeidExpr = 130,
/** [C++ 2.13.5] C++ Boolean Literal.
*/
- CXCursor_CXXBoolLiteralExpr = 130,
+ CXCursor_CXXBoolLiteralExpr = 131,
/** [C++0x 2.14.7] C++ Pointer Literal.
*/
- CXCursor_CXXNullPtrLiteralExpr = 131,
+ CXCursor_CXXNullPtrLiteralExpr = 132,
/** Represents the "this" expression in C++
*/
- CXCursor_CXXThisExpr = 132,
+ CXCursor_CXXThisExpr = 133,
/** [C++ 15] C++ Throw Expression.
*
* This handles 'throw' and 'throw' assignment-expression. When
* assignment-expression isn't present, Op will be null.
*/
- CXCursor_CXXThrowExpr = 133,
+ CXCursor_CXXThrowExpr = 134,
/** A new expression for memory allocation and constructor calls, e.g:
* "new CXXNewExpr(foo)".
*/
- CXCursor_CXXNewExpr = 134,
+ CXCursor_CXXNewExpr = 135,
/** A delete expression for memory deallocation and destructor calls,
* e.g. "delete[] pArray".
*/
- CXCursor_CXXDeleteExpr = 135,
+ CXCursor_CXXDeleteExpr = 136,
/** A unary expression. (noexcept, sizeof, or other traits)
*/
- CXCursor_UnaryExpr = 136,
+ CXCursor_UnaryExpr = 137,
/** An Objective-C string literal i.e. @"foo".
*/
- CXCursor_ObjCStringLiteral = 137,
+ CXCursor_ObjCStringLiteral = 138,
/** An Objective-C \@encode expression.
*/
- CXCursor_ObjCEncodeExpr = 138,
+ CXCursor_ObjCEncodeExpr = 139,
/** An Objective-C \@selector expression.
*/
- CXCursor_ObjCSelectorExpr = 139,
+ CXCursor_ObjCSelectorExpr = 140,
/** An Objective-C \@protocol expression.
*/
- CXCursor_ObjCProtocolExpr = 140,
+ CXCursor_ObjCProtocolExpr = 141,
/** An Objective-C "bridged" cast expression, which casts between
* Objective-C pointers and C pointers, transferring ownership in the process.
@@ -2139,7 +2116,7 @@ enum CXCursorKind {
* NSString *str = (__bridge_transfer NSString *)CFCreateString();
* \endcode
*/
- CXCursor_ObjCBridgedCastExpr = 141,
+ CXCursor_ObjCBridgedCastExpr = 142,
/** Represents a C++0x pack expansion that produces a sequence of
* expressions.
@@ -2154,7 +2131,7 @@ enum CXCursorKind {
* }
* \endcode
*/
- CXCursor_PackExpansionExpr = 142,
+ CXCursor_PackExpansionExpr = 143,
/** Represents an expression that computes the length of a parameter
* pack.
@@ -2166,7 +2143,7 @@ enum CXCursorKind {
* };
* \endcode
*/
- CXCursor_SizeOfPackExpr = 143,
+ CXCursor_SizeOfPackExpr = 144,
/* Represents a C++ lambda expression that produces a local function
* object.
@@ -2180,33 +2157,42 @@ enum CXCursorKind {
* }
* \endcode
*/
- CXCursor_LambdaExpr = 144,
+ CXCursor_LambdaExpr = 145,
/** Objective-c Boolean Literal.
*/
- CXCursor_ObjCBoolLiteralExpr = 145,
+ CXCursor_ObjCBoolLiteralExpr = 146,
/** Represents the "self" expression in an Objective-C method.
*/
- CXCursor_ObjCSelfExpr = 146,
+ CXCursor_ObjCSelfExpr = 147,
- /** OpenMP 4.0 [2.4, Array Section].
+ /** OpenMP 5.0 [2.1.5, Array Section].
*/
- CXCursor_OMPArraySectionExpr = 147,
+ CXCursor_OMPArraySectionExpr = 148,
/** Represents an @available(...) check.
*/
- CXCursor_ObjCAvailabilityCheckExpr = 148,
+ CXCursor_ObjCAvailabilityCheckExpr = 149,
/**
* Fixed point literal
*/
- CXCursor_FixedPointLiteral = 149,
+ CXCursor_FixedPointLiteral = 150,
+
+ /** OpenMP 5.0 [2.1.4, Array Shaping].
+ */
+ CXCursor_OMPArrayShapingExpr = 151,
- CXCursor_LastExpr = CXCursor_FixedPointLiteral,
+ /**
+ * OpenMP 5.0 [2.1.6 Iterators]
+ */
+ CXCursor_OMPIteratorExpr = 152,
+
+ CXCursor_LastExpr = CXCursor_OMPIteratorExpr,
/* Statements */
- CXCursor_FirstStmt = 200,
+ CXCursor_FirstStmt = 200,
/**
* A statement whose specific kind is not exposed via this
* interface.
@@ -2216,7 +2202,7 @@ enum CXCursorKind {
* children, etc. However, the specific kind of the statement is not
* reported.
*/
- CXCursor_UnexposedStmt = 200,
+ CXCursor_UnexposedStmt = 200,
/** A labelled statement in a function.
*
@@ -2229,226 +2215,226 @@ enum CXCursorKind {
* \endcode
*
*/
- CXCursor_LabelStmt = 201,
+ CXCursor_LabelStmt = 201,
/** A group of statements like { stmt stmt }.
*
* This cursor kind is used to describe compound statements, e.g. function
* bodies.
*/
- CXCursor_CompoundStmt = 202,
+ CXCursor_CompoundStmt = 202,
/** A case statement.
*/
- CXCursor_CaseStmt = 203,
+ CXCursor_CaseStmt = 203,
/** A default statement.
*/
- CXCursor_DefaultStmt = 204,
+ CXCursor_DefaultStmt = 204,
/** An if statement
*/
- CXCursor_IfStmt = 205,
+ CXCursor_IfStmt = 205,
/** A switch statement.
*/
- CXCursor_SwitchStmt = 206,
+ CXCursor_SwitchStmt = 206,
/** A while statement.
*/
- CXCursor_WhileStmt = 207,
+ CXCursor_WhileStmt = 207,
/** A do statement.
*/
- CXCursor_DoStmt = 208,
+ CXCursor_DoStmt = 208,
/** A for statement.
*/
- CXCursor_ForStmt = 209,
+ CXCursor_ForStmt = 209,
/** A goto statement.
*/
- CXCursor_GotoStmt = 210,
+ CXCursor_GotoStmt = 210,
/** An indirect goto statement.
*/
- CXCursor_IndirectGotoStmt = 211,
+ CXCursor_IndirectGotoStmt = 211,
/** A continue statement.
*/
- CXCursor_ContinueStmt = 212,
+ CXCursor_ContinueStmt = 212,
/** A break statement.
*/
- CXCursor_BreakStmt = 213,
+ CXCursor_BreakStmt = 213,
/** A return statement.
*/
- CXCursor_ReturnStmt = 214,
+ CXCursor_ReturnStmt = 214,
/** A GCC inline assembly statement extension.
*/
- CXCursor_GCCAsmStmt = 215,
- CXCursor_AsmStmt = CXCursor_GCCAsmStmt,
+ CXCursor_GCCAsmStmt = 215,
+ CXCursor_AsmStmt = CXCursor_GCCAsmStmt,
/** Objective-C's overall \@try-\@catch-\@finally statement.
*/
- CXCursor_ObjCAtTryStmt = 216,
+ CXCursor_ObjCAtTryStmt = 216,
/** Objective-C's \@catch statement.
*/
- CXCursor_ObjCAtCatchStmt = 217,
+ CXCursor_ObjCAtCatchStmt = 217,
/** Objective-C's \@finally statement.
*/
- CXCursor_ObjCAtFinallyStmt = 218,
+ CXCursor_ObjCAtFinallyStmt = 218,
/** Objective-C's \@throw statement.
*/
- CXCursor_ObjCAtThrowStmt = 219,
+ CXCursor_ObjCAtThrowStmt = 219,
/** Objective-C's \@synchronized statement.
*/
- CXCursor_ObjCAtSynchronizedStmt = 220,
+ CXCursor_ObjCAtSynchronizedStmt = 220,
/** Objective-C's autorelease pool statement.
*/
- CXCursor_ObjCAutoreleasePoolStmt = 221,
+ CXCursor_ObjCAutoreleasePoolStmt = 221,
/** Objective-C's collection statement.
*/
- CXCursor_ObjCForCollectionStmt = 222,
+ CXCursor_ObjCForCollectionStmt = 222,
/** C++'s catch statement.
*/
- CXCursor_CXXCatchStmt = 223,
+ CXCursor_CXXCatchStmt = 223,
/** C++'s try statement.
*/
- CXCursor_CXXTryStmt = 224,
+ CXCursor_CXXTryStmt = 224,
/** C++'s for (* : *) statement.
*/
- CXCursor_CXXForRangeStmt = 225,
+ CXCursor_CXXForRangeStmt = 225,
/** Windows Structured Exception Handling's try statement.
*/
- CXCursor_SEHTryStmt = 226,
+ CXCursor_SEHTryStmt = 226,
/** Windows Structured Exception Handling's except statement.
*/
- CXCursor_SEHExceptStmt = 227,
+ CXCursor_SEHExceptStmt = 227,
/** Windows Structured Exception Handling's finally statement.
*/
- CXCursor_SEHFinallyStmt = 228,
+ CXCursor_SEHFinallyStmt = 228,
/** A MS inline assembly statement extension.
*/
- CXCursor_MSAsmStmt = 229,
+ CXCursor_MSAsmStmt = 229,
/** The null statement ";": C99 6.8.3p3.
*
* This cursor kind is used to describe the null statement.
*/
- CXCursor_NullStmt = 230,
+ CXCursor_NullStmt = 230,
/** Adaptor class for mixing declarations with statements and
* expressions.
*/
- CXCursor_DeclStmt = 231,
+ CXCursor_DeclStmt = 231,
/** OpenMP parallel directive.
*/
- CXCursor_OMPParallelDirective = 232,
+ CXCursor_OMPParallelDirective = 232,
/** OpenMP SIMD directive.
*/
- CXCursor_OMPSimdDirective = 233,
+ CXCursor_OMPSimdDirective = 233,
/** OpenMP for directive.
*/
- CXCursor_OMPForDirective = 234,
+ CXCursor_OMPForDirective = 234,
/** OpenMP sections directive.
*/
- CXCursor_OMPSectionsDirective = 235,
+ CXCursor_OMPSectionsDirective = 235,
/** OpenMP section directive.
*/
- CXCursor_OMPSectionDirective = 236,
+ CXCursor_OMPSectionDirective = 236,
/** OpenMP single directive.
*/
- CXCursor_OMPSingleDirective = 237,
+ CXCursor_OMPSingleDirective = 237,
/** OpenMP parallel for directive.
*/
- CXCursor_OMPParallelForDirective = 238,
+ CXCursor_OMPParallelForDirective = 238,
/** OpenMP parallel sections directive.
*/
- CXCursor_OMPParallelSectionsDirective = 239,
+ CXCursor_OMPParallelSectionsDirective = 239,
/** OpenMP task directive.
*/
- CXCursor_OMPTaskDirective = 240,
+ CXCursor_OMPTaskDirective = 240,
/** OpenMP master directive.
*/
- CXCursor_OMPMasterDirective = 241,
+ CXCursor_OMPMasterDirective = 241,
/** OpenMP critical directive.
*/
- CXCursor_OMPCriticalDirective = 242,
+ CXCursor_OMPCriticalDirective = 242,
/** OpenMP taskyield directive.
*/
- CXCursor_OMPTaskyieldDirective = 243,
+ CXCursor_OMPTaskyieldDirective = 243,
/** OpenMP barrier directive.
*/
- CXCursor_OMPBarrierDirective = 244,
+ CXCursor_OMPBarrierDirective = 244,
/** OpenMP taskwait directive.
*/
- CXCursor_OMPTaskwaitDirective = 245,
+ CXCursor_OMPTaskwaitDirective = 245,
/** OpenMP flush directive.
*/
- CXCursor_OMPFlushDirective = 246,
+ CXCursor_OMPFlushDirective = 246,
/** Windows Structured Exception Handling's leave statement.
*/
- CXCursor_SEHLeaveStmt = 247,
+ CXCursor_SEHLeaveStmt = 247,
/** OpenMP ordered directive.
*/
- CXCursor_OMPOrderedDirective = 248,
+ CXCursor_OMPOrderedDirective = 248,
/** OpenMP atomic directive.
*/
- CXCursor_OMPAtomicDirective = 249,
+ CXCursor_OMPAtomicDirective = 249,
/** OpenMP for SIMD directive.
*/
- CXCursor_OMPForSimdDirective = 250,
+ CXCursor_OMPForSimdDirective = 250,
/** OpenMP parallel for SIMD directive.
*/
- CXCursor_OMPParallelForSimdDirective = 251,
+ CXCursor_OMPParallelForSimdDirective = 251,
/** OpenMP target directive.
*/
- CXCursor_OMPTargetDirective = 252,
+ CXCursor_OMPTargetDirective = 252,
/** OpenMP teams directive.
*/
- CXCursor_OMPTeamsDirective = 253,
+ CXCursor_OMPTeamsDirective = 253,
/** OpenMP taskgroup directive.
*/
- CXCursor_OMPTaskgroupDirective = 254,
+ CXCursor_OMPTaskgroupDirective = 254,
/** OpenMP cancellation point directive.
*/
@@ -2456,35 +2442,35 @@ enum CXCursorKind {
/** OpenMP cancel directive.
*/
- CXCursor_OMPCancelDirective = 256,
+ CXCursor_OMPCancelDirective = 256,
/** OpenMP target data directive.
*/
- CXCursor_OMPTargetDataDirective = 257,
+ CXCursor_OMPTargetDataDirective = 257,
/** OpenMP taskloop directive.
*/
- CXCursor_OMPTaskLoopDirective = 258,
+ CXCursor_OMPTaskLoopDirective = 258,
/** OpenMP taskloop simd directive.
*/
- CXCursor_OMPTaskLoopSimdDirective = 259,
+ CXCursor_OMPTaskLoopSimdDirective = 259,
/** OpenMP distribute directive.
*/
- CXCursor_OMPDistributeDirective = 260,
+ CXCursor_OMPDistributeDirective = 260,
/** OpenMP target enter data directive.
*/
- CXCursor_OMPTargetEnterDataDirective = 261,
+ CXCursor_OMPTargetEnterDataDirective = 261,
/** OpenMP target exit data directive.
*/
- CXCursor_OMPTargetExitDataDirective = 262,
+ CXCursor_OMPTargetExitDataDirective = 262,
/** OpenMP target parallel directive.
*/
- CXCursor_OMPTargetParallelDirective = 263,
+ CXCursor_OMPTargetParallelDirective = 263,
/** OpenMP target parallel for directive.
*/
@@ -2492,7 +2478,7 @@ enum CXCursorKind {
/** OpenMP target update directive.
*/
- CXCursor_OMPTargetUpdateDirective = 265,
+ CXCursor_OMPTargetUpdateDirective = 265,
/** OpenMP distribute parallel for directive.
*/
@@ -2564,17 +2550,25 @@ enum CXCursorKind {
/** OpenMP master taskloop simd directive.
*/
- CXCursor_OMPMasterTaskLoopSimdDirective = 283,
+ CXCursor_OMPMasterTaskLoopSimdDirective = 283,
/** OpenMP parallel master taskloop simd directive.
*/
- CXCursor_OMPParallelMasterTaskLoopSimdDirective = 284,
+ CXCursor_OMPParallelMasterTaskLoopSimdDirective = 284,
/** OpenMP parallel master directive.
*/
- CXCursor_OMPParallelMasterDirective = 285,
+ CXCursor_OMPParallelMasterDirective = 285,
+
+ /** OpenMP depobj directive.
+ */
+ CXCursor_OMPDepobjDirective = 286,
+
+ /** OpenMP scan directive.
+ */
+ CXCursor_OMPScanDirective = 287,
- CXCursor_LastStmt = CXCursor_OMPParallelMasterDirective,
+ CXCursor_LastStmt = CXCursor_OMPScanDirective,
/**
* Cursor that represents the translation unit itself.
@@ -2582,89 +2576,89 @@ enum CXCursorKind {
* The translation unit cursor exists primarily to act as the root
* cursor for traversing the contents of a translation unit.
*/
- CXCursor_TranslationUnit = 300,
+ CXCursor_TranslationUnit = 300,
/* Attributes */
- CXCursor_FirstAttr = 400,
+ CXCursor_FirstAttr = 400,
/**
* An attribute whose specific kind is not exposed via this
* interface.
*/
- CXCursor_UnexposedAttr = 400,
-
- CXCursor_IBActionAttr = 401,
- CXCursor_IBOutletAttr = 402,
- CXCursor_IBOutletCollectionAttr = 403,
- CXCursor_CXXFinalAttr = 404,
- CXCursor_CXXOverrideAttr = 405,
- CXCursor_AnnotateAttr = 406,
- CXCursor_AsmLabelAttr = 407,
- CXCursor_PackedAttr = 408,
- CXCursor_PureAttr = 409,
- CXCursor_ConstAttr = 410,
- CXCursor_NoDuplicateAttr = 411,
- CXCursor_CUDAConstantAttr = 412,
- CXCursor_CUDADeviceAttr = 413,
- CXCursor_CUDAGlobalAttr = 414,
- CXCursor_CUDAHostAttr = 415,
- CXCursor_CUDASharedAttr = 416,
- CXCursor_VisibilityAttr = 417,
- CXCursor_DLLExport = 418,
- CXCursor_DLLImport = 419,
- CXCursor_NSReturnsRetained = 420,
- CXCursor_NSReturnsNotRetained = 421,
- CXCursor_NSReturnsAutoreleased = 422,
- CXCursor_NSConsumesSelf = 423,
- CXCursor_NSConsumed = 424,
- CXCursor_ObjCException = 425,
- CXCursor_ObjCNSObject = 426,
- CXCursor_ObjCIndependentClass = 427,
- CXCursor_ObjCPreciseLifetime = 428,
- CXCursor_ObjCReturnsInnerPointer = 429,
- CXCursor_ObjCRequiresSuper = 430,
- CXCursor_ObjCRootClass = 431,
- CXCursor_ObjCSubclassingRestricted = 432,
- CXCursor_ObjCExplicitProtocolImpl = 433,
- CXCursor_ObjCDesignatedInitializer = 434,
- CXCursor_ObjCRuntimeVisible = 435,
- CXCursor_ObjCBoxable = 436,
- CXCursor_FlagEnum = 437,
- CXCursor_ConvergentAttr = 438,
- CXCursor_WarnUnusedAttr = 439,
- CXCursor_WarnUnusedResultAttr = 440,
- CXCursor_AlignedAttr = 441,
- CXCursor_LastAttr = CXCursor_AlignedAttr,
+ CXCursor_UnexposedAttr = 400,
+
+ CXCursor_IBActionAttr = 401,
+ CXCursor_IBOutletAttr = 402,
+ CXCursor_IBOutletCollectionAttr = 403,
+ CXCursor_CXXFinalAttr = 404,
+ CXCursor_CXXOverrideAttr = 405,
+ CXCursor_AnnotateAttr = 406,
+ CXCursor_AsmLabelAttr = 407,
+ CXCursor_PackedAttr = 408,
+ CXCursor_PureAttr = 409,
+ CXCursor_ConstAttr = 410,
+ CXCursor_NoDuplicateAttr = 411,
+ CXCursor_CUDAConstantAttr = 412,
+ CXCursor_CUDADeviceAttr = 413,
+ CXCursor_CUDAGlobalAttr = 414,
+ CXCursor_CUDAHostAttr = 415,
+ CXCursor_CUDASharedAttr = 416,
+ CXCursor_VisibilityAttr = 417,
+ CXCursor_DLLExport = 418,
+ CXCursor_DLLImport = 419,
+ CXCursor_NSReturnsRetained = 420,
+ CXCursor_NSReturnsNotRetained = 421,
+ CXCursor_NSReturnsAutoreleased = 422,
+ CXCursor_NSConsumesSelf = 423,
+ CXCursor_NSConsumed = 424,
+ CXCursor_ObjCException = 425,
+ CXCursor_ObjCNSObject = 426,
+ CXCursor_ObjCIndependentClass = 427,
+ CXCursor_ObjCPreciseLifetime = 428,
+ CXCursor_ObjCReturnsInnerPointer = 429,
+ CXCursor_ObjCRequiresSuper = 430,
+ CXCursor_ObjCRootClass = 431,
+ CXCursor_ObjCSubclassingRestricted = 432,
+ CXCursor_ObjCExplicitProtocolImpl = 433,
+ CXCursor_ObjCDesignatedInitializer = 434,
+ CXCursor_ObjCRuntimeVisible = 435,
+ CXCursor_ObjCBoxable = 436,
+ CXCursor_FlagEnum = 437,
+ CXCursor_ConvergentAttr = 438,
+ CXCursor_WarnUnusedAttr = 439,
+ CXCursor_WarnUnusedResultAttr = 440,
+ CXCursor_AlignedAttr = 441,
+ CXCursor_LastAttr = CXCursor_AlignedAttr,
/* Preprocessing */
- CXCursor_PreprocessingDirective = 500,
- CXCursor_MacroDefinition = 501,
- CXCursor_MacroExpansion = 502,
- CXCursor_MacroInstantiation = CXCursor_MacroExpansion,
- CXCursor_InclusionDirective = 503,
- CXCursor_FirstPreprocessing = CXCursor_PreprocessingDirective,
- CXCursor_LastPreprocessing = CXCursor_InclusionDirective,
+ CXCursor_PreprocessingDirective = 500,
+ CXCursor_MacroDefinition = 501,
+ CXCursor_MacroExpansion = 502,
+ CXCursor_MacroInstantiation = CXCursor_MacroExpansion,
+ CXCursor_InclusionDirective = 503,
+ CXCursor_FirstPreprocessing = CXCursor_PreprocessingDirective,
+ CXCursor_LastPreprocessing = CXCursor_InclusionDirective,
/* Extra Declarations */
/**
* A module import declaration.
*/
- CXCursor_ModuleImportDecl = 600,
- CXCursor_TypeAliasTemplateDecl = 601,
+ CXCursor_ModuleImportDecl = 600,
+ CXCursor_TypeAliasTemplateDecl = 601,
/**
* A static_assert or _Static_assert node
*/
- CXCursor_StaticAssert = 602,
+ CXCursor_StaticAssert = 602,
/**
* a friend declaration.
*/
- CXCursor_FriendDecl = 603,
- CXCursor_FirstExtraDecl = CXCursor_ModuleImportDecl,
- CXCursor_LastExtraDecl = CXCursor_FriendDecl,
+ CXCursor_FriendDecl = 603,
+ CXCursor_FirstExtraDecl = CXCursor_ModuleImportDecl,
+ CXCursor_LastExtraDecl = CXCursor_FriendDecl,
/**
* A code completion overload candidate.
*/
- CXCursor_OverloadCandidate = 700
+ CXCursor_OverloadCandidate = 700
};
/**
@@ -2935,14 +2929,10 @@ typedef struct CXPlatformAvailability {
* platform-availability structures returned. There are
* \c min(N, availability_size) such structures.
*/
-CINDEX_LINKAGE int
-clang_getCursorPlatformAvailability(CXCursor cursor,
- int *always_deprecated,
- CXString *deprecated_message,
- int *always_unavailable,
- CXString *unavailable_message,
- CXPlatformAvailability *availability,
- int availability_size);
+CINDEX_LINKAGE int clang_getCursorPlatformAvailability(
+ CXCursor cursor, int *always_deprecated, CXString *deprecated_message,
+ int *always_unavailable, CXString *unavailable_message,
+ CXPlatformAvailability *availability, int availability_size);
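
A rough usage sketch for the declaration reflowed above (illustrative only, not part of this change): it assumes a file named input.m in the working directory, an arbitrary array size of 4, and a build linked against libclang.

#include <clang-c/Index.h>
#include <cstdio>

// Print platform-availability info for every declaration cursor.
static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (!clang_isDeclaration(clang_getCursorKind(C)))
    return CXChildVisit_Recurse;

  int AlwaysDeprecated = 0, AlwaysUnavailable = 0;
  CXString DeprecatedMsg, UnavailableMsg;
  CXPlatformAvailability Avail[4]; /* assumption: 4 slots are enough here */
  int N = clang_getCursorPlatformAvailability(
      C, &AlwaysDeprecated, &DeprecatedMsg, &AlwaysUnavailable,
      &UnavailableMsg, Avail, 4);

  for (int I = 0; I < N && I < 4; ++I) { /* only min(N, 4) were filled in */
    CXString Name = clang_getCursorSpelling(C);
    printf("%s: %s introduced %d.%d\n", clang_getCString(Name),
           clang_getCString(Avail[I].Platform), Avail[I].Introduced.Major,
           Avail[I].Introduced.Minor);
    clang_disposeString(Name);
    clang_disposeCXPlatformAvailability(&Avail[I]);
  }
  clang_disposeString(DeprecatedMsg);
  clang_disposeString(UnavailableMsg);
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.m", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
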
/**
* Free the memory associated with a \c CXPlatformAvailability structure.
@@ -2969,11 +2959,7 @@ CINDEX_LINKAGE enum CXLanguageKind clang_getCursorLanguage(CXCursor cursor);
* Describe the "thread-local storage (TLS) kind" of the declaration
* referred to by a cursor.
*/
-enum CXTLSKind {
- CXTLS_None = 0,
- CXTLS_Dynamic,
- CXTLS_Static
-};
+enum CXTLSKind { CXTLS_None = 0, CXTLS_Dynamic, CXTLS_Static };
/**
* Determine the "thread-local storage (TLS) kind" of the declaration
@@ -3005,7 +2991,7 @@ CINDEX_LINKAGE void clang_disposeCXCursorSet(CXCursorSet cset);
* Queries a CXCursorSet to see if it contains a specific CXCursor.
*
* \returns non-zero if the set contains the specified cursor.
-*/
+ */
CINDEX_LINKAGE unsigned clang_CXCursorSet_contains(CXCursorSet cset,
CXCursor cursor);
@@ -3013,7 +2999,7 @@ CINDEX_LINKAGE unsigned clang_CXCursorSet_contains(CXCursorSet cset,
* Inserts a CXCursor into a CXCursorSet.
*
* \returns zero if the CXCursor was already in the set, and non-zero otherwise.
-*/
+ */
CINDEX_LINKAGE unsigned clang_CXCursorSet_insert(CXCursorSet cset,
CXCursor cursor);
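
A minimal sketch of the cursor-set API documented above (illustrative, not from the diff): it reports each referenced declaration once, relying on clang_CXCursorSet_insert returning non-zero only for newly inserted cursors. The file name input.c is a placeholder.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData Data) {
  CXCursorSet Seen = static_cast<CXCursorSet>(Data);
  CXCursor Ref = clang_getCursorReferenced(C);
  if (!clang_Cursor_isNull(Ref) &&
      clang_CXCursorSet_insert(Seen, Ref) != 0) { // non-zero: first time seen
    CXString Name = clang_getCursorSpelling(Ref);
    printf("first reference to %s\n", clang_getCString(Name));
    clang_disposeString(Name);
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  CXCursorSet Seen = clang_createCXCursorSet();
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, Seen);
  clang_disposeCXCursorSet(Seen);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
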
@@ -3267,8 +3253,9 @@ enum CXTypeKind {
CXType_UShortAccum = 36,
CXType_UAccum = 37,
CXType_ULongAccum = 38,
+ CXType_BFloat16 = 39,
CXType_FirstBuiltin = CXType_Void,
- CXType_LastBuiltin = CXType_ULongAccum,
+ CXType_LastBuiltin = CXType_BFloat16,
CXType_Complex = 100,
CXType_Pointer = 101,
@@ -3360,7 +3347,8 @@ enum CXTypeKind {
CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175,
- CXType_ExtVector = 176
+ CXType_ExtVector = 176,
+ CXType_Atomic = 177
};
/**
@@ -3433,9 +3421,9 @@ CINDEX_LINKAGE CXType clang_getEnumDeclIntegerType(CXCursor C);
* Retrieve the integer value of an enum constant declaration as a signed
* long long.
*
- * If the cursor does not reference an enum constant declaration, LLONG_MIN is returned.
- * Since this is also potentially a valid constant value, the kind of the cursor
- * must be verified before calling this function.
+ * If the cursor does not reference an enum constant declaration, LLONG_MIN is
+ * returned. Since this is also potentially a valid constant value, the kind of
+ * the cursor must be verified before calling this function.
*/
CINDEX_LINKAGE long long clang_getEnumConstantDeclValue(CXCursor C);
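
A sketch of the guidance above (not from the diff): the cursor kind is checked before the value is trusted, since LLONG_MIN is itself a legal enumerator value. input.c is a placeholder.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (clang_getCursorKind(C) == CXCursor_EnumConstantDecl) {
    CXString Name = clang_getCursorSpelling(C);
    printf("%s = %lld\n", clang_getCString(Name),
           clang_getEnumConstantDeclValue(C));
    clang_disposeString(Name);
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
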
@@ -3443,11 +3431,12 @@ CINDEX_LINKAGE long long clang_getEnumConstantDeclValue(CXCursor C);
* Retrieve the integer value of an enum constant declaration as an unsigned
* long long.
*
- * If the cursor does not reference an enum constant declaration, ULLONG_MAX is returned.
- * Since this is also potentially a valid constant value, the kind of the cursor
- * must be verified before calling this function.
+ * If the cursor does not reference an enum constant declaration, ULLONG_MAX is
+ * returned. Since this is also potentially a valid constant value, the kind of
+ * the cursor must be verified before calling this function.
*/
-CINDEX_LINKAGE unsigned long long clang_getEnumConstantDeclUnsignedValue(CXCursor C);
+CINDEX_LINKAGE unsigned long long
+clang_getEnumConstantDeclUnsignedValue(CXCursor C);
/**
* Retrieve the bit width of a bit field declaration as an integer.
@@ -3528,8 +3517,8 @@ CINDEX_LINKAGE int clang_Cursor_getNumTemplateArguments(CXCursor C);
* For I = 0, 1, and 2, Type, Integral, and Integral will be returned,
* respectively.
*/
-CINDEX_LINKAGE enum CXTemplateArgumentKind clang_Cursor_getTemplateArgumentKind(
- CXCursor C, unsigned I);
+CINDEX_LINKAGE enum CXTemplateArgumentKind
+clang_Cursor_getTemplateArgumentKind(CXCursor C, unsigned I);
/**
* Retrieve a CXType representing the type of a TemplateArgument of a
@@ -3589,8 +3578,8 @@ CINDEX_LINKAGE long long clang_Cursor_getTemplateArgumentValue(CXCursor C,
* If called with I = 1 or 2, 2147483649 or true will be returned, respectively.
* For I == 0, this function's behavior is undefined.
*/
-CINDEX_LINKAGE unsigned long long clang_Cursor_getTemplateArgumentUnsignedValue(
- CXCursor C, unsigned I);
+CINDEX_LINKAGE unsigned long long
+clang_Cursor_getTemplateArgumentUnsignedValue(CXCursor C, unsigned I);
/**
* Determine whether two CXTypes represent the same type.
@@ -3745,7 +3734,7 @@ CINDEX_LINKAGE unsigned clang_Type_getNumObjCProtocolRefs(CXType T);
CINDEX_LINKAGE CXCursor clang_Type_getObjCProtocolDecl(CXType T, unsigned i);
/**
- * Retreive the number of type arguments associated with an ObjC object.
+ * Retrieve the number of type arguments associated with an ObjC object.
*
* If the type is not an ObjC object, 0 is returned.
*/
@@ -3775,7 +3764,8 @@ CINDEX_LINKAGE CXType clang_getCursorResultType(CXCursor C);
* Retrieve the exception specification type associated with a given cursor.
* This is a value of type CXCursor_ExceptionSpecificationKind.
*
- * This only returns a valid result if the cursor refers to a function or method.
+ * This only returns a valid result if the cursor refers to a function or
+ * method.
*/
CINDEX_LINKAGE int clang_getCursorExceptionSpecificationType(CXCursor C);
@@ -3949,6 +3939,13 @@ CINDEX_LINKAGE long long clang_Type_getOffsetOf(CXType T, const char *S);
CINDEX_LINKAGE CXType clang_Type_getModifiedType(CXType T);
/**
+ * Gets the type contained by this atomic type.
+ *
+ * If a non-atomic type is passed in, an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_Type_getValueType(CXType CT);
+
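
A sketch of the new accessor just added (illustrative only): it assumes input.c declares something like "_Atomic int counter;" and unwraps _Atomic(T) back to T.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (clang_getCursorKind(C) == CXCursor_VarDecl) {
    CXType T = clang_getCursorType(C);
    if (T.kind == CXType_Atomic) {
      CXType Value = clang_Type_getValueType(T);
      CXString S = clang_getTypeSpelling(Value);
      printf("atomic of %s\n", clang_getCString(S));
      clang_disposeString(S);
    }
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
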
+/**
* Return the offset of the field represented by the Cursor.
*
* If the cursor is not a field declaration, -1 is returned.
@@ -4003,7 +4000,8 @@ CINDEX_LINKAGE int clang_Type_getNumTemplateArguments(CXType T);
* This function only returns template type arguments and does not handle
* template template arguments or variadic packs.
*/
-CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T, unsigned i);
+CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T,
+ unsigned i);
/**
* Retrieve the ref-qualifier kind of a function or method.
@@ -4039,9 +4037,9 @@ enum CX_CXXAccessSpecifier {
/**
* Returns the access control level for the referenced object.
*
- * If the cursor refers to a C++ declaration, its access control level within its
- * parent scope is returned. Otherwise, if the cursor refers to a base specifier or
- * access specifier, the specifier itself is returned.
+ * If the cursor refers to a C++ declaration, its access control level within
+ * its parent scope is returned. Otherwise, if the cursor refers to a base
+ * specifier or access specifier, the specifier itself is returned.
*/
CINDEX_LINKAGE enum CX_CXXAccessSpecifier clang_getCXXAccessSpecifier(CXCursor);
@@ -4192,7 +4190,7 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
CXCursorVisitor visitor,
CXClientData client_data);
#ifdef __has_feature
-# if __has_feature(blocks)
+#if __has_feature(blocks)
/**
* Visitor invoked for each cursor found by a traversal.
*
@@ -4203,16 +4201,16 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
* The visitor should return one of the \c CXChildVisitResult values
* to direct clang_visitChildrenWithBlock().
*/
-typedef enum CXChildVisitResult
- (^CXCursorVisitorBlock)(CXCursor cursor, CXCursor parent);
+typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
+ CXCursor parent);
/**
* Visits the children of a cursor using the specified block. Behaves
* identically to clang_visitChildren() in all other respects.
*/
-CINDEX_LINKAGE unsigned clang_visitChildrenWithBlock(CXCursor parent,
- CXCursorVisitorBlock block);
-# endif
+CINDEX_LINKAGE unsigned
+clang_visitChildrenWithBlock(CXCursor parent, CXCursorVisitorBlock block);
+#endif
#endif
/**
@@ -4249,15 +4247,14 @@ CINDEX_LINKAGE CXString clang_constructUSR_ObjCClass(const char *class_name);
/**
* Construct a USR for a specified Objective-C category.
*/
-CINDEX_LINKAGE CXString
- clang_constructUSR_ObjCCategory(const char *class_name,
- const char *category_name);
+CINDEX_LINKAGE CXString clang_constructUSR_ObjCCategory(
+ const char *class_name, const char *category_name);
/**
* Construct a USR for a specified Objective-C protocol.
*/
CINDEX_LINKAGE CXString
- clang_constructUSR_ObjCProtocol(const char *protocol_name);
+clang_constructUSR_ObjCProtocol(const char *protocol_name);
/**
* Construct a USR for a specified Objective-C instance variable and
@@ -4297,9 +4294,8 @@ CINDEX_LINKAGE CXString clang_getCursorSpelling(CXCursor);
*
* \param options Reserved.
*/
-CINDEX_LINKAGE CXSourceRange clang_Cursor_getSpellingNameRange(CXCursor,
- unsigned pieceIndex,
- unsigned options);
+CINDEX_LINKAGE CXSourceRange clang_Cursor_getSpellingNameRange(
+ CXCursor, unsigned pieceIndex, unsigned options);
/**
* Opaque pointer representing a policy that controls pretty printing
@@ -4353,9 +4349,10 @@ clang_PrintingPolicy_getProperty(CXPrintingPolicy Policy,
/**
* Set a property value for the given printing policy.
*/
-CINDEX_LINKAGE void clang_PrintingPolicy_setProperty(CXPrintingPolicy Policy,
- enum CXPrintingPolicyProperty Property,
- unsigned Value);
+CINDEX_LINKAGE void
+clang_PrintingPolicy_setProperty(CXPrintingPolicy Policy,
+ enum CXPrintingPolicyProperty Property,
+ unsigned Value);
/**
* Retrieve the default policy for the cursor.
@@ -4503,18 +4500,18 @@ CINDEX_LINKAGE CXType clang_Cursor_getReceiverType(CXCursor C);
* Property attributes for a \c CXCursor_ObjCPropertyDecl.
*/
typedef enum {
- CXObjCPropertyAttr_noattr = 0x00,
- CXObjCPropertyAttr_readonly = 0x01,
- CXObjCPropertyAttr_getter = 0x02,
- CXObjCPropertyAttr_assign = 0x04,
+ CXObjCPropertyAttr_noattr = 0x00,
+ CXObjCPropertyAttr_readonly = 0x01,
+ CXObjCPropertyAttr_getter = 0x02,
+ CXObjCPropertyAttr_assign = 0x04,
CXObjCPropertyAttr_readwrite = 0x08,
- CXObjCPropertyAttr_retain = 0x10,
- CXObjCPropertyAttr_copy = 0x20,
+ CXObjCPropertyAttr_retain = 0x10,
+ CXObjCPropertyAttr_copy = 0x20,
CXObjCPropertyAttr_nonatomic = 0x40,
- CXObjCPropertyAttr_setter = 0x80,
- CXObjCPropertyAttr_atomic = 0x100,
- CXObjCPropertyAttr_weak = 0x200,
- CXObjCPropertyAttr_strong = 0x400,
+ CXObjCPropertyAttr_setter = 0x80,
+ CXObjCPropertyAttr_atomic = 0x100,
+ CXObjCPropertyAttr_weak = 0x200,
+ CXObjCPropertyAttr_strong = 0x400,
CXObjCPropertyAttr_unsafe_unretained = 0x800,
CXObjCPropertyAttr_class = 0x1000
} CXObjCPropertyAttrKind;
@@ -4526,8 +4523,8 @@ typedef enum {
*
* \param reserved Reserved for future use, pass 0.
*/
-CINDEX_LINKAGE unsigned clang_Cursor_getObjCPropertyAttributes(CXCursor C,
- unsigned reserved);
+CINDEX_LINKAGE unsigned
+clang_Cursor_getObjCPropertyAttributes(CXCursor C, unsigned reserved);
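
A sketch of decoding the bitmask returned above (not from the diff): the second argument is 0 because it is documented as reserved, the flags printed are an arbitrary subset, and input.m is a placeholder.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (clang_getCursorKind(C) == CXCursor_ObjCPropertyDecl) {
    unsigned Attrs = clang_Cursor_getObjCPropertyAttributes(C, /*reserved=*/0);
    CXString Name = clang_getCursorSpelling(C);
    printf("%s:%s%s%s\n", clang_getCString(Name),
           (Attrs & CXObjCPropertyAttr_readonly) ? " readonly" : "",
           (Attrs & CXObjCPropertyAttr_nonatomic) ? " nonatomic" : "",
           (Attrs & CXObjCPropertyAttr_copy) ? " copy" : "");
    clang_disposeString(Name);
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.m", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
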
/**
* Given a cursor that represents a property declaration, return the
@@ -4589,8 +4586,9 @@ CINDEX_LINKAGE unsigned clang_Cursor_isVariadic(CXCursor C);
* non-zero if the 'generated_declaration' is set in the attribute.
*/
CINDEX_LINKAGE unsigned clang_Cursor_isExternalSymbol(CXCursor C,
- CXString *language, CXString *definedIn,
- unsigned *isGenerated);
+ CXString *language,
+ CXString *definedIn,
+ unsigned *isGenerated);
/**
* Given a cursor that represents a declaration, return the associated
@@ -4716,8 +4714,8 @@ CINDEX_LINKAGE unsigned clang_Module_getNumTopLevelHeaders(CXTranslationUnit,
* \returns the specified top level header associated with the module.
*/
CINDEX_LINKAGE
-CXFile clang_Module_getTopLevelHeader(CXTranslationUnit,
- CXModule Module, unsigned Index);
+CXFile clang_Module_getTopLevelHeader(CXTranslationUnit, CXModule Module,
+ unsigned Index);
/**
* @}
@@ -4735,7 +4733,8 @@ CXFile clang_Module_getTopLevelHeader(CXTranslationUnit,
/**
* Determine if a C++ constructor is a converting constructor.
*/
-CINDEX_LINKAGE unsigned clang_CXXConstructor_isConvertingConstructor(CXCursor C);
+CINDEX_LINKAGE unsigned
+clang_CXXConstructor_isConvertingConstructor(CXCursor C);
/**
* Determine if a C++ constructor is a copy constructor.
@@ -4865,9 +4864,8 @@ CINDEX_LINKAGE CXCursor clang_getSpecializedCursorTemplate(CXCursor C);
* \returns The piece of the name pointed to by the given cursor. If there is no
* name, or if the PieceIndex is out-of-range, a null-cursor will be returned.
*/
-CINDEX_LINKAGE CXSourceRange clang_getCursorReferenceNameRange(CXCursor C,
- unsigned NameFlags,
- unsigned PieceIndex);
+CINDEX_LINKAGE CXSourceRange clang_getCursorReferenceNameRange(
+ CXCursor C, unsigned NameFlags, unsigned PieceIndex);
enum CXNameRefFlags {
/**
@@ -5035,15 +5033,14 @@ CINDEX_LINKAGE void clang_tokenize(CXTranslationUnit TU, CXSourceRange Range,
* \param Cursors an array of \p NumTokens cursors, whose contents will be
* replaced with the cursors corresponding to each token.
*/
-CINDEX_LINKAGE void clang_annotateTokens(CXTranslationUnit TU,
- CXToken *Tokens, unsigned NumTokens,
- CXCursor *Cursors);
+CINDEX_LINKAGE void clang_annotateTokens(CXTranslationUnit TU, CXToken *Tokens,
+ unsigned NumTokens, CXCursor *Cursors);
/**
* Free the given set of tokens.
*/
-CINDEX_LINKAGE void clang_disposeTokens(CXTranslationUnit TU,
- CXToken *Tokens, unsigned NumTokens);
+CINDEX_LINKAGE void clang_disposeTokens(CXTranslationUnit TU, CXToken *Tokens,
+ unsigned NumTokens);
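
A sketch of the usual tokenize / annotate / dispose sequence that the two declarations above belong to (illustrative, error handling omitted); input.c is a placeholder.

#include <clang-c/Index.h>
#include <cstdio>
#include <vector>

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  CXSourceRange Range =
      clang_getCursorExtent(clang_getTranslationUnitCursor(TU));

  CXToken *Tokens = nullptr;
  unsigned NumTokens = 0;
  clang_tokenize(TU, Range, &Tokens, &NumTokens);

  std::vector<CXCursor> Cursors(NumTokens);
  clang_annotateTokens(TU, Tokens, NumTokens, Cursors.data());

  for (unsigned I = 0; I < NumTokens; ++I) {
    CXString Spelling = clang_getTokenSpelling(TU, Tokens[I]);
    CXString Kind =
        clang_getCursorKindSpelling(clang_getCursorKind(Cursors[I]));
    printf("%-24s %s\n", clang_getCString(Spelling), clang_getCString(Kind));
    clang_disposeString(Spelling);
    clang_disposeString(Kind);
  }

  clang_disposeTokens(TU, Tokens, NumTokens); // frees the token array
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
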
/**
* @}
@@ -5060,15 +5057,11 @@ CINDEX_LINKAGE void clang_disposeTokens(CXTranslationUnit TU,
/* for debug/testing */
CINDEX_LINKAGE CXString clang_getCursorKindSpelling(enum CXCursorKind Kind);
-CINDEX_LINKAGE void clang_getDefinitionSpellingAndExtent(CXCursor,
- const char **startBuf,
- const char **endBuf,
- unsigned *startLine,
- unsigned *startColumn,
- unsigned *endLine,
- unsigned *endColumn);
+CINDEX_LINKAGE void clang_getDefinitionSpellingAndExtent(
+ CXCursor, const char **startBuf, const char **endBuf, unsigned *startLine,
+ unsigned *startColumn, unsigned *endLine, unsigned *endColumn);
CINDEX_LINKAGE void clang_enableStackTraces(void);
-CINDEX_LINKAGE void clang_executeOnThread(void (*fn)(void*), void *user_data,
+CINDEX_LINKAGE void clang_executeOnThread(void (*fn)(void *), void *user_data,
unsigned stack_size);
/**
@@ -5319,9 +5312,8 @@ clang_getCompletionChunkKind(CXCompletionString completion_string,
*
* \returns the text associated with the chunk at index \c chunk_number.
*/
-CINDEX_LINKAGE CXString
-clang_getCompletionChunkText(CXCompletionString completion_string,
- unsigned chunk_number);
+CINDEX_LINKAGE CXString clang_getCompletionChunkText(
+ CXCompletionString completion_string, unsigned chunk_number);
/**
* Retrieve the completion string associated with a particular chunk
@@ -5334,9 +5326,8 @@ clang_getCompletionChunkText(CXCompletionString completion_string,
* \returns the completion string associated with the chunk at index
* \c chunk_number.
*/
-CINDEX_LINKAGE CXCompletionString
-clang_getCompletionChunkCompletionString(CXCompletionString completion_string,
- unsigned chunk_number);
+CINDEX_LINKAGE CXCompletionString clang_getCompletionChunkCompletionString(
+ CXCompletionString completion_string, unsigned chunk_number);
/**
* Retrieve the number of chunks in the given code-completion string.
@@ -5393,9 +5384,8 @@ clang_getCompletionNumAnnotations(CXCompletionString completion_string);
* \returns annotation string associated with the completion at index
* \c annotation_number, or a NULL string if that annotation is not available.
*/
-CINDEX_LINKAGE CXString
-clang_getCompletionAnnotation(CXCompletionString completion_string,
- unsigned annotation_number);
+CINDEX_LINKAGE CXString clang_getCompletionAnnotation(
+ CXCompletionString completion_string, unsigned annotation_number);
/**
* Retrieve the parent context of the given completion string.
@@ -5413,9 +5403,8 @@ clang_getCompletionAnnotation(CXCompletionString completion_string,
* \returns The name of the completion parent, e.g., "NSObject" if
* the completion string represents a method in the NSObject class.
*/
-CINDEX_LINKAGE CXString
-clang_getCompletionParent(CXCompletionString completion_string,
- enum CXCursorKind *kind);
+CINDEX_LINKAGE CXString clang_getCompletionParent(
+ CXCompletionString completion_string, enum CXCursorKind *kind);
/**
* Retrieve the brief documentation comment attached to the declaration
@@ -5771,13 +5760,11 @@ CINDEX_LINKAGE unsigned clang_defaultCodeCompleteOptions(void);
* completion fails, returns NULL.
*/
CINDEX_LINKAGE
-CXCodeCompleteResults *clang_codeCompleteAt(CXTranslationUnit TU,
- const char *complete_filename,
- unsigned complete_line,
- unsigned complete_column,
- struct CXUnsavedFile *unsaved_files,
- unsigned num_unsaved_files,
- unsigned options);
+CXCodeCompleteResults *
+clang_codeCompleteAt(CXTranslationUnit TU, const char *complete_filename,
+ unsigned complete_line, unsigned complete_column,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files, unsigned options);
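
A sketch of driving code completion through the entry point reflowed above (not from the diff): the location input.c:10:5 is an arbitrary placeholder, and only the TypedText chunk of each result is printed.

#include <clang-c/Index.h>
#include <cstdio>

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0,
      clang_defaultEditingTranslationUnitOptions());

  CXCodeCompleteResults *Results = clang_codeCompleteAt(
      TU, "input.c", /*line=*/10, /*column=*/5, /*unsaved_files=*/nullptr, 0,
      clang_defaultCodeCompleteOptions());
  if (Results) {
    for (unsigned I = 0; I < Results->NumResults; ++I) {
      CXCompletionString CS = Results->Results[I].CompletionString;
      for (unsigned K = 0, N = clang_getNumCompletionChunks(CS); K < N; ++K) {
        if (clang_getCompletionChunkKind(CS, K) != CXCompletionChunk_TypedText)
          continue;
        CXString Text = clang_getCompletionChunkText(CS, K);
        printf("%s\n", clang_getCString(Text));
        clang_disposeString(Text);
      }
    }
    clang_disposeCodeCompleteResults(Results);
  }
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
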
/**
* Sort the code-completion results in case-insensitive alphabetical
@@ -5826,8 +5813,8 @@ CXDiagnostic clang_codeCompleteGetDiagnostic(CXCodeCompleteResults *Results,
* along with the given code completion results.
*/
CINDEX_LINKAGE
-unsigned long long clang_codeCompleteGetContexts(
- CXCodeCompleteResults *Results);
+unsigned long long
+clang_codeCompleteGetContexts(CXCodeCompleteResults *Results);
/**
* Returns the cursor kind for the container for the current code
@@ -5846,9 +5833,9 @@ unsigned long long clang_codeCompleteGetContexts(
* container
*/
CINDEX_LINKAGE
-enum CXCursorKind clang_codeCompleteGetContainerKind(
- CXCodeCompleteResults *Results,
- unsigned *IsIncomplete);
+enum CXCursorKind
+clang_codeCompleteGetContainerKind(CXCodeCompleteResults *Results,
+ unsigned *IsIncomplete);
/**
* Returns the USR for the container for the current code completion
@@ -5900,19 +5887,19 @@ CINDEX_LINKAGE CXString clang_getClangVersion(void);
*/
CINDEX_LINKAGE void clang_toggleCrashRecovery(unsigned isEnabled);
- /**
- * Visitor invoked for each file in a translation unit
- * (used with clang_getInclusions()).
- *
- * This visitor function will be invoked by clang_getInclusions() for each
- * file included (either at the top-level or by \#include directives) within
- * a translation unit. The first argument is the file being included, and
- * the second and third arguments provide the inclusion stack. The
- * array is sorted in order of immediate inclusion. For example,
- * the first element refers to the location that included 'included_file'.
- */
+/**
+ * Visitor invoked for each file in a translation unit
+ * (used with clang_getInclusions()).
+ *
+ * This visitor function will be invoked by clang_getInclusions() for each
+ * file included (either at the top-level or by \#include directives) within
+ * a translation unit. The first argument is the file being included, and
+ * the second and third arguments provide the inclusion stack. The
+ * array is sorted in order of immediate inclusion. For example,
+ * the first element refers to the location that included 'included_file'.
+ */
typedef void (*CXInclusionVisitor)(CXFile included_file,
- CXSourceLocation* inclusion_stack,
+ CXSourceLocation *inclusion_stack,
unsigned include_len,
CXClientData client_data);
@@ -5927,7 +5914,7 @@ CINDEX_LINKAGE void clang_getInclusions(CXTranslationUnit tu,
CXClientData client_data);
typedef enum {
- CXEval_Int = 1 ,
+ CXEval_Int = 1,
CXEval_Float = 2,
CXEval_ObjCStrLiteral = 3,
CXEval_StrLiteral = 4,
@@ -5936,17 +5923,18 @@ typedef enum {
CXEval_UnExposed = 0
-} CXEvalResultKind ;
+} CXEvalResultKind;
/**
* Evaluation result of a cursor
*/
-typedef void * CXEvalResult;
+typedef void *CXEvalResult;
/**
 * If the cursor is a statement or declaration, tries to evaluate the
 * statement; if it is a variable, tries to evaluate its initializer
 * into its corresponding type.
+ * If it's an expression, tries to evaluate the expression.
*/
CINDEX_LINKAGE CXEvalResult clang_Cursor_Evaluate(CXCursor C);
@@ -5978,7 +5966,8 @@ CINDEX_LINKAGE unsigned clang_EvalResult_isUnsignedInt(CXEvalResult E);
* Returns the evaluation result as an unsigned integer if
* the kind is Int and clang_EvalResult_isUnsignedInt is non-zero.
*/
-CINDEX_LINKAGE unsigned long long clang_EvalResult_getAsUnsigned(CXEvalResult E);
+CINDEX_LINKAGE unsigned long long
+clang_EvalResult_getAsUnsigned(CXEvalResult E);
/**
* Returns the evaluation result as double if the
@@ -5992,7 +5981,7 @@ CINDEX_LINKAGE double clang_EvalResult_getAsDouble(CXEvalResult E);
* instead call clang_EvalResult_dispose on the CXEvalResult returned
* by clang_Cursor_Evaluate.
*/
-CINDEX_LINKAGE const char* clang_EvalResult_getAsStr(CXEvalResult E);
+CINDEX_LINKAGE const char *clang_EvalResult_getAsStr(CXEvalResult E);
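
A sketch tying the evaluation API together (illustrative): it assumes input.c contains initializers such as "const int answer = 6 * 7;" and uses clang_EvalResult_dispose, declared just below, to release each result.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (clang_getCursorKind(C) == CXCursor_VarDecl) {
    CXEvalResult R = clang_Cursor_Evaluate(C);
    if (R) {
      if (clang_EvalResult_getKind(R) == CXEval_Int)
        printf("int initializer: %lld\n", clang_EvalResult_getAsLongLong(R));
      else if (clang_EvalResult_getKind(R) == CXEval_Float)
        printf("float initializer: %f\n", clang_EvalResult_getAsDouble(R));
      clang_EvalResult_dispose(R);
    }
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
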
/**
* Disposes the created Eval memory.
@@ -6050,7 +6039,8 @@ CINDEX_LINKAGE unsigned clang_remap_getNumFiles(CXRemapping);
* is associated with.
*/
CINDEX_LINKAGE void clang_remap_getFilenames(CXRemapping, unsigned index,
- CXString *original, CXString *transformed);
+ CXString *original,
+ CXString *transformed);
/**
* Dispose the remapping.
@@ -6066,10 +6056,7 @@ CINDEX_LINKAGE void clang_remap_dispose(CXRemapping);
* @{
*/
-enum CXVisitorResult {
- CXVisit_Break,
- CXVisit_Continue
-};
+enum CXVisitorResult { CXVisit_Break, CXVisit_Continue };
typedef struct CXCursorAndRangeVisitor {
void *context;
@@ -6107,8 +6094,8 @@ typedef enum {
*
* \returns one of the CXResult enumerators.
*/
-CINDEX_LINKAGE CXResult clang_findReferencesInFile(CXCursor cursor, CXFile file,
- CXCursorAndRangeVisitor visitor);
+CINDEX_LINKAGE CXResult clang_findReferencesInFile(
+ CXCursor cursor, CXFile file, CXCursorAndRangeVisitor visitor);
/**
* Find #import/#include directives in a specific file.
@@ -6122,15 +6109,14 @@ CINDEX_LINKAGE CXResult clang_findReferencesInFile(CXCursor cursor, CXFile file,
*
* \returns one of the CXResult enumerators.
*/
-CINDEX_LINKAGE CXResult clang_findIncludesInFile(CXTranslationUnit TU,
- CXFile file,
- CXCursorAndRangeVisitor visitor);
+CINDEX_LINKAGE CXResult clang_findIncludesInFile(
+ CXTranslationUnit TU, CXFile file, CXCursorAndRangeVisitor visitor);
#ifdef __has_feature
-# if __has_feature(blocks)
+#if __has_feature(blocks)
-typedef enum CXVisitorResult
- (^CXCursorAndRangeVisitorBlock)(CXCursor, CXSourceRange);
+typedef enum CXVisitorResult (^CXCursorAndRangeVisitorBlock)(CXCursor,
+ CXSourceRange);
CINDEX_LINKAGE
CXResult clang_findReferencesInFileWithBlock(CXCursor, CXFile,
@@ -6140,7 +6126,7 @@ CINDEX_LINKAGE
CXResult clang_findIncludesInFileWithBlock(CXTranslationUnit, CXFile,
CXCursorAndRangeVisitorBlock);
-# endif
+#endif
#endif
/**
@@ -6223,46 +6209,46 @@ typedef struct {
} CXIdxImportedASTFileInfo;
typedef enum {
- CXIdxEntity_Unexposed = 0,
- CXIdxEntity_Typedef = 1,
- CXIdxEntity_Function = 2,
- CXIdxEntity_Variable = 3,
- CXIdxEntity_Field = 4,
- CXIdxEntity_EnumConstant = 5,
+ CXIdxEntity_Unexposed = 0,
+ CXIdxEntity_Typedef = 1,
+ CXIdxEntity_Function = 2,
+ CXIdxEntity_Variable = 3,
+ CXIdxEntity_Field = 4,
+ CXIdxEntity_EnumConstant = 5,
- CXIdxEntity_ObjCClass = 6,
- CXIdxEntity_ObjCProtocol = 7,
- CXIdxEntity_ObjCCategory = 8,
+ CXIdxEntity_ObjCClass = 6,
+ CXIdxEntity_ObjCProtocol = 7,
+ CXIdxEntity_ObjCCategory = 8,
CXIdxEntity_ObjCInstanceMethod = 9,
- CXIdxEntity_ObjCClassMethod = 10,
- CXIdxEntity_ObjCProperty = 11,
- CXIdxEntity_ObjCIvar = 12,
-
- CXIdxEntity_Enum = 13,
- CXIdxEntity_Struct = 14,
- CXIdxEntity_Union = 15,
-
- CXIdxEntity_CXXClass = 16,
- CXIdxEntity_CXXNamespace = 17,
- CXIdxEntity_CXXNamespaceAlias = 18,
- CXIdxEntity_CXXStaticVariable = 19,
- CXIdxEntity_CXXStaticMethod = 20,
- CXIdxEntity_CXXInstanceMethod = 21,
- CXIdxEntity_CXXConstructor = 22,
- CXIdxEntity_CXXDestructor = 23,
+ CXIdxEntity_ObjCClassMethod = 10,
+ CXIdxEntity_ObjCProperty = 11,
+ CXIdxEntity_ObjCIvar = 12,
+
+ CXIdxEntity_Enum = 13,
+ CXIdxEntity_Struct = 14,
+ CXIdxEntity_Union = 15,
+
+ CXIdxEntity_CXXClass = 16,
+ CXIdxEntity_CXXNamespace = 17,
+ CXIdxEntity_CXXNamespaceAlias = 18,
+ CXIdxEntity_CXXStaticVariable = 19,
+ CXIdxEntity_CXXStaticMethod = 20,
+ CXIdxEntity_CXXInstanceMethod = 21,
+ CXIdxEntity_CXXConstructor = 22,
+ CXIdxEntity_CXXDestructor = 23,
CXIdxEntity_CXXConversionFunction = 24,
- CXIdxEntity_CXXTypeAlias = 25,
- CXIdxEntity_CXXInterface = 26
+ CXIdxEntity_CXXTypeAlias = 25,
+ CXIdxEntity_CXXInterface = 26
} CXIdxEntityKind;
typedef enum {
CXIdxEntityLang_None = 0,
- CXIdxEntityLang_C = 1,
+ CXIdxEntityLang_C = 1,
CXIdxEntityLang_ObjC = 2,
- CXIdxEntityLang_CXX = 3,
- CXIdxEntityLang_Swift = 4
+ CXIdxEntityLang_CXX = 3,
+ CXIdxEntityLang_Swift = 4
} CXIdxEntityLanguage;
/**
@@ -6276,16 +6262,16 @@ typedef enum {
* CXIdxEntity_CXXTypeAlias
*/
typedef enum {
- CXIdxEntity_NonTemplate = 0,
- CXIdxEntity_Template = 1,
+ CXIdxEntity_NonTemplate = 0,
+ CXIdxEntity_Template = 1,
CXIdxEntity_TemplatePartialSpecialization = 2,
CXIdxEntity_TemplateSpecialization = 3
} CXIdxEntityCXXTemplateKind;
typedef enum {
- CXIdxAttr_Unexposed = 0,
- CXIdxAttr_IBAction = 1,
- CXIdxAttr_IBOutlet = 2,
+ CXIdxAttr_Unexposed = 0,
+ CXIdxAttr_IBAction = 1,
+ CXIdxAttr_IBOutlet = 2,
CXIdxAttr_IBOutletCollection = 3
} CXIdxAttrKind;
@@ -6317,9 +6303,7 @@ typedef struct {
CXIdxLoc classLoc;
} CXIdxIBOutletCollectionAttrInfo;
-typedef enum {
- CXIdxDeclFlag_Skipped = 0x1
-} CXIdxDeclInfoFlags;
+typedef enum { CXIdxDeclFlag_Skipped = 0x1 } CXIdxDeclInfoFlags;
typedef struct {
const CXIdxEntityInfo *entityInfo;
@@ -6488,11 +6472,10 @@ typedef struct {
/**
* Called at the end of indexing; passes the complete diagnostic set.
*/
- void (*diagnostic)(CXClientData client_data,
- CXDiagnosticSet, void *reserved);
+ void (*diagnostic)(CXClientData client_data, CXDiagnosticSet, void *reserved);
- CXIdxClientFile (*enteredMainFile)(CXClientData client_data,
- CXFile mainFile, void *reserved);
+ CXIdxClientFile (*enteredMainFile)(CXClientData client_data, CXFile mainFile,
+ void *reserved);
/**
* Called when a file gets \#included/\#imported.
@@ -6517,8 +6500,7 @@ typedef struct {
CXIdxClientContainer (*startedTranslationUnit)(CXClientData client_data,
void *reserved);
- void (*indexDeclaration)(CXClientData client_data,
- const CXIdxDeclInfo *);
+ void (*indexDeclaration)(CXClientData client_data, const CXIdxDeclInfo *);
/**
* Called to index a reference of an entity.
@@ -6562,8 +6544,8 @@ clang_index_getClientContainer(const CXIdxContainerInfo *);
* For setting a custom CXIdxClientContainer attached to a
* container.
*/
-CINDEX_LINKAGE void
-clang_index_setClientContainer(const CXIdxContainerInfo *,CXIdxClientContainer);
+CINDEX_LINKAGE void clang_index_setClientContainer(const CXIdxContainerInfo *,
+ CXIdxClientContainer);
/**
* For retrieving a custom CXIdxClientEntity attached to an entity.
@@ -6574,8 +6556,8 @@ clang_index_getClientEntity(const CXIdxEntityInfo *);
/**
* For setting a custom CXIdxClientEntity attached to an entity.
*/
-CINDEX_LINKAGE void
-clang_index_setClientEntity(const CXIdxEntityInfo *, CXIdxClientEntity);
+CINDEX_LINKAGE void clang_index_setClientEntity(const CXIdxEntityInfo *,
+ CXIdxClientEntity);
/**
* An indexing action/session, to be applied to one or multiple
@@ -6663,18 +6645,12 @@ typedef enum {
*
* The rest of the parameters are the same as #clang_parseTranslationUnit.
*/
-CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
- CXClientData client_data,
- IndexerCallbacks *index_callbacks,
- unsigned index_callbacks_size,
- unsigned index_options,
- const char *source_filename,
- const char * const *command_line_args,
- int num_command_line_args,
- struct CXUnsavedFile *unsaved_files,
- unsigned num_unsaved_files,
- CXTranslationUnit *out_TU,
- unsigned TU_options);
+CINDEX_LINKAGE int clang_indexSourceFile(
+ CXIndexAction, CXClientData client_data, IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size, unsigned index_options,
+ const char *source_filename, const char *const *command_line_args,
+ int num_command_line_args, struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files, CXTranslationUnit *out_TU, unsigned TU_options);
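
A sketch of wiring up the indexing entry point reformatted above (illustrative, not from the diff): only the indexDeclaration callback is populated, the rest of the IndexerCallbacks struct stays zero-initialized, and input.c is a placeholder.

#include <clang-c/Index.h>
#include <cstdio>

static void onDeclaration(CXClientData, const CXIdxDeclInfo *Info) {
  if (Info->entityInfo && Info->entityInfo->name)
    printf("%s %s\n", Info->isDefinition ? "definition:" : "declaration:",
           Info->entityInfo->name);
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXIndexAction Action = clang_IndexAction_create(Idx);

  IndexerCallbacks CB{}; // unused callbacks stay null
  CB.indexDeclaration = onDeclaration;

  CXTranslationUnit TU = nullptr;
  int Failed = clang_indexSourceFile(
      Action, /*client_data=*/nullptr, &CB, sizeof(CB), CXIndexOpt_None,
      "input.c", /*command_line_args=*/nullptr, 0,
      /*unsaved_files=*/nullptr, 0, &TU, CXTranslationUnit_None);
  if (Failed)
    fprintf(stderr, "indexing failed\n");

  if (TU)
    clang_disposeTranslationUnit(TU);
  clang_IndexAction_dispose(Action);
  clang_disposeIndex(Idx);
}
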
/**
* Same as clang_indexSourceFile but requires a full command line
@@ -6704,12 +6680,9 @@ CINDEX_LINKAGE int clang_indexSourceFileFullArgv(
* \returns If there is a failure from which there is no recovery, returns
* non-zero, otherwise returns 0.
*/
-CINDEX_LINKAGE int clang_indexTranslationUnit(CXIndexAction,
- CXClientData client_data,
- IndexerCallbacks *index_callbacks,
- unsigned index_callbacks_size,
- unsigned index_options,
- CXTranslationUnit);
+CINDEX_LINKAGE int clang_indexTranslationUnit(
+ CXIndexAction, CXClientData client_data, IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size, unsigned index_options, CXTranslationUnit);
/**
* Retrieve the CXIdxFile, file, line, column, and offset represented by
@@ -6721,8 +6694,7 @@ CINDEX_LINKAGE int clang_indexTranslationUnit(CXIndexAction,
*/
CINDEX_LINKAGE void clang_indexLoc_getFileLocation(CXIdxLoc loc,
CXIdxClientFile *indexFile,
- CXFile *file,
- unsigned *line,
+ CXFile *file, unsigned *line,
unsigned *column,
unsigned *offset);
@@ -6765,8 +6737,7 @@ typedef enum CXVisitorResult (*CXFieldVisitor)(CXCursor C,
* \returns a non-zero value if the traversal was terminated
* prematurely by the visitor returning \c CXFieldVisit_Break.
*/
-CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T,
- CXFieldVisitor visitor,
+CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T, CXFieldVisitor visitor,
CXClientData client_data);
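
A sketch combining the field visitor above with the struct-layout queries documented earlier in this header (illustrative only); input.c is a placeholder and error handling is omitted.

#include <clang-c/Index.h>
#include <cstdio>

static enum CXVisitorResult fieldVisitor(CXCursor Field, CXClientData) {
  CXString Name = clang_getCursorSpelling(Field);
  printf("  %s at bit offset %lld\n", clang_getCString(Name),
         clang_Cursor_getOffsetOfField(Field));
  clang_disposeString(Name);
  return CXVisit_Continue;
}

static enum CXChildVisitResult visitor(CXCursor C, CXCursor, CXClientData) {
  if (clang_getCursorKind(C) == CXCursor_StructDecl) {
    CXString Name = clang_getCursorSpelling(C);
    printf("struct %s\n", clang_getCString(Name));
    clang_disposeString(Name);
    clang_Type_visitFields(clang_getCursorType(C), fieldVisitor, nullptr);
  }
  return CXChildVisit_Recurse;
}

int main() {
  CXIndex Idx = clang_createIndex(0, 0);
  CXTranslationUnit TU = clang_parseTranslationUnit(
      Idx, "input.c", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  clang_visitChildren(clang_getTranslationUnitCursor(TU), visitor, nullptr);
  clang_disposeTranslationUnit(TU);
  clang_disposeIndex(Idx);
}
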
/**
diff --git a/contrib/llvm-project/clang/include/clang-c/Platform.h b/contrib/llvm-project/clang/include/clang-c/Platform.h
index 3bb66bb0df48..67c1fff8ff78 100644
--- a/contrib/llvm-project/clang/include/clang-c/Platform.h
+++ b/contrib/llvm-project/clang/include/clang-c/Platform.h
@@ -18,14 +18,23 @@
LLVM_CLANG_C_EXTERN_C_BEGIN
-/* MSVC DLL import/export. */
-#ifdef _MSC_VER
- #ifdef _CINDEX_LIB_
- #define CINDEX_LINKAGE __declspec(dllexport)
- #else
- #define CINDEX_LINKAGE __declspec(dllimport)
+/* Windows DLL import/export. */
+#ifndef CINDEX_NO_EXPORTS
+ #define CINDEX_EXPORTS
+#endif
+#ifdef _WIN32
+ #ifdef CINDEX_EXPORTS
+ #ifdef _CINDEX_LIB_
+ #define CINDEX_LINKAGE __declspec(dllexport)
+ #else
+ #define CINDEX_LINKAGE __declspec(dllimport)
+ #endif
#endif
-#else
+#elif defined(CINDEX_EXPORTS) && defined(__GNUC__)
+ #define CINDEX_LINKAGE __attribute__((visibility("default")))
+#endif
+
+#ifndef CINDEX_LINKAGE
#define CINDEX_LINKAGE
#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/APValue.h b/contrib/llvm-project/clang/include/clang/AST/APValue.h
index 63359294ef63..cca92b5f8235 100644
--- a/contrib/llvm-project/clang/include/clang/AST/APValue.h
+++ b/contrib/llvm-project/clang/include/clang/AST/APValue.h
@@ -372,7 +372,7 @@ public:
bool isAddrLabelDiff() const { return Kind == AddrLabelDiff; }
void dump() const;
- void dump(raw_ostream &OS) const;
+ void dump(raw_ostream &OS, const ASTContext &Context) const;
void printPretty(raw_ostream &OS, const ASTContext &Ctx, QualType Ty) const;
std::string getAsString(const ASTContext &Ctx, QualType Ty) const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
index 3ebaad4eafdd..71bf14a87865 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
@@ -193,4 +193,4 @@ public:
} // clang
-#endif // LLVM_CLANG_AST_ASTCONCEPT_H
\ No newline at end of file
+#endif // LLVM_CLANG_AST_ASTCONCEPT_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
index f8403cf13c4a..2b988be60da9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
@@ -15,7 +15,7 @@
#define LLVM_CLANG_AST_ASTCONTEXT_H
#include "clang/AST/ASTContextAllocate.h"
-#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/ComparisonCategories.h"
@@ -26,7 +26,6 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RawCommentList.h"
-#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/Basic/AddressSpaces.h"
@@ -40,7 +39,6 @@
#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
@@ -75,6 +73,7 @@
namespace llvm {
struct fltSemantics;
+template <typename T, unsigned N> class SmallPtrSet;
} // namespace llvm
@@ -94,6 +93,9 @@ class CXXConstructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
class DiagnosticsEngine;
+class ParentMapContext;
+class DynTypedNode;
+class DynTypedNodeList;
class Expr;
class FixedPointSemantics;
class GlobalDecl;
@@ -102,6 +104,7 @@ class MangleNumberingContext;
class MaterializeTemporaryExpr;
class MemberSpecializationInfo;
class Module;
+struct MSGuidDeclParts;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCContainerDecl;
@@ -114,11 +117,13 @@ class ObjCPropertyDecl;
class ObjCPropertyImplDecl;
class ObjCProtocolDecl;
class ObjCTypeParamDecl;
+class OMPTraitInfo;
struct ParsedTargetAttr;
class Preprocessor;
class Stmt;
class StoredDeclsMap;
class TargetAttr;
+class TargetInfo;
class TemplateDecl;
class TemplateParameterList;
class TemplateTemplateParmDecl;
@@ -136,6 +141,7 @@ class Context;
} // namespace Builtin
enum BuiltinTemplateKind : int;
+enum OpenCLTypeKind : uint8_t;
namespace comments {
@@ -188,6 +194,8 @@ class ASTContext : public RefCountedBase<ASTContext> {
DependentAddressSpaceTypes;
mutable llvm::FoldingSet<VectorType> VectorTypes;
mutable llvm::FoldingSet<DependentVectorType> DependentVectorTypes;
+ mutable llvm::FoldingSet<ConstantMatrixType> MatrixTypes;
+ mutable llvm::FoldingSet<DependentSizedMatrixType> DependentSizedMatrixTypes;
mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&>
FunctionProtoTypes;
@@ -218,6 +226,8 @@ class ASTContext : public RefCountedBase<ASTContext> {
mutable llvm::FoldingSet<AtomicType> AtomicTypes;
llvm::FoldingSet<AttributedType> AttributedTypes;
mutable llvm::FoldingSet<PipeType> PipeTypes;
+ mutable llvm::FoldingSet<ExtIntType> ExtIntTypes;
+ mutable llvm::FoldingSet<DependentExtIntType> DependentExtIntTypes;
mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
@@ -264,6 +274,9 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// Mapping from __block VarDecls to BlockVarCopyInit.
llvm::DenseMap<const VarDecl *, BlockVarCopyInit> BlockVarCopyInits;
+ /// Mapping from GUIDs to the corresponding MSGuidDecl.
+ mutable llvm::FoldingSet<MSGuidDecl> MSGuidDecls;
+
  /// Used to clean up APValues stored in the AST.
mutable llvm::SmallVector<APValue *, 0> APValueCleanups;
@@ -565,18 +578,9 @@ private:
const TargetInfo *AuxTarget = nullptr;
clang::PrintingPolicy PrintingPolicy;
std::unique_ptr<interp::Context> InterpContext;
-
- ast_type_traits::TraversalKind Traversal = ast_type_traits::TK_AsIs;
+ std::unique_ptr<ParentMapContext> ParentMapCtx;
public:
- ast_type_traits::TraversalKind getTraversalKind() const { return Traversal; }
- void setTraversalKind(ast_type_traits::TraversalKind TK) { Traversal = TK; }
-
- const Expr *traverseIgnored(const Expr *E) const;
- Expr *traverseIgnored(Expr *E) const;
- ast_type_traits::DynTypedNode
- traverseIgnored(const ast_type_traits::DynTypedNode &N) const;
-
IdentifierTable &Idents;
SelectorTable &Selectors;
Builtin::Context &BuiltinInfo;
@@ -587,46 +591,8 @@ public:
/// Returns the clang bytecode interpreter context.
interp::Context &getInterpContext();
- /// Container for either a single DynTypedNode or for an ArrayRef to
- /// DynTypedNode. For use with ParentMap.
- class DynTypedNodeList {
- using DynTypedNode = ast_type_traits::DynTypedNode;
-
- llvm::AlignedCharArrayUnion<ast_type_traits::DynTypedNode,
- ArrayRef<DynTypedNode>> Storage;
- bool IsSingleNode;
-
- public:
- DynTypedNodeList(const DynTypedNode &N) : IsSingleNode(true) {
- new (Storage.buffer) DynTypedNode(N);
- }
-
- DynTypedNodeList(ArrayRef<DynTypedNode> A) : IsSingleNode(false) {
- new (Storage.buffer) ArrayRef<DynTypedNode>(A);
- }
-
- const ast_type_traits::DynTypedNode *begin() const {
- if (!IsSingleNode)
- return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer)
- ->begin();
- return reinterpret_cast<const DynTypedNode *>(Storage.buffer);
- }
-
- const ast_type_traits::DynTypedNode *end() const {
- if (!IsSingleNode)
- return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer)
- ->end();
- return reinterpret_cast<const DynTypedNode *>(Storage.buffer) + 1;
- }
-
- size_t size() const { return end() - begin(); }
- bool empty() const { return begin() == end(); }
-
- const DynTypedNode &operator[](size_t N) const {
- assert(N < size() && "Out of bounds!");
- return *(begin() + N);
- }
- };
+ /// Returns the dynamic AST node parent map context.
+ ParentMapContext &getParentMapContext();
// A traversal scope limits the parts of the AST visible to certain analyses.
// RecursiveASTVisitor::TraverseAST will only visit reachable nodes, and
@@ -638,35 +604,9 @@ public:
std::vector<Decl *> getTraversalScope() const { return TraversalScope; }
void setTraversalScope(const std::vector<Decl *> &);
- /// Returns the parents of the given node (within the traversal scope).
- ///
- /// Note that this will lazily compute the parents of all nodes
- /// and store them for later retrieval. Thus, the first call is O(n)
- /// in the number of AST nodes.
- ///
- /// Caveats and FIXMEs:
- /// Calculating the parent map over all AST nodes will need to load the
- /// full AST. This can be undesirable in the case where the full AST is
- /// expensive to create (for example, when using precompiled header
- /// preambles). Thus, there are good opportunities for optimization here.
- /// One idea is to walk the given node downwards, looking for references
- /// to declaration contexts - once a declaration context is found, compute
- /// the parent map for the declaration context; if that can satisfy the
- /// request, loading the whole AST can be avoided. Note that this is made
- /// more complex by statements in templates having multiple parents - those
- /// problems can be solved by building closure over the templated parts of
- /// the AST, which also avoids touching large parts of the AST.
- /// Additionally, we will want to add an interface to already give a hint
- /// where to search for the parents, for example when looking at a statement
- /// inside a certain function.
- ///
- /// 'NodeT' can be one of Decl, Stmt, Type, TypeLoc,
- /// NestedNameSpecifier or NestedNameSpecifierLoc.
- template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node) {
- return getParents(ast_type_traits::DynTypedNode::create(Node));
- }
-
- DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node);
+ /// Forwards to get node parents from the ParentMapContext. New callers should
+ /// use ParentMapContext::getParents() directly.
+ template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node);
const clang::PrintingPolicy &getPrintingPolicy() const {
return PrintingPolicy;
@@ -717,7 +657,7 @@ public:
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
  /// Returns an empty type if there is no appropriate target type.
- QualType getRealTypeForBitwidth(unsigned DestWidth) const;
+ QualType getRealTypeForBitwidth(unsigned DestWidth, bool ExplicitIEEE) const;
bool AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const;
@@ -791,15 +731,7 @@ public:
RawComment *getRawCommentForDeclNoCache(const Decl *D) const;
public:
- RawCommentList &getRawCommentList() {
- return Comments;
- }
-
- void addComment(const RawComment &RC) {
- assert(LangOpts.RetainCommentsFromSystemHeaders ||
- !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
- Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
- }
+ void addComment(const RawComment &RC);
/// Return the documentation comment attached to a given declaration.
/// Returns nullptr if no comment is attached.
@@ -959,7 +891,7 @@ public:
void addedLocalImportDecl(ImportDecl *Import);
static ImportDecl *getNextLocalImport(ImportDecl *Import) {
- return Import->NextLocalImport;
+ return Import->getNextLocalImport();
}
using import_range = llvm::iterator_range<import_iterator>;
@@ -987,13 +919,7 @@ public:
/// Get the additional modules in which the definition \p Def has
/// been merged.
- ArrayRef<Module*> getModulesWithMergedDefinition(const NamedDecl *Def) {
- auto MergedIt =
- MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
- if (MergedIt == MergedDefModules.end())
- return None;
- return MergedIt->second;
- }
+ ArrayRef<Module*> getModulesWithMergedDefinition(const NamedDecl *Def);
/// Add a declaration to the list of declarations that are initialized
/// for a module. This will typically be a global variable (with internal
@@ -1038,6 +964,7 @@ public:
CanQualType SatUnsignedShortFractTy, SatUnsignedFractTy,
SatUnsignedLongFractTy;
CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON
+ CanQualType BFloat16Ty;
CanQualType Float16Ty; // C11 extension ISO/IEC TS 18661-3
CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
CanQualType Float128ComplexTy;
@@ -1052,7 +979,8 @@ public:
#include "clang/Basic/OpenCLImageTypes.def"
CanQualType OCLSamplerTy, OCLEventTy, OCLClkEventTy;
CanQualType OCLQueueTy, OCLReserveIDTy;
- CanQualType OMPArraySectionTy;
+ CanQualType IncompleteMatrixIdxTy;
+ CanQualType OMPArraySectionTy, OMPArrayShapingTy, OMPIteratorTy;
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
CanQualType Id##Ty;
#include "clang/Basic/OpenCLExtensionTypes.def"
@@ -1066,7 +994,10 @@ public:
// Decl used to help define __builtin_va_list for some targets.
// The decl is built when constructing 'BuiltinVaListDecl'.
- mutable Decl *VaListTagDecl;
+ mutable Decl *VaListTagDecl = nullptr;
+
+ // Implicitly-declared type 'struct _GUID'.
+ mutable TagDecl *MSGuidTagDecl = nullptr;
ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents,
SelectorTable &sels, Builtin::Context &builtins);
@@ -1278,12 +1209,20 @@ public:
/// Return a write_only pipe type for the specified type.
QualType getWritePipeType(QualType T) const;
+ /// Return an extended integer type with the specified signedness and bit
+ /// count.
+ QualType getExtIntType(bool Unsigned, unsigned NumBits) const;
+
+ /// Return a dependent extended integer type with the specified signedness and
+ /// bit count.
+ QualType getDependentExtIntType(bool Unsigned, Expr *BitsExpr) const;
+
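
A quick sketch of exercising the two new ASTContext entry points from a standalone tool (assumptions: a tree where clang::tooling::buildASTFromCode is available and the extended-integer work is present under these names):

#include "clang/AST/ASTContext.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Build a throwaway AST just to obtain an ASTContext.
  std::unique_ptr<clang::ASTUnit> AST =
      clang::tooling::buildASTFromCode("int dummy;");
  if (!AST)
    return 1;
  clang::ASTContext &Ctx = AST->getASTContext();

  clang::QualType Signed37 = Ctx.getExtIntType(/*Unsigned=*/false, 37);
  clang::QualType Unsigned7 = Ctx.getExtIntType(/*Unsigned=*/true, 7);

  // getTypeSize reports the storage size in bits, including padding.
  llvm::outs() << Signed37.getAsString() << " stored in "
               << Ctx.getTypeSize(Signed37) << " bits\n";
  llvm::outs() << Unsigned7.getAsString() << " stored in "
               << Ctx.getTypeSize(Unsigned7) << " bits\n";
  return 0;
}
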
/// Gets the struct used to keep track of the extended descriptor for
/// pointer to blocks.
QualType getBlockDescriptorExtendedType() const;
/// Map an AST Type to an OpenCLTypeKind enum value.
- TargetInfo::OpenCLTypeKind getOpenCLTypeKind(const Type *T) const;
+ OpenCLTypeKind getOpenCLTypeKind(const Type *T) const;
/// Get address space for OpenCL type.
LangAS getOpenCLTypeAddrSpace(const Type *T) const;
@@ -1358,6 +1297,12 @@ public:
/// Returns a vla type where known sizes are replaced with [*].
QualType getVariableArrayDecayedType(QualType Ty) const;
+ /// Return the unique reference to a scalable vector type of the specified
+ /// element type and scalable number of elements.
+ ///
+ /// \pre \p EltTy must be a built-in type.
+ QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
+
/// Return the unique reference to a vector type of the specified
/// element type and size.
///
@@ -1385,6 +1330,20 @@ public:
Expr *SizeExpr,
SourceLocation AttrLoc) const;
+ /// Return the unique reference to the matrix type of the specified element
+ /// type and size
+ ///
+ /// \pre \p ElementType must be a valid matrix element type (see
+ /// MatrixType::isValidElementType).
+ QualType getConstantMatrixType(QualType ElementType, unsigned NumRows,
+ unsigned NumColumns) const;
+
+ /// Return the unique reference to the matrix type of the specified element
+ /// type and size
+ QualType getDependentSizedMatrixType(QualType ElementType, Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttrLoc) const;
+
QualType getDependentAddressSpaceType(QualType PointeeType,
Expr *AddrSpaceExpr,
SourceLocation AttrLoc) const;
@@ -1518,6 +1477,8 @@ public:
QualType getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
ArrayRef<ObjCProtocolDecl *> protocols) const;
+ void adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
+ ObjCTypeParamDecl *New) const;
bool ObjCObjectAdoptsQTypeProtocols(QualType QT, ObjCInterfaceDecl *Decl);
@@ -1713,23 +1674,9 @@ public:
return NSCopyingName;
}
- CanQualType getNSUIntegerType() const {
- assert(Target && "Expected target to be initialized");
- const llvm::Triple &T = Target->getTriple();
- // Windows is LLP64 rather than LP64
- if (T.isOSWindows() && T.isArch64Bit())
- return UnsignedLongLongTy;
- return UnsignedLongTy;
- }
+ CanQualType getNSUIntegerType() const;
- CanQualType getNSIntegerType() const {
- assert(Target && "Expected target to be initialized");
- const llvm::Triple &T = Target->getTriple();
- // Windows is LLP64 rather than LP64
- if (T.isOSWindows() && T.isArch64Bit())
- return LongLongTy;
- return LongTy;
- }
+ CanQualType getNSIntegerType() const;
/// Retrieve the identifier 'bool'.
IdentifierInfo *getBoolName() const {
@@ -1944,6 +1891,15 @@ public:
return getTypeDeclType(getBuiltinMSVaListDecl());
}
+ /// Retrieve the implicitly-predeclared 'struct _GUID' declaration.
+ TagDecl *getMSGuidTagDecl() const { return MSGuidTagDecl; }
+
+ /// Retrieve the implicitly-predeclared 'struct _GUID' type.
+ QualType getMSGuidType() const {
+ assert(MSGuidTagDecl && "asked for GUID type but MS extensions disabled");
+ return getTagDeclType(MSGuidTagDecl);
+ }
+
/// Return whether a declaration to a builtin is allowed to be
/// overloaded/redeclared.
bool canBuiltinBeRedeclared(const FunctionDecl *) const;
@@ -2207,9 +2163,7 @@ public:
/// Return the alignment (in bytes) of the thrown exception object. This is
/// only meaningful for targets that allocate C++ exceptions in a system
/// runtime, such as those using the Itanium C++ ABI.
- CharUnits getExnObjectAlignment() const {
- return toCharUnitsFromBits(Target->getExnObjectAlignment());
- }
+ CharUnits getExnObjectAlignment() const;
/// Get or compute information about the layout of the specified
/// record (struct/union/class) \p D, which indicates its size and field
@@ -2627,7 +2581,7 @@ public:
QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false,
bool Unqualified = false, bool BlockReturnType = false);
QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false,
- bool Unqualified = false);
+ bool Unqualified = false, bool AllowCXX = false);
QualType mergeFunctionParameterTypes(QualType, QualType,
bool OfBlockPointer = false,
bool Unqualified = false);
@@ -2848,6 +2802,10 @@ public:
/// PredefinedExpr to cache evaluated results.
StringLiteral *getPredefinedStringLiteralFromCache(StringRef Key) const;
+ /// Return a declaration for the global GUID object representing the given
+ /// GUID value.
+ MSGuidDecl *getMSGuidDecl(MSGuidDeclParts Parts) const;
+
/// Parses the target attributes passed in, and returns only the ones that are
/// valid feature names.
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const;
@@ -3026,8 +2984,6 @@ private:
llvm::PointerIntPair<StoredDeclsMap *, 1> LastSDM;
std::vector<Decl *> TraversalScope;
- class ParentMap;
- std::map<ast_type_traits::TraversalKind, std::unique_ptr<ParentMap>> Parents;
std::unique_ptr<VTableContextBase> VTContext;
@@ -3040,6 +2996,7 @@ public:
PSF_Write = 0x2,
PSF_Execute = 0x4,
PSF_Implicit = 0x8,
+ PSF_ZeroInit = 0x10,
PSF_Invalid = 0x80000000U,
};
@@ -3057,8 +3014,20 @@ public:
};
llvm::StringMap<SectionInfo> SectionInfos;
+
+ /// Return a new OMPTraitInfo object owned by this context.
+ OMPTraitInfo &getNewOMPTraitInfo();
+
+private:
+ /// All OMPTraitInfo objects live in this collection, one per
+ /// `pragma omp [begin] declare variant` directive.
+ SmallVector<std::unique_ptr<OMPTraitInfo>, 4> OMPTraitInfoVector;
};
+/// Insertion operator for diagnostics.
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const ASTContext::SectionInfo &Section);
+
/// Utility function for constructing a nullary selector.
inline Selector GetNullarySelector(StringRef name, ASTContext &Ctx) {
IdentifierInfo* II = &Ctx.Idents.get(name);
@@ -3071,22 +3040,6 @@ inline Selector GetUnarySelector(StringRef name, ASTContext &Ctx) {
return Ctx.Selectors.getSelector(1, &II);
}
-class TraversalKindScope {
- ASTContext &Ctx;
- ast_type_traits::TraversalKind TK = ast_type_traits::TK_AsIs;
-
-public:
- TraversalKindScope(ASTContext &Ctx,
- llvm::Optional<ast_type_traits::TraversalKind> ScopeTK)
- : Ctx(Ctx) {
- TK = Ctx.getTraversalKind();
- if (ScopeTK)
- Ctx.setTraversalKind(*ScopeTK);
- }
-
- ~TraversalKindScope() { Ctx.setTraversalKind(TK); }
-};
-
} // namespace clang
// operator new and delete aren't allowed inside namespaces.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h b/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
index 61202f057a80..a154bc2db3a7 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
@@ -11,6 +11,7 @@
#include "clang/AST/ASTNodeTraverser.h"
#include "clang/AST/TextNodeDumper.h"
+#include "clang/Basic/SourceManager.h"
namespace clang {
@@ -23,18 +24,11 @@ class ASTDumper : public ASTNodeTraverser<ASTDumper, TextNodeDumper> {
const bool ShowColors;
public:
- ASTDumper(raw_ostream &OS, const comments::CommandTraits *Traits,
- const SourceManager *SM)
- : ASTDumper(OS, Traits, SM, SM && SM->getDiagnostics().getShowColors()) {}
-
- ASTDumper(raw_ostream &OS, const comments::CommandTraits *Traits,
- const SourceManager *SM, bool ShowColors)
- : ASTDumper(OS, Traits, SM, ShowColors, LangOptions()) {}
- ASTDumper(raw_ostream &OS, const comments::CommandTraits *Traits,
- const SourceManager *SM, bool ShowColors,
- const PrintingPolicy &PrintPolicy)
- : NodeDumper(OS, ShowColors, SM, PrintPolicy, Traits), OS(OS),
- ShowColors(ShowColors) {}
+ ASTDumper(raw_ostream &OS, const ASTContext &Context, bool ShowColors)
+ : NodeDumper(OS, Context, ShowColors), OS(OS), ShowColors(ShowColors) {}
+
+ ASTDumper(raw_ostream &OS, bool ShowColors)
+ : NodeDumper(OS, ShowColors), OS(OS), ShowColors(ShowColors) {}
TextNodeDumper &doGetNodeDelegate() { return NodeDumper; }
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTDumperUtils.h b/contrib/llvm-project/clang/include/clang/AST/ASTDumperUtils.h
index 55a085449a9b..1dce913049ad 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTDumperUtils.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTDumperUtils.h
@@ -62,6 +62,8 @@ static const TerminalColor LocationColor = {llvm::raw_ostream::YELLOW, false};
static const TerminalColor ValueKindColor = {llvm::raw_ostream::CYAN, false};
// bitfield/objcproperty/objcsubscript/vectorcomponent
static const TerminalColor ObjectKindColor = {llvm::raw_ostream::CYAN, false};
+// contains-errors
+static const TerminalColor ErrorsColor = {llvm::raw_ostream::RED, true};
// Null statements
static const TerminalColor NullColor = {llvm::raw_ostream::BLUE, false};
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h b/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
index 5a891817b336..65319a19728b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
@@ -27,8 +27,8 @@ class Type;
#include "clang/AST/TypeNodes.inc"
class CXXCtorInitializer;
class OMPClause;
-#define OPENMP_CLAUSE(KIND, CLASSNAME) class CLASSNAME;
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) class Class;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
index 490b34bf95e8..205d7ec67754 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
@@ -16,6 +16,7 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -349,6 +350,10 @@ class TypeSourceInfo;
return ToOrErr.takeError();
}
+ /// Import cleanup objects owned by ExprWithCleanup.
+ llvm::Expected<ExprWithCleanups::CleanupObject>
+ Import(ExprWithCleanups::CleanupObject From);
+
/// Import the given type from the "from" context into the "to"
/// context. A null type is imported as a null type (no error).
///
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
index 9ebf64a12af5..26656b7162b6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
@@ -15,16 +15,20 @@
#ifndef LLVM_CLANG_AST_ASTNODETRAVERSER_H
#define LLVM_CLANG_AST_ASTNODETRAVERSER_H
+#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/AttrVisitor.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TemplateArgumentVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
namespace clang {
+class APValue;
+
/**
ASTNodeTraverser traverses the Clang AST for dumping purposes.
@@ -49,6 +53,7 @@ struct {
void Visit(const OMPClause *C);
void Visit(const BlockDecl::Capture &C);
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const APValue &Value, QualType Ty);
};
*/
template <typename Derived, typename NodeDelegateType>
@@ -65,8 +70,7 @@ class ASTNodeTraverser
/// not already been loaded.
bool Deserialize = false;
- ast_type_traits::TraversalKind Traversal =
- ast_type_traits::TraversalKind::TK_AsIs;
+ TraversalKind Traversal = TraversalKind::TK_AsIs;
NodeDelegateType &getNodeDelegate() {
return getDerived().doGetNodeDelegate();
@@ -77,7 +81,7 @@ public:
void setDeserialize(bool D) { Deserialize = D; }
bool getDeserialize() const { return Deserialize; }
- void SetTraversalKind(ast_type_traits::TraversalKind TK) { Traversal = TK; }
+ void SetTraversalKind(TraversalKind TK) { Traversal = TK; }
void Visit(const Decl *D) {
getNodeDelegate().AddChild([=] {
@@ -108,12 +112,12 @@ public:
if (auto *E = dyn_cast_or_null<Expr>(S)) {
switch (Traversal) {
- case ast_type_traits::TK_AsIs:
+ case TK_AsIs:
break;
- case ast_type_traits::TK_IgnoreImplicitCastsAndParentheses:
+ case TK_IgnoreImplicitCastsAndParentheses:
S = E->IgnoreParenImpCasts();
break;
- case ast_type_traits::TK_IgnoreUnlessSpelledInSource:
+ case TK_IgnoreUnlessSpelledInSource:
S = E->IgnoreUnlessSpelledInSource();
break;
}
@@ -131,8 +135,7 @@ public:
if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S))
return;
- if (isa<LambdaExpr>(S) &&
- Traversal == ast_type_traits::TK_IgnoreUnlessSpelledInSource)
+ if (isa<LambdaExpr>(S) && Traversal == TK_IgnoreUnlessSpelledInSource)
return;
for (const Stmt *SubStmt : S->children())
@@ -212,6 +215,10 @@ public:
});
}
+ void Visit(const APValue &Value, QualType Ty) {
+ getNodeDelegate().AddChild([=] { getNodeDelegate().Visit(Value, Ty); });
+ }
+
void Visit(const comments::Comment *C, const comments::FullComment *FC) {
getNodeDelegate().AddChild([=] {
getNodeDelegate().Visit(C, FC);
@@ -228,7 +235,7 @@ public:
});
}
- void Visit(const ast_type_traits::DynTypedNode &N) {
+ void Visit(const DynTypedNode &N) {
// FIXME: Improve this with a switch or a visitor pattern.
if (const auto *D = N.get<Decl>())
Visit(D);
@@ -353,8 +360,6 @@ public:
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
for (const auto &Arg : *T)
Visit(Arg);
- if (T->isTypeAlias())
- Visit(T->getAliasedType());
}
void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
Visit(T->getPointeeType());
@@ -658,7 +663,7 @@ public:
}
void VisitLambdaExpr(const LambdaExpr *Node) {
- if (Traversal == ast_type_traits::TK_IgnoreUnlessSpelledInSource) {
+ if (Traversal == TK_IgnoreUnlessSpelledInSource) {
for (unsigned I = 0, N = Node->capture_size(); I != N; ++I) {
const auto *C = Node->capture_begin() + I;
if (!C->isExplicit())
@@ -683,6 +688,15 @@ public:
Visit(A);
}
+ void VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) {
+ Visit(E->getParameter());
+ }
+ void VisitSubstNonTypeTemplateParmPackExpr(
+ const SubstNonTypeTemplateParmPackExpr *E) {
+ Visit(E->getParameterPack());
+ Visit(E->getArgumentPack());
+ }
+
void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
if (const VarDecl *CatchParam = Node->getCatchParamDecl())
Visit(CatchParam);
@@ -691,6 +705,11 @@ public:
void VisitExpressionTemplateArgument(const TemplateArgument &TA) {
Visit(TA.getAsExpr());
}
+
+ void VisitTypeTemplateArgument(const TemplateArgument &TA) {
+ Visit(TA.getAsType());
+ }
+
void VisitPackTemplateArgument(const TemplateArgument &TA) {
for (const auto &TArg : TA.pack_elements())
Visit(TArg);
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
index 1a12281d039d..328b7bce6ba5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_AST_ASTTYPETRAITS_H
#include "clang/AST/ASTFwd.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TypeLoc.h"
@@ -33,8 +34,6 @@ namespace clang {
struct PrintingPolicy;
-namespace ast_type_traits {
-
/// Defines how we descend a level in the AST when we pass
/// through expressions.
enum TraversalKind {
@@ -138,6 +137,7 @@ private:
NKI_QualType,
NKI_TypeLoc,
NKI_LastKindWithoutPointerIdentity = NKI_TypeLoc,
+ NKI_CXXBaseSpecifier,
NKI_CXXCtorInitializer,
NKI_NestedNameSpecifier,
NKI_Decl,
@@ -150,8 +150,8 @@ private:
#define TYPE(DERIVED, BASE) NKI_##DERIVED##Type,
#include "clang/AST/TypeNodes.inc"
NKI_OMPClause,
-#define OPENMP_CLAUSE(TextualSpelling, Class) NKI_##Class,
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) NKI_##Class,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
NKI_NumberOfKinds
};
@@ -200,14 +200,15 @@ KIND_TO_KIND_ID(Decl)
KIND_TO_KIND_ID(Stmt)
KIND_TO_KIND_ID(Type)
KIND_TO_KIND_ID(OMPClause)
+KIND_TO_KIND_ID(CXXBaseSpecifier)
#define DECL(DERIVED, BASE) KIND_TO_KIND_ID(DERIVED##Decl)
#include "clang/AST/DeclNodes.inc"
#define STMT(DERIVED, BASE) KIND_TO_KIND_ID(DERIVED)
#include "clang/AST/StmtNodes.inc"
#define TYPE(DERIVED, BASE) KIND_TO_KIND_ID(DERIVED##Type)
#include "clang/AST/TypeNodes.inc"
-#define OPENMP_CLAUSE(TextualSpelling, Class) KIND_TO_KIND_ID(Class)
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) KIND_TO_KIND_ID(Class)
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
#undef KIND_TO_KIND_ID
inline raw_ostream &operator<<(raw_ostream &OS, ASTNodeKind K) {
@@ -277,7 +278,7 @@ public:
void print(llvm::raw_ostream &OS, const PrintingPolicy &PP) const;
/// Dumps the node to the given output stream.
- void dump(llvm::raw_ostream &OS, SourceManager &SM) const;
+ void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
/// For nodes which represent textual entities in the source code,
/// return their SourceRange. For all other nodes, return SourceRange().
@@ -465,22 +466,22 @@ private:
template <typename T>
struct DynTypedNode::BaseConverter<
- T, typename std::enable_if<std::is_base_of<Decl, T>::value>::type>
+ T, std::enable_if_t<std::is_base_of<Decl, T>::value>>
: public DynCastPtrConverter<T, Decl> {};
template <typename T>
struct DynTypedNode::BaseConverter<
- T, typename std::enable_if<std::is_base_of<Stmt, T>::value>::type>
+ T, std::enable_if_t<std::is_base_of<Stmt, T>::value>>
: public DynCastPtrConverter<T, Stmt> {};
template <typename T>
struct DynTypedNode::BaseConverter<
- T, typename std::enable_if<std::is_base_of<Type, T>::value>::type>
+ T, std::enable_if_t<std::is_base_of<Type, T>::value>>
: public DynCastPtrConverter<T, Type> {};
template <typename T>
struct DynTypedNode::BaseConverter<
- T, typename std::enable_if<std::is_base_of<OMPClause, T>::value>::type>
+ T, std::enable_if_t<std::is_base_of<OMPClause, T>::value>>
: public DynCastPtrConverter<T, OMPClause> {};
template <>
@@ -512,6 +513,10 @@ template <>
struct DynTypedNode::BaseConverter<
TypeLoc, void> : public ValueConverter<TypeLoc> {};
+template <>
+struct DynTypedNode::BaseConverter<CXXBaseSpecifier, void>
+ : public PtrConverter<CXXBaseSpecifier> {};
+
// The only operation we allow on unsupported types is \c get.
// This allows to conveniently use \c DynTypedNode when having an arbitrary
// AST node that is not supported, but prevents misuse - a user cannot create
@@ -522,18 +527,29 @@ template <typename T, typename EnablerT> struct DynTypedNode::BaseConverter {
}
};
-} // end namespace ast_type_traits
+// Previously these types were defined in the clang::ast_type_traits namespace.
+// Provide typedefs so that legacy code can be fixed asynchronously.
+namespace ast_type_traits {
+using DynTypedNode = ::clang::DynTypedNode;
+using ASTNodeKind = ::clang::ASTNodeKind;
+using TraversalKind = ::clang::TraversalKind;
+
+constexpr TraversalKind TK_AsIs = ::clang::TK_AsIs;
+constexpr TraversalKind TK_IgnoreImplicitCastsAndParentheses =
+ ::clang::TK_IgnoreImplicitCastsAndParentheses;
+constexpr TraversalKind TK_IgnoreUnlessSpelledInSource =
+ ::clang::TK_IgnoreUnlessSpelledInSource;
+} // namespace ast_type_traits
+
} // end namespace clang
namespace llvm {
template <>
-struct DenseMapInfo<clang::ast_type_traits::ASTNodeKind>
- : clang::ast_type_traits::ASTNodeKind::DenseMapInfo {};
+struct DenseMapInfo<clang::ASTNodeKind> : clang::ASTNodeKind::DenseMapInfo {};
template <>
-struct DenseMapInfo<clang::ast_type_traits::DynTypedNode>
- : clang::ast_type_traits::DynTypedNode::DenseMapInfo {};
+struct DenseMapInfo<clang::DynTypedNode> : clang::DynTypedNode::DenseMapInfo {};
} // end namespace llvm
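
A rough usage sketch under the new spellings (assumed usage, not from the patch; countNode is a hypothetical helper):

#include "clang/AST/ASTTypeTraits.h"
#include "llvm/ADT/DenseMap.h"

// DynTypedNode and ASTNodeKind now live directly in namespace clang, and the
// DenseMapInfo specializations above let DynTypedNode key a DenseMap.
void countNode(llvm::DenseMap<clang::DynTypedNode, unsigned> &Counts,
               const clang::Decl &D) {
  clang::DynTypedNode N = clang::DynTypedNode::create(D);
  ++Counts[N];
  if (const auto *FD = N.get<clang::FunctionDecl>())
    (void)FD; // typed access works exactly as before
}
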
diff --git a/contrib/llvm-project/clang/include/clang/AST/Attr.h b/contrib/llvm-project/clang/include/clang/AST/Attr.h
index bbaa46363d97..1b457337d658 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Attr.h
@@ -13,13 +13,13 @@
#ifndef LLVM_CLANG_AST_ATTR_H
#define LLVM_CLANG_AST_ATTR_H
-#include "clang/AST/ASTContextAllocate.h" // For Attrs.inc
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/AttributeCommonInfo.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Sanitizers.h"
@@ -40,6 +40,7 @@ class Expr;
class QualType;
class FunctionDecl;
class TypeSourceInfo;
+class OMPTraitInfo;
/// Attr - This represents one attribute.
class Attr : public AttributeCommonInfo {
diff --git a/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def b/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
index 74a45ee4ccc0..039765dfdfea 100644
--- a/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
+++ b/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
@@ -212,6 +212,9 @@ FLOATING_TYPE(LongDouble, LongDoubleTy)
// '_Float16'
FLOATING_TYPE(Float16, HalfTy)
+// '__bf16'
+FLOATING_TYPE(BFloat16, BFloat16Ty)
+
// '__float128'
FLOATING_TYPE(Float128, Float128Ty)
@@ -310,11 +313,20 @@ PLACEHOLDER_TYPE(BuiltinFn, BuiltinFnTy)
// context.
PLACEHOLDER_TYPE(ARCUnbridgedCast, ARCUnbridgedCastTy)
+// A placeholder type for incomplete matrix index expressions.
+PLACEHOLDER_TYPE(IncompleteMatrixIdx, IncompleteMatrixIdxTy)
+
// A placeholder type for OpenMP array sections.
PLACEHOLDER_TYPE(OMPArraySection, OMPArraySectionTy)
+// A placeholder type for OpenMP array shaping operation.
+PLACEHOLDER_TYPE(OMPArrayShaping, OMPArrayShapingTy)
+
+// A placeholder type for OpenMP iterators.
+PLACEHOLDER_TYPE(OMPIterator, OMPIteratorTy)
+
#ifdef LAST_BUILTIN_TYPE
-LAST_BUILTIN_TYPE(OMPArraySection)
+LAST_BUILTIN_TYPE(OMPIterator)
#undef LAST_BUILTIN_TYPE
#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
index f223c1f2f4f0..8b1bcb367b3b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
@@ -119,7 +119,7 @@ class CXXBasePaths {
friend class CXXRecordDecl;
/// The type from which this search originated.
- CXXRecordDecl *Origin = nullptr;
+ const CXXRecordDecl *Origin = nullptr;
/// Paths - The actual set of paths that can be taken from the
/// derived class to the same base class.
@@ -225,8 +225,8 @@ public:
/// Retrieve the type from which this base-paths search
/// began
- CXXRecordDecl *getOrigin() const { return Origin; }
- void setOrigin(CXXRecordDecl *Rec) { Origin = Rec; }
+ const CXXRecordDecl *getOrigin() const { return Origin; }
+ void setOrigin(const CXXRecordDecl *Rec) { Origin = Rec; }
/// Clear the base-paths results.
void clear();
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
index bd4d8247aeca..33e65f8ebf44 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
@@ -140,6 +140,7 @@ FIELD(HasInheritedAssignment, 1, NO_MERGE)
/// @{
FIELD(NeedOverloadResolutionForCopyConstructor, 1, NO_MERGE)
FIELD(NeedOverloadResolutionForMoveConstructor, 1, NO_MERGE)
+FIELD(NeedOverloadResolutionForCopyAssignment, 1, NO_MERGE)
FIELD(NeedOverloadResolutionForMoveAssignment, 1, NO_MERGE)
FIELD(NeedOverloadResolutionForDestructor, 1, NO_MERGE)
/// @}
@@ -149,6 +150,7 @@ FIELD(NeedOverloadResolutionForDestructor, 1, NO_MERGE)
/// @{
FIELD(DefaultedCopyConstructorIsDeleted, 1, NO_MERGE)
FIELD(DefaultedMoveConstructorIsDeleted, 1, NO_MERGE)
+FIELD(DefaultedCopyAssignmentIsDeleted, 1, NO_MERGE)
FIELD(DefaultedMoveAssignmentIsDeleted, 1, NO_MERGE)
FIELD(DefaultedDestructorIsDeleted, 1, NO_MERGE)
/// @}
diff --git a/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h b/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
index 2e00d344533d..488284713bce 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
@@ -74,7 +74,7 @@ public:
/// canonical type pointers.
template <typename U>
CanQual(const CanQual<U> &Other,
- typename std::enable_if<std::is_base_of<T, U>::value, int>::type = 0);
+ std::enable_if_t<std::is_base_of<T, U>::value, int> = 0);
/// Retrieve the underlying type pointer, which refers to a
/// canonical type.
@@ -264,6 +264,8 @@ public:
// Type predicates
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjectType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIncompleteType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSizelessType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSizelessBuiltinType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIncompleteOrObjectType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isVariablyModifiedType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegerType)
@@ -384,7 +386,7 @@ struct PointerLikeTypeTraits<clang::CanQual<T>> {
}
// qualifier information is encoded in the low bits.
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
};
} // namespace llvm
diff --git a/contrib/llvm-project/clang/include/clang/AST/Comment.h b/contrib/llvm-project/clang/include/clang/AST/Comment.h
index cd9c1ce2bce0..54a4b0a9cfe6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Comment.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Comment.h
@@ -209,9 +209,7 @@ public:
void dump() const;
void dumpColor() const;
- void dump(const ASTContext &Context) const;
- void dump(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM) const;
+ void dump(raw_ostream &OS, const ASTContext &Context) const;
SourceRange getSourceRange() const LLVM_READONLY { return Range; }
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td b/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
index d387df7ce570..fbbfc9f7e0b7 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
@@ -87,6 +87,7 @@ def P : InlineCommand<"p">;
def A : InlineCommand<"a">;
def E : InlineCommand<"e">;
def Em : InlineCommand<"em">;
+def Ref : InlineCommand<"ref">;
def Anchor : InlineCommand<"anchor">;
//===----------------------------------------------------------------------===//
@@ -205,7 +206,6 @@ def Paragraph : VerbatimLineCommand<"paragraph">;
def Mainpage : VerbatimLineCommand<"mainpage">;
def Subpage : VerbatimLineCommand<"subpage">;
-def Ref : VerbatimLineCommand<"ref">;
def Relates : VerbatimLineCommand<"relates">;
def Related : VerbatimLineCommand<"related">;
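
Illustration of the effect (assumed, not from the patch; BufferAlloc and BufferFree are hypothetical names): as an inline command, \ref no longer consumes the rest of the line, so it can appear mid-sentence in a documentation comment:

/// Releases a buffer; see \ref BufferAlloc for the matching allocation. The
/// text after the \ref target now stays part of this paragraph instead of
/// being treated as a verbatim line.
void BufferFree(void *Ptr);
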
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
index 307618fa5363..6dfe0f4920d0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
@@ -217,6 +217,9 @@ public:
bool isTemplateOrSpecialization();
bool isRecordLikeDecl();
bool isClassOrStructDecl();
+ /// \return \c true if the declaration that this comment is attached to
+  /// declares either a struct, a class, or a tag typedef.
+ bool isClassOrStructOrTagTypedefDecl();
bool isUnionDecl();
bool isObjCInterfaceDecl();
bool isObjCProtocolDecl();
diff --git a/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
new file mode 100644
index 000000000000..ac2daf9eb95a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
@@ -0,0 +1,194 @@
+//===--- ComputeDependence.h -------------------------------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Calculate various template dependency flags for the AST.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
+#define LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
+
+#include "clang/AST/DependenceFlags.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+
+class ASTContext;
+
+class Expr;
+class FullExpr;
+class OpaqueValueExpr;
+class ParenExpr;
+class UnaryOperator;
+class UnaryExprOrTypeTraitExpr;
+class ArraySubscriptExpr;
+class MatrixSubscriptExpr;
+class CompoundLiteralExpr;
+class CastExpr;
+class BinaryOperator;
+class ConditionalOperator;
+class BinaryConditionalOperator;
+class StmtExpr;
+class ConvertVectorExpr;
+class VAArgExpr;
+class ChooseExpr;
+class NoInitExpr;
+class ArrayInitLoopExpr;
+class ImplicitValueInitExpr;
+class InitListExpr;
+class ExtVectorElementExpr;
+class BlockExpr;
+class AsTypeExpr;
+class DeclRefExpr;
+class RecoveryExpr;
+class CXXRewrittenBinaryOperator;
+class CXXStdInitializerListExpr;
+class CXXTypeidExpr;
+class MSPropertyRefExpr;
+class MSPropertySubscriptExpr;
+class CXXUuidofExpr;
+class CXXThisExpr;
+class CXXThrowExpr;
+class CXXBindTemporaryExpr;
+class CXXScalarValueInitExpr;
+class CXXDeleteExpr;
+class ArrayTypeTraitExpr;
+class ExpressionTraitExpr;
+class CXXNoexceptExpr;
+class PackExpansionExpr;
+class SubstNonTypeTemplateParmExpr;
+class CoroutineSuspendExpr;
+class DependentCoawaitExpr;
+class CXXNewExpr;
+class CXXPseudoDestructorExpr;
+class OverloadExpr;
+class DependentScopeDeclRefExpr;
+class CXXConstructExpr;
+class LambdaExpr;
+class CXXUnresolvedConstructExpr;
+class CXXDependentScopeMemberExpr;
+class MaterializeTemporaryExpr;
+class CXXFoldExpr;
+class TypeTraitExpr;
+class ConceptSpecializationExpr;
+class PredefinedExpr;
+class CallExpr;
+class OffsetOfExpr;
+class MemberExpr;
+class ShuffleVectorExpr;
+class GenericSelectionExpr;
+class DesignatedInitExpr;
+class ParenListExpr;
+class PseudoObjectExpr;
+class AtomicExpr;
+class OMPArraySectionExpr;
+class OMPArrayShapingExpr;
+class OMPIteratorExpr;
+class ObjCArrayLiteral;
+class ObjCDictionaryLiteral;
+class ObjCBoxedExpr;
+class ObjCEncodeExpr;
+class ObjCIvarRefExpr;
+class ObjCPropertyRefExpr;
+class ObjCSubscriptRefExpr;
+class ObjCIsaExpr;
+class ObjCIndirectCopyRestoreExpr;
+class ObjCMessageExpr;
+
+// The following functions are called from constructors of `Expr`, so they
+// should not access anything beyond the basic, already-initialized state of
+// the expression.
+ExprDependence computeDependence(FullExpr *E);
+ExprDependence computeDependence(OpaqueValueExpr *E);
+ExprDependence computeDependence(ParenExpr *E);
+ExprDependence computeDependence(UnaryOperator *E);
+ExprDependence computeDependence(UnaryExprOrTypeTraitExpr *E);
+ExprDependence computeDependence(ArraySubscriptExpr *E);
+ExprDependence computeDependence(MatrixSubscriptExpr *E);
+ExprDependence computeDependence(CompoundLiteralExpr *E);
+ExprDependence computeDependence(CastExpr *E);
+ExprDependence computeDependence(BinaryOperator *E);
+ExprDependence computeDependence(ConditionalOperator *E);
+ExprDependence computeDependence(BinaryConditionalOperator *E);
+ExprDependence computeDependence(StmtExpr *E, unsigned TemplateDepth);
+ExprDependence computeDependence(ConvertVectorExpr *E);
+ExprDependence computeDependence(VAArgExpr *E);
+ExprDependence computeDependence(ChooseExpr *E);
+ExprDependence computeDependence(NoInitExpr *E);
+ExprDependence computeDependence(ArrayInitLoopExpr *E);
+ExprDependence computeDependence(ImplicitValueInitExpr *E);
+ExprDependence computeDependence(InitListExpr *E);
+ExprDependence computeDependence(ExtVectorElementExpr *E);
+ExprDependence computeDependence(BlockExpr *E);
+ExprDependence computeDependence(AsTypeExpr *E);
+ExprDependence computeDependence(DeclRefExpr *E, const ASTContext &Ctx);
+ExprDependence computeDependence(RecoveryExpr *E);
+ExprDependence computeDependence(CXXRewrittenBinaryOperator *E);
+ExprDependence computeDependence(CXXStdInitializerListExpr *E);
+ExprDependence computeDependence(CXXTypeidExpr *E);
+ExprDependence computeDependence(MSPropertyRefExpr *E);
+ExprDependence computeDependence(MSPropertySubscriptExpr *E);
+ExprDependence computeDependence(CXXUuidofExpr *E);
+ExprDependence computeDependence(CXXThisExpr *E);
+ExprDependence computeDependence(CXXThrowExpr *E);
+ExprDependence computeDependence(CXXBindTemporaryExpr *E);
+ExprDependence computeDependence(CXXScalarValueInitExpr *E);
+ExprDependence computeDependence(CXXDeleteExpr *E);
+ExprDependence computeDependence(ArrayTypeTraitExpr *E);
+ExprDependence computeDependence(ExpressionTraitExpr *E);
+ExprDependence computeDependence(CXXNoexceptExpr *E, CanThrowResult CT);
+ExprDependence computeDependence(PackExpansionExpr *E);
+ExprDependence computeDependence(SubstNonTypeTemplateParmExpr *E);
+ExprDependence computeDependence(CoroutineSuspendExpr *E);
+ExprDependence computeDependence(DependentCoawaitExpr *E);
+ExprDependence computeDependence(CXXNewExpr *E);
+ExprDependence computeDependence(CXXPseudoDestructorExpr *E);
+ExprDependence computeDependence(OverloadExpr *E, bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack);
+ExprDependence computeDependence(DependentScopeDeclRefExpr *E);
+ExprDependence computeDependence(CXXConstructExpr *E);
+ExprDependence computeDependence(LambdaExpr *E,
+ bool ContainsUnexpandedParameterPack);
+ExprDependence computeDependence(CXXUnresolvedConstructExpr *E);
+ExprDependence computeDependence(CXXDependentScopeMemberExpr *E);
+ExprDependence computeDependence(MaterializeTemporaryExpr *E);
+ExprDependence computeDependence(CXXFoldExpr *E);
+ExprDependence computeDependence(TypeTraitExpr *E);
+ExprDependence computeDependence(ConceptSpecializationExpr *E,
+ bool ValueDependent);
+
+ExprDependence computeDependence(PredefinedExpr *E);
+ExprDependence computeDependence(CallExpr *E, llvm::ArrayRef<Expr *> PreArgs);
+ExprDependence computeDependence(OffsetOfExpr *E);
+ExprDependence computeDependence(MemberExpr *E);
+ExprDependence computeDependence(ShuffleVectorExpr *E);
+ExprDependence computeDependence(GenericSelectionExpr *E,
+ bool ContainsUnexpandedPack);
+ExprDependence computeDependence(DesignatedInitExpr *E);
+ExprDependence computeDependence(ParenListExpr *E);
+ExprDependence computeDependence(PseudoObjectExpr *E);
+ExprDependence computeDependence(AtomicExpr *E);
+
+ExprDependence computeDependence(OMPArraySectionExpr *E);
+ExprDependence computeDependence(OMPArrayShapingExpr *E);
+ExprDependence computeDependence(OMPIteratorExpr *E);
+
+ExprDependence computeDependence(ObjCArrayLiteral *E);
+ExprDependence computeDependence(ObjCDictionaryLiteral *E);
+ExprDependence computeDependence(ObjCBoxedExpr *E);
+ExprDependence computeDependence(ObjCEncodeExpr *E);
+ExprDependence computeDependence(ObjCIvarRefExpr *E);
+ExprDependence computeDependence(ObjCPropertyRefExpr *E);
+ExprDependence computeDependence(ObjCSubscriptRefExpr *E);
+ExprDependence computeDependence(ObjCIsaExpr *E);
+ExprDependence computeDependence(ObjCIndirectCopyRestoreExpr *E);
+ExprDependence computeDependence(ObjCMessageExpr *E);
+
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/DataCollection.h b/contrib/llvm-project/clang/include/clang/AST/DataCollection.h
index 37f101793ecc..14d1bc188623 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DataCollection.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DataCollection.h
@@ -50,10 +50,9 @@ template <class T> void addDataToConsumer(T &DataConsumer, const QualType &QT) {
}
template <class T, class Type>
-typename std::enable_if<
- std::is_integral<Type>::value || std::is_enum<Type>::value ||
- std::is_convertible<Type, size_t>::value // for llvm::hash_code
- >::type
+std::enable_if_t<std::is_integral<Type>::value || std::is_enum<Type>::value ||
+ std::is_convertible<Type, size_t>::value // for llvm::hash_code
+ >
addDataToConsumer(T &DataConsumer, Type Data) {
DataConsumer.update(StringRef(reinterpret_cast<char *>(&Data), sizeof(Data)));
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/Decl.h b/contrib/llvm-project/clang/include/clang/AST/Decl.h
index 43c6c7b85db4..28faa2c1fc78 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Decl.h
@@ -2030,7 +2030,7 @@ public:
/// declaration to the declaration that is a definition (if there is one).
bool isDefined(const FunctionDecl *&Definition) const;
- virtual bool isDefined() const {
+ bool isDefined() const {
const FunctionDecl* Definition;
return isDefined(Definition);
}
@@ -2125,19 +2125,17 @@ public:
bool isTrivialForCall() const { return FunctionDeclBits.IsTrivialForCall; }
void setTrivialForCall(bool IT) { FunctionDeclBits.IsTrivialForCall = IT; }
- /// Whether this function is defaulted per C++0x. Only valid for
- /// special member functions.
+ /// Whether this function is defaulted. Valid for e.g.
+  /// special member functions and defaulted comparisons (which need not be
+  /// member functions).
bool isDefaulted() const { return FunctionDeclBits.IsDefaulted; }
void setDefaulted(bool D = true) { FunctionDeclBits.IsDefaulted = D; }
- /// Whether this function is explicitly defaulted per C++0x. Only valid
- /// for special member functions.
+ /// Whether this function is explicitly defaulted.
bool isExplicitlyDefaulted() const {
return FunctionDeclBits.IsExplicitlyDefaulted;
}
- /// State that this function is explicitly defaulted per C++0x. Only valid
- /// for special member functions.
+ /// State that this function is explicitly defaulted.
void setExplicitlyDefaulted(bool ED = true) {
FunctionDeclBits.IsExplicitlyDefaulted = ED;
}
@@ -2306,8 +2304,13 @@ public:
/// allocation function. [...]
///
/// If this function is an aligned allocation/deallocation function, return
- /// true through IsAligned.
- bool isReplaceableGlobalAllocationFunction(bool *IsAligned = nullptr) const;
+ /// the parameter number of the requested alignment through AlignmentParam.
+ ///
+ /// If this function is an allocation/deallocation function that takes
+  /// the `std::nothrow_t` tag, return true through IsNothrow.
+ bool isReplaceableGlobalAllocationFunction(
+ Optional<unsigned> *AlignmentParam = nullptr,
+ bool *IsNothrow = nullptr) const;
/// Determine if this function provides an inline implementation of a builtin.
bool isInlineBuiltinDeclaration() const;
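
A usage sketch of the new out-parameters (assumption, not from the patch; FD is a FunctionDecl*):

#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"

void describeAllocFn(const clang::FunctionDecl *FD) {
  llvm::Optional<unsigned> AlignmentParam;
  bool IsNothrow = false;
  if (FD->isReplaceableGlobalAllocationFunction(&AlignmentParam, &IsNothrow)) {
    if (AlignmentParam) // parameter number of the requested alignment
      llvm::errs() << "alignment parameter: " << *AlignmentParam << "\n";
    if (IsNothrow)      // overload taking std::nothrow_t
      llvm::errs() << "nothrow form\n";
  }
}
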
@@ -2436,6 +2439,14 @@ public:
/// parameters have default arguments (in C++).
unsigned getMinRequiredArguments() const;
+ /// Determine whether this function has a single parameter, or multiple
+ /// parameters where all but the first have default arguments.
+ ///
+ /// This notion is used in the definition of copy/move constructors and
+ /// initializer list constructors. Note that, unlike getMinRequiredArguments,
+ /// parameter packs are not treated specially here.
+ bool hasOneParamOrDefaultArgs() const;
+
/// Find the source location information for how the type of this function
/// was written. May be absent (for example if the function was declared via
/// a typedef) and may contain a different type from that of the function
@@ -2607,7 +2618,13 @@ public:
/// Retrieve the function declaration from which this function could
/// be instantiated, if it is an instantiation (rather than a non-template
/// or a specialization, for example).
- FunctionDecl *getTemplateInstantiationPattern() const;
+ ///
+ /// If \p ForDefinition is \c false, explicit specializations will be treated
+ /// as if they were implicit instantiations. This will then find the pattern
+ /// corresponding to non-definition portions of the declaration, such as
+ /// default arguments and the exception specification.
+ FunctionDecl *
+ getTemplateInstantiationPattern(bool ForDefinition = true) const;
/// Retrieve the primary template that this function template
/// specialization either specializes or was instantiated from.
@@ -2915,12 +2932,15 @@ public:
/// Returns the parent of this field declaration, which
/// is the struct in which this field is defined.
+ ///
+ /// Returns null if this is not a normal class/struct field declaration, e.g.
+ /// ObjCAtDefsFieldDecl, ObjCIvarDecl.
const RecordDecl *getParent() const {
- return cast<RecordDecl>(getDeclContext());
+ return dyn_cast<RecordDecl>(getDeclContext());
}
RecordDecl *getParent() {
- return cast<RecordDecl>(getDeclContext());
+ return dyn_cast<RecordDecl>(getDeclContext());
}
SourceRange getSourceRange() const override LLVM_READONLY;
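
Callers now need to tolerate a null parent; a minimal sketch (noteParent is a hypothetical helper):

#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"

void noteParent(const clang::FieldDecl *FD) {
  // The dyn_cast-based getParent() returns null for e.g. Objective-C ivars.
  if (const clang::RecordDecl *RD = FD->getParent())
    llvm::errs() << "field of " << RD->getDeclName() << "\n";
  else
    llvm::errs() << "not a plain struct/class field\n";
}
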
@@ -3534,6 +3554,7 @@ class EnumDecl : public TagDecl {
/// negative enumerators of this enum. (see getNumNegativeBits)
void setNumNegativeBits(unsigned Num) { EnumDeclBits.NumNegativeBits = Num; }
+public:
/// True if this tag declaration is a scoped enumeration. Only
/// possible in C++11 mode.
void setScoped(bool Scoped = true) { EnumDeclBits.IsScoped = Scoped; }
@@ -3550,6 +3571,7 @@ class EnumDecl : public TagDecl {
/// Microsoft-style enumeration with a fixed underlying type.
void setFixed(bool Fixed = true) { EnumDeclBits.IsFixed = Fixed; }
+private:
/// True if a valid hash is stored in ODRHash.
bool hasODRHash() const { return EnumDeclBits.HasODRHash; }
void setHasODRHash(bool Hash = true) { EnumDeclBits.HasODRHash = Hash; }
@@ -3954,6 +3976,11 @@ public:
return cast_or_null<RecordDecl>(TagDecl::getDefinition());
}
+ /// Returns whether this record is a union, or contains (at any nesting level)
+ /// a union member. This is used by CMSE to warn about possible information
+ /// leaks.
+ bool isOrContainsUnion() const;
+
// Iterator access to field members. The field iterator only visits
// the non-static data members of this class, ignoring any static
// data members, functions, constructors, destructors, etc.
@@ -4335,17 +4362,18 @@ class ImportDecl final : public Decl,
friend class ASTReader;
friend TrailingObjects;
- /// The imported module, along with a bit that indicates whether
- /// we have source-location information for each identifier in the module
- /// name.
- ///
- /// When the bit is false, we only have a single source location for the
- /// end of the import declaration.
- llvm::PointerIntPair<Module *, 1, bool> ImportedAndComplete;
+ /// The imported module.
+ Module *ImportedModule = nullptr;
/// The next import in the list of imports local to the translation
/// unit being parsed (not loaded from an AST file).
- ImportDecl *NextLocalImport = nullptr;
+ ///
+ /// Includes a bit that indicates whether we have source-location information
+ /// for each identifier in the module name.
+ ///
+ /// When the bit is false, we only have a single source location for the
+ /// end of the import declaration.
+ llvm::PointerIntPair<ImportDecl *, 1, bool> NextLocalImportAndComplete;
ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs);
@@ -4355,6 +4383,20 @@ class ImportDecl final : public Decl,
ImportDecl(EmptyShell Empty) : Decl(Import, Empty) {}
+ bool isImportComplete() const { return NextLocalImportAndComplete.getInt(); }
+
+ void setImportComplete(bool C) { NextLocalImportAndComplete.setInt(C); }
+
+ /// The next import in the list of imports local to the translation
+ /// unit being parsed (not loaded from an AST file).
+ ImportDecl *getNextLocalImport() const {
+ return NextLocalImportAndComplete.getPointer();
+ }
+
+ void setNextLocalImport(ImportDecl *Import) {
+ NextLocalImportAndComplete.setPointer(Import);
+ }
+
public:
/// Create a new module import declaration.
static ImportDecl *Create(ASTContext &C, DeclContext *DC,
@@ -4372,7 +4414,7 @@ public:
unsigned NumLocations);
/// Retrieve the module that was imported by the import declaration.
- Module *getImportedModule() const { return ImportedAndComplete.getPointer(); }
+ Module *getImportedModule() const { return ImportedModule; }
/// Retrieves the locations of each of the identifiers that make up
/// the complete module name in the import declaration.
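
A sketch of the encoding chosen here (illustration only): llvm::PointerIntPair packs the "have per-identifier source locations" flag into a spare low bit of the next-import pointer, so ImportDecl keeps its size.

#include "clang/AST/Decl.h"
#include "llvm/ADT/PointerIntPair.h"

void pointerIntPairSketch() {
  llvm::PointerIntPair<clang::ImportDecl *, 1, bool> NextAndComplete;
  NextAndComplete.setPointerAndInt(nullptr, /*Complete=*/true);
  bool HasPerIdentifierLocs = NextAndComplete.getInt();    // true
  clang::ImportDecl *Next = NextAndComplete.getPointer();  // nullptr
  (void)HasPerIdentifierLocs;
  (void)Next;
}
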
@@ -4520,6 +4562,13 @@ inline bool IsEnumDeclScoped(EnumDecl *ED) {
return ED->isScoped();
}
+/// OpenMP variants are mangled early based on their OpenMP context selector.
+/// The new name looks likes this:
+/// <name> + OpenMPVariantManglingSeparatorStr + <mangled OpenMP context>
+static constexpr StringRef getOpenMPVariantManglingSeparatorStr() {
+ return "$ompvariant";
+}
+
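
For example (sketch; "<mangled OpenMP context>" is a placeholder, not the real context encoding), a variant of "foo" would be named "foo$ompvariant<mangled OpenMP context>":

#include "clang/AST/Decl.h"

std::string mangleVariantName(llvm::StringRef Base) {
  std::string Name = Base.str();
  Name += clang::getOpenMPVariantManglingSeparatorStr().str();
  Name += "<mangled OpenMP context>"; // placeholder for the real context part
  return Name;
}
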
} // namespace clang
#endif // LLVM_CLANG_AST_DECL_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
index 1197fce41ded..4f33ff104ffd 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
@@ -66,6 +66,7 @@ class SourceManager;
class Stmt;
class StoredDeclsMap;
class TemplateDecl;
+class TemplateParameterList;
class TranslationUnitDecl;
class UsingDirectiveDecl;
@@ -465,6 +466,10 @@ public:
ASTContext &getASTContext() const LLVM_READONLY;
+ /// Helper to get the language options from the ASTContext.
+ /// Defined out of line to avoid depending on ASTContext.h.
+ const LangOptions &getLangOpts() const LLVM_READONLY;
+
void setAccess(AccessSpecifier AS) {
Access = AS;
assert(AccessDeclContextSanity());
@@ -514,7 +519,7 @@ public:
if (!HasAttrs) return;
AttrVec &Vec = getAttrs();
- Vec.erase(std::remove_if(Vec.begin(), Vec.end(), isa<T, Attr*>), Vec.end());
+ llvm::erase_if(Vec, [](Attr *A) { return isa<T>(A); });
if (Vec.empty())
HasAttrs = false;
@@ -626,7 +631,16 @@ protected:
setModuleOwnershipKind(ModuleOwnershipKind::ModulePrivate);
}
- /// Set the owning module ID.
+public:
+ /// Set the FromASTFile flag. This indicates that this declaration
+ /// was deserialized and not parsed from source code and enables
+ /// features such as module ownership information.
+ void setFromASTFile() {
+ FromASTFile = true;
+ }
+
+ /// Set the owning module ID. This may only be called for
+ /// deserialized Decls.
void setOwningModuleID(unsigned ID) {
assert(isFromASTFile() && "Only works on a deserialized declaration");
*((unsigned*)this - 2) = ID;
@@ -767,18 +781,19 @@ public:
/// all declarations in a global module fragment are unowned.
Module *getOwningModuleForLinkage(bool IgnoreLinkage = false) const;
- /// Determine whether this declaration might be hidden from name
- /// lookup. Note that the declaration might be visible even if this returns
- /// \c false, if the owning module is visible within the query context.
- // FIXME: Rename this to make it clearer what it does.
- bool isHidden() const {
- return (int)getModuleOwnershipKind() > (int)ModuleOwnershipKind::Visible;
+ /// Determine whether this declaration is definitely visible to name lookup,
+ /// independent of whether the owning module is visible.
+ /// Note: The declaration may be visible even if this returns \c false if the
+ /// owning module is visible within the query context. This is a low-level
+ /// helper function; most code should be calling Sema::isVisible() instead.
+ bool isUnconditionallyVisible() const {
+ return (int)getModuleOwnershipKind() <= (int)ModuleOwnershipKind::Visible;
}
/// Set that this declaration is globally visible, even if it came from a
/// module that is not visible.
void setVisibleDespiteOwningModule() {
- if (isHidden())
+ if (!isUnconditionallyVisible())
setModuleOwnershipKind(ModuleOwnershipKind::Visible);
}
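
Migration sketch for existing callers (makeVisible is a hypothetical helper): the removed isHidden() corresponds to the negation of the new name.

#include "clang/AST/DeclBase.h"

void makeVisible(clang::Decl *D) {
  // Old spelling: if (D->isHidden()) D->setVisibleDespiteOwningModule();
  if (!D->isUnconditionallyVisible())
    D->setVisibleDespiteOwningModule();
}
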
@@ -848,6 +863,10 @@ public:
// within the scope of a template parameter).
bool isTemplated() const;
+ /// Determine the number of levels of template parameter surrounding this
+ /// declaration.
+ unsigned getTemplateDepth() const;
+
/// isDefinedOutsideFunctionOrMethod - This predicate returns true if this
/// scoped decl is defined outside the current function or method. This is
/// roughly global variables and functions, but also handles enums (which
@@ -856,15 +875,19 @@ public:
return getParentFunctionOrMethod() == nullptr;
}
- /// Returns true if this declaration is lexically inside a function or inside
- /// a variable initializer. It recognizes non-defining declarations as well
- /// as members of local classes:
+ /// Determine whether a substitution into this declaration would occur as
+ /// part of a substitution into a dependent local scope. Such a substitution
+ /// transitively substitutes into all constructs nested within this
+ /// declaration.
+ ///
+ /// This recognizes non-defining declarations as well as members of local
+ /// classes and lambdas:
/// \code
- /// void foo() { void bar(); }
- /// void foo2() { class ABC { void bar(); }; }
- /// inline int x = [](){ return 0; };
+ /// template<typename T> void foo() { void bar(); }
+ /// template<typename T> void foo2() { class ABC { void bar(); }; }
+ /// template<typename T> inline int x = [](){ return 0; }();
/// \endcode
- bool isInLocalScope() const;
+ bool isInLocalScopeForInstantiation() const;
/// If this decl is defined inside a function/method/block it returns
/// the corresponding DeclContext, otherwise it returns null.
@@ -1024,8 +1047,16 @@ public:
/// If this is a declaration that describes some template, this
/// method returns that template declaration.
+ ///
+ /// Note that this returns nullptr for partial specializations, because they
+ /// are not modeled as TemplateDecls. Use getDescribedTemplateParams to handle
+ /// those cases.
TemplateDecl *getDescribedTemplate() const;
+ /// If this is a declaration that describes some template or partial
+ /// specialization, this returns the corresponding template parameter list.
+ const TemplateParameterList *getDescribedTemplateParams() const;
+
/// Returns the function itself, or the templated function if this is a
/// function template.
FunctionDecl *getAsFunction() LLVM_READONLY;
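
A sketch of the intended distinction (assumption; describeTemplate is a hypothetical helper):

#include "clang/AST/DeclTemplate.h"
#include "llvm/Support/raw_ostream.h"

void describeTemplate(const clang::Decl *D) {
  // getDescribedTemplate() is null for partial specializations, but their
  // parameter list is still reachable through getDescribedTemplateParams().
  if (const clang::TemplateParameterList *Params =
          D->getDescribedTemplateParams())
    llvm::errs() << Params->size() << " template parameter(s)\n";
}
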
@@ -1729,30 +1760,30 @@ protected:
ObjCContainerDeclBitfields ObjCContainerDeclBits;
LinkageSpecDeclBitfields LinkageSpecDeclBits;
BlockDeclBitfields BlockDeclBits;
- };
- static_assert(sizeof(DeclContextBitfields) <= 8,
- "DeclContextBitfields is larger than 8 bytes!");
- static_assert(sizeof(TagDeclBitfields) <= 8,
- "TagDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(EnumDeclBitfields) <= 8,
- "EnumDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(RecordDeclBitfields) <= 8,
- "RecordDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(OMPDeclareReductionDeclBitfields) <= 8,
- "OMPDeclareReductionDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(FunctionDeclBitfields) <= 8,
- "FunctionDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(CXXConstructorDeclBitfields) <= 8,
- "CXXConstructorDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(ObjCMethodDeclBitfields) <= 8,
- "ObjCMethodDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(ObjCContainerDeclBitfields) <= 8,
- "ObjCContainerDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(LinkageSpecDeclBitfields) <= 8,
- "LinkageSpecDeclBitfields is larger than 8 bytes!");
- static_assert(sizeof(BlockDeclBitfields) <= 8,
- "BlockDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(DeclContextBitfields) <= 8,
+ "DeclContextBitfields is larger than 8 bytes!");
+ static_assert(sizeof(TagDeclBitfields) <= 8,
+ "TagDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(EnumDeclBitfields) <= 8,
+ "EnumDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(RecordDeclBitfields) <= 8,
+ "RecordDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(OMPDeclareReductionDeclBitfields) <= 8,
+ "OMPDeclareReductionDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(FunctionDeclBitfields) <= 8,
+ "FunctionDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(CXXConstructorDeclBitfields) <= 8,
+ "CXXConstructorDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ObjCMethodDeclBitfields) <= 8,
+ "ObjCMethodDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ObjCContainerDeclBitfields) <= 8,
+ "ObjCContainerDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(LinkageSpecDeclBitfields) <= 8,
+ "LinkageSpecDeclBitfields is larger than 8 bytes!");
+ static_assert(sizeof(BlockDeclBitfields) <= 8,
+ "BlockDeclBitfields is larger than 8 bytes!");
+ };
/// FirstDecl - The first declaration stored within this declaration
/// context.
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
index 2e8e31dbf4c7..2b8d7e879a0a 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
@@ -15,7 +15,6 @@
#ifndef LLVM_CLANG_AST_DECLCXX_H
#define LLVM_CLANG_AST_DECLCXX_H
-#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTUnresolvedSet.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
@@ -40,6 +39,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -53,6 +53,7 @@
namespace clang {
+class ASTContext;
class ClassTemplateDecl;
class ConstructorUsingShadowDecl;
class CXXBasePath;
@@ -712,6 +713,13 @@ public:
}
/// \c true if we know for sure that this class has a single,
+ /// accessible, unambiguous copy assignment operator that is not deleted.
+ bool hasSimpleCopyAssignment() const {
+ return !hasUserDeclaredCopyAssignment() &&
+ !data().DefaultedCopyAssignmentIsDeleted;
+ }
+
+ /// \c true if we know for sure that this class has a single,
/// accessible, unambiguous move assignment operator that is not deleted.
bool hasSimpleMoveAssignment() const {
return !hasUserDeclaredMoveAssignment() && hasMoveAssignment() &&
@@ -871,6 +879,15 @@ public:
return data().UserDeclaredSpecialMembers & SMF_CopyAssignment;
}
+ /// Set that we attempted to declare an implicit copy assignment
+ /// operator, but overload resolution failed so we deleted it.
+ void setImplicitCopyAssignmentIsDeleted() {
+ assert((data().DefaultedCopyAssignmentIsDeleted ||
+ needsOverloadResolutionForCopyAssignment()) &&
+ "copy assignment should not be deleted");
+ data().DefaultedCopyAssignmentIsDeleted = true;
+ }
+
/// Determine whether this class needs an implicit copy
/// assignment operator to be lazily declared.
bool needsImplicitCopyAssignment() const {
@@ -880,7 +897,16 @@ public:
/// Determine whether we need to eagerly declare a defaulted copy
/// assignment operator for this class.
bool needsOverloadResolutionForCopyAssignment() const {
- return data().HasMutableFields;
+ // C++20 [class.copy.assign]p2:
+ // If the class definition declares a move constructor or move assignment
+ // operator, the implicitly declared copy assignment operator is defined
+ // as deleted.
+ // In MSVC mode, sometimes a declared move constructor does not delete an
+ // implicit copy assignment, so defer this choice to Sema.
+ if (data().UserDeclaredSpecialMembers &
+ (SMF_MoveConstructor | SMF_MoveAssignment))
+ return true;
+ return data().NeedOverloadResolutionForCopyAssignment;
}
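
The quoted C++20 rule in a plain C++ example (illustration only, not part of the patch):

struct S {
  S() = default;
  S(S &&) = default; // user-declared move constructor...
};
// ...so S's implicit copy assignment operator is defined as deleted:
//   S a, b;
//   a = b;           // error: use of deleted 'S &S::operator=(const S &)'
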
/// Determine whether an implicit copy assignment operator for this
@@ -998,6 +1024,9 @@ public:
return static_cast<LambdaCaptureDefault>(getLambdaData().CaptureDefault);
}
+ /// Set the captures for this lambda closure type.
+ void setCaptures(ArrayRef<LambdaCapture> Captures);
+
/// For a closure type, retrieve the mapping from captured
/// variables and \c this to the non-static data members that store the
/// values or references of the captures.
@@ -1029,6 +1058,8 @@ public:
: nullptr;
}
+ unsigned capture_size() const { return getLambdaData().NumCaptures; }
+
using conversion_iterator = UnresolvedSetIterator;
conversion_iterator conversion_begin() const {
@@ -1166,7 +1197,7 @@ public:
bool defaultedDefaultConstructorIsConstexpr() const {
return data().DefaultedDefaultConstructorIsConstexpr &&
(!isUnion() || hasInClassInitializer() || !hasVariantMembers() ||
- getASTContext().getLangOpts().CPlusPlus2a);
+ getLangOpts().CPlusPlus20);
}
/// Determine whether this class has a constexpr default constructor.
@@ -1258,7 +1289,7 @@ public:
/// would be constexpr.
bool defaultedDestructorIsConstexpr() const {
return data().DefaultedDestructorIsConstexpr &&
- getASTContext().getLangOpts().CPlusPlus2a;
+ getLangOpts().CPlusPlus20;
}
/// Determine whether this class has a constexpr destructor.
@@ -1355,10 +1386,10 @@ public:
///
/// Only in C++17 and beyond, are lambdas literal types.
bool isLiteral() const {
- ASTContext &Ctx = getASTContext();
- return (Ctx.getLangOpts().CPlusPlus2a ? hasConstexprDestructor()
+ const LangOptions &LangOpts = getLangOpts();
+ return (LangOpts.CPlusPlus20 ? hasConstexprDestructor()
: hasTrivialDestructor()) &&
- (!isLambda() || Ctx.getLangOpts().CPlusPlus17) &&
+ (!isLambda() || LangOpts.CPlusPlus17) &&
!hasNonLiteralTypeFieldsOrBases() &&
(isAggregate() || isLambda() ||
hasConstexprNonCopyMoveConstructor() ||
@@ -1517,14 +1548,8 @@ public:
/// returns false if the class has non-computable base classes.
///
/// \param BaseMatches Callback invoked for each (direct or indirect) base
- /// class of this type, or if \p AllowShortCircuit is true then until a call
- /// returns false.
- ///
- /// \param AllowShortCircuit if false, forces the callback to be called
- /// for every base class, even if a dependent or non-matching base was
- /// found.
- bool forallBases(ForallBasesCallback BaseMatches,
- bool AllowShortCircuit = true) const;
+ /// class of this type until a call returns false.
+ bool forallBases(ForallBasesCallback BaseMatches) const;
/// Function type used by lookupInBases() to determine whether a
/// specific base class subobject matches the lookup criteria.
@@ -1696,6 +1721,10 @@ public:
/// actually abstract.
bool mayBeAbstract() const;
+ /// Determine whether it's impossible for a class to be derived from this
+ /// class. This is best-effort, and may conservatively return false.
+ bool isEffectivelyFinal() const;
+
/// If this is the closure type of a lambda expression, retrieve the
/// number to be used for name mangling in the Itanium C++ ABI.
///
@@ -2037,7 +2066,8 @@ public:
method_iterator end_overridden_methods() const;
unsigned size_overridden_methods() const;
- using overridden_method_range= ASTContext::overridden_method_range;
+ using overridden_method_range = llvm::iterator_range<
+ llvm::TinyPtrVector<const CXXMethodDecl *>::const_iterator>;
overridden_method_range overridden_methods() const;
@@ -2417,17 +2447,6 @@ class CXXConstructorDecl final
: ExplicitSpecKind::ResolvedFalse);
}
- void setExplicitSpecifier(ExplicitSpecifier ES) {
- assert((!ES.getExpr() ||
- CXXConstructorDeclBits.HasTrailingExplicitSpecifier) &&
- "cannot set this explicit specifier. no trail-allocated space for "
- "explicit");
- if (ES.getExpr())
- *getCanonicalDecl()->getTrailingObjects<ExplicitSpecifier>() = ES;
- else
- CXXConstructorDeclBits.IsSimpleExplicit = ES.isExplicit();
- }
-
enum TraillingAllocKind {
TAKInheritsConstructor = 1,
TAKHasTailExplicit = 1 << 1,
@@ -2453,6 +2472,17 @@ public:
InheritedConstructor Inherited = InheritedConstructor(),
Expr *TrailingRequiresClause = nullptr);
+ void setExplicitSpecifier(ExplicitSpecifier ES) {
+ assert((!ES.getExpr() ||
+ CXXConstructorDeclBits.HasTrailingExplicitSpecifier) &&
+ "cannot set this explicit specifier. no trail-allocated space for "
+ "explicit");
+ if (ES.getExpr())
+ *getCanonicalDecl()->getTrailingObjects<ExplicitSpecifier>() = ES;
+ else
+ CXXConstructorDeclBits.IsSimpleExplicit = ES.isExplicit();
+ }
+
ExplicitSpecifier getExplicitSpecifier() {
return getCanonicalDecl()->getExplicitSpecifierInternal();
}
@@ -2724,8 +2754,6 @@ class CXXConversionDecl : public CXXMethodDecl {
ExplicitSpecifier ExplicitSpec;
- void setExplicitSpecifier(ExplicitSpecifier ES) { ExplicitSpec = ES; }
-
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
@@ -2747,6 +2775,7 @@ public:
/// Return true if the declaration is already resolved to be explicit.
bool isExplicit() const { return getExplicitSpecifier().isExplicit(); }
+ void setExplicitSpecifier(ExplicitSpecifier ES) { ExplicitSpec = ES; }
/// Returns the type that this conversion function is converting to.
QualType getConversionType() const {
@@ -3964,6 +3993,81 @@ public:
IdentifierInfo* getSetterId() const { return SetterId; }
};
+/// Parts of a decomposed MSGuidDecl. Factored out to avoid unnecessary
+/// dependencies on DeclCXX.h.
+struct MSGuidDeclParts {
+ /// {01234567-...
+ uint32_t Part1;
+ /// ...-89ab-...
+ uint16_t Part2;
+ /// ...-cdef-...
+ uint16_t Part3;
+ /// ...-0123-456789abcdef}
+ uint8_t Part4And5[8];
+
+ uint64_t getPart4And5AsUint64() const {
+ uint64_t Val;
+ memcpy(&Val, &Part4And5, sizeof(Part4And5));
+ return Val;
+ }
+};
+
+/// A global _GUID constant. These are implicitly created by UuidAttrs.
+///
+///   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) X{};
+///
+/// X is a CXXRecordDecl that contains a UuidAttr that references the (unique)
+/// MSGuidDecl for the specified UUID.
+class MSGuidDecl : public ValueDecl,
+ public Mergeable<MSGuidDecl>,
+ public llvm::FoldingSetNode {
+public:
+ using Parts = MSGuidDeclParts;
+
+private:
+ /// The decomposed form of the UUID.
+ Parts PartVal;
+
+ /// The resolved value of the UUID as an APValue. Computed on demand and
+ /// cached.
+ mutable APValue APVal;
+
+ void anchor() override;
+
+ MSGuidDecl(DeclContext *DC, QualType T, Parts P);
+
+ static MSGuidDecl *Create(const ASTContext &C, QualType T, Parts P);
+ static MSGuidDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ // Only ASTContext::getMSGuidDecl and deserialization create these.
+ friend class ASTContext;
+ friend class ASTReader;
+ friend class ASTDeclReader;
+
+public:
+ /// Print this UUID in a human-readable format.
+ void printName(llvm::raw_ostream &OS) const override;
+
+ /// Get the decomposed parts of this declaration.
+ Parts getParts() const { return PartVal; }
+
+ /// Get the value of this MSGuidDecl as an APValue. This may fail and return
+ /// an absent APValue if the type of the declaration is not of the expected
+ /// shape.
+ APValue &getAsAPValue() const;
+
+ static void Profile(llvm::FoldingSetNodeID &ID, Parts P) {
+ ID.AddInteger(P.Part1);
+ ID.AddInteger(P.Part2);
+ ID.AddInteger(P.Part3);
+ ID.AddInteger(P.getPart4And5AsUint64());
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, PartVal); }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Decl::MSGuid; }
+};
+
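
A sketch of how a textual UUID maps onto the parts and the uniquing key (values shown for illustration; profileGuid is a hypothetical helper):

#include "clang/AST/DeclCXX.h"
#include <cstdint>
#include <cstring>

void profileGuid(llvm::FoldingSetNodeID &ID) {
  // "01234567-89ab-cdef-0123-456789abcdef"
  clang::MSGuidDecl::Parts P;
  P.Part1 = 0x01234567;                         // {01234567-...
  P.Part2 = 0x89ab;                             // ...-89ab-...
  P.Part3 = 0xcdef;                             // ...-cdef-...
  const uint8_t Rest[8] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
  std::memcpy(P.Part4And5, Rest, sizeof(Rest)); // ...-0123-456789abcdef}
  clang::MSGuidDecl::Profile(ID, P);            // the key ASTContext uses to unique the decl
}
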
/// Insertion operator for diagnostics. This allows sending an AccessSpecifier
/// into a diagnostic with <<.
const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclGroup.h b/contrib/llvm-project/clang/include/clang/AST/DeclGroup.h
index 2be9dae9431e..672b7b0a9fe2 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclGroup.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclGroup.h
@@ -147,7 +147,7 @@ namespace llvm {
return clang::DeclGroupRef::getFromOpaquePtr(P);
}
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
};
} // namespace llvm
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
index b98aef6b499d..5613ed8370c0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
@@ -15,6 +15,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclObjCCommon.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/SelectorLocationsKind.h"
@@ -402,7 +403,7 @@ public:
}
/// createImplicitParams - Used to lazily create the self and cmd
- /// implict parameters. This must be called prior to using getSelfDecl()
+ /// implicit parameters. This must be called prior to using getSelfDecl()
/// or getCmdDecl(). The call is ignored if the implicit parameters
/// have already been created.
void createImplicitParams(ASTContext &Context, const ObjCInterfaceDecl *ID);
@@ -742,34 +743,6 @@ class ObjCPropertyDecl : public NamedDecl {
void anchor() override;
public:
- enum PropertyAttributeKind {
- OBJC_PR_noattr = 0x00,
- OBJC_PR_readonly = 0x01,
- OBJC_PR_getter = 0x02,
- OBJC_PR_assign = 0x04,
- OBJC_PR_readwrite = 0x08,
- OBJC_PR_retain = 0x10,
- OBJC_PR_copy = 0x20,
- OBJC_PR_nonatomic = 0x40,
- OBJC_PR_setter = 0x80,
- OBJC_PR_atomic = 0x100,
- OBJC_PR_weak = 0x200,
- OBJC_PR_strong = 0x400,
- OBJC_PR_unsafe_unretained = 0x800,
- /// Indicates that the nullability of the type was spelled with a
- /// property attribute rather than a type qualifier.
- OBJC_PR_nullability = 0x1000,
- OBJC_PR_null_resettable = 0x2000,
- OBJC_PR_class = 0x4000,
- OBJC_PR_direct = 0x8000
- // Adding a property should change NumPropertyAttrsBits
- };
-
- enum {
- /// Number of bits fitting all the property attributes.
- NumPropertyAttrsBits = 16
- };
-
enum SetterKind { Assign, Retain, Copy, Weak };
enum PropertyControl { None, Required, Optional };
@@ -782,8 +755,8 @@ private:
QualType DeclType;
TypeSourceInfo *DeclTypeSourceInfo;
- unsigned PropertyAttributes : NumPropertyAttrsBits;
- unsigned PropertyAttributesAsWritten : NumPropertyAttrsBits;
+ unsigned PropertyAttributes : NumObjCPropertyAttrsBits;
+ unsigned PropertyAttributesAsWritten : NumObjCPropertyAttrsBits;
// \@required/\@optional
unsigned PropertyImplementation : 2;
@@ -810,15 +783,14 @@ private:
ObjCIvarDecl *PropertyIvarDecl = nullptr;
ObjCPropertyDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
- SourceLocation AtLocation, SourceLocation LParenLocation,
- QualType T, TypeSourceInfo *TSI,
- PropertyControl propControl)
- : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation),
- LParenLoc(LParenLocation), DeclType(T), DeclTypeSourceInfo(TSI),
- PropertyAttributes(OBJC_PR_noattr),
- PropertyAttributesAsWritten(OBJC_PR_noattr),
- PropertyImplementation(propControl), GetterName(Selector()),
- SetterName(Selector()) {}
+ SourceLocation AtLocation, SourceLocation LParenLocation,
+ QualType T, TypeSourceInfo *TSI, PropertyControl propControl)
+ : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation),
+ LParenLoc(LParenLocation), DeclType(T), DeclTypeSourceInfo(TSI),
+ PropertyAttributes(ObjCPropertyAttribute::kind_noattr),
+ PropertyAttributesAsWritten(ObjCPropertyAttribute::kind_noattr),
+ PropertyImplementation(propControl), GetterName(Selector()),
+ SetterName(Selector()) {}
public:
static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
@@ -850,11 +822,11 @@ public:
/// type.
QualType getUsageType(QualType objectType) const;
- PropertyAttributeKind getPropertyAttributes() const {
- return PropertyAttributeKind(PropertyAttributes);
+ ObjCPropertyAttribute::Kind getPropertyAttributes() const {
+ return ObjCPropertyAttribute::Kind(PropertyAttributes);
}
- void setPropertyAttributes(PropertyAttributeKind PRVal) {
+ void setPropertyAttributes(ObjCPropertyAttribute::Kind PRVal) {
PropertyAttributes |= PRVal;
}
@@ -862,11 +834,11 @@ public:
PropertyAttributes = PRVal;
}
- PropertyAttributeKind getPropertyAttributesAsWritten() const {
- return PropertyAttributeKind(PropertyAttributesAsWritten);
+ ObjCPropertyAttribute::Kind getPropertyAttributesAsWritten() const {
+ return ObjCPropertyAttribute::Kind(PropertyAttributesAsWritten);
}
- void setPropertyAttributesAsWritten(PropertyAttributeKind PRVal) {
+ void setPropertyAttributesAsWritten(ObjCPropertyAttribute::Kind PRVal) {
PropertyAttributesAsWritten = PRVal;
}
@@ -874,23 +846,28 @@ public:
/// isReadOnly - Return true iff the property has a setter.
bool isReadOnly() const {
- return (PropertyAttributes & OBJC_PR_readonly);
+ return (PropertyAttributes & ObjCPropertyAttribute::kind_readonly);
}
/// isAtomic - Return true if the property is atomic.
bool isAtomic() const {
- return (PropertyAttributes & OBJC_PR_atomic);
+ return (PropertyAttributes & ObjCPropertyAttribute::kind_atomic);
}
/// isRetaining - Return true if the property retains its value.
bool isRetaining() const {
- return (PropertyAttributes &
- (OBJC_PR_retain | OBJC_PR_strong | OBJC_PR_copy));
+ return (PropertyAttributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_copy));
}
bool isInstanceProperty() const { return !isClassProperty(); }
- bool isClassProperty() const { return PropertyAttributes & OBJC_PR_class; }
- bool isDirectProperty() const { return PropertyAttributes & OBJC_PR_direct; }
+ bool isClassProperty() const {
+ return PropertyAttributes & ObjCPropertyAttribute::kind_class;
+ }
+ bool isDirectProperty() const {
+ return PropertyAttributes & ObjCPropertyAttribute::kind_direct;
+ }
ObjCPropertyQueryKind getQueryKind() const {
return isClassProperty() ? ObjCPropertyQueryKind::OBJC_PR_query_class :
@@ -906,13 +883,13 @@ public:
/// the property setter. This is only valid if the property has been
/// defined to have a setter.
SetterKind getSetterKind() const {
- if (PropertyAttributes & OBJC_PR_strong)
+ if (PropertyAttributes & ObjCPropertyAttribute::kind_strong)
return getType()->isBlockPointerType() ? Copy : Retain;
- if (PropertyAttributes & OBJC_PR_retain)
+ if (PropertyAttributes & ObjCPropertyAttribute::kind_retain)
return Retain;
- if (PropertyAttributes & OBJC_PR_copy)
+ if (PropertyAttributes & ObjCPropertyAttribute::kind_copy)
return Copy;
- if (PropertyAttributes & OBJC_PR_weak)
+ if (PropertyAttributes & ObjCPropertyAttribute::kind_weak)
return Weak;
return Assign;
}
@@ -2692,9 +2669,7 @@ public:
/// Get the name of the class associated with this interface.
//
// FIXME: Move to StringRef API.
- std::string getNameAsString() const {
- return getName();
- }
+ std::string getNameAsString() const { return std::string(getName()); }
/// Produce a name to be used for class's metadata. It comes either via
/// class's objc_runtime_name attribute or class name.
@@ -2908,11 +2883,11 @@ ObjCInterfaceDecl::filtered_category_iterator<Filter>::operator++() {
}
inline bool ObjCInterfaceDecl::isVisibleCategory(ObjCCategoryDecl *Cat) {
- return !Cat->isHidden();
+ return Cat->isUnconditionallyVisible();
}
inline bool ObjCInterfaceDecl::isVisibleExtension(ObjCCategoryDecl *Cat) {
- return Cat->IsClassExtension() && !Cat->isHidden();
+ return Cat->IsClassExtension() && Cat->isUnconditionallyVisible();
}
inline bool ObjCInterfaceDecl::isKnownExtension(ObjCCategoryDecl *Cat) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
new file mode 100644
index 000000000000..5f03bce6e9a8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
@@ -0,0 +1,55 @@
+//===- DeclObjCCommon.h - Classes for representing declarations -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common ObjC enums and classes used in AST and
+// Sema.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLOBJC_COMMON_H
+#define LLVM_CLANG_AST_DECLOBJC_COMMON_H
+
+namespace clang {
+
+/// ObjCPropertyAttribute::Kind - list of property attributes.
+/// Keep this list in sync with LLVM's Dwarf.h ApplePropertyAttributes.
+namespace ObjCPropertyAttribute {
+enum Kind {
+ kind_noattr = 0x00,
+ kind_readonly = 0x01,
+ kind_getter = 0x02,
+ kind_assign = 0x04,
+ kind_readwrite = 0x08,
+ kind_retain = 0x10,
+ kind_copy = 0x20,
+ kind_nonatomic = 0x40,
+ kind_setter = 0x80,
+ kind_atomic = 0x100,
+ kind_weak = 0x200,
+ kind_strong = 0x400,
+ kind_unsafe_unretained = 0x800,
+ /// Indicates that the nullability of the type was spelled with a
+ /// property attribute rather than a type qualifier.
+ kind_nullability = 0x1000,
+ kind_null_resettable = 0x2000,
+ kind_class = 0x4000,
+ kind_direct = 0x8000,
+ // Adding a property should change NumObjCPropertyAttrsBits
+ // Also, don't forget to update the Clang C API at CXObjCPropertyAttrKind and
+ // clang_Cursor_getObjCPropertyAttributes.
+};
+} // namespace ObjCPropertyAttribute
+
+enum {
+ /// Number of bits fitting all the property attributes.
+ NumObjCPropertyAttrsBits = 16
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_DECLOBJC_COMMON_H
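Editor's note: the attributes above are plain bit flags intended to be combined and tested with bitwise operators, while NumObjCPropertyAttrsBits only bounds the bit-field width used to store them. A minimal sketch of that usage, assuming the new header is included (the variable names are illustrative, not part of the header):

    unsigned Attrs = ObjCPropertyAttribute::kind_readonly |
                     ObjCPropertyAttribute::kind_nonatomic;
    bool IsReadOnly = Attrs & ObjCPropertyAttribute::kind_readonly; // true
    bool IsCopy = Attrs & ObjCPropertyAttribute::kind_copy;         // false
    // Every attribute must fit in the declared bit width.
    static_assert(ObjCPropertyAttribute::kind_direct <
                      (1u << NumObjCPropertyAttrsBits),
                  "attribute does not fit in NumObjCPropertyAttrsBits");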
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
index 437feaba28fb..154ecb977692 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
@@ -129,7 +129,7 @@ private:
/// the declare reduction construct is declared inside compound statement.
LazyDeclPtr PrevDeclInScope;
- virtual void anchor();
+ void anchor() override;
OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
@@ -228,7 +228,7 @@ class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext {
LazyDeclPtr PrevDeclInScope;
- virtual void anchor();
+ void anchor() override;
OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
index 7a9f623d8152..e9c4879b41e8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
@@ -1891,6 +1891,10 @@ public:
return *TemplateArgs;
}
+ void setTemplateArgs(TemplateArgumentList *Args) {
+ TemplateArgs = Args;
+ }
+
/// Determine the kind of specialization that this
/// declaration represents.
TemplateSpecializationKind getSpecializationKind() const {
@@ -1923,6 +1927,10 @@ public:
getTemplateSpecializationKind());
}
+ void setSpecializedTemplate(ClassTemplateDecl *Specialized) {
+ SpecializedTemplate = Specialized;
+ }
+
void setSpecializationKind(TemplateSpecializationKind TSK) {
SpecializationKind = TSK;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h b/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h
new file mode 100644
index 000000000000..14a7ffaecb2b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h
@@ -0,0 +1,284 @@
+//===--- DependenceFlags.h ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_DEPENDENCEFLAGS_H
+#define LLVM_CLANG_AST_DEPENDENCEFLAGS_H
+
+#include "clang/Basic/BitmaskEnum.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include <cstdint>
+
+namespace clang {
+struct ExprDependenceScope {
+ enum ExprDependence : uint8_t {
+ UnexpandedPack = 1,
+ // This expr depends in any way on
+ // - a template parameter; this implies that the resolution of this expr may
+ // cause instantiation to fail
+ // - or an error (often in a non-template context)
+ //
+ // Note that the C++ standard doesn't define the term instantiation-dependent;
+ // we follow the formal definition coming from the Itanium C++ ABI and
+ // extend it to errors.
+ Instantiation = 2,
+ // The type of this expr depends on a template parameter, or an error.
+ Type = 4,
+ // The value of this expr depends on a template parameter, or an error.
+ Value = 8,
+
+ // clang extension: this expr contains or references an error, and is
+ // considered dependent on how that error is resolved.
+ Error = 16,
+
+ None = 0,
+ All = 31,
+
+ TypeValue = Type | Value,
+ TypeInstantiation = Type | Instantiation,
+ ValueInstantiation = Value | Instantiation,
+ TypeValueInstantiation = Type | Value | Instantiation,
+
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/Error)
+ };
+};
+using ExprDependence = ExprDependenceScope::ExprDependence;
+
+struct TypeDependenceScope {
+ enum TypeDependence : uint8_t {
+ /// Whether this type contains an unexpanded parameter pack
+ /// (for C++11 variadic templates)
+ UnexpandedPack = 1,
+ /// Whether this type somehow involves
+ /// - a template parameter, even if the resolution of the type does not
+ /// depend on a template parameter.
+ /// - or an error.
+ Instantiation = 2,
+ /// Whether this type
+ /// - is a dependent type (C++ [temp.dep.type])
+ /// - or it somehow involves an error, e.g. decltype(recovery-expr)
+ Dependent = 4,
+ /// Whether this type is a variably-modified type (C99 6.7.5).
+ VariablyModified = 8,
+
+ /// Whether this type references an error, e.g. decltype(err-expression)
+ /// yields an error type.
+ Error = 16,
+
+ None = 0,
+ All = 31,
+
+ DependentInstantiation = Dependent | Instantiation,
+
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/Error)
+ };
+};
+using TypeDependence = TypeDependenceScope::TypeDependence;
+
+#define LLVM_COMMON_DEPENDENCE(NAME) \
+ struct NAME##Scope { \
+ enum NAME : uint8_t { \
+ UnexpandedPack = 1, \
+ Instantiation = 2, \
+ Dependent = 4, \
+ Error = 8, \
+ \
+ None = 0, \
+ DependentInstantiation = Dependent | Instantiation, \
+ All = 15, \
+ \
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/Error) \
+ }; \
+ }; \
+ using NAME = NAME##Scope::NAME;
+
+LLVM_COMMON_DEPENDENCE(NestedNameSpecifierDependence)
+LLVM_COMMON_DEPENDENCE(TemplateNameDependence)
+LLVM_COMMON_DEPENDENCE(TemplateArgumentDependence)
+#undef LLVM_COMMON_DEPENDENCE
+
+// A combined space of all dependence concepts for all node types.
+// Used when aggregating dependence of nodes of different types.
+class Dependence {
+public:
+ enum Bits : uint8_t {
+ None = 0,
+
+ // Contains a template parameter pack that wasn't expanded.
+ UnexpandedPack = 1,
+ // Depends on a template parameter or an error in some way.
+ // Validity depends on how the template is instantiated or the error is
+ // resolved.
+ Instantiation = 2,
+ // Expression type depends on template context, or an error.
+ // Value and Instantiation should also be set.
+ Type = 4,
+ // Expression value depends on template context, or an error.
+ // Instantiation should also be set.
+ Value = 8,
+ // Depends on template context, or an error.
+ // The type/value distinction is only meaningful for expressions.
+ Dependent = Type | Value,
+ // Includes an error, and depends on how it is resolved.
+ Error = 16,
+ // Type depends on a runtime value (variable-length array).
+ VariablyModified = 32,
+
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/VariablyModified)
+ };
+
+ Dependence() : V(None) {}
+
+ Dependence(TypeDependence D)
+ : V(translate(D, TypeDependence::UnexpandedPack, UnexpandedPack) |
+ translate(D, TypeDependence::Instantiation, Instantiation) |
+ translate(D, TypeDependence::Dependent, Dependent) |
+ translate(D, TypeDependence::Error, Error) |
+ translate(D, TypeDependence::VariablyModified, VariablyModified)) {}
+
+ Dependence(ExprDependence D)
+ : V(translate(D, ExprDependence::UnexpandedPack, UnexpandedPack) |
+ translate(D, ExprDependence::Instantiation, Instantiation) |
+ translate(D, ExprDependence::Type, Type) |
+ translate(D, ExprDependence::Value, Value) |
+ translate(D, ExprDependence::Error, Error)) {}
+
+ Dependence(NestedNameSpecifierDependence D)
+ : V(translate(D, NNSDependence::UnexpandedPack, UnexpandedPack) |
+ translate(D, NNSDependence::Instantiation, Instantiation) |
+ translate(D, NNSDependence::Dependent, Dependent) |
+ translate(D, NNSDependence::Error, Error)) {}
+
+ Dependence(TemplateArgumentDependence D)
+ : V(translate(D, TADependence::UnexpandedPack, UnexpandedPack) |
+ translate(D, TADependence::Instantiation, Instantiation) |
+ translate(D, TADependence::Dependent, Dependent) |
+ translate(D, TADependence::Error, Error)) {}
+
+ Dependence(TemplateNameDependence D)
+ : V(translate(D, TNDependence::UnexpandedPack, UnexpandedPack) |
+ translate(D, TNDependence::Instantiation, Instantiation) |
+ translate(D, TNDependence::Dependent, Dependent) |
+ translate(D, TNDependence::Error, Error)) {}
+
+ TypeDependence type() const {
+ return translate(V, UnexpandedPack, TypeDependence::UnexpandedPack) |
+ translate(V, Instantiation, TypeDependence::Instantiation) |
+ translate(V, Dependent, TypeDependence::Dependent) |
+ translate(V, Error, TypeDependence::Error) |
+ translate(V, VariablyModified, TypeDependence::VariablyModified);
+ }
+
+ ExprDependence expr() const {
+ return translate(V, UnexpandedPack, ExprDependence::UnexpandedPack) |
+ translate(V, Instantiation, ExprDependence::Instantiation) |
+ translate(V, Type, ExprDependence::Type) |
+ translate(V, Value, ExprDependence::Value) |
+ translate(V, Error, ExprDependence::Error);
+ }
+
+ NestedNameSpecifierDependence nestedNameSpecifier() const {
+ return translate(V, UnexpandedPack, NNSDependence::UnexpandedPack) |
+ translate(V, Instantiation, NNSDependence::Instantiation) |
+ translate(V, Dependent, NNSDependence::Dependent) |
+ translate(V, Error, NNSDependence::Error);
+ }
+
+ TemplateArgumentDependence templateArgument() const {
+ return translate(V, UnexpandedPack, TADependence::UnexpandedPack) |
+ translate(V, Instantiation, TADependence::Instantiation) |
+ translate(V, Dependent, TADependence::Dependent) |
+ translate(V, Error, TADependence::Error);
+ }
+
+ TemplateNameDependence templateName() const {
+ return translate(V, UnexpandedPack, TNDependence::UnexpandedPack) |
+ translate(V, Instantiation, TNDependence::Instantiation) |
+ translate(V, Dependent, TNDependence::Dependent) |
+ translate(V, Error, TNDependence::Error);
+ }
+
+private:
+ Bits V;
+
+ template <typename T, typename U>
+ static U translate(T Bits, T FromBit, U ToBit) {
+ return (Bits & FromBit) ? ToBit : static_cast<U>(0);
+ }
+
+ // Abbreviations to make conversions more readable.
+ using NNSDependence = NestedNameSpecifierDependence;
+ using TADependence = TemplateArgumentDependence;
+ using TNDependence = TemplateNameDependence;
+};
+
+/// Computes the dependence of a reference whose name has template arguments
+/// with dependence \p TA.
+inline ExprDependence toExprDependence(TemplateArgumentDependence TA) {
+ return Dependence(TA).expr();
+}
+inline ExprDependence toExprDependence(TypeDependence D) {
+ return Dependence(D).expr();
+}
+// Note: it's often necessary to strip `Dependent` from qualifiers.
+// If V<T>:: refers to the current instantiation, NNS is considered dependent
+// but the containing V<T>::foo likely isn't.
+inline ExprDependence toExprDependence(NestedNameSpecifierDependence D) {
+ return Dependence(D).expr();
+}
+inline ExprDependence turnTypeToValueDependence(ExprDependence D) {
+ // Type-dependent expressions are always value-dependent, so we simply drop
+ // the type dependence.
+ return D & ~ExprDependence::Type;
+}
+inline ExprDependence turnValueToTypeDependence(ExprDependence D) {
+ // Type-dependent expressions are always value-dependent.
+ if (D & ExprDependence::Value)
+ D |= ExprDependence::Type;
+ return D;
+}
+
+// Returned type-dependence will never have VariablyModified set.
+inline TypeDependence toTypeDependence(ExprDependence D) {
+ return Dependence(D).type();
+}
+inline TypeDependence toTypeDependence(NestedNameSpecifierDependence D) {
+ return Dependence(D).type();
+}
+inline TypeDependence toTypeDependence(TemplateNameDependence D) {
+ return Dependence(D).type();
+}
+inline TypeDependence toTypeDependence(TemplateArgumentDependence D) {
+ return Dependence(D).type();
+}
+
+inline NestedNameSpecifierDependence
+toNestedNameSpecifierDependendence(TypeDependence D) {
+ return Dependence(D).nestedNameSpecifier();
+}
+
+inline TemplateArgumentDependence
+toTemplateArgumentDependence(TypeDependence D) {
+ return Dependence(D).templateArgument();
+}
+inline TemplateArgumentDependence
+toTemplateArgumentDependence(TemplateNameDependence D) {
+ return Dependence(D).templateArgument();
+}
+inline TemplateArgumentDependence
+toTemplateArgumentDependence(ExprDependence D) {
+ return Dependence(D).templateArgument();
+}
+
+inline TemplateNameDependence
+toTemplateNameDependence(NestedNameSpecifierDependence D) {
+ return Dependence(D).templateName();
+}
+
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+} // namespace clang
+#endif
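Editor's note: the dependence enums above are LLVM bitmask enums, so individual bits combine with | and are queried with &, and the toXxxDependence helpers translate bits between node kinds through the common Dependence space. A minimal sketch of the intended usage, assuming this header is included (names below are illustrative):

    // A type that is dependent and instantiation-dependent ...
    TypeDependence TD = TypeDependence::DependentInstantiation;
    // ... makes an expression of that type both type- and value-dependent.
    ExprDependence ED = toExprDependence(TD);
    bool TypeDep = static_cast<bool>(ED & ExprDependence::Type);   // true
    bool ValueDep = static_cast<bool>(ED & ExprDependence::Value); // true
    // Sizeof-like contexts keep value dependence but drop type dependence.
    ExprDependence Sized = turnTypeToValueDependence(ED);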
diff --git a/contrib/llvm-project/clang/include/clang/AST/Expr.h b/contrib/llvm-project/clang/include/clang/AST/Expr.h
index 7ff53ef7c653..c13b97119285 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Expr.h
@@ -15,8 +15,10 @@
#include "clang/AST/APValue.h"
#include "clang/AST/ASTVector.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclAccessPair.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
@@ -28,10 +30,10 @@
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/iterator.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
@@ -116,23 +118,26 @@ public:
Expr &operator=(Expr&&) = delete;
protected:
- Expr(StmtClass SC, QualType T, ExprValueKind VK, ExprObjectKind OK,
- bool TD, bool VD, bool ID, bool ContainsUnexpandedParameterPack)
- : ValueStmt(SC)
- {
- ExprBits.TypeDependent = TD;
- ExprBits.ValueDependent = VD;
- ExprBits.InstantiationDependent = ID;
+ Expr(StmtClass SC, QualType T, ExprValueKind VK, ExprObjectKind OK)
+ : ValueStmt(SC) {
+ ExprBits.Dependent = 0;
ExprBits.ValueKind = VK;
ExprBits.ObjectKind = OK;
assert(ExprBits.ObjectKind == OK && "truncated kind");
- ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
setType(T);
}
/// Construct an empty expression.
explicit Expr(StmtClass SC, EmptyShell) : ValueStmt(SC) { }
+ /// Each concrete expr subclass is expected to compute its dependence and call
+ /// this in the constructor.
+ void setDependence(ExprDependence Deps) {
+ ExprBits.Dependent = static_cast<unsigned>(Deps);
+ }
+ friend class ASTImporter; // Sets dependence directly.
+ friend class ASTStmtReader; // Sets dependence directly.
+
public:
QualType getType() const { return TR; }
void setType(QualType t) {
@@ -148,24 +153,29 @@ public:
TR = t;
}
- /// isValueDependent - Determines whether this expression is
- /// value-dependent (C++ [temp.dep.constexpr]). For example, the
- /// array bound of "Chars" in the following example is
+ ExprDependence getDependence() const {
+ return static_cast<ExprDependence>(ExprBits.Dependent);
+ }
+
+ /// Determines whether the value of this expression depends on
+ /// - a template parameter (C++ [temp.dep.constexpr])
+ /// - or an error, whose resolution is unknown
+ ///
+ /// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// @code
/// template<int Size, char (&Chars)[Size]> struct meta_string;
/// @endcode
- bool isValueDependent() const { return ExprBits.ValueDependent; }
-
- /// Set whether this expression is value-dependent or not.
- void setValueDependent(bool VD) {
- ExprBits.ValueDependent = VD;
+ bool isValueDependent() const {
+ return static_cast<bool>(getDependence() & ExprDependence::Value);
}
- /// isTypeDependent - Determines whether this expression is
- /// type-dependent (C++ [temp.dep.expr]), which means that its type
- /// could change from one template instantiation to the next. For
- /// example, the expressions "x" and "x + y" are type-dependent in
+ /// Determines whether the type of this expression depends on
+ /// - a template parameter (C++ [temp.dep.expr], which means that its type
+ /// could change from one template instantiation to the next)
+ /// - or an error
+ ///
+ /// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// @code
/// template<typename T>
@@ -173,16 +183,15 @@ public:
/// x + y;
/// }
/// @endcode
- bool isTypeDependent() const { return ExprBits.TypeDependent; }
-
- /// Set whether this expression is type-dependent or not.
- void setTypeDependent(bool TD) {
- ExprBits.TypeDependent = TD;
+ bool isTypeDependent() const {
+ return static_cast<bool>(getDependence() & ExprDependence::Type);
}
/// Whether this expression is instantiation-dependent, meaning that
- /// it depends in some way on a template parameter, even if neither its type
- /// nor (constant) value can change due to the template instantiation.
+ /// it depends in some way on
+ /// - a template parameter (even if neither its type nor (constant) value
+ /// can change due to the template instantiation)
+ /// - or an error
///
/// In the following example, the expression \c sizeof(sizeof(T() + T())) is
/// instantiation-dependent (since it involves a template parameter \c T), but
@@ -197,13 +206,14 @@ public:
/// }
/// \endcode
///
+ /// \code
+ /// void func(int) {
+ /// func(); // the expression is instantiation-dependent, because it depends
+ /// // on an error.
+ /// }
+ /// \endcode
bool isInstantiationDependent() const {
- return ExprBits.InstantiationDependent;
- }
-
- /// Set whether this expression is instantiation-dependent or not.
- void setInstantiationDependent(bool ID) {
- ExprBits.InstantiationDependent = ID;
+ return static_cast<bool>(getDependence() & ExprDependence::Instantiation);
}
/// Whether this expression contains an unexpanded parameter
@@ -221,19 +231,24 @@ public:
/// The expressions \c args and \c static_cast<Types&&>(args) both
/// contain parameter packs.
bool containsUnexpandedParameterPack() const {
- return ExprBits.ContainsUnexpandedParameterPack;
+ return static_cast<bool>(getDependence() & ExprDependence::UnexpandedPack);
}
- /// Set the bit that describes whether this expression
- /// contains an unexpanded parameter pack.
- void setContainsUnexpandedParameterPack(bool PP = true) {
- ExprBits.ContainsUnexpandedParameterPack = PP;
+ /// Whether this expression contains subexpressions which had errors, e.g. a
+ /// TypoExpr.
+ bool containsErrors() const {
+ return static_cast<bool>(getDependence() & ExprDependence::Error);
}
/// getExprLoc - Return the preferred location for the arrow when diagnosing
/// a problem with a generic expression.
SourceLocation getExprLoc() const LLVM_READONLY;
+ /// Determine whether an lvalue-to-rvalue conversion should implicitly be
+ /// applied to this expression if it appears as a discarded-value expression
+ /// in C++11 onwards. This applies to certain forms of volatile glvalues.
+ bool isReadIfDiscardedInCPlusPlus11() const;
+
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in expr, location,
/// and ranges with expr to warn on and source locations/ranges appropriate
@@ -473,6 +488,11 @@ public:
/// Returns whether this expression refers to a vector element.
bool refersToVectorElement() const;
+ /// Returns whether this expression refers to a matrix element.
+ bool refersToMatrixElement() const {
+ return getObjectKind() == OK_MatrixComponent;
+ }
+
/// Returns whether this expression refers to a global register
/// variable.
bool refersToGlobalRegisterVar() const;
@@ -693,7 +713,8 @@ public:
/// Evaluate an expression that is required to be a constant expression.
bool EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
- const ASTContext &Ctx) const;
+ const ASTContext &Ctx,
+ bool InPlace = false) const;
/// If the current Expr is a pointer, this will try to statically
/// determine the number of bytes available where the pointer is pointing.
@@ -952,11 +973,11 @@ protected:
Stmt *SubExpr;
FullExpr(StmtClass SC, Expr *subexpr)
- : Expr(SC, subexpr->getType(),
- subexpr->getValueKind(), subexpr->getObjectKind(),
- subexpr->isTypeDependent(), subexpr->isValueDependent(),
- subexpr->isInstantiationDependent(),
- subexpr->containsUnexpandedParameterPack()), SubExpr(subexpr) {}
+ : Expr(SC, subexpr->getType(), subexpr->getValueKind(),
+ subexpr->getObjectKind()),
+ SubExpr(subexpr) {
+ setDependence(computeDependence(this));
+ }
FullExpr(StmtClass SC, EmptyShell Empty)
: Expr(SC, Empty) {}
public:
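Editor's note: with the four boolean flags folded into a single ExprDependence bit-field, each concrete subclass now computes its dependence once its operands are wired up, instead of forwarding booleans to the Expr constructor. A hedged sketch of the pattern a hypothetical subclass would follow, mirroring FullExpr above (MyExpr and its computeDependence overload are assumptions for illustration only):

    MyExpr::MyExpr(QualType T, Expr *Sub)
        : Expr(MyExprClass, T, VK_RValue, OK_Ordinary), Sub(Sub) {
      // Dependence is derived from the operands (and the type) in one place,
      // rather than being threaded through constructor parameters.
      setDependence(computeDependence(this));
    }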
@@ -979,11 +1000,14 @@ class ConstantExpr final
: public FullExpr,
private llvm::TrailingObjects<ConstantExpr, APValue, uint64_t> {
static_assert(std::is_same<uint64_t, llvm::APInt::WordType>::value,
- "this class assumes llvm::APInt::WordType is uint64_t for "
- "trail-allocated storage");
+ "ConstantExpr assumes that llvm::APInt::WordType is uint64_t "
+ "for tail-allocated storage");
+ friend TrailingObjects;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
public:
- /// Describes the kind of result that can be trail-allocated.
+ /// Describes the kind of result that can be tail-allocated.
enum ResultStorageKind { RSK_None, RSK_Int64, RSK_APValue };
private:
@@ -994,7 +1018,6 @@ private:
return ConstantExprBits.ResultKind == ConstantExpr::RSK_Int64;
}
- void DefaultInit(ResultStorageKind StorageKind);
uint64_t &Int64Result() {
assert(ConstantExprBits.ResultKind == ConstantExpr::RSK_Int64 &&
"invalid accessor");
@@ -1008,24 +1031,22 @@ private:
"invalid accessor");
return *getTrailingObjects<APValue>();
}
- const APValue &APValueResult() const {
+ APValue &APValueResult() const {
return const_cast<ConstantExpr *>(this)->APValueResult();
}
- ConstantExpr(Expr *subexpr, ResultStorageKind StorageKind);
- ConstantExpr(ResultStorageKind StorageKind, EmptyShell Empty);
+ ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind,
+ bool IsImmediateInvocation);
+ ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind);
public:
- friend TrailingObjects;
- friend class ASTStmtReader;
- friend class ASTStmtWriter;
static ConstantExpr *Create(const ASTContext &Context, Expr *E,
const APValue &Result);
static ConstantExpr *Create(const ASTContext &Context, Expr *E,
- ResultStorageKind Storage = RSK_None);
+ ResultStorageKind Storage = RSK_None,
+ bool IsImmediateInvocation = false);
static ConstantExpr *CreateEmpty(const ASTContext &Context,
- ResultStorageKind StorageKind,
- EmptyShell Empty);
+ ResultStorageKind StorageKind);
static ResultStorageKind getStorageKind(const APValue &Value);
static ResultStorageKind getStorageKind(const Type *T,
@@ -1053,8 +1074,14 @@ public:
ResultStorageKind getResultStorageKind() const {
return static_cast<ResultStorageKind>(ConstantExprBits.ResultKind);
}
+ bool isImmediateInvocation() const {
+ return ConstantExprBits.IsImmediateInvocation;
+ }
+ bool hasAPValueResult() const {
+ return ConstantExprBits.APValueKind != APValue::None;
+ }
APValue getAPValueResult() const;
- const APValue &getResultAsAPValue() const { return APValueResult(); }
+ APValue &getResultAsAPValue() const { return APValueResult(); }
llvm::APSInt getResultAsAPSInt() const;
// Iterators
child_range children() { return child_range(&SubExpr, &SubExpr+1); }
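Editor's note: ConstantExpr tail-allocates its evaluated result, choosing between no storage, a single uint64_t, or a full APValue via ResultStorageKind. A minimal usage sketch under the assumption that the expression has already been evaluated (Ctx, E and Res are placeholders):

    // Wrap an evaluated expression together with its result.
    ConstantExpr *CE = ConstantExpr::Create(Ctx, E, Res); // Res is an APValue
    if (CE->hasAPValueResult()) {
      // Reconstructs the stored result regardless of the storage kind used.
      APValue V = CE->getAPValueResult();
    }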
@@ -1078,19 +1105,11 @@ class OpaqueValueExpr : public Expr {
public:
OpaqueValueExpr(SourceLocation Loc, QualType T, ExprValueKind VK,
- ExprObjectKind OK = OK_Ordinary,
- Expr *SourceExpr = nullptr)
- : Expr(OpaqueValueExprClass, T, VK, OK,
- T->isDependentType() ||
- (SourceExpr && SourceExpr->isTypeDependent()),
- T->isDependentType() ||
- (SourceExpr && SourceExpr->isValueDependent()),
- T->isInstantiationDependentType() ||
- (SourceExpr && SourceExpr->isInstantiationDependent()),
- false),
- SourceExpr(SourceExpr) {
+ ExprObjectKind OK = OK_Ordinary, Expr *SourceExpr = nullptr)
+ : Expr(OpaqueValueExprClass, T, VK, OK), SourceExpr(SourceExpr) {
setIsUnique(false);
OpaqueValueExprBits.Loc = Loc;
+ setDependence(computeDependence(this));
}
/// Given an expression which invokes a copy constructor --- i.e. a
@@ -1210,10 +1229,6 @@ class DeclRefExpr final
/// Construct an empty declaration reference expression.
explicit DeclRefExpr(EmptyShell Empty) : Expr(DeclRefExprClass, Empty) {}
- /// Computes the type- and value-dependence flags for this
- /// declaration reference expression.
- void computeDependence(const ASTContext &Ctx);
-
public:
DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture, QualType T,
@@ -1490,7 +1505,7 @@ class FixedPointLiteral : public Expr, public APIntStorage {
SourceLocation Loc;
unsigned Scale;
- /// \brief Construct an empty integer literal.
+ /// \brief Construct an empty fixed-point literal.
explicit FixedPointLiteral(EmptyShell Empty)
: Expr(FixedPointLiteralClass, Empty) {}
@@ -1504,6 +1519,9 @@ class FixedPointLiteral : public Expr, public APIntStorage {
QualType type, SourceLocation l,
unsigned Scale);
+ /// Returns an empty fixed-point literal.
+ static FixedPointLiteral *Create(const ASTContext &C, EmptyShell Empty);
+
SourceLocation getBeginLoc() const LLVM_READONLY { return Loc; }
SourceLocation getEndLoc() const LLVM_READONLY { return Loc; }
@@ -1512,6 +1530,9 @@ class FixedPointLiteral : public Expr, public APIntStorage {
void setLocation(SourceLocation Location) { Loc = Location; }
+ unsigned getScale() const { return Scale; }
+ void setScale(unsigned S) { Scale = S; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == FixedPointLiteralClass;
}
@@ -1544,10 +1565,10 @@ public:
// type should be IntTy
CharacterLiteral(unsigned value, CharacterKind kind, QualType type,
SourceLocation l)
- : Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Value(value), Loc(l) {
+ : Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary), Value(value),
+ Loc(l) {
CharacterLiteralBits.Kind = kind;
+ setDependence(ExprDependence::None);
}
/// Construct an empty character literal.
@@ -1663,9 +1684,9 @@ class ImaginaryLiteral : public Expr {
Stmt *Val;
public:
ImaginaryLiteral(Expr *val, QualType Ty)
- : Expr(ImaginaryLiteralClass, Ty, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Val(val) {}
+ : Expr(ImaginaryLiteralClass, Ty, VK_RValue, OK_Ordinary), Val(val) {
+ setDependence(ExprDependence::None);
+ }
/// Build an empty imaginary literal.
explicit ImaginaryLiteral(EmptyShell Empty)
@@ -1902,13 +1923,17 @@ public:
/// [C99 6.4.2.2] - A predefined identifier such as __func__.
class PredefinedExpr final
: public Expr,
- private llvm::TrailingObjects<PredefinedExpr, Stmt *> {
+ private llvm::TrailingObjects<PredefinedExpr, Stmt *, Expr *,
+ TypeSourceInfo *> {
friend class ASTStmtReader;
friend TrailingObjects;
// PredefinedExpr is optionally followed by a single trailing
// "Stmt *" for the predefined identifier. It is present if and only if
// hasFunctionName() is true and is always a "StringLiteral *".
+ // It can also be followed by an Expr * in the case of a
+ // __builtin_unique_stable_name with an expression, or by a TypeSourceInfo * in
+ // the case of a __builtin_unique_stable_name with a type.
public:
enum IdentKind {
@@ -1921,12 +1946,18 @@ public:
PrettyFunction,
/// The same as PrettyFunction, except that the
/// 'virtual' keyword is omitted for virtual member functions.
- PrettyFunctionNoVirtual
+ PrettyFunctionNoVirtual,
+ UniqueStableNameType,
+ UniqueStableNameExpr,
};
private:
PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL);
+ PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
+ TypeSourceInfo *Info);
+ PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
+ Expr *E);
explicit PredefinedExpr(EmptyShell Empty, bool HasFunctionName);
@@ -1939,10 +1970,39 @@ private:
*getTrailingObjects<Stmt *>() = SL;
}
+ void setTypeSourceInfo(TypeSourceInfo *Info) {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameType &&
+ "TypeSourceInfo only valid for UniqueStableName of a Type");
+ *getTrailingObjects<TypeSourceInfo *>() = Info;
+ }
+
+ void setExpr(Expr *E) {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameExpr &&
+ "TypeSourceInfo only valid for UniqueStableName of n Expression.");
+ *getTrailingObjects<Expr *>() = E;
+ }
+
+ size_t numTrailingObjects(OverloadToken<Stmt *>) const {
+ return hasFunctionName();
+ }
+
+ size_t numTrailingObjects(OverloadToken<TypeSourceInfo *>) const {
+ return getIdentKind() == UniqueStableNameType && !hasFunctionName();
+ }
+ size_t numTrailingObjects(OverloadToken<Expr *>) const {
+ return getIdentKind() == UniqueStableNameExpr && !hasFunctionName();
+ }
+
public:
/// Create a PredefinedExpr.
static PredefinedExpr *Create(const ASTContext &Ctx, SourceLocation L,
QualType FNTy, IdentKind IK, StringLiteral *SL);
+ static PredefinedExpr *Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK, StringLiteral *SL,
+ TypeSourceInfo *Info);
+ static PredefinedExpr *Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK, StringLiteral *SL,
+ Expr *E);
/// Create an empty PredefinedExpr.
static PredefinedExpr *CreateEmpty(const ASTContext &Ctx,
@@ -1967,8 +2027,34 @@ public:
: nullptr;
}
+ TypeSourceInfo *getTypeSourceInfo() {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameType &&
+ "TypeSourceInfo only valid for UniqueStableName of a Type");
+ return *getTrailingObjects<TypeSourceInfo *>();
+ }
+
+ const TypeSourceInfo *getTypeSourceInfo() const {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameType &&
+ "TypeSourceInfo only valid for UniqueStableName of a Type");
+ return *getTrailingObjects<TypeSourceInfo *>();
+ }
+
+ Expr *getExpr() {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameExpr &&
+ "TypeSourceInfo only valid for UniqueStableName of n Expression.");
+ return *getTrailingObjects<Expr *>();
+ }
+
+ const Expr *getExpr() const {
+ assert(!hasFunctionName() && getIdentKind() == UniqueStableNameExpr &&
+ "TypeSourceInfo only valid for UniqueStableName of n Expression.");
+ return *getTrailingObjects<Expr *>();
+ }
+
static StringRef getIdentKindName(IdentKind IK);
static std::string ComputeName(IdentKind IK, const Decl *CurrentDecl);
+ static std::string ComputeName(ASTContext &Context, IdentKind IK,
+ const QualType Ty);
SourceLocation getBeginLoc() const { return getLocation(); }
SourceLocation getEndLoc() const { return getLocation(); }
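Editor's note: which trailing object is present is determined entirely by the IdentKind: a StringLiteral * for the classic predefined identifiers, or exactly one of Expr * / TypeSourceInfo * for the two __builtin_unique_stable_name forms. A hedged sketch of how a consumer would pick the payload (PE is a placeholder PredefinedExpr *):

    if (PE->getIdentKind() == PredefinedExpr::UniqueStableNameType) {
      TypeSourceInfo *TSI = PE->getTypeSourceInfo(); // type operand
    } else if (PE->getIdentKind() == PredefinedExpr::UniqueStableNameExpr) {
      Expr *Operand = PE->getExpr();                 // expression operand
    } else if (PE->hasFunctionName()) {
      StringLiteral *Name = PE->getFunctionName();   // e.g. __func__
    }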
@@ -1996,12 +2082,11 @@ class ParenExpr : public Expr {
Stmt *Val;
public:
ParenExpr(SourceLocation l, SourceLocation r, Expr *val)
- : Expr(ParenExprClass, val->getType(),
- val->getValueKind(), val->getObjectKind(),
- val->isTypeDependent(), val->isValueDependent(),
- val->isInstantiationDependent(),
- val->containsUnexpandedParameterPack()),
- L(l), R(r), Val(val) {}
+ : Expr(ParenExprClass, val->getType(), val->getValueKind(),
+ val->getObjectKind()),
+ L(l), R(r), Val(val) {
+ setDependence(computeDependence(this));
+ }
/// Construct an empty parenthesized expression.
explicit ParenExpr(EmptyShell Empty)
@@ -2043,31 +2128,48 @@ public:
/// applied to a non-complex value, the former returns its operand and the
/// later returns zero in the type of the operand.
///
-class UnaryOperator : public Expr {
+class UnaryOperator final
+ : public Expr,
+ private llvm::TrailingObjects<UnaryOperator, FPOptionsOverride> {
Stmt *Val;
+ size_t numTrailingObjects(OverloadToken<FPOptionsOverride>) const {
+ return UnaryOperatorBits.HasFPFeatures ? 1 : 0;
+ }
+
+ FPOptionsOverride &getTrailingFPFeatures() {
+ assert(UnaryOperatorBits.HasFPFeatures);
+ return *getTrailingObjects<FPOptionsOverride>();
+ }
+
+ const FPOptionsOverride &getTrailingFPFeatures() const {
+ assert(UnaryOperatorBits.HasFPFeatures);
+ return *getTrailingObjects<FPOptionsOverride>();
+ }
+
public:
typedef UnaryOperatorKind Opcode;
- UnaryOperator(Expr *input, Opcode opc, QualType type, ExprValueKind VK,
- ExprObjectKind OK, SourceLocation l, bool CanOverflow)
- : Expr(UnaryOperatorClass, type, VK, OK,
- input->isTypeDependent() || type->isDependentType(),
- input->isValueDependent(),
- (input->isInstantiationDependent() ||
- type->isInstantiationDependentType()),
- input->containsUnexpandedParameterPack()),
- Val(input) {
- UnaryOperatorBits.Opc = opc;
- UnaryOperatorBits.CanOverflow = CanOverflow;
- UnaryOperatorBits.Loc = l;
- }
+protected:
+ UnaryOperator(const ASTContext &Ctx, Expr *input, Opcode opc, QualType type,
+ ExprValueKind VK, ExprObjectKind OK, SourceLocation l,
+ bool CanOverflow, FPOptionsOverride FPFeatures);
/// Build an empty unary operator.
- explicit UnaryOperator(EmptyShell Empty) : Expr(UnaryOperatorClass, Empty) {
+ explicit UnaryOperator(bool HasFPFeatures, EmptyShell Empty)
+ : Expr(UnaryOperatorClass, Empty) {
UnaryOperatorBits.Opc = UO_AddrOf;
+ UnaryOperatorBits.HasFPFeatures = HasFPFeatures;
}
+public:
+ static UnaryOperator *CreateEmpty(const ASTContext &C, bool hasFPFeatures);
+
+ static UnaryOperator *Create(const ASTContext &C, Expr *input, Opcode opc,
+ QualType type, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation l,
+ bool CanOverflow, FPOptionsOverride FPFeatures);
+
Opcode getOpcode() const {
return static_cast<Opcode>(UnaryOperatorBits.Opc);
}
@@ -2089,6 +2191,18 @@ public:
bool canOverflow() const { return UnaryOperatorBits.CanOverflow; }
void setCanOverflow(bool C) { UnaryOperatorBits.CanOverflow = C; }
+ // Get the FP contractability status of this operator. Only meaningful for
+ // operations on floating point types.
+ bool isFPContractableWithinStatement(const LangOptions &LO) const {
+ return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
+ }
+
+ // Get the FENV_ACCESS status of this operator. Only meaningful for
+ // operations on floating point types.
+ bool isFEnvAccessOn(const LangOptions &LO) const {
+ return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
+ }
+
/// isPostfix - Return true if this is a postfix operation, like x++.
static bool isPostfix(Opcode Op) {
return Op == UO_PostInc || Op == UO_PostDec;
@@ -2155,6 +2269,37 @@ public:
const_child_range children() const {
return const_child_range(&Val, &Val + 1);
}
+
+ /// Is FPFeatures in Trailing Storage?
+ bool hasStoredFPFeatures() const { return UnaryOperatorBits.HasFPFeatures; }
+
+protected:
+ /// Get FPFeatures from trailing storage
+ FPOptionsOverride getStoredFPFeatures() const {
+ return getTrailingFPFeatures();
+ }
+
+ /// Set FPFeatures in trailing storage, used only by Serialization
+ void setStoredFPFeatures(FPOptionsOverride F) { getTrailingFPFeatures() = F; }
+
+public:
+ // Get the FP features status of this operator. Only meaningful for
+ // operations on floating point types.
+ FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
+ if (UnaryOperatorBits.HasFPFeatures)
+ return getStoredFPFeatures().applyOverrides(LO);
+ return FPOptions::defaultWithoutTrailingStorage(LO);
+ }
+ FPOptionsOverride getFPOptionsOverride() const {
+ if (UnaryOperatorBits.HasFPFeatures)
+ return getStoredFPFeatures();
+ return FPOptionsOverride();
+ }
+
+ friend TrailingObjects;
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
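Editor's note: UnaryOperator now only reserves space for an FPOptionsOverride when one is actually needed, so nodes are built through the factory functions rather than the (now protected) constructor. A minimal sketch under that assumption (Ctx, Sub, Loc and LO are placeholders):

    UnaryOperator *UO = UnaryOperator::Create(
        Ctx, Sub, UO_Minus, Sub->getType(), VK_RValue, OK_Ordinary, Loc,
        /*CanOverflow=*/false, FPOptionsOverride());
    // Falls back to the LangOptions defaults when no override was stored.
    FPOptions FPO = UO->getFPFeaturesInEffect(LO);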
/// Helper class for OffsetOfExpr.
@@ -2379,17 +2524,17 @@ class UnaryExprOrTypeTraitExpr : public Expr {
public:
UnaryExprOrTypeTraitExpr(UnaryExprOrTypeTrait ExprKind, TypeSourceInfo *TInfo,
QualType resultType, SourceLocation op,
- SourceLocation rp) :
- Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
- false, // Never type-dependent (C++ [temp.dep.expr]p3).
- // Value-dependent if the argument is type-dependent.
- TInfo->getType()->isDependentType(),
- TInfo->getType()->isInstantiationDependentType(),
- TInfo->getType()->containsUnexpandedParameterPack()),
- OpLoc(op), RParenLoc(rp) {
+ SourceLocation rp)
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary),
+ OpLoc(op), RParenLoc(rp) {
+ assert(ExprKind <= UETT_Last && "invalid enum value!");
UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ assert(static_cast<unsigned>(ExprKind) ==
+ UnaryExprOrTypeTraitExprBits.Kind &&
+ "UnaryExprOrTypeTraitExprBits.Kind overflow!");
UnaryExprOrTypeTraitExprBits.IsType = true;
Argument.Ty = TInfo;
+ setDependence(computeDependence(this));
}
UnaryExprOrTypeTraitExpr(UnaryExprOrTypeTrait ExprKind, Expr *E,
@@ -2403,7 +2548,12 @@ public:
UnaryExprOrTypeTrait getKind() const {
return static_cast<UnaryExprOrTypeTrait>(UnaryExprOrTypeTraitExprBits.Kind);
}
- void setKind(UnaryExprOrTypeTrait K) { UnaryExprOrTypeTraitExprBits.Kind = K;}
+ void setKind(UnaryExprOrTypeTrait K) {
+ assert(K <= UETT_Last && "invalid enum value!");
+ UnaryExprOrTypeTraitExprBits.Kind = K;
+ assert(static_cast<unsigned>(K) == UnaryExprOrTypeTraitExprBits.Kind &&
+ "UnaryExprOrTypeTraitExprBits.Kind overflow!");
+ }
bool isArgumentType() const { return UnaryExprOrTypeTraitExprBits.IsType; }
QualType getArgumentType() const {
@@ -2466,19 +2616,13 @@ class ArraySubscriptExpr : public Expr {
bool lhsIsBase() const { return getRHS()->getType()->isIntegerType(); }
public:
- ArraySubscriptExpr(Expr *lhs, Expr *rhs, QualType t,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation rbracketloc)
- : Expr(ArraySubscriptExprClass, t, VK, OK,
- lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent(),
- (lhs->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (lhs->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack())) {
+ ArraySubscriptExpr(Expr *lhs, Expr *rhs, QualType t, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation rbracketloc)
+ : Expr(ArraySubscriptExprClass, t, VK, OK) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
- ArraySubscriptExprBits.RBracketLoc = rbracketloc;
+ ArrayOrMatrixSubscriptExprBits.RBracketLoc = rbracketloc;
+ setDependence(computeDependence(this));
}
/// Create an empty array subscript expression.
@@ -2514,10 +2658,10 @@ public:
SourceLocation getEndLoc() const { return getRBracketLoc(); }
SourceLocation getRBracketLoc() const {
- return ArraySubscriptExprBits.RBracketLoc;
+ return ArrayOrMatrixSubscriptExprBits.RBracketLoc;
}
void setRBracketLoc(SourceLocation L) {
- ArraySubscriptExprBits.RBracketLoc = L;
+ ArrayOrMatrixSubscriptExprBits.RBracketLoc = L;
}
SourceLocation getExprLoc() const LLVM_READONLY {
@@ -2537,6 +2681,84 @@ public:
}
};
+/// MatrixSubscriptExpr - Matrix subscript expression for the MatrixType
+/// extension.
+/// MatrixSubscriptExpr can be either incomplete (only Base and RowIdx are set
+/// so far, the type is IncompleteMatrixIdx) or complete (Base, RowIdx and
+/// ColumnIdx refer to valid expressions). Incomplete matrix expressions only
+/// exist during the initial construction of the AST.
+class MatrixSubscriptExpr : public Expr {
+ enum { BASE, ROW_IDX, COLUMN_IDX, END_EXPR };
+ Stmt *SubExprs[END_EXPR];
+
+public:
+ MatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, QualType T,
+ SourceLocation RBracketLoc)
+ : Expr(MatrixSubscriptExprClass, T, Base->getValueKind(),
+ OK_MatrixComponent) {
+ SubExprs[BASE] = Base;
+ SubExprs[ROW_IDX] = RowIdx;
+ SubExprs[COLUMN_IDX] = ColumnIdx;
+ ArrayOrMatrixSubscriptExprBits.RBracketLoc = RBracketLoc;
+ setDependence(computeDependence(this));
+ }
+
+ /// Create an empty matrix subscript expression.
+ explicit MatrixSubscriptExpr(EmptyShell Shell)
+ : Expr(MatrixSubscriptExprClass, Shell) {}
+
+ bool isIncomplete() const {
+ bool IsIncomplete = hasPlaceholderType(BuiltinType::IncompleteMatrixIdx);
+ assert((SubExprs[COLUMN_IDX] || IsIncomplete) &&
+ "expressions without column index must be marked as incomplete");
+ return IsIncomplete;
+ }
+ Expr *getBase() { return cast<Expr>(SubExprs[BASE]); }
+ const Expr *getBase() const { return cast<Expr>(SubExprs[BASE]); }
+ void setBase(Expr *E) { SubExprs[BASE] = E; }
+
+ Expr *getRowIdx() { return cast<Expr>(SubExprs[ROW_IDX]); }
+ const Expr *getRowIdx() const { return cast<Expr>(SubExprs[ROW_IDX]); }
+ void setRowIdx(Expr *E) { SubExprs[ROW_IDX] = E; }
+
+ Expr *getColumnIdx() { return cast_or_null<Expr>(SubExprs[COLUMN_IDX]); }
+ const Expr *getColumnIdx() const {
+ assert(!isIncomplete() &&
+ "cannot get the column index of an incomplete expression");
+ return cast<Expr>(SubExprs[COLUMN_IDX]);
+ }
+ void setColumnIdx(Expr *E) { SubExprs[COLUMN_IDX] = E; }
+
+ SourceLocation getBeginLoc() const LLVM_READONLY {
+ return getBase()->getBeginLoc();
+ }
+
+ SourceLocation getEndLoc() const { return getRBracketLoc(); }
+
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return getBase()->getExprLoc();
+ }
+
+ SourceLocation getRBracketLoc() const {
+ return ArrayOrMatrixSubscriptExprBits.RBracketLoc;
+ }
+ void setRBracketLoc(SourceLocation L) {
+ ArrayOrMatrixSubscriptExprBits.RBracketLoc = L;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MatrixSubscriptExprClass;
+ }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
+ }
+ const_child_range children() const {
+ return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
+ }
+};
+
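Editor's note: at the source level this node models indexing into values of the experimental matrix type extension (enabled with -fenable-matrix); both indices must be seen before the node is considered complete. A small illustrative snippet of the surface syntax this class represents (the type alias and function are made up for the example):

    typedef float m4x4_t __attribute__((matrix_type(4, 4)));

    float element(m4x4_t M, unsigned R, unsigned C) {
      // Parsed as an incomplete MatrixSubscriptExpr after M[R], and completed
      // once the column index C is seen.
      return M[R][C];
    }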
/// CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
/// CallExpr itself represents a normal function call, e.g., "f(x, 2)",
/// while its subclasses may represent alternative syntax that (semantically)
@@ -2553,8 +2775,6 @@ class CallExpr : public Expr {
/// the derived classes of CallExpr.
SourceLocation RParenLoc;
- void updateDependenciesFromArg(Expr *Arg);
-
// CallExpr stores some data in trailing objects. However, since CallExpr
// is used as a base of other expression classes we cannot use
// llvm::TrailingObjects. Instead we manually perform the pointer arithmetic
@@ -2796,6 +3016,12 @@ public:
/// a non-value-dependent constant parameter evaluating as false.
bool isBuiltinAssumeFalse(const ASTContext &Ctx) const;
+ /// Used by Sema to implement MSVC-compatible delayed name lookup.
+ /// (Usually Exprs themselves should set dependence).
+ void markDependentForPostponedNameLookup() {
+ setDependence(getDependence() | ExprDependence::TypeValueInstantiation);
+ }
+
bool isCallToStdMove() const {
const FunctionDecl *FD = getDirectCallee();
return getNumArgs() == 1 && FD && FD->isInStdNamespace() &&
@@ -3088,13 +3314,10 @@ class CompoundLiteralExpr : public Expr {
public:
CompoundLiteralExpr(SourceLocation lparenloc, TypeSourceInfo *tinfo,
QualType T, ExprValueKind VK, Expr *init, bool fileScope)
- : Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary,
- tinfo->getType()->isDependentType(),
- init->isValueDependent(),
- (init->isInstantiationDependent() ||
- tinfo->getType()->isInstantiationDependentType()),
- init->containsUnexpandedParameterPack()),
- LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {}
+ : Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary),
+ LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {
+ setDependence(computeDependence(this));
+ }
/// Construct an empty compound literal.
explicit CompoundLiteralExpr(EmptyShell Empty)
@@ -3160,26 +3383,13 @@ class CastExpr : public Expr {
protected:
CastExpr(StmtClass SC, QualType ty, ExprValueKind VK, const CastKind kind,
Expr *op, unsigned BasePathSize)
- : Expr(SC, ty, VK, OK_Ordinary,
- // Cast expressions are type-dependent if the type is
- // dependent (C++ [temp.dep.expr]p3).
- ty->isDependentType(),
- // Cast expressions are value-dependent if the type is
- // dependent or if the subexpression is value-dependent.
- ty->isDependentType() || (op && op->isValueDependent()),
- (ty->isInstantiationDependentType() ||
- (op && op->isInstantiationDependent())),
- // An implicit cast expression doesn't (lexically) contain an
- // unexpanded pack, even if its target type does.
- ((SC != ImplicitCastExprClass &&
- ty->containsUnexpandedParameterPack()) ||
- (op && op->containsUnexpandedParameterPack()))),
- Op(op) {
+ : Expr(SC, ty, VK, OK_Ordinary), Op(op) {
CastExprBits.Kind = kind;
CastExprBits.PartOfExplicitCast = false;
CastExprBits.BasePathSize = BasePathSize;
assert((CastExprBits.BasePathSize == BasePathSize) &&
"BasePathSize overflow!");
+ setDependence(computeDependence(this));
assert(CastConsistency());
}
@@ -3438,30 +3648,39 @@ class BinaryOperator : public Expr {
public:
typedef BinaryOperatorKind Opcode;
- BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation opLoc, FPOptions FPFeatures)
- : Expr(BinaryOperatorClass, ResTy, VK, OK,
- lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent(),
- (lhs->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (lhs->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack())) {
- BinaryOperatorBits.Opc = opc;
- BinaryOperatorBits.FPFeatures = FPFeatures.getInt();
- BinaryOperatorBits.OpLoc = opLoc;
- SubExprs[LHS] = lhs;
- SubExprs[RHS] = rhs;
- assert(!isCompoundAssignmentOp() &&
- "Use CompoundAssignOperator for compound assignments");
+protected:
+ size_t offsetOfTrailingStorage() const;
+
+ /// Return a pointer to the trailing FPOptions
+ FPOptionsOverride *getTrailingFPFeatures() {
+ assert(BinaryOperatorBits.HasFPFeatures);
+ return reinterpret_cast<FPOptionsOverride *>(
+ reinterpret_cast<char *>(this) + offsetOfTrailingStorage());
+ }
+ const FPOptionsOverride *getTrailingFPFeatures() const {
+ assert(BinaryOperatorBits.HasFPFeatures);
+ return reinterpret_cast<const FPOptionsOverride *>(
+ reinterpret_cast<const char *>(this) + offsetOfTrailingStorage());
}
+ /// Build a binary operator, assuming that appropriate storage has been
+ /// allocated for the trailing objects when needed.
+ BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs, Opcode opc,
+ QualType ResTy, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc, FPOptionsOverride FPFeatures);
+
/// Construct an empty binary operator.
explicit BinaryOperator(EmptyShell Empty) : Expr(BinaryOperatorClass, Empty) {
BinaryOperatorBits.Opc = BO_Comma;
}
+public:
+ static BinaryOperator *CreateEmpty(const ASTContext &C, bool hasFPFeatures);
+
+ static BinaryOperator *Create(const ASTContext &C, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures);
SourceLocation getExprLoc() const { return getOperatorLoc(); }
SourceLocation getOperatorLoc() const { return BinaryOperatorBits.OpLoc; }
void setOperatorLoc(SourceLocation L) { BinaryOperatorBits.OpLoc = L; }
@@ -3602,47 +3821,65 @@ public:
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
- // Set the FP contractability status of this operator. Only meaningful for
+ /// Set and fetch the bit that shows whether FPFeatures needs to be
+ /// allocated in Trailing Storage
+ void setHasStoredFPFeatures(bool B) { BinaryOperatorBits.HasFPFeatures = B; }
+ bool hasStoredFPFeatures() const { return BinaryOperatorBits.HasFPFeatures; }
+
+ /// Get FPFeatures from trailing storage
+ FPOptionsOverride getStoredFPFeatures() const {
+ assert(hasStoredFPFeatures());
+ return *getTrailingFPFeatures();
+ }
+ /// Set FPFeatures in trailing storage, used only by Serialization
+ void setStoredFPFeatures(FPOptionsOverride F) {
+ assert(BinaryOperatorBits.HasFPFeatures);
+ *getTrailingFPFeatures() = F;
+ }
+
+ // Get the FP features status of this operator. Only meaningful for
// operations on floating point types.
- void setFPFeatures(FPOptions F) {
- BinaryOperatorBits.FPFeatures = F.getInt();
+ FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
+ if (BinaryOperatorBits.HasFPFeatures)
+ return getStoredFPFeatures().applyOverrides(LO);
+ return FPOptions::defaultWithoutTrailingStorage(LO);
}
- FPOptions getFPFeatures() const {
- return FPOptions(BinaryOperatorBits.FPFeatures);
+ // This is used in ASTImporter
+ FPOptionsOverride getFPFeatures(const LangOptions &LO) const {
+ if (BinaryOperatorBits.HasFPFeatures)
+ return getStoredFPFeatures();
+ return FPOptionsOverride();
}
// Get the FP contractability status of this operator. Only meaningful for
// operations on floating point types.
- bool isFPContractableWithinStatement() const {
- return getFPFeatures().allowFPContractWithinStatement();
+ bool isFPContractableWithinStatement(const LangOptions &LO) const {
+ return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
}
// Get the FENV_ACCESS status of this operator. Only meaningful for
// operations on floating point types.
- bool isFEnvAccessOn() const { return getFPFeatures().allowFEnvAccess(); }
+ bool isFEnvAccessOn(const LangOptions &LO) const {
+ return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
+ }
protected:
- BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation opLoc, FPOptions FPFeatures, bool dead2)
- : Expr(CompoundAssignOperatorClass, ResTy, VK, OK,
- lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent(),
- (lhs->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (lhs->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack())) {
- BinaryOperatorBits.Opc = opc;
- BinaryOperatorBits.FPFeatures = FPFeatures.getInt();
- BinaryOperatorBits.OpLoc = opLoc;
- SubExprs[LHS] = lhs;
- SubExprs[RHS] = rhs;
- }
+ BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs, Opcode opc,
+ QualType ResTy, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc, FPOptionsOverride FPFeatures,
+ bool dead2);
+ /// Construct an empty BinaryOperator, SC is CompoundAssignOperator.
BinaryOperator(StmtClass SC, EmptyShell Empty) : Expr(SC, Empty) {
BinaryOperatorBits.Opc = BO_MulAssign;
}
+
+ /// Return the size in bytes needed for the trailing objects.
+ /// Used to allocate the right amount of storage.
+ static unsigned sizeOfTrailingObjects(bool HasFPFeatures) {
+ return HasFPFeatures * sizeof(FPOptionsOverride);
+ }
};
/// CompoundAssignOperator - For compound assignments (e.g. +=), we keep
@@ -3654,22 +3891,33 @@ protected:
class CompoundAssignOperator : public BinaryOperator {
QualType ComputationLHSType;
QualType ComputationResultType;
-public:
- CompoundAssignOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResType,
- ExprValueKind VK, ExprObjectKind OK,
- QualType CompLHSType, QualType CompResultType,
- SourceLocation OpLoc, FPOptions FPFeatures)
- : BinaryOperator(lhs, rhs, opc, ResType, VK, OK, OpLoc, FPFeatures,
- true),
- ComputationLHSType(CompLHSType),
- ComputationResultType(CompResultType) {
+
+ /// Construct an empty CompoundAssignOperator.
+ explicit CompoundAssignOperator(const ASTContext &C, EmptyShell Empty,
+ bool hasFPFeatures)
+ : BinaryOperator(CompoundAssignOperatorClass, Empty) {}
+
+protected:
+ CompoundAssignOperator(const ASTContext &C, Expr *lhs, Expr *rhs, Opcode opc,
+ QualType ResType, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation OpLoc, FPOptionsOverride FPFeatures,
+ QualType CompLHSType, QualType CompResultType)
+ : BinaryOperator(C, lhs, rhs, opc, ResType, VK, OK, OpLoc, FPFeatures,
+ true),
+ ComputationLHSType(CompLHSType), ComputationResultType(CompResultType) {
assert(isCompoundAssignmentOp() &&
"Only should be used for compound assignments");
}
- /// Build an empty compound assignment operator expression.
- explicit CompoundAssignOperator(EmptyShell Empty)
- : BinaryOperator(CompoundAssignOperatorClass, Empty) { }
+public:
+ static CompoundAssignOperator *CreateEmpty(const ASTContext &C,
+ bool hasFPFeatures);
+
+ static CompoundAssignOperator *
+ Create(const ASTContext &C, Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures, QualType CompLHSType = QualType(),
+ QualType CompResultType = QualType());
// The two computation types are the type the LHS is converted
// to for the computation and the type of the result; the two are
@@ -3685,6 +3933,12 @@ public:
}
};
+inline size_t BinaryOperator::offsetOfTrailingStorage() const {
+ assert(BinaryOperatorBits.HasFPFeatures);
+ return isa<CompoundAssignOperator>(this) ? sizeof(CompoundAssignOperator)
+ : sizeof(BinaryOperator);
+}
+
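The trailing-storage pattern used above is easier to see in isolation. The following standalone sketch is illustrative only and is not Clang code: an optional payload is laid out directly after the node, and the allocation reserves space for it only when the flag says it is present, mirroring sizeOfTrailingObjects() and offsetOfTrailingStorage().

#include <cstddef>
#include <new>

struct Payload { unsigned Bits = 0; };

struct Node {
  unsigned Opcode = 0; // extra state keeps sizeof(Node) a multiple of alignof(Payload)
  bool HasPayload;

  explicit Node(bool HasPayload) : HasPayload(HasPayload) {}

  // Mirrors sizeOfTrailingObjects(): zero extra bytes unless the payload exists.
  static std::size_t sizeOfTrailingObjects(bool HasPayload) {
    return HasPayload * sizeof(Payload);
  }

  // Mirrors getTrailingFPFeatures(): the payload lives right after the node.
  Payload *getTrailingPayload() {
    return reinterpret_cast<Payload *>(reinterpret_cast<char *>(this) +
                                       sizeof(Node));
  }

  // Mirrors the Create() functions: the allocation is sized up front.
  static Node *create(bool HasPayload) {
    void *Mem =
        ::operator new(sizeof(Node) + sizeOfTrailingObjects(HasPayload));
    Node *N = new (Mem) Node(HasPayload);
    if (HasPayload)
      new (N->getTrailingPayload()) Payload();
    return N;
  }
};
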
/// AbstractConditionalOperator - An abstract base class for
/// ConditionalOperator and BinaryConditionalOperator.
class AbstractConditionalOperator : public Expr {
@@ -3692,14 +3946,10 @@ class AbstractConditionalOperator : public Expr {
friend class ASTStmtReader;
protected:
- AbstractConditionalOperator(StmtClass SC, QualType T,
- ExprValueKind VK, ExprObjectKind OK,
- bool TD, bool VD, bool ID,
- bool ContainsUnexpandedParameterPack,
- SourceLocation qloc,
+ AbstractConditionalOperator(StmtClass SC, QualType T, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation qloc,
SourceLocation cloc)
- : Expr(SC, T, VK, OK, TD, VD, ID, ContainsUnexpandedParameterPack),
- QuestionLoc(qloc), ColonLoc(cloc) {}
+ : Expr(SC, T, VK, OK), QuestionLoc(qloc), ColonLoc(cloc) {}
AbstractConditionalOperator(StmtClass SC, EmptyShell Empty)
: Expr(SC, Empty) { }
@@ -3738,26 +3988,12 @@ public:
ConditionalOperator(Expr *cond, SourceLocation QLoc, Expr *lhs,
SourceLocation CLoc, Expr *rhs, QualType t,
ExprValueKind VK, ExprObjectKind OK)
- : AbstractConditionalOperator(
- ConditionalOperatorClass, t, VK, OK,
- // The type of the conditional operator depends on the type
- // of the conditional to support the GCC vector conditional
- // extension. Additionally, [temp.dep.expr] does specify state that
- // this should be dependent on ALL sub expressions.
- (cond->isTypeDependent() || lhs->isTypeDependent() ||
- rhs->isTypeDependent()),
- (cond->isValueDependent() || lhs->isValueDependent() ||
- rhs->isValueDependent()),
- (cond->isInstantiationDependent() ||
- lhs->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (cond->containsUnexpandedParameterPack() ||
- lhs->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack()),
- QLoc, CLoc) {
+ : AbstractConditionalOperator(ConditionalOperatorClass, t, VK, OK, QLoc,
+ CLoc) {
SubExprs[COND] = cond;
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
+ setDependence(computeDependence(this));
}
/// Build an empty conditional operator.
@@ -3822,20 +4058,15 @@ public:
Expr *cond, Expr *lhs, Expr *rhs,
SourceLocation qloc, SourceLocation cloc,
QualType t, ExprValueKind VK, ExprObjectKind OK)
- : AbstractConditionalOperator(BinaryConditionalOperatorClass, t, VK, OK,
- (common->isTypeDependent() || rhs->isTypeDependent()),
- (common->isValueDependent() || rhs->isValueDependent()),
- (common->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (common->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack()),
- qloc, cloc),
- OpaqueValue(opaqueValue) {
+ : AbstractConditionalOperator(BinaryConditionalOperatorClass, t, VK, OK,
+ qloc, cloc),
+ OpaqueValue(opaqueValue) {
SubExprs[COMMON] = common;
SubExprs[COND] = cond;
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
assert(OpaqueValue->getSourceExpr() == common && "Wrong opaque value");
+ setDependence(computeDependence(this));
}
/// Build an empty conditional operator.
@@ -3913,9 +4144,10 @@ class AddrLabelExpr : public Expr {
public:
AddrLabelExpr(SourceLocation AALoc, SourceLocation LLoc, LabelDecl *L,
QualType t)
- : Expr(AddrLabelExprClass, t, VK_RValue, OK_Ordinary, false, false, false,
- false),
- AmpAmpLoc(AALoc), LabelLoc(LLoc), Label(L) {}
+ : Expr(AddrLabelExprClass, t, VK_RValue, OK_Ordinary), AmpAmpLoc(AALoc),
+ LabelLoc(LLoc), Label(L) {
+ setDependence(ExprDependence::None);
+ }
/// Build an empty address of a label expression.
explicit AddrLabelExpr(EmptyShell Empty)
@@ -3957,12 +4189,9 @@ class StmtExpr : public Expr {
public:
StmtExpr(CompoundStmt *SubStmt, QualType T, SourceLocation LParenLoc,
SourceLocation RParenLoc, unsigned TemplateDepth)
- : // We treat a statement-expression in a dependent context as
- // always being value- and instantiation-dependent. This matches the
- // behavior of lambda-expressions and GCC.
- Expr(StmtExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
- TemplateDepth != 0, TemplateDepth != 0, false),
- SubStmt(SubStmt), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ : Expr(StmtExprClass, T, VK_RValue, OK_Ordinary), SubStmt(SubStmt),
+ LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ setDependence(computeDependence(this, TemplateDepth));
// FIXME: A templated statement expression should have an associated
// DeclContext so that nested declarations always have a dependent context.
StmtExprBits.TemplateDepth = TemplateDepth;
@@ -4081,17 +4310,13 @@ private:
explicit ConvertVectorExpr(EmptyShell Empty) : Expr(ConvertVectorExprClass, Empty) {}
public:
- ConvertVectorExpr(Expr* SrcExpr, TypeSourceInfo *TI, QualType DstType,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation BuiltinLoc, SourceLocation RParenLoc)
- : Expr(ConvertVectorExprClass, DstType, VK, OK,
- DstType->isDependentType(),
- DstType->isDependentType() || SrcExpr->isValueDependent(),
- (DstType->isInstantiationDependentType() ||
- SrcExpr->isInstantiationDependent()),
- (DstType->containsUnexpandedParameterPack() ||
- SrcExpr->containsUnexpandedParameterPack())),
- SrcExpr(SrcExpr), TInfo(TI), BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {}
+ ConvertVectorExpr(Expr *SrcExpr, TypeSourceInfo *TI, QualType DstType,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation BuiltinLoc, SourceLocation RParenLoc)
+ : Expr(ConvertVectorExprClass, DstType, VK, OK), SrcExpr(SrcExpr),
+ TInfo(TI), BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {
+ setDependence(computeDependence(this));
+ }
/// getSrcExpr - Return the Expr to be converted.
Expr *getSrcExpr() const { return cast<Expr>(SrcExpr); }
@@ -4139,22 +4364,17 @@ class ChooseExpr : public Expr {
SourceLocation BuiltinLoc, RParenLoc;
bool CondIsTrue;
public:
- ChooseExpr(SourceLocation BLoc, Expr *cond, Expr *lhs, Expr *rhs,
- QualType t, ExprValueKind VK, ExprObjectKind OK,
- SourceLocation RP, bool condIsTrue,
- bool TypeDependent, bool ValueDependent)
- : Expr(ChooseExprClass, t, VK, OK, TypeDependent, ValueDependent,
- (cond->isInstantiationDependent() ||
- lhs->isInstantiationDependent() ||
- rhs->isInstantiationDependent()),
- (cond->containsUnexpandedParameterPack() ||
- lhs->containsUnexpandedParameterPack() ||
- rhs->containsUnexpandedParameterPack())),
- BuiltinLoc(BLoc), RParenLoc(RP), CondIsTrue(condIsTrue) {
- SubExprs[COND] = cond;
- SubExprs[LHS] = lhs;
- SubExprs[RHS] = rhs;
- }
+ ChooseExpr(SourceLocation BLoc, Expr *cond, Expr *lhs, Expr *rhs, QualType t,
+ ExprValueKind VK, ExprObjectKind OK, SourceLocation RP,
+ bool condIsTrue)
+ : Expr(ChooseExprClass, t, VK, OK), BuiltinLoc(BLoc), RParenLoc(RP),
+ CondIsTrue(condIsTrue) {
+ SubExprs[COND] = cond;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+
+ setDependence(computeDependence(this));
+ }
/// Build an empty __builtin_choose_expr.
explicit ChooseExpr(EmptyShell Empty) : Expr(ChooseExprClass, Empty) { }
@@ -4219,9 +4439,9 @@ class GNUNullExpr : public Expr {
public:
GNUNullExpr(QualType Ty, SourceLocation Loc)
- : Expr(GNUNullExprClass, Ty, VK_RValue, OK_Ordinary, false, false, false,
- false),
- TokenLoc(Loc) { }
+ : Expr(GNUNullExprClass, Ty, VK_RValue, OK_Ordinary), TokenLoc(Loc) {
+ setDependence(ExprDependence::None);
+ }
/// Build an empty GNU __null expression.
explicit GNUNullExpr(EmptyShell Empty) : Expr(GNUNullExprClass, Empty) { }
@@ -4254,12 +4474,10 @@ class VAArgExpr : public Expr {
public:
VAArgExpr(SourceLocation BLoc, Expr *e, TypeSourceInfo *TInfo,
SourceLocation RPLoc, QualType t, bool IsMS)
- : Expr(VAArgExprClass, t, VK_RValue, OK_Ordinary, t->isDependentType(),
- false, (TInfo->getType()->isInstantiationDependentType() ||
- e->isInstantiationDependent()),
- (TInfo->getType()->containsUnexpandedParameterPack() ||
- e->containsUnexpandedParameterPack())),
- Val(e), TInfo(TInfo, IsMS), BuiltinLoc(BLoc), RParenLoc(RPLoc) {}
+ : Expr(VAArgExprClass, t, VK_RValue, OK_Ordinary), Val(e),
+ TInfo(TInfo, IsMS), BuiltinLoc(BLoc), RParenLoc(RPLoc) {
+ setDependence(computeDependence(this));
+ }
/// Create an empty __builtin_va_arg expression.
explicit VAArgExpr(EmptyShell Empty)
@@ -4468,13 +4686,8 @@ public:
assert(Init < getNumInits() && "Initializer access out of range!");
InitExprs[Init] = expr;
- if (expr) {
- ExprBits.TypeDependent |= expr->isTypeDependent();
- ExprBits.ValueDependent |= expr->isValueDependent();
- ExprBits.InstantiationDependent |= expr->isInstantiationDependent();
- ExprBits.ContainsUnexpandedParameterPack |=
- expr->containsUnexpandedParameterPack();
- }
+ if (expr)
+ setDependence(getDependence() | expr->getDependence());
}
/// Reserve space for some number of initializers.
@@ -4943,8 +5156,9 @@ public:
class NoInitExpr : public Expr {
public:
explicit NoInitExpr(QualType ty)
- : Expr(NoInitExprClass, ty, VK_RValue, OK_Ordinary,
- false, false, ty->isInstantiationDependentType(), false) { }
+ : Expr(NoInitExprClass, ty, VK_RValue, OK_Ordinary) {
+ setDependence(computeDependence(this));
+ }
explicit NoInitExpr(EmptyShell Empty)
: Expr(NoInitExprClass, Empty) { }
@@ -5038,12 +5252,10 @@ class ArrayInitLoopExpr : public Expr {
public:
explicit ArrayInitLoopExpr(QualType T, Expr *CommonInit, Expr *ElementInit)
- : Expr(ArrayInitLoopExprClass, T, VK_RValue, OK_Ordinary, false,
- CommonInit->isValueDependent() || ElementInit->isValueDependent(),
- T->isInstantiationDependentType(),
- CommonInit->containsUnexpandedParameterPack() ||
- ElementInit->containsUnexpandedParameterPack()),
- SubExprs{CommonInit, ElementInit} {}
+ : Expr(ArrayInitLoopExprClass, T, VK_RValue, OK_Ordinary),
+ SubExprs{CommonInit, ElementInit} {
+ setDependence(computeDependence(this));
+ }
/// Get the common subexpression shared by all initializations (the source
/// array).
@@ -5091,8 +5303,9 @@ class ArrayInitIndexExpr : public Expr {
public:
explicit ArrayInitIndexExpr(QualType T)
- : Expr(ArrayInitIndexExprClass, T, VK_RValue, OK_Ordinary,
- false, false, false, false) {}
+ : Expr(ArrayInitIndexExprClass, T, VK_RValue, OK_Ordinary) {
+ setDependence(ExprDependence::None);
+ }
static bool classof(const Stmt *S) {
return S->getStmtClass() == ArrayInitIndexExprClass;
@@ -5123,8 +5336,9 @@ public:
class ImplicitValueInitExpr : public Expr {
public:
explicit ImplicitValueInitExpr(QualType ty)
- : Expr(ImplicitValueInitExprClass, ty, VK_RValue, OK_Ordinary,
- false, false, ty->isInstantiationDependentType(), false) { }
+ : Expr(ImplicitValueInitExprClass, ty, VK_RValue, OK_Ordinary) {
+ setDependence(computeDependence(this));
+ }
/// Construct an empty implicit value initialization.
explicit ImplicitValueInitExpr(EmptyShell Empty)
@@ -5283,10 +5497,9 @@ class GenericSelectionExpr final
template <bool Const> class AssociationTy {
friend class GenericSelectionExpr;
template <bool OtherConst> friend class AssociationIteratorTy;
- using ExprPtrTy =
- typename std::conditional<Const, const Expr *, Expr *>::type;
- using TSIPtrTy = typename std::conditional<Const, const TypeSourceInfo *,
- TypeSourceInfo *>::type;
+ using ExprPtrTy = std::conditional_t<Const, const Expr *, Expr *>;
+ using TSIPtrTy =
+ std::conditional_t<Const, const TypeSourceInfo *, TypeSourceInfo *>;
ExprPtrTy E;
TSIPtrTy TSI;
bool Selected;
@@ -5328,10 +5541,9 @@ class GenericSelectionExpr final
// const Association &Assoc = *It++; // Oops, Assoc is dangling.
using BaseTy = typename AssociationIteratorTy::iterator_facade_base;
using StmtPtrPtrTy =
- typename std::conditional<Const, const Stmt *const *, Stmt **>::type;
- using TSIPtrPtrTy =
- typename std::conditional<Const, const TypeSourceInfo *const *,
- TypeSourceInfo **>::type;
+ std::conditional_t<Const, const Stmt *const *, Stmt **>;
+ using TSIPtrPtrTy = std::conditional_t<Const, const TypeSourceInfo *const *,
+ TypeSourceInfo **>;
StmtPtrPtrTy E; // = nullptr; FIXME: Once support for gcc 4.8 is dropped.
TSIPtrPtrTy TSI; // Kept in sync with E.
unsigned Offset = 0, SelectedOffset = 0;
@@ -5528,12 +5740,11 @@ class ExtVectorElementExpr : public Expr {
public:
ExtVectorElementExpr(QualType ty, ExprValueKind VK, Expr *base,
IdentifierInfo &accessor, SourceLocation loc)
- : Expr(ExtVectorElementExprClass, ty, VK,
- (VK == VK_RValue ? OK_Ordinary : OK_VectorComponent),
- base->isTypeDependent(), base->isValueDependent(),
- base->isInstantiationDependent(),
- base->containsUnexpandedParameterPack()),
- Base(base), Accessor(&accessor), AccessorLoc(loc) {}
+ : Expr(ExtVectorElementExprClass, ty, VK,
+ (VK == VK_RValue ? OK_Ordinary : OK_VectorComponent)),
+ Base(base), Accessor(&accessor), AccessorLoc(loc) {
+ setDependence(computeDependence(this));
+ }
/// Build an empty vector element expression.
explicit ExtVectorElementExpr(EmptyShell Empty)
@@ -5587,11 +5798,9 @@ protected:
BlockDecl *TheBlock;
public:
BlockExpr(BlockDecl *BD, QualType ty)
- : Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary,
- ty->isDependentType(), ty->isDependentType(),
- ty->isInstantiationDependentType() || BD->isDependentContext(),
- false),
- TheBlock(BD) {}
+ : Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary), TheBlock(BD) {
+ setDependence(computeDependence(this));
+ }
/// Build an empty block expression.
explicit BlockExpr(EmptyShell Empty) : Expr(BlockExprClass, Empty) { }
@@ -5655,17 +5864,13 @@ private:
explicit AsTypeExpr(EmptyShell Empty) : Expr(AsTypeExprClass, Empty) {}
public:
- AsTypeExpr(Expr* SrcExpr, QualType DstType,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation BuiltinLoc, SourceLocation RParenLoc)
- : Expr(AsTypeExprClass, DstType, VK, OK,
- DstType->isDependentType(),
- DstType->isDependentType() || SrcExpr->isValueDependent(),
- (DstType->isInstantiationDependentType() ||
- SrcExpr->isInstantiationDependent()),
- (DstType->containsUnexpandedParameterPack() ||
- SrcExpr->containsUnexpandedParameterPack())),
- SrcExpr(SrcExpr), BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {}
+ AsTypeExpr(Expr *SrcExpr, QualType DstType, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc)
+ : Expr(AsTypeExprClass, DstType, VK, OK), SrcExpr(SrcExpr),
+ BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {
+ setDependence(computeDependence(this));
+ }
/// getSrcExpr - Return the Expr to be converted.
Expr *getSrcExpr() const { return cast<Expr>(SrcExpr); }
@@ -5982,14 +6187,15 @@ public:
/// TypoExpr - Internal placeholder for expressions where typo correction
/// still needs to be performed and/or an error diagnostic emitted.
class TypoExpr : public Expr {
+ // The location for the typo name.
+ SourceLocation TypoLoc;
+
public:
- TypoExpr(QualType T)
- : Expr(TypoExprClass, T, VK_LValue, OK_Ordinary,
- /*isTypeDependent*/ true,
- /*isValueDependent*/ true,
- /*isInstantiationDependent*/ true,
- /*containsUnexpandedParameterPack*/ false) {
+ TypoExpr(QualType T, SourceLocation TypoLoc)
+ : Expr(TypoExprClass, T, VK_LValue, OK_Ordinary), TypoLoc(TypoLoc) {
assert(T->isDependentType() && "TypoExpr given a non-dependent type");
+ setDependence(ExprDependence::TypeValueInstantiation |
+ ExprDependence::Error);
}
child_range children() {
@@ -5999,14 +6205,88 @@ public:
return const_child_range(const_child_iterator(), const_child_iterator());
}
- SourceLocation getBeginLoc() const LLVM_READONLY { return SourceLocation(); }
- SourceLocation getEndLoc() const LLVM_READONLY { return SourceLocation(); }
+ SourceLocation getBeginLoc() const LLVM_READONLY { return TypoLoc; }
+ SourceLocation getEndLoc() const LLVM_READONLY { return TypoLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == TypoExprClass;
}
};
+
+/// The frontend produces RecoveryExprs on semantic errors that prevent it from
+/// creating other well-formed expressions. For example, when type-checking a
+/// binary operator fails, we cannot produce a BinaryOperator expression;
+/// instead, we can choose to produce a RecoveryExpr storing the left and right
+/// operands.
+///
+/// RecoveryExpr does not have any semantic meaning in C++; it is only useful
+/// for preserving expressions in the AST that would otherwise be dropped. It
+/// captures the subexpressions of the expression we could not construct and
+/// the source range covered by that expression.
+///
+/// By default, RecoveryExpr uses dependence-bits to take advantage of existing
+/// machinery to deal with dependent code in C++, e.g. RecoveryExpr is preserved
+/// in `decltype(<broken-expr>)` as part of the `DependentDecltypeType`. In
+/// addition to that, clang does not report most errors on dependent
+/// expressions, so we get rid of bogus errors for free. However, note that
+/// unlike other dependent expressions, RecoveryExpr can be produced in
+/// non-template contexts.
+///
+/// We preserve the type in RecoveryExpr when the type is known, e.g.
+/// preserving the return type for a broken non-overloaded function call, or
+/// for an overloaded call where all candidates have the same return type. In
+/// this case, the expression is not type-dependent (unless the known type is
+/// itself dependent).
+///
+/// One can also reliably suppress all bogus errors on expressions containing
+/// recovery expressions by examining the result of Expr::containsErrors().
+///
+/// FIXME: RecoveryExpr is currently generated by default in C++ mode only, as
+/// dependence isn't handled properly on several C-only codepaths.
+class RecoveryExpr final : public Expr,
+ private llvm::TrailingObjects<RecoveryExpr, Expr *> {
+public:
+ static RecoveryExpr *Create(ASTContext &Ctx, QualType T,
+ SourceLocation BeginLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> SubExprs);
+ static RecoveryExpr *CreateEmpty(ASTContext &Ctx, unsigned NumSubExprs);
+
+ ArrayRef<Expr *> subExpressions() {
+ auto *B = getTrailingObjects<Expr *>();
+ return llvm::makeArrayRef(B, B + NumExprs);
+ }
+
+ ArrayRef<const Expr *> subExpressions() const {
+ return const_cast<RecoveryExpr *>(this)->subExpressions();
+ }
+
+ child_range children() {
+ Stmt **B = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
+ return child_range(B, B + NumExprs);
+ }
+
+ SourceLocation getBeginLoc() const { return BeginLoc; }
+ SourceLocation getEndLoc() const { return EndLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == RecoveryExprClass;
+ }
+
+private:
+ RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> SubExprs);
+ RecoveryExpr(EmptyShell Empty, unsigned NumSubExprs)
+ : Expr(RecoveryExprClass, Empty), NumExprs(NumSubExprs) {}
+
+ size_t numTrailingObjects(OverloadToken<Stmt *>) const { return NumExprs; }
+
+ SourceLocation BeginLoc, EndLoc;
+ unsigned NumExprs;
+ friend TrailingObjects;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
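The comment block above points at Expr::containsErrors() as the way to suppress follow-on diagnostics. Below is a hedged sketch of that consumption pattern, assuming only the API named in the comment; the helper is hypothetical and not part of this change.

#include "clang/AST/Expr.h"

// Hypothetical helper: skip a diagnostic when the operand already carries an
// error, directly or via a subexpression such as a RecoveryExpr, since the
// real problem was reported where it originated.
static bool shouldDiagnose(const clang::Expr *E) {
  return !E->containsErrors();
}
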
} // end namespace clang
#endif // LLVM_CLANG_AST_EXPR_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
index cea360d12e91..6f0b68479b9d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
@@ -14,15 +14,19 @@
#ifndef LLVM_CLANG_AST_EXPRCXX_H
#define LLVM_CLANG_AST_EXPRCXX_H
+#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/UnresolvedSet.h"
@@ -80,6 +84,7 @@ class CXXOperatorCallExpr final : public CallExpr {
friend class ASTStmtWriter;
SourceRange Range;
+ FPOptionsOverride Overrides;
// CXXOperatorCallExpr has some trailing objects belonging
// to CallExpr. See CallExpr for the details.
@@ -88,7 +93,7 @@ class CXXOperatorCallExpr final : public CallExpr {
CXXOperatorCallExpr(OverloadedOperatorKind OpKind, Expr *Fn,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation OperatorLoc, FPOptions FPFeatures,
+ SourceLocation OperatorLoc, FPOptionsOverride FPFeatures,
ADLCallKind UsesADL);
CXXOperatorCallExpr(unsigned NumArgs, EmptyShell Empty);
@@ -97,7 +102,7 @@ public:
static CXXOperatorCallExpr *
Create(const ASTContext &Ctx, OverloadedOperatorKind OpKind, Expr *Fn,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation OperatorLoc, FPOptions FPFeatures,
+ SourceLocation OperatorLoc, FPOptionsOverride FPFeatures,
ADLCallKind UsesADL = NotADL);
static CXXOperatorCallExpr *CreateEmpty(const ASTContext &Ctx,
@@ -118,6 +123,22 @@ public:
}
bool isAssignmentOp() const { return isAssignmentOp(getOperator()); }
+ static bool isComparisonOp(OverloadedOperatorKind Opc) {
+ switch (Opc) {
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_Less:
+ case OO_LessEqual:
+ case OO_Spaceship:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool isComparisonOp() const { return isComparisonOp(getOperator()); }
+
/// Is this written as an infix binary operator?
bool isInfixBinaryOp() const;
@@ -144,20 +165,10 @@ public:
return T->getStmtClass() == CXXOperatorCallExprClass;
}
- // Set the FP contractability status of this operator. Only meaningful for
- // operations on floating point types.
- void setFPFeatures(FPOptions F) {
- CXXOperatorCallExprBits.FPFeatures = F.getInt();
- }
- FPOptions getFPFeatures() const {
- return FPOptions(CXXOperatorCallExprBits.FPFeatures);
- }
-
- // Get the FP contractability status of this operator. Only meaningful for
+ // Set and get the FP features of this operator. Only meaningful for
// operations on floating point types.
- bool isFPContractableWithinStatement() const {
- return getFPFeatures().allowFPContractWithinStatement();
- }
+ void setFPFeatures(FPOptionsOverride F) { Overrides = F; }
+ FPOptionsOverride getFPFeatures() const { return Overrides; }
};
/// Represents a call to a member function that
@@ -279,12 +290,10 @@ class CXXRewrittenBinaryOperator : public Expr {
public:
CXXRewrittenBinaryOperator(Expr *SemanticForm, bool IsReversed)
: Expr(CXXRewrittenBinaryOperatorClass, SemanticForm->getType(),
- SemanticForm->getValueKind(), SemanticForm->getObjectKind(),
- SemanticForm->isTypeDependent(), SemanticForm->isValueDependent(),
- SemanticForm->isInstantiationDependent(),
- SemanticForm->containsUnexpandedParameterPack()),
+ SemanticForm->getValueKind(), SemanticForm->getObjectKind()),
SemanticForm(SemanticForm) {
CXXRewrittenBinaryOperatorBits.IsReversed = IsReversed;
+ setDependence(computeDependence(this));
}
CXXRewrittenBinaryOperator(EmptyShell Empty)
: Expr(CXXRewrittenBinaryOperatorClass, Empty), SemanticForm() {}
@@ -349,7 +358,8 @@ public:
/// This abstract class is inherited by all of the classes
/// representing "named" casts: CXXStaticCastExpr for \c static_cast,
/// CXXDynamicCastExpr for \c dynamic_cast, CXXReinterpretCastExpr for
-/// reinterpret_cast, and CXXConstCastExpr for \c const_cast.
+/// reinterpret_cast, CXXConstCastExpr for \c const_cast, and
+/// CXXAddrspaceCastExpr for \c addrspace_cast (in OpenCL).
class CXXNamedCastExpr : public ExplicitCastExpr {
private:
// the location of the casting op
@@ -395,6 +405,7 @@ public:
case CXXDynamicCastExprClass:
case CXXReinterpretCastExprClass:
case CXXConstCastExprClass:
+ case CXXAddrspaceCastExprClass:
return true;
default:
return false;
@@ -552,6 +563,41 @@ public:
}
};
+/// A C++ addrspace_cast expression (currently only enabled for OpenCL).
+///
+/// This expression node represents a cast between pointers to objects in
+/// different address spaces, e.g.,
+/// \c addrspace_cast<global int*>(PtrToGenericInt).
+///
+/// An addrspace_cast can change address space qualifiers but does not change
+/// the underlying value.
+class CXXAddrspaceCastExpr final
+ : public CXXNamedCastExpr,
+ private llvm::TrailingObjects<CXXAddrspaceCastExpr, CXXBaseSpecifier *> {
+ CXXAddrspaceCastExpr(QualType ty, ExprValueKind VK, CastKind Kind, Expr *op,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc, SourceRange AngleBrackets)
+ : CXXNamedCastExpr(CXXAddrspaceCastExprClass, ty, VK, Kind, op, 0,
+ writtenTy, l, RParenLoc, AngleBrackets) {}
+
+ explicit CXXAddrspaceCastExpr(EmptyShell Empty)
+ : CXXNamedCastExpr(CXXAddrspaceCastExprClass, Empty, 0) {}
+
+public:
+ friend class CastExpr;
+ friend TrailingObjects;
+
+ static CXXAddrspaceCastExpr *
+ Create(const ASTContext &Context, QualType T, ExprValueKind VK, CastKind Kind,
+ Expr *Op, TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc, SourceRange AngleBrackets);
+ static CXXAddrspaceCastExpr *CreateEmpty(const ASTContext &Context);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXAddrspaceCastExprClass;
+ }
+};
+
/// A call to a literal operator (C++11 [over.literal])
/// written as a user-defined literal (C++11 [lit.ext]).
///
@@ -645,10 +691,10 @@ public:
class CXXBoolLiteralExpr : public Expr {
public:
CXXBoolLiteralExpr(bool Val, QualType Ty, SourceLocation Loc)
- : Expr(CXXBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
- false, false) {
+ : Expr(CXXBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary) {
CXXBoolLiteralExprBits.Value = Val;
CXXBoolLiteralExprBits.Loc = Loc;
+ setDependence(ExprDependence::None);
}
explicit CXXBoolLiteralExpr(EmptyShell Empty)
@@ -683,9 +729,9 @@ public:
class CXXNullPtrLiteralExpr : public Expr {
public:
CXXNullPtrLiteralExpr(QualType Ty, SourceLocation Loc)
- : Expr(CXXNullPtrLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false,
- false, false, false) {
+ : Expr(CXXNullPtrLiteralExprClass, Ty, VK_RValue, OK_Ordinary) {
CXXNullPtrLiteralExprBits.Loc = Loc;
+ setDependence(ExprDependence::None);
}
explicit CXXNullPtrLiteralExpr(EmptyShell Empty)
@@ -723,11 +769,10 @@ public:
friend class ASTStmtReader;
CXXStdInitializerListExpr(QualType Ty, Expr *SubExpr)
- : Expr(CXXStdInitializerListExprClass, Ty, VK_RValue, OK_Ordinary,
- Ty->isDependentType(), SubExpr->isValueDependent(),
- SubExpr->isInstantiationDependent(),
- SubExpr->containsUnexpandedParameterPack()),
- SubExpr(SubExpr) {}
+ : Expr(CXXStdInitializerListExprClass, Ty, VK_RValue, OK_Ordinary),
+ SubExpr(SubExpr) {
+ setDependence(computeDependence(this));
+ }
Expr *getSubExpr() { return static_cast<Expr*>(SubExpr); }
const Expr *getSubExpr() const { return static_cast<const Expr*>(SubExpr); }
@@ -762,32 +807,24 @@ public:
///
/// This represents code like \c typeid(int) or \c typeid(*objPtr)
class CXXTypeidExpr : public Expr {
+ friend class ASTStmtReader;
+
private:
llvm::PointerUnion<Stmt *, TypeSourceInfo *> Operand;
SourceRange Range;
public:
CXXTypeidExpr(QualType Ty, TypeSourceInfo *Operand, SourceRange R)
- : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
- // typeid is never type-dependent (C++ [temp.dep.expr]p4)
- false,
- // typeid is value-dependent if the type or expression are
- // dependent
- Operand->getType()->isDependentType(),
- Operand->getType()->isInstantiationDependentType(),
- Operand->getType()->containsUnexpandedParameterPack()),
- Operand(Operand), Range(R) {}
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary), Operand(Operand),
+ Range(R) {
+ setDependence(computeDependence(this));
+ }
CXXTypeidExpr(QualType Ty, Expr *Operand, SourceRange R)
- : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
- // typeid is never type-dependent (C++ [temp.dep.expr]p4)
- false,
- // typeid is value-dependent if the type or expression are
- // dependent
- Operand->isTypeDependent() || Operand->isValueDependent(),
- Operand->isInstantiationDependent(),
- Operand->containsUnexpandedParameterPack()),
- Operand(Operand), Range(R) {}
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary), Operand(Operand),
+ Range(R) {
+ setDependence(computeDependence(this));
+ }
CXXTypeidExpr(EmptyShell Empty, bool isExpr)
: Expr(CXXTypeidExprClass, Empty) {
@@ -812,22 +849,11 @@ public:
assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
return Operand.get<TypeSourceInfo *>();
}
-
- void setTypeOperandSourceInfo(TypeSourceInfo *TSI) {
- assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
- Operand = TSI;
- }
-
Expr *getExprOperand() const {
assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
return static_cast<Expr*>(Operand.get<Stmt *>());
}
- void setExprOperand(Expr *E) {
- assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
- Operand = E;
- }
-
SourceLocation getBeginLoc() const LLVM_READONLY { return Range.getBegin(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Range.getEnd(); }
SourceRange getSourceRange() const LLVM_READONLY { return Range; }
@@ -872,15 +898,12 @@ public:
MSPropertyRefExpr(Expr *baseExpr, MSPropertyDecl *decl, bool isArrow,
QualType ty, ExprValueKind VK,
- NestedNameSpecifierLoc qualifierLoc,
- SourceLocation nameLoc)
- : Expr(MSPropertyRefExprClass, ty, VK, OK_Ordinary,
- /*type-dependent*/ false, baseExpr->isValueDependent(),
- baseExpr->isInstantiationDependent(),
- baseExpr->containsUnexpandedParameterPack()),
- BaseExpr(baseExpr), TheDecl(decl),
- MemberLoc(nameLoc), IsArrow(isArrow),
- QualifierLoc(qualifierLoc) {}
+ NestedNameSpecifierLoc qualifierLoc, SourceLocation nameLoc)
+ : Expr(MSPropertyRefExprClass, ty, VK, OK_Ordinary), BaseExpr(baseExpr),
+ TheDecl(decl), MemberLoc(nameLoc), IsArrow(isArrow),
+ QualifierLoc(qualifierLoc) {
+ setDependence(computeDependence(this));
+ }
MSPropertyRefExpr(EmptyShell Empty) : Expr(MSPropertyRefExprClass, Empty) {}
@@ -948,12 +971,11 @@ class MSPropertySubscriptExpr : public Expr {
public:
MSPropertySubscriptExpr(Expr *Base, Expr *Idx, QualType Ty, ExprValueKind VK,
ExprObjectKind OK, SourceLocation RBracketLoc)
- : Expr(MSPropertySubscriptExprClass, Ty, VK, OK, Idx->isTypeDependent(),
- Idx->isValueDependent(), Idx->isInstantiationDependent(),
- Idx->containsUnexpandedParameterPack()),
+ : Expr(MSPropertySubscriptExprClass, Ty, VK, OK),
RBracketLoc(RBracketLoc) {
SubExprs[BASE_EXPR] = Base;
SubExprs[IDX_EXPR] = Idx;
+ setDependence(computeDependence(this));
}
/// Create an empty array subscript expression.
@@ -998,25 +1020,26 @@ public:
///
/// This represents code like @c __uuidof(COMTYPE) or @c __uuidof(*comPtr)
class CXXUuidofExpr : public Expr {
+ friend class ASTStmtReader;
+
private:
llvm::PointerUnion<Stmt *, TypeSourceInfo *> Operand;
- StringRef UuidStr;
+ MSGuidDecl *Guid;
SourceRange Range;
public:
- CXXUuidofExpr(QualType Ty, TypeSourceInfo *Operand, StringRef UuidStr,
+ CXXUuidofExpr(QualType Ty, TypeSourceInfo *Operand, MSGuidDecl *Guid,
SourceRange R)
- : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary, false,
- Operand->getType()->isDependentType(),
- Operand->getType()->isInstantiationDependentType(),
- Operand->getType()->containsUnexpandedParameterPack()),
- Operand(Operand), UuidStr(UuidStr), Range(R) {}
-
- CXXUuidofExpr(QualType Ty, Expr *Operand, StringRef UuidStr, SourceRange R)
- : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary, false,
- Operand->isTypeDependent(), Operand->isInstantiationDependent(),
- Operand->containsUnexpandedParameterPack()),
- Operand(Operand), UuidStr(UuidStr), Range(R) {}
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary), Operand(Operand),
+ Guid(Guid), Range(R) {
+ setDependence(computeDependence(this));
+ }
+
+ CXXUuidofExpr(QualType Ty, Expr *Operand, MSGuidDecl *Guid, SourceRange R)
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary), Operand(Operand),
+ Guid(Guid), Range(R) {
+ setDependence(computeDependence(this));
+ }
CXXUuidofExpr(EmptyShell Empty, bool isExpr)
: Expr(CXXUuidofExprClass, Empty) {
@@ -1037,24 +1060,12 @@ public:
assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
return Operand.get<TypeSourceInfo *>();
}
-
- void setTypeOperandSourceInfo(TypeSourceInfo *TSI) {
- assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
- Operand = TSI;
- }
-
Expr *getExprOperand() const {
assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
return static_cast<Expr*>(Operand.get<Stmt *>());
}
- void setExprOperand(Expr *E) {
- assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
- Operand = E;
- }
-
- void setUuidStr(StringRef US) { UuidStr = US; }
- StringRef getUuidStr() const { return UuidStr; }
+ MSGuidDecl *getGuidDecl() const { return Guid; }
SourceLocation getBeginLoc() const LLVM_READONLY { return Range.getBegin(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Range.getEnd(); }
@@ -1097,14 +1108,10 @@ public:
class CXXThisExpr : public Expr {
public:
CXXThisExpr(SourceLocation L, QualType Ty, bool IsImplicit)
- : Expr(CXXThisExprClass, Ty, VK_RValue, OK_Ordinary,
- // 'this' is type-dependent if the class type of the enclosing
- // member function is dependent (C++ [temp.dep.expr]p2)
- Ty->isDependentType(), Ty->isDependentType(),
- Ty->isInstantiationDependentType(),
- /*ContainsUnexpandedParameterPack=*/false) {
+ : Expr(CXXThisExprClass, Ty, VK_RValue, OK_Ordinary) {
CXXThisExprBits.IsImplicit = IsImplicit;
CXXThisExprBits.Loc = L;
+ setDependence(computeDependence(this));
}
CXXThisExpr(EmptyShell Empty) : Expr(CXXThisExprClass, Empty) {}
@@ -1150,12 +1157,10 @@ public:
// null if not present.
CXXThrowExpr(Expr *Operand, QualType Ty, SourceLocation Loc,
bool IsThrownVariableInScope)
- : Expr(CXXThrowExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
- Operand && Operand->isInstantiationDependent(),
- Operand && Operand->containsUnexpandedParameterPack()),
- Operand(Operand) {
+ : Expr(CXXThrowExprClass, Ty, VK_RValue, OK_Ordinary), Operand(Operand) {
CXXThrowExprBits.ThrowLoc = Loc;
CXXThrowExprBits.IsThrownVariableInScope = IsThrownVariableInScope;
+ setDependence(computeDependence(this));
}
CXXThrowExpr(EmptyShell Empty) : Expr(CXXThrowExprClass, Empty) {}
@@ -1209,16 +1214,16 @@ class CXXDefaultArgExpr final : public Expr {
DeclContext *UsedContext;
CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *Param,
- DeclContext *UsedContext)
+ DeclContext *UsedContext)
: Expr(SC,
Param->hasUnparsedDefaultArg()
? Param->getType().getNonReferenceType()
: Param->getDefaultArg()->getType(),
Param->getDefaultArg()->getValueKind(),
- Param->getDefaultArg()->getObjectKind(), false, false, false,
- false),
+ Param->getDefaultArg()->getObjectKind()),
Param(Param), UsedContext(UsedContext) {
CXXDefaultArgExprBits.Loc = Loc;
+ setDependence(ExprDependence::None);
}
public:
@@ -1374,13 +1379,12 @@ class CXXBindTemporaryExpr : public Expr {
CXXTemporary *Temp = nullptr;
Stmt *SubExpr = nullptr;
- CXXBindTemporaryExpr(CXXTemporary *temp, Expr* SubExpr)
- : Expr(CXXBindTemporaryExprClass, SubExpr->getType(),
- VK_RValue, OK_Ordinary, SubExpr->isTypeDependent(),
- SubExpr->isValueDependent(),
- SubExpr->isInstantiationDependent(),
- SubExpr->containsUnexpandedParameterPack()),
- Temp(temp), SubExpr(SubExpr) {}
+ CXXBindTemporaryExpr(CXXTemporary *temp, Expr *SubExpr)
+ : Expr(CXXBindTemporaryExprClass, SubExpr->getType(), VK_RValue,
+ OK_Ordinary),
+ Temp(temp), SubExpr(SubExpr) {
+ setDependence(computeDependence(this));
+ }
public:
CXXBindTemporaryExpr(EmptyShell Empty)
@@ -1631,12 +1635,12 @@ public:
CXXInheritedCtorInitExpr(SourceLocation Loc, QualType T,
CXXConstructorDecl *Ctor, bool ConstructsVirtualBase,
bool InheritedFromVirtualBase)
- : Expr(CXXInheritedCtorInitExprClass, T, VK_RValue, OK_Ordinary, false,
- false, false, false),
+ : Expr(CXXInheritedCtorInitExprClass, T, VK_RValue, OK_Ordinary),
Constructor(Ctor), Loc(Loc),
ConstructsVirtualBase(ConstructsVirtualBase),
InheritedFromVirtualBase(InheritedFromVirtualBase) {
assert(!T->isDependentType());
+ setDependence(ExprDependence::None);
}
/// Construct an empty C++ inheriting construction expression.
@@ -1817,26 +1821,14 @@ Stmt **CXXConstructExpr::getTrailingArgs() {
/// and which can never occur implicitly.
class LambdaExpr final : public Expr,
private llvm::TrailingObjects<LambdaExpr, Stmt *> {
+ // LambdaExpr has some data stored in LambdaExprBits.
+
/// The source range that covers the lambda introducer ([...]).
SourceRange IntroducerRange;
/// The source location of this lambda's capture-default ('=' or '&').
SourceLocation CaptureDefaultLoc;
- /// The number of captures.
- unsigned NumCaptures : 16;
-
- /// The default capture kind, which is a value of type
- /// LambdaCaptureDefault.
- unsigned CaptureDefault : 2;
-
- /// Whether this lambda had an explicit parameter list vs. an
- /// implicit (and empty) parameter list.
- unsigned ExplicitParams : 1;
-
- /// Whether this lambda had the result type explicitly specified.
- unsigned ExplicitResultType : 1;
-
/// The location of the closing brace ('}') that completes
/// the lambda.
///
@@ -1850,23 +1842,18 @@ class LambdaExpr final : public Expr,
/// Construct a lambda expression.
LambdaExpr(QualType T, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
- bool ExplicitParams, bool ExplicitResultType,
- ArrayRef<Expr *> CaptureInits, SourceLocation ClosingBrace,
- bool ContainsUnexpandedParameterPack);
+ SourceLocation CaptureDefaultLoc, bool ExplicitParams,
+ bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
+ SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack);
/// Construct an empty lambda expression.
- LambdaExpr(EmptyShell Empty, unsigned NumCaptures)
- : Expr(LambdaExprClass, Empty), NumCaptures(NumCaptures),
- CaptureDefault(LCD_None), ExplicitParams(false),
- ExplicitResultType(false) {
- getStoredStmts()[NumCaptures] = nullptr;
- }
+ LambdaExpr(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return getTrailingObjects<Stmt *>(); }
-
Stmt *const *getStoredStmts() const { return getTrailingObjects<Stmt *>(); }
+ void initBodyIfNeeded() const;
+
public:
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -1876,9 +1863,9 @@ public:
static LambdaExpr *
Create(const ASTContext &C, CXXRecordDecl *Class, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc,
- ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
- bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
- SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack);
+ bool ExplicitParams, bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits, SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack);
/// Construct a new lambda expression that will be deserialized from
/// an external source.
@@ -1887,13 +1874,11 @@ public:
/// Determine the default capture kind for this lambda.
LambdaCaptureDefault getCaptureDefault() const {
- return static_cast<LambdaCaptureDefault>(CaptureDefault);
+ return static_cast<LambdaCaptureDefault>(LambdaExprBits.CaptureDefault);
}
/// Retrieve the location of this lambda's capture-default, if any.
- SourceLocation getCaptureDefaultLoc() const {
- return CaptureDefaultLoc;
- }
+ SourceLocation getCaptureDefaultLoc() const { return CaptureDefaultLoc; }
/// Determine whether one of this lambda's captures is an init-capture.
bool isInitCapture(const LambdaCapture *Capture) const;
@@ -1916,7 +1901,7 @@ public:
capture_iterator capture_end() const;
/// Determine the number of captures in this lambda.
- unsigned capture_size() const { return NumCaptures; }
+ unsigned capture_size() const { return LambdaExprBits.NumCaptures; }
/// Retrieve this lambda's explicit captures.
capture_range explicit_captures() const;
@@ -1946,6 +1931,7 @@ public:
/// Const iterator that walks over the capture initialization
/// arguments.
+ /// FIXME: This interface is prone to being used incorrectly.
using const_capture_init_iterator = Expr *const *;
/// Retrieve the initialization expressions for this lambda's captures.
@@ -1973,13 +1959,13 @@ public:
/// Retrieve the iterator pointing one past the last
/// initialization argument for this lambda expression.
capture_init_iterator capture_init_end() {
- return capture_init_begin() + NumCaptures;
+ return capture_init_begin() + capture_size();
}
/// Retrieve the iterator pointing one past the last
/// initialization argument for this lambda expression.
const_capture_init_iterator capture_init_end() const {
- return capture_init_begin() + NumCaptures;
+ return capture_init_begin() + capture_size();
}
/// Retrieve the source range covering the lambda introducer,
@@ -2013,8 +1999,20 @@ public:
/// Whether this is a generic lambda.
bool isGenericLambda() const { return getTemplateParameterList(); }
- /// Retrieve the body of the lambda.
- CompoundStmt *getBody() const;
+ /// Retrieve the body of the lambda. Most of the time this is a
+ /// \p CompoundStmt, but it can also be a \p CoroutineBodyStmt wrapping
+ /// a \p CompoundStmt. Note that unlike functions, lambda-expressions
+ /// cannot have a function-try-block.
+ Stmt *getBody() const;
+
+ /// Retrieve the \p CompoundStmt representing the body of the lambda.
+ /// This is a convenience function for callers who do not need to handle
+ /// nodes that may wrap a \p CompoundStmt.
+ const CompoundStmt *getCompoundStmtBody() const;
+ CompoundStmt *getCompoundStmtBody() {
+ const auto *ConstThis = this;
+ return const_cast<CompoundStmt *>(ConstThis->getCompoundStmtBody());
+ }
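
A short usage sketch for the two body accessors above: getBody() may return a CoroutineBodyStmt for a coroutine lambda, while getCompoundStmtBody() always unwraps to the CompoundStmt, so clients that only need the statement list can use the latter. The helper below is hypothetical and not part of this change.

#include "clang/AST/ExprCXX.h"

// Hypothetical helper: count the lambda body's top-level statements without
// caring whether a CoroutineBodyStmt wraps the CompoundStmt.
static unsigned numTopLevelBodyStmts(const clang::LambdaExpr *Lambda) {
  const clang::CompoundStmt *Body = Lambda->getCompoundStmtBody();
  return Body->size();
}
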
/// Determine whether the lambda is mutable, meaning that any
/// captured values can be modified.
@@ -2022,10 +2020,12 @@ public:
/// Determine whether this lambda has an explicit parameter
/// list vs. an implicit (empty) parameter list.
- bool hasExplicitParameters() const { return ExplicitParams; }
+ bool hasExplicitParameters() const { return LambdaExprBits.ExplicitParams; }
/// Whether this lambda had its result type explicitly specified.
- bool hasExplicitResultType() const { return ExplicitResultType; }
+ bool hasExplicitResultType() const {
+ return LambdaExprBits.ExplicitResultType;
+ }
static bool classof(const Stmt *T) {
return T->getStmtClass() == LambdaExprClass;
@@ -2037,15 +2037,9 @@ public:
SourceLocation getEndLoc() const LLVM_READONLY { return ClosingBrace; }
- child_range children() {
- // Includes initialization exprs plus body stmt
- return child_range(getStoredStmts(), getStoredStmts() + NumCaptures + 1);
- }
-
- const_child_range children() const {
- return const_child_range(getStoredStmts(),
- getStoredStmts() + NumCaptures + 1);
- }
+ /// Includes the captures and the body of the lambda.
+ child_range children();
+ const_child_range children() const;
};
/// An expression "T()" which creates a value-initialized rvalue of type
@@ -2060,11 +2054,10 @@ public:
/// expression.
CXXScalarValueInitExpr(QualType Type, TypeSourceInfo *TypeInfo,
SourceLocation RParenLoc)
- : Expr(CXXScalarValueInitExprClass, Type, VK_RValue, OK_Ordinary, false,
- false, Type->isInstantiationDependentType(),
- Type->containsUnexpandedParameterPack()),
+ : Expr(CXXScalarValueInitExprClass, Type, VK_RValue, OK_Ordinary),
TypeInfo(TypeInfo) {
CXXScalarValueInitExprBits.RParenLoc = RParenLoc;
+ setDependence(computeDependence(this));
}
explicit CXXScalarValueInitExpr(EmptyShell Shell)
@@ -2369,15 +2362,14 @@ public:
CXXDeleteExpr(QualType Ty, bool GlobalDelete, bool ArrayForm,
bool ArrayFormAsWritten, bool UsualArrayDeleteWantsSize,
FunctionDecl *OperatorDelete, Expr *Arg, SourceLocation Loc)
- : Expr(CXXDeleteExprClass, Ty, VK_RValue, OK_Ordinary, false,
- Arg->isValueDependent(), Arg->isInstantiationDependent(),
- Arg->containsUnexpandedParameterPack()),
+ : Expr(CXXDeleteExprClass, Ty, VK_RValue, OK_Ordinary),
OperatorDelete(OperatorDelete), Argument(Arg) {
CXXDeleteExprBits.GlobalDelete = GlobalDelete;
CXXDeleteExprBits.ArrayForm = ArrayForm;
CXXDeleteExprBits.ArrayFormAsWritten = ArrayFormAsWritten;
CXXDeleteExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize;
CXXDeleteExprBits.Loc = Loc;
+ setDependence(computeDependence(this));
}
explicit CXXDeleteExpr(EmptyShell Shell) : Expr(CXXDeleteExprClass, Shell) {}
@@ -2735,15 +2727,15 @@ public:
friend class ASTStmtReader;
ArrayTypeTraitExpr(SourceLocation loc, ArrayTypeTrait att,
- TypeSourceInfo *queried, uint64_t value,
- Expr *dimension, SourceLocation rparen, QualType ty)
- : Expr(ArrayTypeTraitExprClass, ty, VK_RValue, OK_Ordinary,
- false, queried->getType()->isDependentType(),
- (queried->getType()->isInstantiationDependentType() ||
- (dimension && dimension->isInstantiationDependent())),
- queried->getType()->containsUnexpandedParameterPack()),
- ATT(att), Value(value), Dimension(dimension),
- Loc(loc), RParen(rparen), QueriedType(queried) {}
+ TypeSourceInfo *queried, uint64_t value, Expr *dimension,
+ SourceLocation rparen, QualType ty)
+ : Expr(ArrayTypeTraitExprClass, ty, VK_RValue, OK_Ordinary), ATT(att),
+ Value(value), Dimension(dimension), Loc(loc), RParen(rparen),
+ QueriedType(queried) {
+ assert(att <= ATT_Last && "invalid enum value!");
+ assert(static_cast<unsigned>(att) == ATT && "ATT overflow!");
+ setDependence(computeDependence(this));
+ }
explicit ArrayTypeTraitExpr(EmptyShell Empty)
: Expr(ArrayTypeTraitExprClass, Empty), ATT(0) {}
@@ -2801,17 +2793,15 @@ class ExpressionTraitExpr : public Expr {
public:
friend class ASTStmtReader;
- ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et,
- Expr *queried, bool value,
- SourceLocation rparen, QualType resultType)
- : Expr(ExpressionTraitExprClass, resultType, VK_RValue, OK_Ordinary,
- false, // Not type-dependent
- // Value-dependent if the argument is type-dependent.
- queried->isTypeDependent(),
- queried->isInstantiationDependent(),
- queried->containsUnexpandedParameterPack()),
+ ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et, Expr *queried,
+ bool value, SourceLocation rparen, QualType resultType)
+ : Expr(ExpressionTraitExprClass, resultType, VK_RValue, OK_Ordinary),
ET(et), Value(value), Loc(loc), RParen(rparen),
- QueriedExpression(queried) {}
+ QueriedExpression(queried) {
+ assert(et <= ET_Last && "invalid enum value!");
+ assert(static_cast<unsigned>(et) == ET && "ET overflow!");
+ setDependence(computeDependence(this));
+ }
explicit ExpressionTraitExpr(EmptyShell Empty)
: Expr(ExpressionTraitExprClass, Empty), ET(0), Value(false) {}
@@ -3305,13 +3295,15 @@ public:
/// literal is the extent of the enclosing scope.
class ExprWithCleanups final
: public FullExpr,
- private llvm::TrailingObjects<ExprWithCleanups, BlockDecl *> {
+ private llvm::TrailingObjects<
+ ExprWithCleanups,
+ llvm::PointerUnion<BlockDecl *, CompoundLiteralExpr *>> {
public:
/// The type of objects that are kept in the cleanup.
- /// It's useful to remember the set of blocks; we could also
- /// remember the set of temporaries, but there's currently
- /// no need.
- using CleanupObject = BlockDecl *;
+ /// It's useful to remember the set of blocks and block-scoped compound
+ /// literals; we could also remember the set of temporaries, but there's
+ /// currently no need.
+ using CleanupObject = llvm::PointerUnion<BlockDecl *, CompoundLiteralExpr *>;
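
Because CleanupObject is now a PointerUnion, consumers dispatch on the pointer type it holds. A minimal hedged sketch follows; the helper is hypothetical and not part of this change.

#include "clang/AST/ExprCXX.h"

// Hypothetical helper: a CleanupObject holds either a BlockDecl* or a
// CompoundLiteralExpr*; test which alternative is active before using it.
static bool isBlockCleanup(clang::ExprWithCleanups::CleanupObject Obj) {
  return Obj.is<clang::BlockDecl *>();
}
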
private:
friend class ASTStmtReader;
@@ -3964,13 +3956,10 @@ class CXXNoexceptExpr : public Expr {
public:
CXXNoexceptExpr(QualType Ty, Expr *Operand, CanThrowResult Val,
SourceLocation Keyword, SourceLocation RParen)
- : Expr(CXXNoexceptExprClass, Ty, VK_RValue, OK_Ordinary,
- /*TypeDependent*/ false,
- /*ValueDependent*/ Val == CT_Dependent,
- Val == CT_Dependent || Operand->isInstantiationDependent(),
- Operand->containsUnexpandedParameterPack()),
+ : Expr(CXXNoexceptExprClass, Ty, VK_RValue, OK_Ordinary),
Operand(Operand), Range(Keyword, RParen) {
CXXNoexceptExprBits.Value = Val == CT_Cannot;
+ setDependence(computeDependence(this, Val));
}
CXXNoexceptExpr(EmptyShell Empty) : Expr(CXXNoexceptExprClass, Empty) {}
@@ -4031,12 +4020,12 @@ public:
PackExpansionExpr(QualType T, Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions)
: Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
- Pattern->getObjectKind(), /*TypeDependent=*/true,
- /*ValueDependent=*/true, /*InstantiationDependent=*/true,
- /*ContainsUnexpandedParameterPack=*/false),
+ Pattern->getObjectKind()),
EllipsisLoc(EllipsisLoc),
NumExpansions(NumExpansions ? *NumExpansions + 1 : 0),
- Pattern(Pattern) {}
+ Pattern(Pattern) {
+ setDependence(computeDependence(this));
+ }
PackExpansionExpr(EmptyShell Empty) : Expr(PackExpansionExprClass, Empty) {}
@@ -4123,17 +4112,17 @@ class SizeOfPackExpr final
/// the given parameter pack.
SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
SourceLocation PackLoc, SourceLocation RParenLoc,
- Optional<unsigned> Length, ArrayRef<TemplateArgument> PartialArgs)
- : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false, /*ValueDependent=*/!Length,
- /*InstantiationDependent=*/!Length,
- /*ContainsUnexpandedParameterPack=*/false),
+ Optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs)
+ : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary),
OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
Length(Length ? *Length : PartialArgs.size()), Pack(Pack) {
assert((!Length || PartialArgs.empty()) &&
"have partial args for non-dependent sizeof... expression");
auto *Args = getTrailingObjects<TemplateArgument>();
std::uninitialized_copy(PartialArgs.begin(), PartialArgs.end(), Args);
+ setDependence(Length ? ExprDependence::None
+ : ExprDependence::ValueInstantiation);
}
/// Create an empty expression.
@@ -4224,12 +4213,10 @@ public:
SourceLocation Loc,
NonTypeTemplateParmDecl *Param,
Expr *Replacement)
- : Expr(SubstNonTypeTemplateParmExprClass, Ty, ValueKind, OK_Ordinary,
- Replacement->isTypeDependent(), Replacement->isValueDependent(),
- Replacement->isInstantiationDependent(),
- Replacement->containsUnexpandedParameterPack()),
+ : Expr(SubstNonTypeTemplateParmExprClass, Ty, ValueKind, OK_Ordinary),
Param(Param), Replacement(Replacement) {
SubstNonTypeTemplateParmExprBits.NameLoc = Loc;
+ setDependence(computeDependence(this));
}
SourceLocation getNameLoc() const {
@@ -4543,13 +4530,12 @@ public:
CXXFoldExpr(QualType T, SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Opcode, SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc, Optional<unsigned> NumExpansions)
- : Expr(CXXFoldExprClass, T, VK_RValue, OK_Ordinary,
- /*Dependent*/ true, true, true,
- /*ContainsUnexpandedParameterPack*/ false),
- LParenLoc(LParenLoc), EllipsisLoc(EllipsisLoc), RParenLoc(RParenLoc),
+ : Expr(CXXFoldExprClass, T, VK_RValue, OK_Ordinary), LParenLoc(LParenLoc),
+ EllipsisLoc(EllipsisLoc), RParenLoc(RParenLoc),
NumExpansions(NumExpansions ? *NumExpansions + 1 : 0), Opcode(Opcode) {
SubExprs[0] = LHS;
SubExprs[1] = RHS;
+ setDependence(computeDependence(this));
}
CXXFoldExpr(EmptyShell Empty) : Expr(CXXFoldExprClass, Empty) {}
@@ -4624,27 +4610,25 @@ public:
Expr *Ready, Expr *Suspend, Expr *Resume,
OpaqueValueExpr *OpaqueValue)
: Expr(SC, Resume->getType(), Resume->getValueKind(),
- Resume->getObjectKind(), Resume->isTypeDependent(),
- Resume->isValueDependent(), Common->isInstantiationDependent(),
- Common->containsUnexpandedParameterPack()),
+ Resume->getObjectKind()),
KeywordLoc(KeywordLoc), OpaqueValue(OpaqueValue) {
SubExprs[SubExpr::Common] = Common;
SubExprs[SubExpr::Ready] = Ready;
SubExprs[SubExpr::Suspend] = Suspend;
SubExprs[SubExpr::Resume] = Resume;
+ setDependence(computeDependence(this));
}
CoroutineSuspendExpr(StmtClass SC, SourceLocation KeywordLoc, QualType Ty,
Expr *Common)
- : Expr(SC, Ty, VK_RValue, OK_Ordinary, true, true, true,
- Common->containsUnexpandedParameterPack()),
- KeywordLoc(KeywordLoc) {
+ : Expr(SC, Ty, VK_RValue, OK_Ordinary), KeywordLoc(KeywordLoc) {
assert(Common->isTypeDependent() && Ty->isDependentType() &&
"wrong constructor for non-dependent co_await/co_yield expression");
SubExprs[SubExpr::Common] = Common;
SubExprs[SubExpr::Ready] = nullptr;
SubExprs[SubExpr::Suspend] = nullptr;
SubExprs[SubExpr::Resume] = nullptr;
+ setDependence(computeDependence(this));
}
CoroutineSuspendExpr(StmtClass SC, EmptyShell Empty) : Expr(SC, Empty) {
@@ -4741,10 +4725,7 @@ class DependentCoawaitExpr : public Expr {
public:
DependentCoawaitExpr(SourceLocation KeywordLoc, QualType Ty, Expr *Op,
UnresolvedLookupExpr *OpCoawait)
- : Expr(DependentCoawaitExprClass, Ty, VK_RValue, OK_Ordinary,
- /*TypeDependent*/ true, /*ValueDependent*/ true,
- /*InstantiationDependent*/ true,
- Op->containsUnexpandedParameterPack()),
+ : Expr(DependentCoawaitExprClass, Ty, VK_RValue, OK_Ordinary),
KeywordLoc(KeywordLoc) {
// NOTE: A co_await expression is dependent on the coroutine's promise
// type and may be dependent even when the `Op` expression is not.
@@ -4752,6 +4733,7 @@ public:
"wrong constructor for non-dependent co_await/co_yield expression");
SubExprs[0] = Op;
SubExprs[1] = OpCoawait;
+ setDependence(computeDependence(this));
}
DependentCoawaitExpr(EmptyShell Empty)
@@ -4826,6 +4808,8 @@ public:
: ExplicitCastExpr(BuiltinBitCastExprClass, T, VK, CK, SrcExpr, 0,
DstType),
KWLoc(KWLoc), RParenLoc(RParenLoc) {}
+ BuiltinBitCastExpr(EmptyShell Empty)
+ : ExplicitCastExpr(BuiltinBitCastExprClass, Empty, 0) {}
SourceLocation getBeginLoc() const LLVM_READONLY { return KWLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
index 271d487e2fc9..2a88ed5175d2 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
@@ -149,6 +149,7 @@ public:
enum RequirementKind { RK_Type, RK_Simple, RK_Compound, RK_Nested };
private:
const RequirementKind Kind;
+ // FIXME: use RequirementDependence to model dependence?
bool Dependent : 1;
bool ContainsUnexpandedParameterPack : 1;
bool Satisfied : 1;
@@ -550,4 +551,4 @@ public:
} // namespace clang
-#endif // LLVM_CLANG_AST_EXPRCONCEPTS_H
\ No newline at end of file
+#endif // LLVM_CLANG_AST_EXPRCONCEPTS_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
index d76b3a26b1f9..4b39d9ab96a6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
@@ -13,8 +13,10 @@
#ifndef LLVM_CLANG_AST_EXPROBJC_H
#define LLVM_CLANG_AST_EXPROBJC_H
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/SelectorLocationsKind.h"
@@ -53,9 +55,10 @@ class ObjCStringLiteral : public Expr {
public:
ObjCStringLiteral(StringLiteral *SL, QualType T, SourceLocation L)
- : Expr(ObjCStringLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
- String(SL), AtLoc(L) {}
+ : Expr(ObjCStringLiteralClass, T, VK_RValue, OK_Ordinary), String(SL),
+ AtLoc(L) {
+ setDependence(ExprDependence::None);
+ }
explicit ObjCStringLiteral(EmptyShell Empty)
: Expr(ObjCStringLiteralClass, Empty) {}
@@ -88,9 +91,10 @@ class ObjCBoolLiteralExpr : public Expr {
public:
ObjCBoolLiteralExpr(bool val, QualType Ty, SourceLocation l)
- : Expr(ObjCBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Value(val), Loc(l) {}
+ : Expr(ObjCBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary), Value(val),
+ Loc(l) {
+ setDependence(ExprDependence::None);
+ }
explicit ObjCBoolLiteralExpr(EmptyShell Empty)
: Expr(ObjCBoolLiteralExprClass, Empty) {}
@@ -129,13 +133,11 @@ class ObjCBoxedExpr : public Expr {
public:
friend class ASTStmtReader;
- ObjCBoxedExpr(Expr *E, QualType T, ObjCMethodDecl *method,
- SourceRange R)
- : Expr(ObjCBoxedExprClass, T, VK_RValue, OK_Ordinary,
- E->isTypeDependent(), E->isValueDependent(),
- E->isInstantiationDependent(),
- E->containsUnexpandedParameterPack()),
- SubExpr(E), BoxingMethod(method), Range(R) {}
+ ObjCBoxedExpr(Expr *E, QualType T, ObjCMethodDecl *method, SourceRange R)
+ : Expr(ObjCBoxedExprClass, T, VK_RValue, OK_Ordinary), SubExpr(E),
+ BoxingMethod(method), Range(R) {
+ setDependence(computeDependence(this));
+ }
explicit ObjCBoxedExpr(EmptyShell Empty)
: Expr(ObjCBoxedExprClass, Empty) {}
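For illustration (not part of this diff): the constructors above drop the four explicit dependence booleans in favor of a single setDependence(computeDependence(this)) call. A minimal sketch of the idea, assuming a clang tree with this patch applied (the real overloads in lib/AST/ComputeDependence.cpp may differ in detail), is that a unary wrapper node simply inherits its operand's dependence bits:

// Illustrative sketch only; assumes a clang source tree with this patch.
// The real computeDependence() overloads live in lib/AST/ComputeDependence.cpp.
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/ExprObjC.h"

static clang::ExprDependence
boxedExprDependenceSketch(const clang::ObjCBoxedExpr *E) {
  // A boxed expression is at most as dependent as the expression it boxes.
  return E->getSubExpr()->getDependence();
}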
@@ -409,14 +411,12 @@ class ObjCEncodeExpr : public Expr {
SourceLocation AtLoc, RParenLoc;
public:
- ObjCEncodeExpr(QualType T, TypeSourceInfo *EncodedType,
- SourceLocation at, SourceLocation rp)
- : Expr(ObjCEncodeExprClass, T, VK_LValue, OK_Ordinary,
- EncodedType->getType()->isDependentType(),
- EncodedType->getType()->isDependentType(),
- EncodedType->getType()->isInstantiationDependentType(),
- EncodedType->getType()->containsUnexpandedParameterPack()),
- EncodedType(EncodedType), AtLoc(at), RParenLoc(rp) {}
+ ObjCEncodeExpr(QualType T, TypeSourceInfo *EncodedType, SourceLocation at,
+ SourceLocation rp)
+ : Expr(ObjCEncodeExprClass, T, VK_LValue, OK_Ordinary),
+ EncodedType(EncodedType), AtLoc(at), RParenLoc(rp) {
+ setDependence(computeDependence(this));
+ }
explicit ObjCEncodeExpr(EmptyShell Empty) : Expr(ObjCEncodeExprClass, Empty){}
@@ -456,11 +456,12 @@ class ObjCSelectorExpr : public Expr {
SourceLocation AtLoc, RParenLoc;
public:
- ObjCSelectorExpr(QualType T, Selector selInfo,
- SourceLocation at, SourceLocation rp)
- : Expr(ObjCSelectorExprClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
- SelName(selInfo), AtLoc(at), RParenLoc(rp) {}
+ ObjCSelectorExpr(QualType T, Selector selInfo, SourceLocation at,
+ SourceLocation rp)
+ : Expr(ObjCSelectorExprClass, T, VK_RValue, OK_Ordinary),
+ SelName(selInfo), AtLoc(at), RParenLoc(rp) {
+ setDependence(ExprDependence::None);
+ }
explicit ObjCSelectorExpr(EmptyShell Empty)
: Expr(ObjCSelectorExprClass, Empty) {}
@@ -508,11 +509,12 @@ public:
friend class ASTStmtReader;
friend class ASTStmtWriter;
- ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol,
- SourceLocation at, SourceLocation protoLoc, SourceLocation rp)
- : Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
- TheProtocol(protocol), AtLoc(at), ProtoLoc(protoLoc), RParenLoc(rp) {}
+ ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol, SourceLocation at,
+ SourceLocation protoLoc, SourceLocation rp)
+ : Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary),
+ TheProtocol(protocol), AtLoc(at), ProtoLoc(protoLoc), RParenLoc(rp) {
+ setDependence(ExprDependence::None);
+ }
explicit ObjCProtocolExpr(EmptyShell Empty)
: Expr(ObjCProtocolExprClass, Empty) {}
@@ -558,17 +560,15 @@ class ObjCIvarRefExpr : public Expr {
bool IsFreeIvar : 1;
public:
- ObjCIvarRefExpr(ObjCIvarDecl *d, QualType t,
- SourceLocation l, SourceLocation oploc,
- Expr *base,
- bool arrow = false, bool freeIvar = false)
+ ObjCIvarRefExpr(ObjCIvarDecl *d, QualType t, SourceLocation l,
+ SourceLocation oploc, Expr *base, bool arrow = false,
+ bool freeIvar = false)
: Expr(ObjCIvarRefExprClass, t, VK_LValue,
- d->isBitField() ? OK_BitField : OK_Ordinary,
- /*TypeDependent=*/false, base->isValueDependent(),
- base->isInstantiationDependent(),
- base->containsUnexpandedParameterPack()),
+ d->isBitField() ? OK_BitField : OK_Ordinary),
D(d), Base(base), Loc(l), OpLoc(oploc), IsArrow(arrow),
- IsFreeIvar(freeIvar) {}
+ IsFreeIvar(freeIvar) {
+ setDependence(computeDependence(this));
+ }
explicit ObjCIvarRefExpr(EmptyShell Empty)
: Expr(ObjCIvarRefExprClass, Empty) {}
@@ -645,57 +645,53 @@ private:
llvm::PointerUnion<Stmt *, const Type *, ObjCInterfaceDecl *> Receiver;
public:
- ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation l, Expr *base)
- : Expr(ObjCPropertyRefExprClass, t, VK, OK,
- /*TypeDependent=*/false, base->isValueDependent(),
- base->isInstantiationDependent(),
- base->containsUnexpandedParameterPack()),
- PropertyOrGetter(PD, false), IdLoc(l), Receiver(base) {
+ ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation l, Expr *base)
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK), PropertyOrGetter(PD, false),
+ IdLoc(l), Receiver(base) {
assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ setDependence(computeDependence(this));
}
- ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation l, SourceLocation sl, QualType st)
- : Expr(ObjCPropertyRefExprClass, t, VK, OK,
- /*TypeDependent=*/false, false, st->isInstantiationDependentType(),
- st->containsUnexpandedParameterPack()),
- PropertyOrGetter(PD, false), IdLoc(l), ReceiverLoc(sl),
- Receiver(st.getTypePtr()) {
+ ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation l, SourceLocation sl,
+ QualType st)
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK), PropertyOrGetter(PD, false),
+ IdLoc(l), ReceiverLoc(sl), Receiver(st.getTypePtr()) {
assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ setDependence(computeDependence(this));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
QualType T, ExprValueKind VK, ExprObjectKind OK,
SourceLocation IdLoc, Expr *Base)
- : Expr(ObjCPropertyRefExprClass, T, VK, OK, false,
- Base->isValueDependent(), Base->isInstantiationDependent(),
- Base->containsUnexpandedParameterPack()),
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK),
PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), Receiver(Base) {
assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ setDependence(computeDependence(this));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
QualType T, ExprValueKind VK, ExprObjectKind OK,
- SourceLocation IdLoc,
- SourceLocation SuperLoc, QualType SuperTy)
- : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
+ SourceLocation IdLoc, SourceLocation SuperLoc,
+ QualType SuperTy)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK),
PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), ReceiverLoc(SuperLoc), Receiver(SuperTy.getTypePtr()) {
assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ setDependence(computeDependence(this));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
QualType T, ExprValueKind VK, ExprObjectKind OK,
- SourceLocation IdLoc,
- SourceLocation ReceiverLoc, ObjCInterfaceDecl *Receiver)
- : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
+ SourceLocation IdLoc, SourceLocation ReceiverLoc,
+ ObjCInterfaceDecl *Receiver)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK),
PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), ReceiverLoc(ReceiverLoc), Receiver(Receiver) {
assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ setDependence(computeDependence(this));
}
explicit ObjCPropertyRefExpr(EmptyShell Empty)
@@ -859,20 +855,14 @@ class ObjCSubscriptRefExpr : public Expr {
ObjCMethodDecl *SetAtIndexMethodDecl;
public:
- ObjCSubscriptRefExpr(Expr *base, Expr *key, QualType T,
- ExprValueKind VK, ExprObjectKind OK,
- ObjCMethodDecl *getMethod,
+ ObjCSubscriptRefExpr(Expr *base, Expr *key, QualType T, ExprValueKind VK,
+ ExprObjectKind OK, ObjCMethodDecl *getMethod,
ObjCMethodDecl *setMethod, SourceLocation RB)
- : Expr(ObjCSubscriptRefExprClass, T, VK, OK,
- base->isTypeDependent() || key->isTypeDependent(),
- base->isValueDependent() || key->isValueDependent(),
- (base->isInstantiationDependent() ||
- key->isInstantiationDependent()),
- (base->containsUnexpandedParameterPack() ||
- key->containsUnexpandedParameterPack())),
- RBracket(RB), GetAtIndexMethodDecl(getMethod),
- SetAtIndexMethodDecl(setMethod) {
- SubExprs[BASE] = base; SubExprs[KEY] = key;
+ : Expr(ObjCSubscriptRefExprClass, T, VK, OK), RBracket(RB),
+ GetAtIndexMethodDecl(getMethod), SetAtIndexMethodDecl(setMethod) {
+ SubExprs[BASE] = base;
+ SubExprs[KEY] = key;
+ setDependence(computeDependence(this));
}
explicit ObjCSubscriptRefExpr(EmptyShell Empty)
@@ -1505,11 +1495,10 @@ class ObjCIsaExpr : public Expr {
public:
ObjCIsaExpr(Expr *base, bool isarrow, SourceLocation l, SourceLocation oploc,
QualType ty)
- : Expr(ObjCIsaExprClass, ty, VK_LValue, OK_Ordinary,
- /*TypeDependent=*/false, base->isValueDependent(),
- base->isInstantiationDependent(),
- /*ContainsUnexpandedParameterPack=*/false),
- Base(base), IsaMemberLoc(l), OpLoc(oploc), IsArrow(isarrow) {}
+ : Expr(ObjCIsaExprClass, ty, VK_LValue, OK_Ordinary), Base(base),
+ IsaMemberLoc(l), OpLoc(oploc), IsArrow(isarrow) {
+ setDependence(computeDependence(this));
+ }
/// Build an empty expression.
explicit ObjCIsaExpr(EmptyShell Empty) : Expr(ObjCIsaExprClass, Empty) {}
@@ -1591,12 +1580,10 @@ class ObjCIndirectCopyRestoreExpr : public Expr {
public:
ObjCIndirectCopyRestoreExpr(Expr *operand, QualType type, bool shouldCopy)
- : Expr(ObjCIndirectCopyRestoreExprClass, type, VK_LValue, OK_Ordinary,
- operand->isTypeDependent(), operand->isValueDependent(),
- operand->isInstantiationDependent(),
- operand->containsUnexpandedParameterPack()),
+ : Expr(ObjCIndirectCopyRestoreExprClass, type, VK_LValue, OK_Ordinary),
Operand(operand) {
setShouldCopy(shouldCopy);
+ setDependence(computeDependence(this));
}
Expr *getSubExpr() { return cast<Expr>(Operand); }
@@ -1705,9 +1692,10 @@ class ObjCAvailabilityCheckExpr : public Expr {
public:
ObjCAvailabilityCheckExpr(VersionTuple VersionToCheck, SourceLocation AtLoc,
SourceLocation RParen, QualType Ty)
- : Expr(ObjCAvailabilityCheckExprClass, Ty, VK_RValue, OK_Ordinary, false,
- false, false, false),
- VersionToCheck(VersionToCheck), AtLoc(AtLoc), RParen(RParen) {}
+ : Expr(ObjCAvailabilityCheckExprClass, Ty, VK_RValue, OK_Ordinary),
+ VersionToCheck(VersionToCheck), AtLoc(AtLoc), RParen(RParen) {
+ setDependence(ExprDependence::None);
+ }
explicit ObjCAvailabilityCheckExpr(EmptyShell Shell)
: Expr(ObjCAvailabilityCheckExprClass, Shell) {}
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
index 5607d2d1dc58..be5dda992334 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
@@ -13,62 +13,66 @@
#ifndef LLVM_CLANG_AST_EXPROPENMP_H
#define LLVM_CLANG_AST_EXPROPENMP_H
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Expr.h"
namespace clang {
-/// OpenMP 4.0 [2.4, Array Sections].
+/// OpenMP 5.0 [2.1.5, Array Sections].
/// To specify an array section in an OpenMP construct, array subscript
/// expressions are extended with the following syntax:
/// \code
+/// [ lower-bound : length : stride ]
+/// [ lower-bound : length : ]
/// [ lower-bound : length ]
+/// [ lower-bound : : stride ]
+/// [ lower-bound : : ]
/// [ lower-bound : ]
+/// [ : length : stride ]
+/// [ : length : ]
/// [ : length ]
+/// [ : : stride ]
+/// [ : : ]
/// [ : ]
/// \endcode
/// The array section must be a subset of the original array.
/// Array sections are allowed on multidimensional arrays. Base language array
/// subscript expressions can be used to specify length-one dimensions of
/// multidimensional array sections.
-/// The lower-bound and length are integral type expressions. When evaluated
+/// Each of the lower-bound, length, and stride expressions, if specified,
+/// must be an integral type expression of the base language. When evaluated
/// they represent a set of integer values as follows:
/// \code
-/// { lower-bound, lower-bound + 1, lower-bound + 2,... , lower-bound + length -
-/// 1 }
+/// { lower-bound, lower-bound + stride, lower-bound + 2 * stride,... ,
+/// lower-bound + ((length - 1) * stride) }
/// \endcode
/// The lower-bound and length must evaluate to non-negative integers.
+/// The stride must evaluate to a positive integer.
/// When the size of the array dimension is not known, the length must be
/// specified explicitly.
-/// When the length is absent, it defaults to the size of the array dimension
-/// minus the lower-bound.
-/// When the lower-bound is absent it defaults to 0.
+/// When the stride is absent it defaults to 1.
+/// When the length is absent it defaults to ⌈(size − lower-bound)/stride⌉,
+/// where size is the size of the array dimension. When the lower-bound is
+/// absent it defaults to 0.
class OMPArraySectionExpr : public Expr {
- enum { BASE, LOWER_BOUND, LENGTH, END_EXPR };
+ enum { BASE, LOWER_BOUND, LENGTH, STRIDE, END_EXPR };
Stmt *SubExprs[END_EXPR];
- SourceLocation ColonLoc;
+ SourceLocation ColonLocFirst;
+ SourceLocation ColonLocSecond;
SourceLocation RBracketLoc;
public:
- OMPArraySectionExpr(Expr *Base, Expr *LowerBound, Expr *Length, QualType Type,
- ExprValueKind VK, ExprObjectKind OK,
- SourceLocation ColonLoc, SourceLocation RBracketLoc)
- : Expr(
- OMPArraySectionExprClass, Type, VK, OK,
- Base->isTypeDependent() ||
- (LowerBound && LowerBound->isTypeDependent()) ||
- (Length && Length->isTypeDependent()),
- Base->isValueDependent() ||
- (LowerBound && LowerBound->isValueDependent()) ||
- (Length && Length->isValueDependent()),
- Base->isInstantiationDependent() ||
- (LowerBound && LowerBound->isInstantiationDependent()) ||
- (Length && Length->isInstantiationDependent()),
- Base->containsUnexpandedParameterPack() ||
- (LowerBound && LowerBound->containsUnexpandedParameterPack()) ||
- (Length && Length->containsUnexpandedParameterPack())),
- ColonLoc(ColonLoc), RBracketLoc(RBracketLoc) {
+ OMPArraySectionExpr(Expr *Base, Expr *LowerBound, Expr *Length, Expr *Stride,
+ QualType Type, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond, SourceLocation RBracketLoc)
+ : Expr(OMPArraySectionExprClass, Type, VK, OK),
+ ColonLocFirst(ColonLocFirst), ColonLocSecond(ColonLocSecond),
+ RBracketLoc(RBracketLoc) {
SubExprs[BASE] = Base;
SubExprs[LOWER_BOUND] = LowerBound;
SubExprs[LENGTH] = Length;
+ SubExprs[STRIDE] = Stride;
+ setDependence(computeDependence(this));
}
/// Create an empty array section expression.
@@ -100,13 +104,22 @@ public:
/// Set length of the array section.
void setLength(Expr *E) { SubExprs[LENGTH] = E; }
+ /// Get stride of array section.
+ Expr *getStride() { return cast_or_null<Expr>(SubExprs[STRIDE]); }
+ const Expr *getStride() const { return cast_or_null<Expr>(SubExprs[STRIDE]); }
+ /// Set stride of the array section.
+ void setStride(Expr *E) { SubExprs[STRIDE] = E; }
+
SourceLocation getBeginLoc() const LLVM_READONLY {
return getBase()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY { return RBracketLoc; }
- SourceLocation getColonLoc() const { return ColonLoc; }
- void setColonLoc(SourceLocation L) { ColonLoc = L; }
+ SourceLocation getColonLocFirst() const { return ColonLocFirst; }
+ void setColonLocFirst(SourceLocation L) { ColonLocFirst = L; }
+
+ SourceLocation getColonLocSecond() const { return ColonLocSecond; }
+ void setColonLocSecond(SourceLocation L) { ColonLocSecond = L; }
SourceLocation getRBracketLoc() const { return RBracketLoc; }
void setRBracketLoc(SourceLocation L) { RBracketLoc = L; }
@@ -127,6 +140,286 @@ public:
return const_child_range(&SubExprs[BASE], &SubExprs[END_EXPR]);
}
};
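For illustration (not part of this diff): the index set denoted by a strided section follows directly from the formula documented above. A minimal standalone sketch:

// Standalone illustration: the section a[2:3:4] (lower-bound 2, length 3,
// stride 4) denotes { a[2], a[6], a[10] }. OpenMP 5.0 accepts such strided
// sections in, e.g., 'target update' motion clauses.
#include <cstdio>

int main() {
  const int LowerBound = 2, Length = 3, Stride = 4;
  for (int K = 0; K < Length; ++K)
    std::printf("a[%d]\n", LowerBound + K * Stride); // prints a[2], a[6], a[10]
  return 0;
}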
+
+/// An OpenMP 5.0 array shaping operation. It is written like a C-style cast
+/// and reinterprets a pointer expression as an array with the given shape,
+/// using the syntax ([s1][s2]...[sn])expr. For example: @c ([3][3])f shapes
+/// the pointer @c f into a 3x3 array.
+class OMPArrayShapingExpr final
+ : public Expr,
+ private llvm::TrailingObjects<OMPArrayShapingExpr, Expr *, SourceRange> {
+ friend TrailingObjects;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+ /// Base node.
+ SourceLocation LPLoc; /// The location of the left paren
+ SourceLocation RPLoc; /// The location of the right paren
+ unsigned NumDims = 0; /// Number of dimensions in the shaping expression.
+
+ /// Construct full expression.
+ OMPArrayShapingExpr(QualType ExprTy, Expr *Op, SourceLocation L,
+ SourceLocation R, ArrayRef<Expr *> Dims);
+
+ /// Construct an empty expression.
+ explicit OMPArrayShapingExpr(EmptyShell Shell, unsigned NumDims)
+ : Expr(OMPArrayShapingExprClass, Shell), NumDims(NumDims) {}
+
+ /// Sets the dimensions for the array shaping.
+ void setDimensions(ArrayRef<Expr *> Dims);
+
+ /// Sets the base expression for array shaping operation.
+ void setBase(Expr *Op) { getTrailingObjects<Expr *>()[NumDims] = Op; }
+
+ /// Sets source ranges for the brackets in the array shaping operation.
+ void setBracketsRanges(ArrayRef<SourceRange> BR);
+
+ unsigned numTrailingObjects(OverloadToken<Expr *>) const {
+ // Add an extra one for the base expression.
+ return NumDims + 1;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<SourceRange>) const {
+ return NumDims;
+ }
+
+public:
+ static OMPArrayShapingExpr *Create(const ASTContext &Context, QualType T,
+ Expr *Op, SourceLocation L,
+ SourceLocation R, ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> BracketRanges);
+
+ static OMPArrayShapingExpr *CreateEmpty(const ASTContext &Context,
+ unsigned NumDims);
+
+ SourceLocation getLParenLoc() const { return LPLoc; }
+ void setLParenLoc(SourceLocation L) { LPLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RPLoc; }
+ void setRParenLoc(SourceLocation L) { RPLoc = L; }
+
+ SourceLocation getBeginLoc() const LLVM_READONLY { return LPLoc; }
+ SourceLocation getEndLoc() const LLVM_READONLY {
+ return getBase()->getEndLoc();
+ }
+
+ /// Fetches the dimensions of the array shaping expression.
+ ArrayRef<Expr *> getDimensions() const {
+ return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumDims);
+ }
+
+ /// Fetches source ranges for the brackets of the array shaping expression.
+ ArrayRef<SourceRange> getBracketsRanges() const {
+ return llvm::makeArrayRef(getTrailingObjects<SourceRange>(), NumDims);
+ }
+
+ /// Fetches the base expression of the array shaping expression.
+ Expr *getBase() { return getTrailingObjects<Expr *>()[NumDims]; }
+ const Expr *getBase() const { return getTrailingObjects<Expr *>()[NumDims]; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPArrayShapingExprClass;
+ }
+
+ // Iterators
+ child_range children() {
+ Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
+ return child_range(Begin, Begin + NumDims + 1);
+ }
+ const_child_range children() const {
+ Stmt *const *Begin =
+ reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
+ return const_child_range(Begin, Begin + NumDims + 1);
+ }
+};
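For illustration (not part of this diff): a hedged usage sketch of the array-shaping operator this node models, assuming OpenMP 5.0 array-shaping support in 'target update' motion clauses; the function name and shape are invented for the example:

// Sketch: 'p' is assumed to point at nx*ny contiguous ints that are already
// mapped to the device; (([nx][ny])p) reinterprets the pointer as an
// nx-by-ny array so that a single row can be named as a section.
void update_row(int *p, int nx, int ny, int row) {
  #pragma omp target update from((([nx][ny])p)[row][0:ny])
}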
+
+/// Helper expressions and declaration for OMPIteratorExpr class for each
+/// iteration space.
+struct OMPIteratorHelperData {
+ /// Internal normalized counter.
+ VarDecl *CounterVD = nullptr;
+ /// Normalized upper bound. Normalized loop iterates from 0 to Upper with
+ /// step 1.
+ Expr *Upper = nullptr;
+ /// Update expression for the originally specified iteration variable,
+ /// calculated as VD = Begin + CounterVD * Step;
+ Expr *Update = nullptr;
+ /// Updater for the internal counter: ++CounterVD;
+ Expr *CounterUpdate = nullptr;
+};
+
+/// OpenMP 5.0 [2.1.6 Iterators]
+/// Iterators are identifiers that expand to multiple values in the clause on
+/// which they appear.
+/// The syntax of the iterator modifier is as follows:
+/// \code
+/// iterator(iterators-definition)
+/// \endcode
+/// where iterators-definition is one of the following:
+/// \code
+/// iterator-specifier [, iterators-definition ]
+/// \endcode
+/// where iterator-specifier is one of the following:
+/// \code
+/// [ iterator-type ] identifier = range-specification
+/// \endcode
+/// where identifier is a base language identifier.
+/// iterator-type is a type name.
+/// range-specification is of the form begin:end[:step], where begin and end
+/// are expressions whose types can be converted to iterator-type and step is
+/// an integral expression.
+/// In an iterator-specifier, if the iterator-type is not specified then the
+/// type of that iterator is int.
+/// The iterator-type must be an integral or pointer type.
+/// The iterator-type must not be const qualified.
+class OMPIteratorExpr final
+ : public Expr,
+ private llvm::TrailingObjects<OMPIteratorExpr, Decl *, Expr *,
+ SourceLocation, OMPIteratorHelperData> {
+public:
+ /// Iterator range representation begin:end[:step].
+ struct IteratorRange {
+ Expr *Begin = nullptr;
+ Expr *End = nullptr;
+ Expr *Step = nullptr;
+ };
+ /// Iterator definition representation.
+ struct IteratorDefinition {
+ Decl *IteratorDecl = nullptr;
+ IteratorRange Range;
+ SourceLocation AssignmentLoc;
+ SourceLocation ColonLoc, SecondColonLoc;
+ };
+
+private:
+ friend TrailingObjects;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ /// Offset in the list of expressions for subelements of the ranges.
+ enum class RangeExprOffset {
+ Begin = 0,
+ End = 1,
+ Step = 2,
+ Total = 3,
+ };
+ /// Offset in the list of locations for subelements of colon symbols
+ /// locations.
+ enum class RangeLocOffset {
+ AssignLoc = 0,
+ FirstColonLoc = 1,
+ SecondColonLoc = 2,
+ Total = 3,
+ };
+ /// Location of 'iterator' keyword.
+ SourceLocation IteratorKwLoc;
+ /// Location of '('.
+ SourceLocation LPLoc;
+ /// Location of ')'.
+ SourceLocation RPLoc;
+ /// Number of iterator definitions.
+ unsigned NumIterators = 0;
+
+ OMPIteratorExpr(QualType ExprTy, SourceLocation IteratorKwLoc,
+ SourceLocation L, SourceLocation R,
+ ArrayRef<IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers);
+
+ /// Construct an empty expression.
+ explicit OMPIteratorExpr(EmptyShell Shell, unsigned NumIterators)
+ : Expr(OMPIteratorExprClass, Shell), NumIterators(NumIterators) {}
+
+ /// Sets basic declaration for the specified iterator definition.
+ void setIteratorDeclaration(unsigned I, Decl *D);
+
+ /// Sets the location of the assignment symbol for the specified iterator
+ /// definition.
+ void setAssignmentLoc(unsigned I, SourceLocation Loc);
+
+ /// Sets begin, end and optional step expressions for the specified iterator
+ /// definition.
+ void setIteratorRange(unsigned I, Expr *Begin, SourceLocation ColonLoc,
+ Expr *End, SourceLocation SecondColonLoc, Expr *Step);
+
+ /// Sets helpers for the specified iteration space.
+ void setHelper(unsigned I, const OMPIteratorHelperData &D);
+
+ unsigned numTrailingObjects(OverloadToken<Decl *>) const {
+ return NumIterators;
+ }
+
+ unsigned numTrailingObjects(OverloadToken<Expr *>) const {
+ return NumIterators * static_cast<int>(RangeExprOffset::Total);
+ }
+
+ unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
+ return NumIterators * static_cast<int>(RangeLocOffset::Total);
+ }
+
+public:
+ static OMPIteratorExpr *Create(const ASTContext &Context, QualType T,
+ SourceLocation IteratorKwLoc, SourceLocation L,
+ SourceLocation R,
+ ArrayRef<IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers);
+
+ static OMPIteratorExpr *CreateEmpty(const ASTContext &Context,
+ unsigned NumIterators);
+
+ SourceLocation getLParenLoc() const { return LPLoc; }
+ void setLParenLoc(SourceLocation L) { LPLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RPLoc; }
+ void setRParenLoc(SourceLocation L) { RPLoc = L; }
+
+ SourceLocation getIteratorKwLoc() const { return IteratorKwLoc; }
+ void setIteratorKwLoc(SourceLocation L) { IteratorKwLoc = L; }
+ SourceLocation getBeginLoc() const LLVM_READONLY { return IteratorKwLoc; }
+ SourceLocation getEndLoc() const LLVM_READONLY { return RPLoc; }
+
+ /// Gets the iterator declaration for the given iterator.
+ Decl *getIteratorDecl(unsigned I);
+ const Decl *getIteratorDecl(unsigned I) const {
+ return const_cast<OMPIteratorExpr *>(this)->getIteratorDecl(I);
+ }
+
+ /// Gets the iterator range for the given iterator.
+ IteratorRange getIteratorRange(unsigned I);
+ const IteratorRange getIteratorRange(unsigned I) const {
+ return const_cast<OMPIteratorExpr *>(this)->getIteratorRange(I);
+ }
+
+ /// Gets the location of '=' for the given iterator definition.
+ SourceLocation getAssignLoc(unsigned I) const;
+ /// Gets the location of the first ':' in the range for the given iterator
+ /// definition.
+ SourceLocation getColonLoc(unsigned I) const;
+ /// Gets the location of the second ':' (if any) in the range for the given
+ /// iterator definition.
+ SourceLocation getSecondColonLoc(unsigned I) const;
+
+ /// Returns number of iterator definitions.
+ unsigned numOfIterators() const { return NumIterators; }
+
+ /// Fetches helper data for the specified iteration space.
+ OMPIteratorHelperData &getHelper(unsigned I);
+ const OMPIteratorHelperData &getHelper(unsigned I) const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPIteratorExprClass;
+ }
+
+ // Iterators
+ child_range children() {
+ Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
+ return child_range(
+ Begin, Begin + NumIterators * static_cast<int>(RangeExprOffset::Total));
+ }
+ const_child_range children() const {
+ Stmt *const *Begin =
+ reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
+ return const_child_range(
+ Begin, Begin + NumIterators * static_cast<int>(RangeExprOffset::Total));
+ }
+};
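For illustration (not part of this diff): a hedged usage sketch of the iterator modifier this node models, assuming OpenMP 5.0 iterator support in the 'depend' clause; the function and variable names are invented for the example:

// Sketch: the iterator expands to n separate 'in' dependences on
// v[0], ..., v[n-1], instead of listing each element by hand.
void consume(const int *v, int n) {
  #pragma omp task depend(iterator(int it = 0 : n), in : v[it])
  {
    long long Sum = 0;
    for (int j = 0; j < n; ++j)
      Sum += v[j];
    (void)Sum; // placeholder for real work on the values read
  }
}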
+
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
index 899ac3f66937..def877b91816 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
@@ -17,7 +17,6 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/Module.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -39,6 +38,7 @@ namespace clang {
class ASTConsumer;
class ASTContext;
+class ASTSourceDescriptor;
class CXXBaseSpecifier;
class CXXCtorInitializer;
class CXXRecordDecl;
@@ -165,31 +165,6 @@ public:
/// object file.
virtual bool DeclIsFromPCHWithObjectFile(const Decl *D) { return false; }
- /// Abstracts clang modules and precompiled header files and holds
- /// everything needed to generate debug info for an imported module
- /// or PCH.
- class ASTSourceDescriptor {
- StringRef PCHModuleName;
- StringRef Path;
- StringRef ASTFile;
- ASTFileSignature Signature;
- const Module *ClangModule = nullptr;
-
- public:
- ASTSourceDescriptor() = default;
- ASTSourceDescriptor(StringRef Name, StringRef Path, StringRef ASTFile,
- ASTFileSignature Signature)
- : PCHModuleName(std::move(Name)), Path(std::move(Path)),
- ASTFile(std::move(ASTFile)), Signature(Signature) {}
- ASTSourceDescriptor(const Module &M);
-
- std::string getModuleName() const;
- StringRef getPath() const { return Path; }
- StringRef getASTFile() const { return ASTFile; }
- ASTFileSignature getSignature() const { return Signature; }
- const Module *getModuleOrNull() const { return ClangModule; }
- };
-
/// Return a descriptor for the corresponding module, if one exists.
virtual llvm::Optional<ASTSourceDescriptor> getSourceDescriptor(unsigned ID);
@@ -504,9 +479,8 @@ struct PointerLikeTypeTraits<
static void *getAsVoidPointer(Ptr P) { return P.getOpaqueValue(); }
static Ptr getFromVoidPointer(void *P) { return Ptr::getFromOpaqueValue(P); }
- enum {
- NumLowBitsAvailable = PointerLikeTypeTraits<T>::NumLowBitsAvailable - 1
- };
+ static constexpr int NumLowBitsAvailable =
+ PointerLikeTypeTraits<T>::NumLowBitsAvailable - 1;
};
} // namespace llvm
diff --git a/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h b/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
index 145e961a23a3..d8ac498be54f 100644
--- a/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_GLOBALDECL_H
#define LLVM_CLANG_AST_GLOBALDECL_H
+#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
@@ -33,17 +34,31 @@ enum class DynamicInitKind : unsigned {
AtExit,
};
+enum class KernelReferenceKind : unsigned {
+ Kernel = 0,
+ Stub = 1,
+};
+
/// GlobalDecl - represents a global declaration. This can either be a
/// CXXConstructorDecl and the constructor type (Base, Complete).
-/// a CXXDestructorDecl and the destructor type (Base, Complete) or
+/// a CXXDestructorDecl and the destructor type (Base, Complete),
+/// a FunctionDecl and the kernel reference type (Kernel, Stub), or
/// a VarDecl, a FunctionDecl or a BlockDecl.
+///
+/// When a new type of GlobalDecl is added, the following places should
+/// be updated to convert a Decl* to a GlobalDecl:
+/// PredefinedExpr::ComputeName() in lib/AST/Expr.cpp,
+/// getParentOfLocalEntity() in lib/AST/ItaniumMangle.cpp, and
+/// ASTNameGenerator::Implementation::writeFuncOrVarName() in lib/AST/Mangle.cpp.
+///
class GlobalDecl {
- llvm::PointerIntPair<const Decl *, 2> Value;
+ llvm::PointerIntPair<const Decl *, 3> Value;
unsigned MultiVersionIndex = 0;
void Init(const Decl *D) {
assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
+ assert(!D->hasAttr<CUDAGlobalAttr>() && "Use other ctor with GPU kernels!");
Value.setPointer(D);
}
@@ -53,8 +68,17 @@ public:
GlobalDecl(const VarDecl *D) { Init(D);}
GlobalDecl(const FunctionDecl *D, unsigned MVIndex = 0)
: MultiVersionIndex(MVIndex) {
- Init(D);
+ if (!D->hasAttr<CUDAGlobalAttr>()) {
+ Init(D);
+ return;
+ }
+ Value.setPointerAndInt(D, unsigned(getDefaultKernelReference(D)));
+ }
+ GlobalDecl(const FunctionDecl *D, KernelReferenceKind Kind)
+ : Value(D, unsigned(Kind)) {
+ assert(D->hasAttr<CUDAGlobalAttr>() && "Decl is not a GPU kernel!");
}
+ GlobalDecl(const NamedDecl *D) { Init(D); }
GlobalDecl(const BlockDecl *D) { Init(D); }
GlobalDecl(const CapturedDecl *D) { Init(D); }
GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
@@ -94,13 +118,22 @@ public:
}
unsigned getMultiVersionIndex() const {
- assert(isa<FunctionDecl>(getDecl()) &&
+ assert(isa<FunctionDecl>(getDecl()) &&
+ !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
!isa<CXXConstructorDecl>(getDecl()) &&
!isa<CXXDestructorDecl>(getDecl()) &&
"Decl is not a plain FunctionDecl!");
return MultiVersionIndex;
}
+ KernelReferenceKind getKernelReferenceKind() const {
+ assert(isa<FunctionDecl>(getDecl()) &&
+ cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
+ "Decl is not a GPU kernel!");
+ return static_cast<KernelReferenceKind>(Value.getInt());
+ }
+
friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
return LHS.Value == RHS.Value &&
LHS.MultiVersionIndex == RHS.MultiVersionIndex;
@@ -108,12 +141,19 @@ public:
void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+ explicit operator bool() const { return getAsOpaquePtr(); }
+
static GlobalDecl getFromOpaquePtr(void *P) {
GlobalDecl GD;
GD.Value.setFromOpaqueValue(P);
return GD;
}
+ static KernelReferenceKind getDefaultKernelReference(const FunctionDecl *D) {
+ return D->getLangOpts().CUDAIsDevice ? KernelReferenceKind::Kernel
+ : KernelReferenceKind::Stub;
+ }
+
GlobalDecl getWithDecl(const Decl *D) {
GlobalDecl Result(*this);
Result.Value.setPointer(D);
@@ -136,6 +176,7 @@ public:
GlobalDecl getWithMultiVersionIndex(unsigned Index) {
assert(isa<FunctionDecl>(getDecl()) &&
+ !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
!isa<CXXConstructorDecl>(getDecl()) &&
!isa<CXXDestructorDecl>(getDecl()) &&
"Decl is not a plain FunctionDecl!");
@@ -143,6 +184,15 @@ public:
Result.MultiVersionIndex = Index;
return Result;
}
+
+ GlobalDecl getWithKernelReferenceKind(KernelReferenceKind Kind) {
+ assert(isa<FunctionDecl>(getDecl()) &&
+ cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
+ "Decl is not a GPU kernel!");
+ GlobalDecl Result(*this);
+ Result.Value.setInt(unsigned(Kind));
+ return Result;
+ }
};
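For illustration (not part of this diff): a hedged sketch of how the kernel/stub distinction introduced above might be consumed, assuming a clang tree with this patch applied (mangleName(GlobalDecl, raw_ostream &) is declared later in this diff); the helper name is invented:

// Sketch only; FD is assumed to carry CUDAGlobalAttr, otherwise the
// GlobalDecl(FD, Kind) constructor assertion fires.
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "llvm/Support/raw_ostream.h"

static void mangleKernelAndStub(clang::MangleContext &MC,
                                const clang::FunctionDecl *FD,
                                llvm::raw_ostream &OS) {
  using clang::GlobalDecl;
  using clang::KernelReferenceKind;
  MC.mangleName(GlobalDecl(FD, KernelReferenceKind::Kernel), OS); // device entry
  MC.mangleName(GlobalDecl(FD, KernelReferenceKind::Stub), OS);   // host-side stub
}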
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h b/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
index 4023e023e9d5..4e7162992418 100644
--- a/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
@@ -23,10 +23,13 @@
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
+#include "clang/AST/Type.h"
#include "llvm/Support/JSON.h"
namespace clang {
+class APValue;
+
class NodeStreamer {
bool FirstChild = true;
bool TopLevel = true;
@@ -64,7 +67,7 @@ public:
// We need to capture an owning-string in the lambda because the lambda
// is invoked in a deferred manner.
- std::string LabelStr = !Label.empty() ? Label : "inner";
+ std::string LabelStr(!Label.empty() ? Label : "inner");
bool WasFirstChild = FirstChild;
auto DumpWithIndent = [=](bool IsLastChild) {
if (WasFirstChild) {
@@ -201,6 +204,7 @@ public:
void Visit(const OMPClause *C);
void Visit(const BlockDecl::Capture &C);
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const APValue &Value, QualType Ty);
void VisitTypedefType(const TypedefType *TT);
void VisitFunctionType(const FunctionType *T);
diff --git a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
index 1073174bcf91..7e845ad03587 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
@@ -35,10 +35,7 @@ class LocInfoType : public Type {
TypeSourceInfo *DeclInfo;
LocInfoType(QualType ty, TypeSourceInfo *TInfo)
- : Type((TypeClass)LocInfo, ty, ty->isDependentType(),
- ty->isInstantiationDependentType(), ty->isVariablyModifiedType(),
- ty->containsUnexpandedParameterPack()),
- DeclInfo(TInfo) {
+ : Type((TypeClass)LocInfo, ty, ty->getDependence()), DeclInfo(TInfo) {
assert(getTypeClass() == (TypeClass)LocInfo && "LocInfo didn't fit in TC?");
}
friend class Sema;
diff --git a/contrib/llvm-project/clang/include/clang/AST/Mangle.h b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
index 5db5c5b977da..011d1faab8ea 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Mangle.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_AST_MANGLE_H
#include "clang/AST/Decl.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "llvm/ADT/DenseMap.h"
@@ -96,8 +97,8 @@ public:
virtual bool shouldMangleStringLiteral(const StringLiteral *SL) = 0;
// FIXME: consider replacing raw_ostream & with something like SmallString &.
- void mangleName(const NamedDecl *D, raw_ostream &);
- virtual void mangleCXXName(const NamedDecl *D, raw_ostream &) = 0;
+ void mangleName(GlobalDecl GD, raw_ostream &);
+ virtual void mangleCXXName(GlobalDecl GD, raw_ostream &) = 0;
virtual void mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
raw_ostream &) = 0;
@@ -109,11 +110,8 @@ public:
raw_ostream &) = 0;
virtual void mangleCXXRTTI(QualType T, raw_ostream &) = 0;
virtual void mangleCXXRTTIName(QualType T, raw_ostream &) = 0;
- virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &) = 0;
- virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &) = 0;
virtual void mangleStringLiteral(const StringLiteral *SL, raw_ostream &) = 0;
+ virtual void mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream&);
void mangleGlobalBlock(const BlockDecl *BD,
const NamedDecl *ID,
@@ -151,9 +149,14 @@ public:
};
class ItaniumMangleContext : public MangleContext {
+ bool IsUniqueNameMangler = false;
public:
explicit ItaniumMangleContext(ASTContext &C, DiagnosticsEngine &D)
: MangleContext(C, D, MK_Itanium) {}
+ explicit ItaniumMangleContext(ASTContext &C, DiagnosticsEngine &D,
+ bool IsUniqueNameMangler)
+ : MangleContext(C, D, MK_Itanium),
+ IsUniqueNameMangler(IsUniqueNameMangler) {}
virtual void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) = 0;
virtual void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) = 0;
@@ -172,12 +175,17 @@ public:
virtual void mangleLambdaSig(const CXXRecordDecl *Lambda, raw_ostream &) = 0;
+ virtual void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &) = 0;
+
+ bool isUniqueNameMangler() { return IsUniqueNameMangler; }
+
static bool classof(const MangleContext *C) {
return C->getKind() == MK_Itanium;
}
static ItaniumMangleContext *create(ASTContext &Context,
- DiagnosticsEngine &Diags);
+ DiagnosticsEngine &Diags,
+ bool IsUniqueNameMangler = false);
};
class MicrosoftMangleContext : public MangleContext {
diff --git a/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
index c6fae6f465ff..540ac3df48fe 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
#define LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
+#include "clang/AST/DependenceFlags.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/FoldingSet.h"
@@ -199,6 +200,8 @@ public:
return nullptr;
}
+ NestedNameSpecifierDependence getDependence() const;
+
/// Whether this nested name specifier refers to a dependent
/// type or not.
bool isDependent() const;
@@ -211,6 +214,9 @@ public:
/// parameter pack (for C++11 variadic templates).
bool containsUnexpandedParameterPack() const;
+ /// Whether this nested name specifier contains an error.
+ bool containsErrors() const;
+
/// Print this nested name specifier to the given output stream. If
/// `ResolveTemplateArguments` is true, we'll print actual types, e.g.
/// `ns::SomeTemplate<int, MyClass>` instead of
diff --git a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
index aafcedb9d10b..c95516538ad1 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
@@ -1,4 +1,4 @@
-//===-- NonTrivialTypeVisitor.h - Visitor for non-trivial Types *- C++ --*-===//
+//===-- NonTrivialTypeVisitor.h - Visitor for non-trivial Types -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ODRHash.h b/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
index cd4a6f37f5db..2e8593e0b835 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
@@ -89,7 +89,7 @@ public:
// Save booleans until the end to lower the size of data to process.
void AddBoolean(bool value);
- static bool isWhitelistedDecl(const Decl* D, const DeclContext *Parent);
+ static bool isDeclToBeProcessed(const Decl* D, const DeclContext *Parent);
private:
void AddDeclarationNameImpl(DeclarationName Name);
diff --git a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
index 26f8389f9cfa..6de7b6deb514 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
@@ -31,6 +31,7 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
@@ -284,12 +285,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Allocator(A) {}
+ : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Allocator(A) {}
/// Build an empty clause.
OMPAllocatorClause()
- : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
+ SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -314,7 +316,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_allocator;
+ return T->getClauseKind() == llvm::omp::OMPC_allocator;
}
};
@@ -349,17 +351,17 @@ class OMPAllocateClause final
OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
Expr *Allocator, SourceLocation ColonLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
- EndLoc, N),
+ : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc,
+ LParenLoc, EndLoc, N),
Allocator(Allocator), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPAllocateClause(unsigned N)
- : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
+ : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate,
SourceLocation(), SourceLocation(),
- N) {}
+ SourceLocation(), N) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
@@ -411,7 +413,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_allocate;
+ return T->getClauseKind() == llvm::omp::OMPC_allocate;
}
};
@@ -469,15 +471,16 @@ public:
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation NameModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
- NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
+ : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond),
+ ColonLoc(ColonLoc), NameModifier(NameModifier),
+ NameModifierLoc(NameModifierLoc) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPIfClause()
- : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -511,7 +514,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_if;
+ return T->getClauseKind() == llvm::omp::OMPC_if;
}
};
@@ -547,14 +550,14 @@ public:
OMPFinalClause(Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Condition(Cond) {
+ : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPFinalClause()
- : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -579,7 +582,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_final;
+ return T->getClauseKind() == llvm::omp::OMPC_final;
}
};
@@ -617,7 +620,7 @@ public:
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
+ : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
NumThreads(NumThreads) {
setPreInitStmt(HelperNumThreads, CaptureRegion);
@@ -625,7 +628,8 @@ public:
/// Build an empty clause.
OMPNumThreadsClause()
- : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -651,7 +655,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_num_threads;
+ return T->getClauseKind() == llvm::omp::OMPC_num_threads;
}
};
@@ -687,12 +691,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Safelen(Len) {}
+ : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Safelen(Len) {}
/// Build an empty clause.
explicit OMPSafelenClause()
- : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
+ }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -717,7 +722,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_safelen;
+ return T->getClauseKind() == llvm::omp::OMPC_safelen;
}
};
@@ -752,12 +757,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Simdlen(Len) {}
+ : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Simdlen(Len) {}
/// Build an empty clause.
explicit OMPSimdlenClause()
- : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
+ }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -782,7 +788,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_simdlen;
+ return T->getClauseKind() == llvm::omp::OMPC_simdlen;
}
};
@@ -818,12 +824,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
- NumForLoops(Num) {}
+ : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), NumForLoops(Num) {}
/// Build an empty clause.
explicit OMPCollapseClause()
- : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
+ SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -848,7 +855,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_collapse;
+ return T->getClauseKind() == llvm::omp::OMPC_collapse;
}
};
@@ -866,7 +873,7 @@ class OMPDefaultClause : public OMPClause {
SourceLocation LParenLoc;
/// A kind of the 'default' clause.
- OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;
+ llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
@@ -874,7 +881,7 @@ class OMPDefaultClause : public OMPClause {
/// Set kind of the clause.
///
/// \param K Argument of clause.
- void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
+ void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }
/// Set argument location.
///
@@ -889,15 +896,16 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
+ OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Kind(A), KindKwLoc(ALoc) {}
+ : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPDefaultClause()
- : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) {
+ }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -906,7 +914,7 @@ public:
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
- OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
+ llvm::omp::DefaultKind getDefaultKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
@@ -927,7 +935,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_default;
+ return T->getClauseKind() == llvm::omp::OMPC_default;
}
};
@@ -973,12 +981,13 @@ public:
OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Kind(A), KindKwLoc(ALoc) {}
+ : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPProcBindClause()
- : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(),
+ SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -1008,7 +1017,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_proc_bind;
+ return T->getClauseKind() == llvm::omp::OMPC_proc_bind;
}
};
@@ -1028,11 +1037,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedAddressClause()
- : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
+ SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1050,7 +1060,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_unified_address;
+ return T->getClauseKind() == llvm::omp::OMPC_unified_address;
}
};
@@ -1070,11 +1080,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedSharedMemoryClause()
- : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
+ SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1092,7 +1103,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_unified_shared_memory;
+ return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
}
};
@@ -1112,11 +1123,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReverseOffloadClause()
- : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
+ SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1134,7 +1146,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_reverse_offload;
+ return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
}
};
@@ -1154,12 +1166,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPDynamicAllocatorsClause()
- : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
- }
+ : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
+ SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1177,7 +1189,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_dynamic_allocators;
+ return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
}
};
@@ -1229,12 +1241,12 @@ public:
SourceLocation ALoc, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
+ : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPAtomicDefaultMemOrderClause()
- : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
+ : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
@@ -1267,7 +1279,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_atomic_default_mem_order;
+ return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
}
};
@@ -1386,9 +1398,9 @@ public:
Expr *ChunkSize, Stmt *HelperChunkSize,
OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
- : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
- ChunkSize(ChunkSize) {
+ : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
+ KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
Modifiers[FIRST] = M1;
Modifiers[SECOND] = M2;
@@ -1398,7 +1410,7 @@ public:
/// Build an empty clause.
explicit OMPScheduleClause()
- : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {
Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
@@ -1460,7 +1472,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_schedule;
+ return T->getClauseKind() == llvm::omp::OMPC_schedule;
}
};
@@ -1495,12 +1507,12 @@ class OMPOrderedClause final
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
- NumForLoops(Num), NumberOfLoops(NumLoops) {}
+ : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}
/// Build an empty clause.
explicit OMPOrderedClause(unsigned NumLoops)
- : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
NumberOfLoops(NumLoops) {}
/// Set the number of associated for-loops.
@@ -1556,7 +1568,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_ordered;
+ return T->getClauseKind() == llvm::omp::OMPC_ordered;
}
};
@@ -1573,11 +1585,11 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNowaitClause()
- : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1595,7 +1607,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_nowait;
+ return T->getClauseKind() == llvm::omp::OMPC_nowait;
}
};
@@ -1612,11 +1624,11 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_untied, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUntiedClause()
- : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1634,7 +1646,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_untied;
+ return T->getClauseKind() == llvm::omp::OMPC_untied;
}
};
@@ -1652,11 +1664,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPMergeableClause()
- : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
+ SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1674,7 +1687,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_mergeable;
+ return T->getClauseKind() == llvm::omp::OMPC_mergeable;
}
};
@@ -1691,10 +1704,11 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_read, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}
/// Build an empty clause.
- OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
+ OMPReadClause()
+ : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1712,7 +1726,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_read;
+ return T->getClauseKind() == llvm::omp::OMPC_read;
}
};
@@ -1729,11 +1743,11 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_write, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPWriteClause()
- : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1751,7 +1765,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_write;
+ return T->getClauseKind() == llvm::omp::OMPC_write;
}
};
@@ -1762,18 +1776,95 @@ public:
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
-class OMPUpdateClause : public OMPClause {
-public:
+/// Also, this class represents 'update' clause in '#pragma omp depobj'
+/// directive.
+///
+/// \code
+/// #pragma omp depobj(a) update(in)
+/// \endcode
+/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
+/// dependence kind.
+class OMPUpdateClause final
+ : public OMPClause,
+ private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
+ OpenMPDependClauseKind> {
+ friend class OMPClauseReader;
+ friend TrailingObjects;
+
+ /// True if this is the extended version of the clause for the 'depobj' directive.
+ bool IsExtended = false;
+
+ /// Define the sizes of each trailing object array except the last one. This
+ /// is required for TrailingObjects to work properly.
+ size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
+ // 2 locations: for '(' and argument location.
+ return IsExtended ? 2 : 0;
+ }
+
+ /// Sets the location of '(' in clause for 'depobj' directive.
+ void setLParenLoc(SourceLocation Loc) {
+ assert(IsExtended && "Expected extended clause.");
+ *getTrailingObjects<SourceLocation>() = Loc;
+ }
+
+ /// Sets the location of the argument in clause for 'depobj' directive.
+ void setArgumentLoc(SourceLocation Loc) {
+ assert(IsExtended && "Expected extended clause.");
+ *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
+ }
+
+ /// Sets the dependence kind for the clause for 'depobj' directive.
+ void setDependencyKind(OpenMPDependClauseKind DK) {
+ assert(IsExtended && "Expected extended clause.");
+ *getTrailingObjects<OpenMPDependClauseKind>() = DK;
+ }
+
/// Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
- OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_update, StartLoc, EndLoc) {}
+ OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
+ bool IsExtended)
+ : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
+ IsExtended(IsExtended) {}
/// Build an empty clause.
- OMPUpdateClause()
- : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
+ OMPUpdateClause(bool IsExtended)
+ : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
+ IsExtended(IsExtended) {}
+
+public:
+ /// Creates clause for 'atomic' directive.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Creates clause for 'depobj' directive.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param ArgumentLoc Location of the argument.
+ /// \param DK Dependence kind.
+ /// \param EndLoc Ending location of the clause.
+ static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ArgumentLoc,
+ OpenMPDependClauseKind DK,
+ SourceLocation EndLoc);
+
+ /// Creates an empty clause.
+ ///
+ /// \param C AST context.
+ /// \param IsExtended true if extended clause for 'depobj' directive must be
+ /// created.
+ static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);
+
+ /// Checks if the clause is the extended clause for the 'depobj' directive.
+ bool isExtended() const { return IsExtended; }
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1790,8 +1881,26 @@ public:
return const_child_range(const_child_iterator(), const_child_iterator());
}
+ /// Gets the location of '(' in clause for 'depobj' directive.
+ SourceLocation getLParenLoc() const {
+ assert(IsExtended && "Expected extended clause.");
+ return *getTrailingObjects<SourceLocation>();
+ }
+
+ /// Gets the location of the argument in clause for 'depobj' directive.
+ SourceLocation getArgumentLoc() const {
+ assert(IsExtended && "Expected extended clause.");
+ return *std::next(getTrailingObjects<SourceLocation>(), 1);
+ }
+
+ /// Gets the dependence kind in clause for 'depobj' directive.
+ OpenMPDependClauseKind getDependencyKind() const {
+ assert(IsExtended && "Expected extended clause.");
+ return *getTrailingObjects<OpenMPDependClauseKind>();
+ }
+
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_update;
+ return T->getClauseKind() == llvm::omp::OMPC_update;
}
};
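A usage sketch, not part of the diff: how the two Create overloads above might be called, assuming an ASTContext Ctx and SourceLocations Start, LParen, Arg and End are already at hand (all placeholder names); OMPC_DEPEND_in is the dependence-kind enumerator from OpenMPKinds.

    // 'update' on '#pragma omp atomic': the plain, non-extended form.
    OMPUpdateClause *AtomicUpdate = OMPUpdateClause::Create(Ctx, Start, End);
    // 'update(in)' on '#pragma omp depobj': the extended form records the
    // dependence kind and the extra locations in trailing storage.
    OMPUpdateClause *DepobjUpdate = OMPUpdateClause::Create(
        Ctx, Start, LParen, Arg, OMPC_DEPEND_in, End);
    assert(DepobjUpdate->isExtended() &&
           DepobjUpdate->getDependencyKind() == OMPC_DEPEND_in);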
@@ -1809,11 +1918,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_capture, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPCaptureClause()
- : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
+ }
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1831,7 +1941,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_capture;
+ return T->getClauseKind() == llvm::omp::OMPC_capture;
}
};
@@ -1849,11 +1959,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSeqCstClause()
- : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
+ }
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1871,7 +1982,171 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_seq_cst;
+ return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
+ }
+};
+
+/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
+/// directives.
+///
+/// \code
+/// #pragma omp flush acq_rel
+/// \endcode
+/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
+class OMPAcqRelClause final : public OMPClause {
+public:
+ /// Build 'acq_rel' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPAcqRelClause()
+ : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
+ }
+};
+
+/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
+/// directives.
+///
+/// \code
+/// #pragma omp flush acquire
+/// \endcode
+/// In this example directive '#pragma omp flush' has 'acquire' clause.
+class OMPAcquireClause final : public OMPClause {
+public:
+ /// Build 'acquire' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPAcquireClause()
+ : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_acquire;
+ }
+};
+
+/// This represents 'release' clause in the '#pragma omp atomic|flush'
+/// directives.
+///
+/// \code
+/// #pragma omp flush release
+/// \endcode
+/// In this example directive '#pragma omp flush' has 'release' clause.
+class OMPReleaseClause final : public OMPClause {
+public:
+ /// Build 'release' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPReleaseClause()
+ : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_release;
+ }
+};
+
+/// This represents 'relaxed' clause in the '#pragma omp atomic'
+/// directive.
+///
+/// \code
+/// #pragma omp atomic relaxed
+/// \endcode
+/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
+class OMPRelaxedClause final : public OMPClause {
+public:
+ /// Build 'relaxed' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPRelaxedClause()
+ : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_relaxed;
}
};
@@ -1897,16 +2172,16 @@ class OMPPrivateClause final
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
- EndLoc, N) {}
+ : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
+ LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
- : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
+ : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
SourceLocation(), SourceLocation(),
- N) {}
+ SourceLocation(), N) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
@@ -1976,7 +2251,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_private;
+ return T->getClauseKind() == llvm::omp::OMPC_private;
}
};
@@ -2004,8 +2279,8 @@ class OMPFirstprivateClause final
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
- LParenLoc, EndLoc, N),
+ : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
+ StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
@@ -2013,7 +2288,7 @@ class OMPFirstprivateClause final
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
- OMPC_firstprivate, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
@@ -2117,7 +2392,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_firstprivate;
+ return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
}
};
@@ -2170,8 +2445,8 @@ class OMPLastprivateClause final
SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
unsigned N)
- : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
- LParenLoc, EndLoc, N),
+ : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
+ StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
ColonLoc(ColonLoc) {}
@@ -2180,7 +2455,7 @@ class OMPLastprivateClause final
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
- OMPC_lastprivate, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
@@ -2356,7 +2631,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_lastprivate;
+ return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
}
};
@@ -2381,16 +2656,16 @@ class OMPSharedClause final
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
- EndLoc, N) {}
+ : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
+ LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
- : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
+ : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
SourceLocation(), SourceLocation(),
- N) {}
+ SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
@@ -2428,7 +2703,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_shared;
+ return T->getClauseKind() == llvm::omp::OMPC_shared;
}
};
@@ -2448,6 +2723,12 @@ class OMPReductionClause final
friend OMPVarListClause;
friend TrailingObjects;
+ /// Reduction modifier.
+ OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;
+
+ /// Reduction modifier location.
+ SourceLocation ModifierLoc;
+
/// Location of ':'.
SourceLocation ColonLoc;
@@ -2461,29 +2742,39 @@ class OMPReductionClause final
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
- /// \param EndLoc Ending location of the clause.
+ /// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
+ /// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc,
+ OpenMPReductionClauseModifier Modifier, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
- : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
- LParenLoc, EndLoc, N),
- OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
+ : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
+ StartLoc, LParenLoc, EndLoc, N),
+ OMPClauseWithPostUpdate(this), Modifier(Modifier),
+ ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
- : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
+ : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
SourceLocation(), SourceLocation(),
- N),
+ SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
+ /// Sets reduction modifier.
+ void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }
+
+ /// Sets location of the modifier.
+ void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
+
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
@@ -2548,11 +2839,47 @@ class OMPReductionClause final
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
+ /// Set list of helper copy operations for inscan reductions.
+ /// The form is: Temps[i] = LHS[i];
+ void setInscanCopyOps(ArrayRef<Expr *> Ops);
+
+ /// Get the list of helper inscan copy operations.
+ MutableArrayRef<Expr *> getInscanCopyOps() {
+ return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
+ }
+ ArrayRef<const Expr *> getInscanCopyOps() const {
+ return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
+ }
+
+ /// Set list of helper temp vars for inscan copy array operations.
+ void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);
+
+ /// Get the list of helper inscan copy array temps.
+ MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
+ return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
+ }
+ ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
+ return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
+ }
+
+ /// Set list of helper temp element variables for inscan copy array operations.
+ void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);
+
+ /// Get the list of helper inscan copy array elements.
+ MutableArrayRef<Expr *> getInscanCopyArrayElems() {
+ return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
+ varlist_size());
+ }
+ ArrayRef<const Expr *> getInscanCopyArrayElems() const {
+ return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
+ }
+
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
+ /// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
@@ -2577,23 +2904,41 @@ public:
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
+ /// \param CopyOps List of copy operations for inscan reductions:
+ /// \code
+ /// TempExprs = LHSExprs;
+ /// \endcode
+ /// \param CopyArrayTemps Temp arrays for prefix sums.
+ /// \param CopyArrayElems Temp array elements for prefix sums.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
- NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
+ ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
- ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
+ ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
+ ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
+ Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
- static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
+ /// \param Modifier Reduction modifier.
+ static OMPReductionClause *
+ CreateEmpty(const ASTContext &C, unsigned N,
+ OpenMPReductionClauseModifier Modifier);
+
+ /// Returns modifier.
+ OpenMPReductionClauseModifier getModifier() const { return Modifier; }
+
+ /// Returns modifier location.
+ SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
@@ -2644,6 +2989,36 @@ public:
getReductionOps().end());
}
+ helper_expr_const_range copy_ops() const {
+ return helper_expr_const_range(getInscanCopyOps().begin(),
+ getInscanCopyOps().end());
+ }
+
+ helper_expr_range copy_ops() {
+ return helper_expr_range(getInscanCopyOps().begin(),
+ getInscanCopyOps().end());
+ }
+
+ helper_expr_const_range copy_array_temps() const {
+ return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
+ getInscanCopyArrayTemps().end());
+ }
+
+ helper_expr_range copy_array_temps() {
+ return helper_expr_range(getInscanCopyArrayTemps().begin(),
+ getInscanCopyArrayTemps().end());
+ }
+
+ helper_expr_const_range copy_array_elems() const {
+ return helper_expr_const_range(getInscanCopyArrayElems().begin(),
+ getInscanCopyArrayElems().end());
+ }
+
+ helper_expr_range copy_array_elems() {
+ return helper_expr_range(getInscanCopyArrayElems().begin(),
+ getInscanCopyArrayElems().end());
+ }
+
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
@@ -2664,7 +3039,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_reduction;
+ return T->getClauseKind() == llvm::omp::OMPC_reduction;
}
};
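For orientation, a hand-written OpenMP 5.0 example (mine, not the patch author's) of the inscan reductions that the new Modifier field and the inscan copy-ops/copy-array helpers above exist to support; N, a and b are placeholder names.

    int sum = 0;
    #pragma omp simd reduction(inscan, + : sum)
    for (int i = 0; i < N; ++i) {
      sum += a[i];
      #pragma omp scan inclusive(sum)
      b[i] = sum; // b receives the inclusive prefix sums of a
    }

The "Temp arrays for prefix sums" named in the Create parameters are the helper storage that backs this scan.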
@@ -2706,8 +3081,8 @@ class OMPTaskReductionClause final
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
- : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
- LParenLoc, EndLoc, N),
+ : OMPVarListClause<OMPTaskReductionClause>(
+ llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
@@ -2716,7 +3091,7 @@ class OMPTaskReductionClause final
/// \param N Number of variables.
explicit OMPTaskReductionClause(unsigned N)
: OMPVarListClause<OMPTaskReductionClause>(
- OMPC_task_reduction, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
@@ -2896,7 +3271,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_task_reduction;
+ return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
}
};
@@ -2937,8 +3312,8 @@ class OMPInReductionClause final
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
- : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
- LParenLoc, EndLoc, N),
+ : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
+ StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
@@ -2947,7 +3322,7 @@ class OMPInReductionClause final
/// \param N Number of variables.
explicit OMPInReductionClause(unsigned N)
: OMPVarListClause<OMPInReductionClause>(
- OMPC_in_reduction, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
@@ -3151,7 +3526,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_in_reduction;
+ return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
}
};
@@ -3197,8 +3572,8 @@ class OMPLinearClause final
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
- : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
- EndLoc, NumVars),
+ : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
+ LParenLoc, EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
@@ -3206,9 +3581,9 @@ class OMPLinearClause final
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
- : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
+ : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
SourceLocation(), SourceLocation(),
- NumVars),
+ SourceLocation(), NumVars),
OMPClauseWithPostUpdate(this) {}
/// Gets the list of initial values for linear variables.
@@ -3428,7 +3803,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_linear;
+ return T->getClauseKind() == llvm::omp::OMPC_linear;
}
};
@@ -3463,17 +3838,17 @@ class OMPAlignedClause final
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
- : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
- EndLoc, NumVars),
+ : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
+ LParenLoc, EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
- : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
+ : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
SourceLocation(), SourceLocation(),
- NumVars) {}
+ SourceLocation(), NumVars) {}
public:
/// Creates clause with a list of variables \a VL and alignment \a A.
@@ -3527,7 +3902,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_aligned;
+ return T->getClauseKind() == llvm::omp::OMPC_aligned;
}
};
@@ -3566,16 +3941,16 @@ class OMPCopyinClause final
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
- EndLoc, N) {}
+ : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
+ LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
- : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
+ : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
SourceLocation(), SourceLocation(),
- N) {}
+ SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
@@ -3703,7 +4078,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_copyin;
+ return T->getClauseKind() == llvm::omp::OMPC_copyin;
}
};
@@ -3730,15 +4105,16 @@ class OMPCopyprivateClause final
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
- LParenLoc, EndLoc, N) {}
+ : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
+ StartLoc, LParenLoc, EndLoc, N) {
+ }
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
- OMPC_copyprivate, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
@@ -3866,7 +4242,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_copyprivate;
+ return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
}
};
@@ -3896,16 +4272,16 @@ class OMPFlushClause final
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
- EndLoc, N) {}
+ : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
+ LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
- : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
+ : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
SourceLocation(), SourceLocation(),
- N) {}
+ SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
@@ -3943,7 +4319,94 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_flush;
+ return T->getClauseKind() == llvm::omp::OMPC_flush;
+ }
+};
+
+/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
+/// directive.
+/// This clause does not exist by itself; it can only appear as part of the
+/// 'omp depobj' directive. It is introduced to keep the original structure of
+/// the \a OMPExecutableDirective class and its derivatives and to reuse the
+/// existing infrastructure of clauses with a list of variables.
+///
+/// \code
+/// #pragma omp depobj(a) destroy
+/// \endcode
+/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
+/// with the depobj 'a'.
+class OMPDepobjClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Depobj expression.
+ Expr *Depobj = nullptr;
+
+ /// Build clause with number of variables \a N.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
+ LParenLoc(LParenLoc) {}
+
+ /// Build an empty clause.
+ ///
+ explicit OMPDepobjClause()
+ : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}
+
+ void setDepobj(Expr *E) { Depobj = E; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Creates clause.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param Depobj depobj expression associated with the 'depobj' directive.
+ static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, Expr *Depobj);
+
+ /// Creates an empty clause.
+ ///
+ /// \param C AST context.
+ static OMPDepobjClause *CreateEmpty(const ASTContext &C);
+
+ /// Returns depobj expression associated with the clause.
+ Expr *getDepobj() { return Depobj; }
+ const Expr *getDepobj() const { return Depobj; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(&Depobj),
+ reinterpret_cast<Stmt **>(&Depobj) + 1);
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPDepobjClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_depobj;
}
};
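A minimal sketch, assuming an ASTContext Ctx, placeholder SourceLocations Start, LParen and End, and an already-built Expr *DepObjRef naming the depobj variable; it uses only the Create/getDepobj interface shown above.

    OMPDepobjClause *DC =
        OMPDepobjClause::Create(Ctx, Start, LParen, End, DepObjRef);
    const Expr *E = DC->getDepobj(); // the 'a' in '#pragma omp depobj(a) destroy'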
@@ -3984,8 +4447,9 @@ class OMPDependClause final
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
- : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
- EndLoc, N), NumLoops(NumLoops) {}
+ : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
+ LParenLoc, EndLoc, N),
+ NumLoops(NumLoops) {}
/// Build an empty clause.
///
@@ -3993,9 +4457,9 @@ class OMPDependClause final
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
- : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
+ : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
SourceLocation(), SourceLocation(),
- N),
+ SourceLocation(), N),
NumLoops(NumLoops) {}
/// Set dependency kind.
@@ -4007,6 +4471,9 @@ class OMPDependClause final
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+ /// Sets optional dependency modifier.
+ void setModifier(Expr *DepModifier);
+
public:
/// Creates clause with a list of variables \a VL.
///
@@ -4022,7 +4489,7 @@ public:
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
- SourceLocation EndLoc,
+ SourceLocation EndLoc, Expr *DepModifier,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
@@ -4039,6 +4506,12 @@ public:
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
+ /// Return optional depend modifier.
+ Expr *getModifier();
+ const Expr *getModifier() const {
+ return const_cast<OMPDependClause *>(this)->getModifier();
+ }
+
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
@@ -4074,7 +4547,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_depend;
+ return T->getClauseKind() == llvm::omp::OMPC_depend;
}
};
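As background (my note, not from the patch): the optional modifier expression now carried by OMPDependClause corresponds to the OpenMP 5.0 iterator modifier on 'depend', e.g.

    #pragma omp task depend(iterator(it = 0 : n), in : a[it])
    {
      // task body
    }

When no iterator modifier is written, the expression returned by getModifier() is presumably null.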
@@ -4092,6 +4565,12 @@ class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
/// Location of '('.
SourceLocation LParenLoc;
+ /// Device clause modifier.
+ OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;
+
+ /// Location of the modifier.
+ SourceLocation ModifierLoc;
+
/// Device number.
Stmt *Device = nullptr;
@@ -4100,26 +4579,36 @@ class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
+ /// Sets modifier.
+ void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }
+
+ /// Sets modifier location.
+ void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
+
public:
/// Build 'device' clause.
///
+ /// \param Modifier Clause modifier.
/// \param E Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
+ /// \param ModifierLoc Modifier location.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
- SourceLocation StartLoc, SourceLocation LParenLoc,
+ OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
+ OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ModifierLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Device(E) {
+ : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
+ ModifierLoc(ModifierLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
- : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -4134,6 +4623,12 @@ public:
/// Return device number.
Expr *getDevice() const { return cast<Expr>(Device); }
+ /// Gets modifier.
+ OpenMPDeviceClauseModifier getModifier() const { return Modifier; }
+
+ /// Gets modifier location.
+ SourceLocation getModifierLoc() const { return ModifierLoc; }
+
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
@@ -4148,7 +4643,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_device;
+ return T->getClauseKind() == llvm::omp::OMPC_device;
}
};
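For context, a hedged note that is not part of the diff: the new Modifier/ModifierLoc members correspond to the optional OpenMP 5.0 device-modifier ('device_num' or 'ancestor'), as in

    #pragma omp target device(device_num : 2)
    {
      // offloaded region
    }
    // 'ancestor : 1' is the other modifier; per the 5.0 spec it additionally
    // requires '#pragma omp requires reverse_offload'.

As the in-class initializer above shows, the member defaults to OMPC_DEVICE_unknown when no modifier has been recorded.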
@@ -4165,11 +4660,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_threads, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
- : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
+ }
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -4187,7 +4683,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_threads;
+ return T->getClauseKind() == llvm::omp::OMPC_threads;
}
};
@@ -4204,10 +4700,11 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_simd, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
- OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
+ OMPSIMDClause()
+ : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -4225,7 +4722,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_simd;
+ return T->getClauseKind() == llvm::omp::OMPC_simd;
}
};
@@ -4853,19 +5350,14 @@ class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
-public:
- /// Number of allowed map-type-modifiers.
- static constexpr unsigned NumberOfModifiers =
- OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
-
private:
/// Map-type-modifiers for the 'map' clause.
- OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
+ OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown};
/// Location of map-type-modifiers for the 'map' clause.
- SourceLocation MapTypeModifiersLoc[NumberOfModifiers];
+ SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];
/// Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
@@ -4906,8 +5398,8 @@ private:
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
- &MapperIdInfo),
+ : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
+ &MapperQualifierLoc, &MapperIdInfo),
MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
"Unexpected number of map type modifiers.");
@@ -4927,14 +5419,15 @@ private:
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(),
+ Sizes) {}
/// Set map-type-modifier for the clause.
///
/// \param I index for map-type-modifier.
/// \param T map-type-modifier for the clause.
void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
- assert(I < NumberOfModifiers &&
+ assert(I < NumberOfOMPMapClauseModifiers &&
"Unexpected index to store map type modifier, exceeds array size.");
MapTypeModifiers[I] = T;
}
@@ -4944,7 +5437,7 @@ private:
/// \param I index for map-type-modifier location.
/// \param TLoc map-type-modifier location.
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
- assert(I < NumberOfModifiers &&
+ assert(I < NumberOfOMPMapClauseModifiers &&
"Index to store map type modifier location exceeds array size.");
MapTypeModifiersLoc[I] = TLoc;
}
@@ -5019,7 +5512,7 @@ public:
///
/// \param Cnt index for map-type-modifier.
OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
- assert(Cnt < NumberOfModifiers &&
+ assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MapTypeModifiers[Cnt];
}
@@ -5029,7 +5522,7 @@ public:
///
/// \param Cnt index for map-type-modifier location.
SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
- assert(Cnt < NumberOfModifiers &&
+ assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MapTypeModifiersLoc[Cnt];
}
@@ -5074,7 +5567,7 @@ public:
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_map;
+ return T->getClauseKind() == llvm::omp::OMPC_map;
}
};
@@ -5113,14 +5606,15 @@ public:
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), NumTeams(E) {
+ : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPNumTeamsClause()
- : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -5149,7 +5643,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_num_teams;
+ return T->getClauseKind() == llvm::omp::OMPC_num_teams;
}
};
@@ -5189,14 +5683,15 @@ public:
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
+ : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPThreadLimitClause()
- : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -5225,7 +5720,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_thread_limit;
+ return T->getClauseKind() == llvm::omp::OMPC_thread_limit;
}
};
@@ -5264,14 +5759,14 @@ public:
OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Priority(Priority) {
+ : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
setPreInitStmt(HelperPriority, CaptureRegion);
}
/// Build an empty clause.
OMPPriorityClause()
- : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -5299,7 +5794,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_priority;
+ return T->getClauseKind() == llvm::omp::OMPC_priority;
}
};
@@ -5335,14 +5830,15 @@ public:
OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), Grainsize(Size) {
+ : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPGrainsizeClause()
- : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -5367,7 +5863,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_grainsize;
+ return T->getClauseKind() == llvm::omp::OMPC_grainsize;
}
};
@@ -5384,11 +5880,12 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
+ : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNogroupClause()
- : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
+ }
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -5406,7 +5903,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_nogroup;
+ return T->getClauseKind() == llvm::omp::OMPC_nogroup;
}
};
@@ -5442,14 +5939,15 @@ public:
OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this),
- LParenLoc(LParenLoc), NumTasks(Size) {
+ : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPNumTasksClause()
- : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
@@ -5474,7 +5972,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_num_tasks;
+ return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
}
};
@@ -5506,11 +6004,12 @@ public:
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
+ : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// Build an empty clause.
- OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}
+ OMPHintClause()
+ : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@@ -5535,7 +6034,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_hint;
+ return T->getClauseKind() == llvm::omp::OMPC_hint;
}
};
@@ -5607,7 +6106,7 @@ public:
SourceLocation EndLoc,
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
Stmt *HelperChunkSize)
- : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
+ : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
@@ -5615,7 +6114,8 @@ public:
/// Build an empty clause.
explicit OMPDistScheduleClause()
- : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
+ : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
+ SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Get kind of the clause.
@@ -5654,7 +6154,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_dist_schedule;
+ return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
}
};
@@ -5724,12 +6224,14 @@ public:
SourceLocation MLoc, SourceLocation KLoc,
SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
OpenMPDefaultmapClauseModifier M)
- : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
- Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}
+ : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
+ KindLoc(KLoc) {}
/// Build an empty clause.
explicit OMPDefaultmapClause()
- : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}
+ : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
+ SourceLocation()) {}
/// Get kind of the clause.
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
@@ -5766,7 +6268,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_defaultmap;
+ return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
}
};
@@ -5804,8 +6306,8 @@ class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
- &MapperIdInfo) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
+ &MapperQualifierLoc, &MapperIdInfo) {}
/// Build an empty clause.
///
@@ -5815,7 +6317,8 @@ class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(),
+ Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
@@ -5883,7 +6386,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_to;
+ return T->getClauseKind() == llvm::omp::OMPC_to;
}
};
@@ -5922,8 +6425,8 @@ class OMPFromClause final
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
- &MapperIdInfo) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
+ &MapperQualifierLoc, &MapperIdInfo) {}
/// Build an empty clause.
///
@@ -5933,7 +6436,8 @@ class OMPFromClause final
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
+ Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
@@ -6001,7 +6505,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_from;
+ return T->getClauseKind() == llvm::omp::OMPC_from;
}
};
@@ -6035,7 +6539,8 @@ class OMPUseDevicePtrClause final
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) {
+ }
/// Build an empty clause.
///
@@ -6045,8 +6550,8 @@ class OMPUseDevicePtrClause final
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
- Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
+ OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
@@ -6164,7 +6669,111 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_use_device_ptr;
+ return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
+ }
+};
+
+/// This represents clause 'use_device_addr' in the '#pragma omp ...'
+/// directives.
+///
+/// \code
+/// #pragma omp target data use_device_addr(a,b)
+/// \endcode
+/// In this example directive '#pragma omp target data' has clause
+/// 'use_device_addr' with the variables 'a' and 'b'.
+class OMPUseDeviceAddrClause final
+ : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
+ private llvm::TrailingObjects<
+ OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent> {
+ friend class OMPClauseReader;
+ friend OMPMappableExprListClause;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Build clause with number of variables \a NumVars.
+ ///
+ /// \param Locs Locations needed to build a mappable clause. It includes 1)
+ /// StartLoc: starting location of the clause (the clause keyword); 2)
+ /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
+ const OMPMappableExprListSizeTy &Sizes)
+ : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
+ Sizes) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
+ : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
+ OMPVarListLocTy(), Sizes) {}
+
+ /// Define the sizes of each trailing object array except the last one. This
+ /// is required for TrailingObjects to work properly.
+ size_t numTrailingObjects(OverloadToken<Expr *>) const {
+ return varlist_size();
+ }
+ size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
+ return getUniqueDeclarationsNum();
+ }
+ size_t numTrailingObjects(OverloadToken<unsigned>) const {
+ return getUniqueDeclarationsNum() + getTotalComponentListNum();
+ }
+
+public:
+ /// Creates clause with a list of variables \a Vars.
+ ///
+ /// \param C AST context.
+ /// \param Locs Locations needed to build a mappable clause. It includes 1)
+ /// StartLoc: starting location of the clause (the clause keyword); 2)
+ /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
+ /// \param Vars The original expression used in the clause.
+ /// \param Declarations Declarations used in the clause.
+ /// \param ComponentLists Component lists used in the clause.
+ static OMPUseDeviceAddrClause *
+ Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists);
+
+ /// Creates an empty clause with the place for \a NumVars variables.
+ ///
+ /// \param C AST context.
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ static OMPUseDeviceAddrClause *
+ CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
}
};
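As a minimal sketch of how client code typically consumes these clause classes (the helper name is illustrative, not part of the patch): classof() lets LLVM's dyn_cast recognize the clause, and the inherited OMPVarListClause interface exposes the listed variables.

    #include "clang/AST/OpenMPClause.h"
    #include "llvm/Support/Casting.h"

    using namespace clang;

    // Count the variables listed in a use_device_addr clause, if the given
    // clause is one; the classof() above is what makes dyn_cast work here.
    static unsigned countUseDeviceAddrVars(const OMPClause *C) {
      if (const auto *UDA = llvm::dyn_cast<OMPUseDeviceAddrClause>(C))
        return UDA->varlist_size();
      return 0;
    }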
@@ -6198,7 +6807,7 @@ class OMPIsDevicePtrClause final
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
@@ -6208,8 +6817,8 @@ class OMPIsDevicePtrClause final
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
- : OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
- Sizes) {}
+ : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
+ OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
@@ -6267,7 +6876,7 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_is_device_ptr;
+ return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr;
}
};
@@ -6293,15 +6902,16 @@ class OMPNontemporalClause final
/// \param N Number of the variables in the clause.
OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
- : OMPVarListClause<OMPNontemporalClause>(OMPC_nontemporal, StartLoc,
- LParenLoc, EndLoc, N) {}
+ : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
+ StartLoc, LParenLoc, EndLoc, N) {
+ }
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPNontemporalClause(unsigned N)
: OMPVarListClause<OMPNontemporalClause>(
- OMPC_nontemporal, SourceLocation(), SourceLocation(),
+ llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
 /// Get the list of privatized copies if the member expression was captured by
@@ -6363,7 +6973,563 @@ public:
}
static bool classof(const OMPClause *T) {
- return T->getClauseKind() == OMPC_nontemporal;
+ return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
+ }
+};
+
+/// This represents 'order' clause in the '#pragma omp ...' directive.
+///
+/// \code
+/// #pragma omp simd order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp simd' has simple 'order'
+/// clause with kind 'concurrent'.
+class OMPOrderClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// A kind of the 'order' clause.
+ OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;
+
+ /// Start location of the kind in source code.
+ SourceLocation KindKwLoc;
+
+ /// Set kind of the clause.
+ ///
+ /// \param K Argument of clause.
+ void setKind(OpenMPOrderClauseKind K) { Kind = K; }
+
+ /// Set argument location.
+ ///
+ /// \param KLoc Argument location.
+ void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
+
+public:
+ /// Build 'order' clause with argument \p A ('concurrent').
+ ///
+ /// \param A Argument of the clause ('concurrent').
+ /// \param ALoc Starting location of the argument.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
+
+ /// Build an empty clause.
+ OMPOrderClause()
+ : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {}
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns kind of the clause.
+ OpenMPOrderClauseKind getKind() const { return Kind; }
+
+ /// Returns location of clause kind.
+ SourceLocation getKindKwLoc() const { return KindKwLoc; }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_order;
+ }
+};
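A hedged sketch of reading the new 'order' clause (the function name is made up for illustration): the kind stays OMPC_ORDER_unknown, as set by the default member initializer above, until the parser records the parsed argument.

    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // True when the clause carries a parsed kind rather than the
    // OMPC_ORDER_unknown default of an empty clause.
    static bool hasParsedOrderKind(const OMPOrderClause &C) {
      return C.getKind() != OMPC_ORDER_unknown;
    }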
+
+/// This represents 'destroy' clause in the '#pragma omp depobj'
+/// directive.
+///
+/// \code
+/// #pragma omp depobj(a) destroy
+/// \endcode
+/// In this example directive '#pragma omp depobj' has 'destroy' clause.
+class OMPDestroyClause final : public OMPClause {
+public:
+ /// Build 'destroy' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPDestroyClause()
+ : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_destroy;
+ }
+};
+
+/// This represents 'detach' clause in the '#pragma omp task' directive.
+///
+/// \code
+/// #pragma omp task detach(evt)
+/// \endcode
+/// In this example directive '#pragma omp task' has simple 'detach' clause
+/// with the variable 'evt'.
+class OMPDetachClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Expression of the 'detach' clause.
+ Stmt *Evt = nullptr;
+
+ /// Set condition.
+ void setEventHandler(Expr *E) { Evt = E; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'detach' clause with event-handler \a Evt.
+ ///
+ /// \param Evt Event handler expression.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Evt(Evt) {}
+
+ /// Build an empty clause.
+ OMPDetachClause()
+ : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns event-handler expression.
+ Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }
+
+ child_range children() { return child_range(&Evt, &Evt + 1); }
+
+ const_child_range children() const {
+ return const_child_range(&Evt, &Evt + 1);
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_detach;
+ }
+};
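A small illustrative accessor for the 'detach' clause (the name is hypothetical): the event handler is the clause's only child, so getEventHandler() and children() refer to the same expression.

    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // Returns the event-handler expression, or null for an empty clause.
    static const Expr *detachEventHandler(const OMPDetachClause &C) {
      return C.getEventHandler();
    }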
+
+/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
+///
+/// \code
+/// #pragma omp scan inclusive(a,b)
+/// \endcode
+/// In this example directive '#pragma omp scan' has clause 'inclusive'
+/// with the variables 'a' and 'b'.
+class OMPInclusiveClause final
+ : public OMPVarListClause<OMPInclusiveClause>,
+ private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Build clause with number of variables \a N.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of the variables in the clause.
+ OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, unsigned N)
+ : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
+ StartLoc, LParenLoc, EndLoc, N) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param N Number of variables.
+ explicit OMPInclusiveClause(unsigned N)
+ : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
+ SourceLocation(), SourceLocation(),
+ SourceLocation(), N) {}
+
+public:
+ /// Creates clause with a list of variables \a VL.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param VL List of references to the original variables.
+ static OMPInclusiveClause *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL);
+
+ /// Creates an empty clause with the place for \a N variables.
+ ///
+ /// \param C AST context.
+ /// \param N The number of variables.
+ static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPInclusiveClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_inclusive;
+ }
+};
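To illustrate the OMPVarListClause interface these scan clauses inherit (the helper name is hypothetical): varlists() iterates the trailing Expr* array populated by Create().

    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // Count the non-null variable references in an 'inclusive' clause.
    static unsigned countInclusiveVars(OMPInclusiveClause &C) {
      unsigned N = 0;
      for (const Expr *E : C.varlists())
        if (E)
          ++N;
      return N;
    }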
+
+/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
+///
+/// \code
+/// #pragma omp scan exclusive(a,b)
+/// \endcode
+/// In this example directive '#pragma omp scan' has clause 'exclusive'
+/// with the variables 'a' and 'b'.
+class OMPExclusiveClause final
+ : public OMPVarListClause<OMPExclusiveClause>,
+ private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Build clause with number of variables \a N.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of the variables in the clause.
+ OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, unsigned N)
+ : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
+ StartLoc, LParenLoc, EndLoc, N) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param N Number of variables.
+ explicit OMPExclusiveClause(unsigned N)
+ : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
+ SourceLocation(), SourceLocation(),
+ SourceLocation(), N) {}
+
+public:
+ /// Creates clause with a list of variables \a VL.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param VL List of references to the original variables.
+ static OMPExclusiveClause *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL);
+
+ /// Creates an empty clause with the place for \a N variables.
+ ///
+ /// \param C AST context.
+ /// \param N The number of variables.
+ static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPExclusiveClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_exclusive;
+ }
+};
+
+/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
+/// directives.
+///
+/// \code
+/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
+/// \endcode
+/// In this example directive '#pragma omp target' has clause 'uses_allocators'
+/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
+class OMPUsesAllocatorsClause final
+ : public OMPClause,
+ private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
+ SourceLocation> {
+public:
+ /// Data for list of allocators.
+ struct Data {
+ /// Allocator.
+ Expr *Allocator = nullptr;
+ /// Allocator traits.
+ Expr *AllocatorTraits = nullptr;
+ /// Locations of '(' and ')' symbols.
+ SourceLocation LParenLoc, RParenLoc;
+ };
+
+private:
+ friend class OMPClauseReader;
+ friend TrailingObjects;
+
+ enum class ExprOffsets {
+ Allocator,
+ AllocatorTraits,
+ Total,
+ };
+
+ enum class ParenLocsOffsets {
+ LParen,
+ RParen,
+ Total,
+ };
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+ /// Total number of allocators in the clause.
+ unsigned NumOfAllocators = 0;
+
+ /// Build clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of allocators associated with the clause.
+ OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, unsigned N)
+ : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), NumOfAllocators(N) {}
+
+ /// Build an empty clause.
+ /// \param N Number of allocators associated with the clause.
+ ///
+ explicit OMPUsesAllocatorsClause(unsigned N)
+ : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
+ SourceLocation()),
+ NumOfAllocators(N) {}
+
+ unsigned numTrailingObjects(OverloadToken<Expr *>) const {
+ return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
+ }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Sets the allocators data for the clause.
+ void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);
+
+public:
+ /// Creates clause with a list of allocators \p Data.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param Data List of allocators.
+ static OMPUsesAllocatorsClause *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);
+
+ /// Creates an empty clause with the place for \p N allocators.
+ ///
+ /// \param C AST context.
+ /// \param N The number of allocators.
+ static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns number of allocators associated with the clause.
+ unsigned getNumberOfAllocators() const { return NumOfAllocators; }
+
+ /// Returns data for the specified allocator.
+ OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;
+
+ // Iterators
+ child_range children() {
+ Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
+ return child_range(Begin, Begin + NumOfAllocators *
+ static_cast<int>(ExprOffsets::Total));
+ }
+ const_child_range children() const {
+ Stmt *const *Begin =
+ reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
+ return const_child_range(
+ Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
+ }
+};
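A sketch of walking the allocator/traits pairs stored by this clause (the callback-based helper is illustrative): getAllocatorData(I) unpacks the I-th pair from the trailing expression and location arrays.

    #include "clang/AST/OpenMPClause.h"
    #include "llvm/ADT/STLExtras.h"

    using namespace clang;

    // Invoke Fn once per allocator with its (possibly null) traits expression.
    static void forEachAllocator(
        const OMPUsesAllocatorsClause &C,
        llvm::function_ref<void(const Expr *, const Expr *)> Fn) {
      for (unsigned I = 0, E = C.getNumberOfAllocators(); I < E; ++I) {
        OMPUsesAllocatorsClause::Data D = C.getAllocatorData(I);
        Fn(D.Allocator, D.AllocatorTraits);
      }
    }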
+
+/// This represents clause 'affinity' in the '#pragma omp task'-based
+/// directives.
+///
+/// \code
+/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
+/// \endcode
+/// In this example directive '#pragma omp task' has clause 'affinity' with the
+/// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
+/// and 'c[i]'.
+class OMPAffinityClause final
+ : public OMPVarListClause<OMPAffinityClause>,
+ private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Location of ':' symbol.
+ SourceLocation ColonLoc;
+
+ /// Build clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param ColonLoc Location of ':'.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of locators associated with the clause.
+ OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
+ : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
+ LParenLoc, EndLoc, N) {}
+
+ /// Build an empty clause.
+ /// \param N Number of locators associated with the clause.
+ ///
+ explicit OMPAffinityClause(unsigned N)
+ : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
+ SourceLocation(), SourceLocation(),
+ SourceLocation(), N) {}
+
+ /// Sets the affinity modifier for the clause, if any.
+ void setModifier(Expr *E) {
+ getTrailingObjects<Expr *>()[varlist_size()] = E;
+ }
+
+ /// Sets the location of ':' symbol.
+ void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+
+public:
+ /// Creates clause with a modifier and a list of locator items.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param ColonLoc Location of ':'.
+ /// \param EndLoc Ending location of the clause.
+ /// \param Locators List of locator items.
+ static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators);
+
+ /// Creates an empty clause with the place for \p N locator items.
+ ///
+ /// \param C AST context.
+ /// \param N The number of locator items.
+ static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N);
+
+ /// Gets affinity modifier.
+ Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; }
+ Expr *getModifier() const {
+ return getTrailingObjects<Expr *>()[varlist_size()];
+ }
+
+ /// Gets the location of ':' symbol.
+ SourceLocation getColonLoc() const { return ColonLoc; }
+
+ // Iterators
+ child_range children() {
+ int Offset = getModifier() ? 1 : 0;
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end() + Offset));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPAffinityClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_affinity;
}
};
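A brief illustrative check against the 'affinity' clause layout (the function name is assumed): the optional iterator modifier lives one slot past the locator list, so getModifier() may be null while varlists() still yields the locator items.

    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // True when the clause was written with an iterator(...) modifier.
    static bool hasAffinityModifier(const OMPAffinityClause &C) {
      return C.getModifier() != nullptr;
    }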
@@ -6372,21 +7538,26 @@ public:
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
-#define PTR(CLASS) typename Ptr<CLASS>::type
+#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))
-#define OPENMP_CLAUSE(Name, Class) \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
RetTy Visit(PTR(OMPClause) S) {
// Top switch clause: visit each OMPClause.
switch (S->getClauseKind()) {
- default: llvm_unreachable("Unknown clause kind!");
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case llvm::omp::Clause::Enum: \
+ return Visit##Class(static_cast<PTR(Class)>(S));
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Clause::Enum: \
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ default:
+ break;
}
}
// Base case, ignore it. :)
@@ -6395,12 +7566,11 @@ public:
#undef DISPATCH
};
-template <typename T>
-using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>;
+template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;
-template<class ImplClass, typename RetTy = void>
-class OMPClauseVisitor :
- public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {};
+template <class ImplClass, typename RetTy = void>
+class OMPClauseVisitor
+ : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
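A hypothetical visitor, sketched to show the retargeted macro pattern: like OMPClausePrinter below, it expands OMP_CLAUSE_CLASS from OMPKinds.def so every clause class gets a Visit method, and the base Visit(OMPClause *) dispatches through the generated switch.

    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // Counts how many clauses of any kind it is asked to visit.
    class ClauseCounter : public OMPClauseVisitor<ClauseCounter> {
    public:
      unsigned NumClauses = 0;
    #define OMP_CLAUSE_CLASS(Enum, Str, Class)                                 \
      void Visit##Class(Class *) { ++NumClauses; }
    #include "llvm/Frontend/OpenMP/OMPKinds.def"
    };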
@@ -6416,9 +7586,69 @@ public:
OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
: OS(OS), Policy(Policy) {}
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ void Visit##Class(Class *S);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+struct OMPTraitProperty {
+ llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;
+};
+struct OMPTraitSelector {
+ Expr *ScoreOrCondition = nullptr;
+ llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
+ llvm::SmallVector<OMPTraitProperty, 1> Properties;
+};
+struct OMPTraitSet {
+ llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
+ llvm::SmallVector<OMPTraitSelector, 2> Selectors;
+};
+
+/// Helper data structure representing the traits in a match clause of a
+/// `declare variant` or `metadirective`. The outer level is an ordered
+/// collection of selector sets, each with an associated kind and an ordered
+/// collection of selectors. A selector has a kind, an optional score/condition,
+/// and an ordered collection of properties.
+class OMPTraitInfo {
+ /// Private constructor accessible only by ASTContext.
+ OMPTraitInfo() {}
+ friend class ASTContext;
+
+public:
+ /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
+ OMPTraitInfo(StringRef MangledName);
+
+ /// The outermost level of selector sets.
+ llvm::SmallVector<OMPTraitSet, 2> Sets;
+
+ bool anyScoreOrCondition(
+ llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
+ return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
+ return llvm::any_of(
+ Set.Selectors, [&](OMPTraitSelector &Selector) {
+ return Cond(Selector.ScoreOrCondition,
+ /* IsScore */ Selector.Kind !=
+ llvm::omp::TraitSelector::user_condition);
+ });
+ });
+ }
+
+ /// Create a variant match info object from this trait info object. While the
+ /// former is a flat representation, the key difference is that this class
+ /// stores the score/condition as a clang::Expr whereas the variant match info
+ /// is independent of clang. Thus, expressions and conditions are evaluated in
+ /// this method.
+ void getAsVariantMatchInfo(ASTContext &ASTCtx,
+ llvm::omp::VariantMatchInfo &VMI) const;
+
+ /// Return a string representation identifying this context selector.
+ std::string getMangledName() const;
+
+ /// Print a human-readable representation into \p OS.
+ void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
} // namespace clang
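To make the anyScoreOrCondition helper above concrete, a hedged example (the predicate is chosen purely for illustration): it visits each selector's score/condition expression and returns true as soon as the callback does.

    #include "clang/AST/Expr.h"
    #include "clang/AST/OpenMPClause.h"

    using namespace clang;

    // True if any selector still has a type- or value-dependent score or
    // condition, i.e. one that cannot be evaluated yet.
    static bool hasDependentScoreOrCondition(OMPTraitInfo &TI) {
      return TI.anyScoreOrCondition([](Expr *&E, bool /*IsScore*/) {
        return E && (E->isValueDependent() || E->isTypeDependent());
      });
    }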
diff --git a/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h b/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h
new file mode 100644
index 000000000000..be4d75df7b99
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h
@@ -0,0 +1,144 @@
+//===- ParentMapContext.h - Map of parents using DynTypedNode -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Similar to ParentMap.h, but generalizes to non-Stmt nodes, which can have
+// multiple parents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_PARENTMAPCONTEXT_H
+#define LLVM_CLANG_AST_PARENTMAPCONTEXT_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTTypeTraits.h"
+
+namespace clang {
+class DynTypedNodeList;
+
+class ParentMapContext {
+public:
+ ParentMapContext(ASTContext &Ctx);
+
+ ~ParentMapContext();
+
+ /// Returns the parents of the given node (within the traversal scope).
+ ///
+ /// Note that this will lazily compute the parents of all nodes
+ /// and store them for later retrieval. Thus, the first call is O(n)
+ /// in the number of AST nodes.
+ ///
+ /// Caveats and FIXMEs:
+ /// Calculating the parent map over all AST nodes will need to load the
+ /// full AST. This can be undesirable in the case where the full AST is
+ /// expensive to create (for example, when using precompiled header
+ /// preambles). Thus, there are good opportunities for optimization here.
+ /// One idea is to walk the given node downwards, looking for references
+ /// to declaration contexts - once a declaration context is found, compute
+ /// the parent map for the declaration context; if that can satisfy the
+ /// request, loading the whole AST can be avoided. Note that this is made
+ /// more complex by statements in templates having multiple parents - those
+ /// problems can be solved by building closure over the templated parts of
+ /// the AST, which also avoids touching large parts of the AST.
+ /// Additionally, we will want to add an interface to already give a hint
+ /// where to search for the parents, for example when looking at a statement
+ /// inside a certain function.
+ ///
+ /// 'NodeT' can be one of Decl, Stmt, Type, TypeLoc,
+ /// NestedNameSpecifier or NestedNameSpecifierLoc.
+ template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node);
+
+ DynTypedNodeList getParents(const DynTypedNode &Node);
+
+ /// Clear parent maps.
+ void clear();
+
+ TraversalKind getTraversalKind() const { return Traversal; }
+ void setTraversalKind(TraversalKind TK) { Traversal = TK; }
+
+ const Expr *traverseIgnored(const Expr *E) const;
+ Expr *traverseIgnored(Expr *E) const;
+ DynTypedNode traverseIgnored(const DynTypedNode &N) const;
+
+private:
+ ASTContext &ASTCtx;
+ class ParentMap;
+ TraversalKind Traversal = TK_AsIs;
+ std::unique_ptr<ParentMap> Parents;
+};
+
+class TraversalKindScope {
+ ParentMapContext &Ctx;
+ TraversalKind TK = TK_AsIs;
+
+public:
+ TraversalKindScope(ASTContext &ASTCtx, llvm::Optional<TraversalKind> ScopeTK)
+ : Ctx(ASTCtx.getParentMapContext()) {
+ TK = Ctx.getTraversalKind();
+ if (ScopeTK)
+ Ctx.setTraversalKind(*ScopeTK);
+ }
+
+ ~TraversalKindScope() { Ctx.setTraversalKind(TK); }
+};
+
+/// Container for either a single DynTypedNode or for an ArrayRef to
+/// DynTypedNode. For use with ParentMap.
+class DynTypedNodeList {
+ llvm::AlignedCharArrayUnion<DynTypedNode, ArrayRef<DynTypedNode>> Storage;
+ bool IsSingleNode;
+
+public:
+ DynTypedNodeList(const DynTypedNode &N) : IsSingleNode(true) {
+ new (Storage.buffer) DynTypedNode(N);
+ }
+
+ DynTypedNodeList(ArrayRef<DynTypedNode> A) : IsSingleNode(false) {
+ new (Storage.buffer) ArrayRef<DynTypedNode>(A);
+ }
+
+ const DynTypedNode *begin() const {
+ if (!IsSingleNode)
+ return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer)
+ ->begin();
+ return reinterpret_cast<const DynTypedNode *>(Storage.buffer);
+ }
+
+ const DynTypedNode *end() const {
+ if (!IsSingleNode)
+ return reinterpret_cast<const ArrayRef<DynTypedNode> *>(Storage.buffer)
+ ->end();
+ return reinterpret_cast<const DynTypedNode *>(Storage.buffer) + 1;
+ }
+
+ size_t size() const { return end() - begin(); }
+ bool empty() const { return begin() == end(); }
+
+ const DynTypedNode &operator[](size_t N) const {
+ assert(N < size() && "Out of bounds!");
+ return *(begin() + N);
+ }
+};
+
+template <typename NodeT>
+inline DynTypedNodeList ParentMapContext::getParents(const NodeT &Node) {
+ return getParents(DynTypedNode::create(Node));
+}
+
+template <typename NodeT>
+inline DynTypedNodeList ASTContext::getParents(const NodeT &Node) {
+ return getParentMapContext().getParents(Node);
+}
+
+template <>
+inline DynTypedNodeList ASTContext::getParents(const DynTypedNode &Node) {
+ return getParentMapContext().getParents(Node);
+}
+
+} // namespace clang
+
+#endif
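A minimal usage sketch for the new header (the helper name is illustrative): ASTContext::getParents forwards to ParentMapContext::getParents, which builds the map lazily, so the first query is O(n) in the number of AST nodes as noted above.

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Expr.h"
    #include "clang/AST/ParentMapContext.h"

    using namespace clang;

    // Return the nearest Stmt parent of an expression, or null if the only
    // parents are non-Stmt nodes (e.g. a Decl).
    static const Stmt *firstStmtParent(ASTContext &Ctx, const Expr &E) {
      for (const DynTypedNode &Parent : Ctx.getParents(E))
        if (const auto *S = Parent.get<Stmt>())
          return S;
      return nullptr;
    }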
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
index 80eec6a5a8be..616647f44430 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
@@ -36,7 +36,9 @@ protected:
public:
/// Remap a path to a form suitable for printing.
- virtual std::string remapPath(StringRef Path) const { return Path; }
+ virtual std::string remapPath(StringRef Path) const {
+ return std::string(Path);
+ }
};
/// Describes how types, statements, expressions, and declarations should be
@@ -55,12 +57,13 @@ struct PrintingPolicy {
SuppressLifetimeQualifiers(false),
SuppressTemplateArgsInCXXConstructors(false), Bool(LO.Bool),
Restrict(LO.C99), Alignof(LO.CPlusPlus11), UnderscoreAlignof(LO.C11),
- UseVoidForZeroParams(!LO.CPlusPlus), TerseOutput(false),
+ UseVoidForZeroParams(!LO.CPlusPlus),
+ SplitTemplateClosers(!LO.CPlusPlus11), TerseOutput(false),
PolishForDeclaration(false), Half(LO.Half),
MSWChar(LO.MicrosoftExt && !LO.WChar), IncludeNewlines(true),
MSVCFormatting(false), ConstantsAsWritten(false),
SuppressImplicitBase(false), FullyQualifiedName(false),
- PrintCanonicalTypes(false) {}
+ PrintCanonicalTypes(false), PrintInjectedClassNameWithArguments(true) {}
/// Adjust this printing policy for cases where it's known that we're
/// printing C++ code (for instance, if AST dumping reaches a C++-only
@@ -181,6 +184,10 @@ struct PrintingPolicy {
/// with zero parameters.
unsigned UseVoidForZeroParams : 1;
+ /// Whether nested templates must be closed like 'a\<b\<c\> \>' rather than
+ /// 'a\<b\<c\>\>'.
+ unsigned SplitTemplateClosers : 1;
+
/// Provide a 'terse' output.
///
/// For example, in this mode we don't print function bodies, class members,
@@ -237,6 +244,11 @@ struct PrintingPolicy {
/// Whether to print types as written or canonically.
unsigned PrintCanonicalTypes : 1;
+ /// Whether to print an InjectedClassNameType with template arguments or as
+ /// written. When a template argument is unnamed, printing it results in
+ /// invalid C++ code.
+ unsigned PrintInjectedClassNameWithArguments : 1;
+
/// Callbacks to use to allow the behavior of printing to be customized.
const PrintingCallbacks *Callbacks = nullptr;
};
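A short hedged example of the new SplitTemplateClosers bit (the wrapper function is illustrative): when set, nested template arguments are printed with the pre-C++11 'a<b<c> >' spelling.

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "clang/AST/Type.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Print a type, forcing the split '>' spelling regardless of language mode.
    static void printWithSplitClosers(const ASTContext &Ctx, QualType QT) {
      PrintingPolicy Policy(Ctx.getLangOpts());
      Policy.SplitTemplateClosers = true;
      QT.print(llvm::outs(), Policy);
      llvm::outs() << "\n";
    }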
diff --git a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
index 1eea56dee622..a18432c2b768 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
@@ -11,9 +11,9 @@
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
#include <map>
namespace clang {
@@ -21,7 +21,9 @@ namespace clang {
class ASTContext;
class ASTReader;
class Decl;
+class DiagnosticsEngine;
class Preprocessor;
+class SourceManager;
namespace comments {
class FullComment;
@@ -173,23 +175,6 @@ private:
friend class ASTReader;
};
-/// Compare comments' source locations.
-template<>
-class BeforeThanCompare<RawComment> {
- const SourceManager &SM;
-
-public:
- explicit BeforeThanCompare(const SourceManager &SM) : SM(SM) { }
-
- bool operator()(const RawComment &LHS, const RawComment &RHS) {
- return SM.isBeforeInTranslationUnit(LHS.getBeginLoc(), RHS.getBeginLoc());
- }
-
- bool operator()(const RawComment *LHS, const RawComment *RHS) {
- return operator()(*LHS, *RHS);
- }
-};
-
/// This class represents all comments included in the translation unit,
/// sorted in order of appearance in the translation unit.
class RawCommentList {
diff --git a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
index 86521d82c6ff..3dcfc9fee629 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -48,29 +48,6 @@
#include <cstddef>
#include <type_traits>
-// The following three macros are used for meta programming. The code
-// using them is responsible for defining macro OPERATOR().
-
-// All unary operators.
-#define UNARYOP_LIST() \
- OPERATOR(PostInc) OPERATOR(PostDec) OPERATOR(PreInc) OPERATOR(PreDec) \
- OPERATOR(AddrOf) OPERATOR(Deref) OPERATOR(Plus) OPERATOR(Minus) \
- OPERATOR(Not) OPERATOR(LNot) OPERATOR(Real) OPERATOR(Imag) \
- OPERATOR(Extension) OPERATOR(Coawait)
-
-// All binary operators (excluding compound assign operators).
-#define BINOP_LIST() \
- OPERATOR(PtrMemD) OPERATOR(PtrMemI) OPERATOR(Mul) OPERATOR(Div) \
- OPERATOR(Rem) OPERATOR(Add) OPERATOR(Sub) OPERATOR(Shl) OPERATOR(Shr) \
- OPERATOR(LT) OPERATOR(GT) OPERATOR(LE) OPERATOR(GE) OPERATOR(EQ) \
- OPERATOR(NE) OPERATOR(Cmp) OPERATOR(And) OPERATOR(Xor) OPERATOR(Or) \
- OPERATOR(LAnd) OPERATOR(LOr) OPERATOR(Assign) OPERATOR(Comma)
-
-// All compound assign operators.
-#define CAO_LIST() \
- OPERATOR(Mul) OPERATOR(Div) OPERATOR(Rem) OPERATOR(Add) OPERATOR(Sub) \
- OPERATOR(Shl) OPERATOR(Shr) OPERATOR(And) OPERATOR(Or) OPERATOR(Xor)
-
namespace clang {
// A helper macro to implement short-circuiting when recursing. It
@@ -83,6 +60,42 @@ namespace clang {
return false; \
} while (false)
+namespace detail {
+
+template <typename T, typename U>
+struct has_same_member_pointer_type : std::false_type {};
+template <typename T, typename U, typename R, typename... P>
+struct has_same_member_pointer_type<R (T::*)(P...), R (U::*)(P...)>
+ : std::true_type {};
+
+template <bool has_same_type> struct is_same_method_impl {
+ template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
+ static bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
+ SecondMethodPtrTy SecondMethodPtr) {
+ return false;
+ }
+};
+
+template <> struct is_same_method_impl<true> {
+ template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
+ static bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
+ SecondMethodPtrTy SecondMethodPtr) {
+ return FirstMethodPtr == SecondMethodPtr;
+ }
+};
+
+/// Returns true if and only if \p FirstMethodPtr and \p SecondMethodPtr
+/// are pointers to the same non-static member function.
+template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
+bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
+ SecondMethodPtrTy SecondMethodPtr) {
+ return is_same_method_impl<has_same_member_pointer_type<
+ FirstMethodPtrTy,
+ SecondMethodPtrTy>::value>::isSameMethod(FirstMethodPtr, SecondMethodPtr);
+}
+
+} // end namespace detail
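A self-contained toy showing what isSameMethod detects (the structs are invented for illustration): when a derived class does not declare its own override, taking the member's address through the derived class still yields the base's pointer-to-member, and the two compare equal.

    #include "clang/AST/RecursiveASTVisitor.h"
    #include <cassert>

    struct Base { bool Traverse(int) { return true; } };
    struct NoOverride : Base {};
    struct WithOverride : Base { bool Traverse(int) { return false; } };

    void isSameMethodDemo() {
      // Same member function: NoOverride inherits Base::Traverse.
      assert(clang::detail::isSameMethod(&Base::Traverse,
                                         &NoOverride::Traverse));
      // Different member functions: WithOverride declares its own Traverse.
      assert(!clang::detail::isSameMethod(&Base::Traverse,
                                          &WithOverride::Traverse));
    }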
+
/// A class that does preorder or postorder
/// depth-first traversal on the entire Clang AST and visits each node.
///
@@ -325,26 +338,20 @@ public:
Stmt::child_range getStmtChildren(Stmt *S) { return S->children(); }
private:
- template<typename T, typename U>
- struct has_same_member_pointer_type : std::false_type {};
- template<typename T, typename U, typename R, typename... P>
- struct has_same_member_pointer_type<R (T::*)(P...), R (U::*)(P...)>
- : std::true_type {};
-
// Traverse the given statement. If the most-derived traverse function takes a
// data recursion queue, pass it on; otherwise, discard it. Note that the
// first branch of this conditional must compile whether or not the derived
// class can take a queue, so if we're taking the second arm, make the first
// arm call our function rather than the derived class version.
#define TRAVERSE_STMT_BASE(NAME, CLASS, VAR, QUEUE) \
- (has_same_member_pointer_type<decltype( \
- &RecursiveASTVisitor::Traverse##NAME), \
- decltype(&Derived::Traverse##NAME)>::value \
- ? static_cast<typename std::conditional< \
- has_same_member_pointer_type< \
+ (::clang::detail::has_same_member_pointer_type< \
+ decltype(&RecursiveASTVisitor::Traverse##NAME), \
+ decltype(&Derived::Traverse##NAME)>::value \
+ ? static_cast<std::conditional_t< \
+ ::clang::detail::has_same_member_pointer_type< \
decltype(&RecursiveASTVisitor::Traverse##NAME), \
decltype(&Derived::Traverse##NAME)>::value, \
- Derived &, RecursiveASTVisitor &>::type>(*this) \
+ Derived &, RecursiveASTVisitor &>>(*this) \
.Traverse##NAME(static_cast<CLASS *>(VAR), QUEUE) \
: getDerived().Traverse##NAME(static_cast<CLASS *>(VAR)))
@@ -377,60 +384,6 @@ public:
bool Visit##CLASS(CLASS *S) { return true; }
#include "clang/AST/StmtNodes.inc"
-// Define Traverse*(), WalkUpFrom*(), and Visit*() for unary
-// operator methods. Unary operators are not classes in themselves
-// (they're all opcodes in UnaryOperator) but do have visitors.
-#define OPERATOR(NAME) \
- bool TraverseUnary##NAME(UnaryOperator *S, \
- DataRecursionQueue *Queue = nullptr) { \
- if (!getDerived().shouldTraversePostOrder()) \
- TRY_TO(WalkUpFromUnary##NAME(S)); \
- TRY_TO_TRAVERSE_OR_ENQUEUE_STMT(S->getSubExpr()); \
- return true; \
- } \
- bool WalkUpFromUnary##NAME(UnaryOperator *S) { \
- TRY_TO(WalkUpFromUnaryOperator(S)); \
- TRY_TO(VisitUnary##NAME(S)); \
- return true; \
- } \
- bool VisitUnary##NAME(UnaryOperator *S) { return true; }
-
- UNARYOP_LIST()
-#undef OPERATOR
-
-// Define Traverse*(), WalkUpFrom*(), and Visit*() for binary
-// operator methods. Binary operators are not classes in themselves
-// (they're all opcodes in BinaryOperator) but do have visitors.
-#define GENERAL_BINOP_FALLBACK(NAME, BINOP_TYPE) \
- bool TraverseBin##NAME(BINOP_TYPE *S, DataRecursionQueue *Queue = nullptr) { \
- if (!getDerived().shouldTraversePostOrder()) \
- TRY_TO(WalkUpFromBin##NAME(S)); \
- TRY_TO_TRAVERSE_OR_ENQUEUE_STMT(S->getLHS()); \
- TRY_TO_TRAVERSE_OR_ENQUEUE_STMT(S->getRHS()); \
- return true; \
- } \
- bool WalkUpFromBin##NAME(BINOP_TYPE *S) { \
- TRY_TO(WalkUpFrom##BINOP_TYPE(S)); \
- TRY_TO(VisitBin##NAME(S)); \
- return true; \
- } \
- bool VisitBin##NAME(BINOP_TYPE *S) { return true; }
-
-#define OPERATOR(NAME) GENERAL_BINOP_FALLBACK(NAME, BinaryOperator)
- BINOP_LIST()
-#undef OPERATOR
-
-// Define Traverse*(), WalkUpFrom*(), and Visit*() for compound
-// assignment methods. Compound assignment operators are not
-// classes in themselves (they're all opcodes in
-// CompoundAssignOperator) but do have visitors.
-#define OPERATOR(NAME) \
- GENERAL_BINOP_FALLBACK(NAME##Assign, CompoundAssignOperator)
-
- CAO_LIST()
-#undef OPERATOR
-#undef GENERAL_BINOP_FALLBACK
-
// ---- Methods on Types ----
// FIXME: revamp to take TypeLoc's rather than Types.
@@ -534,8 +487,8 @@ private:
bool TraverseOMPExecutableDirective(OMPExecutableDirective *S);
bool TraverseOMPLoopDirective(OMPLoopDirective *S);
bool TraverseOMPClause(OMPClause *C);
-#define OPENMP_CLAUSE(Name, Class) bool Visit##Class(Class *C);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) bool Visit##Class(Class *C);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
/// Process clauses with list of variables.
template <typename T> bool VisitOMPClauseList(T *Node);
 /// Process clauses with pre-initializers.
@@ -549,42 +502,6 @@ private:
template <typename Derived>
bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
DataRecursionQueue *Queue) {
-#define DISPATCH_STMT(NAME, CLASS, VAR) \
- return TRAVERSE_STMT_BASE(NAME, CLASS, VAR, Queue);
-
- // If we have a binary expr, dispatch to the subcode of the binop. A smart
- // optimizer (e.g. LLVM) will fold this comparison into the switch stmt
- // below.
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
- switch (BinOp->getOpcode()) {
-#define OPERATOR(NAME) \
- case BO_##NAME: \
- DISPATCH_STMT(Bin##NAME, BinaryOperator, S);
-
- BINOP_LIST()
-#undef OPERATOR
-#undef BINOP_LIST
-
-#define OPERATOR(NAME) \
- case BO_##NAME##Assign: \
- DISPATCH_STMT(Bin##NAME##Assign, CompoundAssignOperator, S);
-
- CAO_LIST()
-#undef OPERATOR
-#undef CAO_LIST
- }
- } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(S)) {
- switch (UnOp->getOpcode()) {
-#define OPERATOR(NAME) \
- case UO_##NAME: \
- DISPATCH_STMT(Unary##NAME, UnaryOperator, S);
-
- UNARYOP_LIST()
-#undef OPERATOR
-#undef UNARYOP_LIST
- }
- }
-
// Top switch stmt: dispatch to TraverseFooStmt for each concrete FooStmt.
switch (S->getStmtClass()) {
case Stmt::NoStmtClass:
@@ -592,7 +509,7 @@ bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
case Stmt::CLASS##Class: \
- DISPATCH_STMT(CLASS, CLASS, S);
+ return TRAVERSE_STMT_BASE(CLASS, CLASS, S, Queue);
#include "clang/AST/StmtNodes.inc"
}
@@ -603,23 +520,44 @@ bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
template <typename Derived>
bool RecursiveASTVisitor<Derived>::PostVisitStmt(Stmt *S) {
+ // In pre-order traversal mode, each Traverse##STMT method is responsible for
+ // calling WalkUpFrom. Therefore, if the user overrides Traverse##STMT and
+ // does not call the default implementation, the WalkUpFrom callback is not
+ // called. Post-order traversal mode should provide the same behavior
+ // regarding method overrides.
+ //
+ // In post-order traversal mode the Traverse##STMT method, when it receives a
+ // DataRecursionQueue, can't call WalkUpFrom after traversing children because
+ // it only enqueues the children and does not traverse them. TraverseStmt
+ // traverses the enqueued children, and we call WalkUpFrom here.
+ //
+ // However, to make pre-order and post-order modes identical with regards to
+ // whether they call WalkUpFrom at all, we call WalkUpFrom if and only if the
+ // user did not override the Traverse##STMT method. We implement the override
+ // check with isSameMethod calls below.
+
switch (S->getStmtClass()) {
case Stmt::NoStmtClass:
break;
#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
case Stmt::CLASS##Class: \
- TRY_TO(WalkUpFrom##CLASS(static_cast<CLASS *>(S))); break;
+ if (::clang::detail::isSameMethod(&RecursiveASTVisitor::Traverse##CLASS, \
+ &Derived::Traverse##CLASS)) { \
+ TRY_TO(WalkUpFrom##CLASS(static_cast<CLASS *>(S))); \
+ } \
+ break;
#define INITLISTEXPR(CLASS, PARENT) \
case Stmt::CLASS##Class: \
- { \
+ if (::clang::detail::isSameMethod(&RecursiveASTVisitor::Traverse##CLASS, \
+ &Derived::Traverse##CLASS)) { \
auto ILE = static_cast<CLASS *>(S); \
if (auto Syn = ILE->isSemanticForm() ? ILE->getSyntacticForm() : ILE) \
TRY_TO(WalkUpFrom##CLASS(Syn)); \
if (auto Sem = ILE->isSemanticForm() ? ILE : ILE->getSemanticForm()) \
TRY_TO(WalkUpFrom##CLASS(Sem)); \
- break; \
- }
+ } \
+ break;
#include "clang/AST/StmtNodes.inc"
}
@@ -669,9 +607,6 @@ bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S,
return true;
}
-#define DISPATCH(NAME, CLASS, VAR) \
- return getDerived().Traverse##NAME(static_cast<CLASS *>(VAR))
-
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
if (T.isNull())
@@ -681,7 +616,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
case Type::CLASS: \
- DISPATCH(CLASS##Type, CLASS##Type, const_cast<Type *>(T.getTypePtr()));
+ return getDerived().Traverse##CLASS##Type( \
+ static_cast<CLASS##Type *>(const_cast<Type *>(T.getTypePtr())));
#include "clang/AST/TypeNodes.inc"
}
@@ -731,8 +667,6 @@ bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
return true;
}
-#undef DISPATCH
-
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifier(
NestedNameSpecifier *NNS) {
@@ -1006,6 +940,17 @@ DEF_TRAVERSE_TYPE(VectorType, { TRY_TO(TraverseType(T->getElementType())); })
DEF_TRAVERSE_TYPE(ExtVectorType, { TRY_TO(TraverseType(T->getElementType())); })
+DEF_TRAVERSE_TYPE(ConstantMatrixType,
+ { TRY_TO(TraverseType(T->getElementType())); })
+
+DEF_TRAVERSE_TYPE(DependentSizedMatrixType, {
+ if (T->getRowExpr())
+ TRY_TO(TraverseStmt(T->getRowExpr()));
+ if (T->getColumnExpr())
+ TRY_TO(TraverseStmt(T->getColumnExpr()));
+ TRY_TO(TraverseType(T->getElementType()));
+})
+
DEF_TRAVERSE_TYPE(FunctionNoProtoType,
{ TRY_TO(TraverseType(T->getReturnType())); })
@@ -1115,6 +1060,10 @@ DEF_TRAVERSE_TYPE(AtomicType, { TRY_TO(TraverseType(T->getValueType())); })
DEF_TRAVERSE_TYPE(PipeType, { TRY_TO(TraverseType(T->getElementType())); })
+DEF_TRAVERSE_TYPE(ExtIntType, {})
+DEF_TRAVERSE_TYPE(DependentExtIntType,
+ { TRY_TO(TraverseStmt(T->getNumBitsExpr())); })
+
#undef DEF_TRAVERSE_TYPE
// ----------------- TypeLoc traversal -----------------
@@ -1127,10 +1076,17 @@ DEF_TRAVERSE_TYPE(PipeType, { TRY_TO(TraverseType(T->getElementType())); })
#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
template <typename Derived> \
bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
- if (getDerived().shouldWalkTypesOfTypeLocs()) \
- TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE *>(TL.getTypePtr()))); \
- TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
+ if (!getDerived().shouldTraversePostOrder()) { \
+ TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
+ if (getDerived().shouldWalkTypesOfTypeLocs()) \
+ TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE *>(TL.getTypePtr()))); \
+ } \
{ CODE; } \
+ if (getDerived().shouldTraversePostOrder()) { \
+ TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
+ if (getDerived().shouldWalkTypesOfTypeLocs()) \
+ TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE *>(TL.getTypePtr()))); \
+ } \
return true; \
}
@@ -1199,22 +1155,22 @@ bool RecursiveASTVisitor<Derived>::TraverseArrayTypeLocHelper(ArrayTypeLoc TL) {
DEF_TRAVERSE_TYPELOC(ConstantArrayType, {
TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
- return TraverseArrayTypeLocHelper(TL);
+ TRY_TO(TraverseArrayTypeLocHelper(TL));
})
DEF_TRAVERSE_TYPELOC(IncompleteArrayType, {
TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
- return TraverseArrayTypeLocHelper(TL);
+ TRY_TO(TraverseArrayTypeLocHelper(TL));
})
DEF_TRAVERSE_TYPELOC(VariableArrayType, {
TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
- return TraverseArrayTypeLocHelper(TL);
+ TRY_TO(TraverseArrayTypeLocHelper(TL));
})
DEF_TRAVERSE_TYPELOC(DependentSizedArrayType, {
TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
- return TraverseArrayTypeLocHelper(TL);
+ TRY_TO(TraverseArrayTypeLocHelper(TL));
})
DEF_TRAVERSE_TYPELOC(DependentAddressSpaceType, {
@@ -1247,6 +1203,18 @@ DEF_TRAVERSE_TYPELOC(ExtVectorType, {
TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
})
+DEF_TRAVERSE_TYPELOC(ConstantMatrixType, {
+ TRY_TO(TraverseStmt(TL.getAttrRowOperand()));
+ TRY_TO(TraverseStmt(TL.getAttrColumnOperand()));
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+})
+
+DEF_TRAVERSE_TYPELOC(DependentSizedMatrixType, {
+ TRY_TO(TraverseStmt(TL.getAttrRowOperand()));
+ TRY_TO(TraverseStmt(TL.getAttrColumnOperand()));
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+})
+
DEF_TRAVERSE_TYPELOC(FunctionNoProtoType,
{ TRY_TO(TraverseTypeLoc(TL.getReturnLoc())); })
@@ -1378,6 +1346,11 @@ DEF_TRAVERSE_TYPELOC(AtomicType, { TRY_TO(TraverseTypeLoc(TL.getValueLoc())); })
DEF_TRAVERSE_TYPELOC(PipeType, { TRY_TO(TraverseTypeLoc(TL.getValueLoc())); })
+DEF_TRAVERSE_TYPELOC(ExtIntType, {})
+DEF_TRAVERSE_TYPELOC(DependentExtIntType, {
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getNumBitsExpr()));
+})
+
#undef DEF_TRAVERSE_TYPELOC
// ----------------- Decl traversal -----------------
@@ -1986,6 +1959,8 @@ DEF_TRAVERSE_DECL(BindingDecl, {
DEF_TRAVERSE_DECL(MSPropertyDecl, { TRY_TO(TraverseDeclaratorHelper(D)); })
+DEF_TRAVERSE_DECL(MSGuidDecl, {})
+
DEF_TRAVERSE_DECL(FieldDecl, {
TRY_TO(TraverseDeclaratorHelper(D));
if (D->isBitField())
@@ -2062,11 +2037,11 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
}
}
- bool VisitBody = D->isThisDeclarationADefinition();
- // If a method is set to default outside the class definition the compiler
- // generates the method body and adds it to the AST.
- if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
- VisitBody &= !MD->isDefaulted() || getDerived().shouldVisitImplicitCode();
+ bool VisitBody =
+ D->isThisDeclarationADefinition() &&
+ // Don't visit the function body if the function definition is generated
+ // by clang.
+ (!D->isDefaulted() || getDerived().shouldVisitImplicitCode());
if (VisitBody) {
TRY_TO(TraverseStmt(D->getBody())); // Function body.
@@ -2179,8 +2154,13 @@ DEF_TRAVERSE_DECL(RequiresExprBodyDecl, {})
TRY_TO_TRAVERSE_OR_ENQUEUE_STMT(SubStmt); \
} \
} \
- if (!Queue && ReturnValue && getDerived().shouldTraversePostOrder()) \
+ /* Call WalkUpFrom if TRY_TO_TRAVERSE_OR_ENQUEUE_STMT has traversed the \
+ * children already. If TRY_TO_TRAVERSE_OR_ENQUEUE_STMT only enqueued the \
+ * children, PostVisitStmt will call WalkUpFrom after we are done visiting \
+ * children. */ \
+ if (!Queue && ReturnValue && getDerived().shouldTraversePostOrder()) { \
TRY_TO(WalkUpFrom##STMT(S)); \
+ } \
return ReturnValue; \
}
@@ -2314,6 +2294,10 @@ DEF_TRAVERSE_STMT(CXXFunctionalCastExpr, {
TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
+DEF_TRAVERSE_STMT(CXXAddrspaceCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+})
+
DEF_TRAVERSE_STMT(CXXConstCastExpr, {
TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
@@ -2347,6 +2331,9 @@ bool RecursiveASTVisitor<Derived>::TraverseSynOrSemInitListExpr(
for (Stmt *SubStmt : S->children()) {
TRY_TO_TRAVERSE_OR_ENQUEUE_STMT(SubStmt);
}
+
+ if (!Queue && getDerived().shouldTraversePostOrder())
+ TRY_TO(WalkUpFromInitListExpr(S));
}
return true;
}
@@ -2543,7 +2530,10 @@ DEF_TRAVERSE_STMT(CXXMemberCallExpr, {})
// over the children.
DEF_TRAVERSE_STMT(AddrLabelExpr, {})
DEF_TRAVERSE_STMT(ArraySubscriptExpr, {})
+DEF_TRAVERSE_STMT(MatrixSubscriptExpr, {})
DEF_TRAVERSE_STMT(OMPArraySectionExpr, {})
+DEF_TRAVERSE_STMT(OMPArrayShapingExpr, {})
+DEF_TRAVERSE_STMT(OMPIteratorExpr, {})
DEF_TRAVERSE_STMT(BlockExpr, {
TRY_TO(TraverseDecl(S->getBlockDecl()));
@@ -2661,6 +2651,7 @@ DEF_TRAVERSE_STMT(CXXRewrittenBinaryOperator, {
})
DEF_TRAVERSE_STMT(OpaqueValueExpr, {})
DEF_TRAVERSE_STMT(TypoExpr, {})
+DEF_TRAVERSE_STMT(RecoveryExpr, {})
DEF_TRAVERSE_STMT(CUDAKernelCallExpr, {})
// These operators (all of them) do not need any action except
@@ -2842,6 +2833,12 @@ DEF_TRAVERSE_STMT(OMPCancelDirective,
DEF_TRAVERSE_STMT(OMPFlushDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPDepobjDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPScanDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPOrderedDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -2941,16 +2938,15 @@ bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {
if (!C)
return true;
switch (C->getClauseKind()) {
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case llvm::omp::Clause::Enum: \
TRY_TO(Visit##Class(static_cast<Class *>(C))); \
break;
-#include "clang/Basic/OpenMPKinds.def"
- case OMPC_threadprivate:
- case OMPC_uniform:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_unknown:
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Clause::Enum: \
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ default:
break;
}
return true;
@@ -3122,6 +3118,26 @@ bool RecursiveASTVisitor<Derived>::VisitOMPSeqCstClause(OMPSeqCstClause *) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPAcqRelClause(OMPAcqRelClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPAcquireClause(OMPAcquireClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPReleaseClause(OMPReleaseClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPRelaxedClause(OMPRelaxedClause *) {
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPThreadsClause(OMPThreadsClause *) {
return true;
}
@@ -3137,6 +3153,11 @@ bool RecursiveASTVisitor<Derived>::VisitOMPNogroupClause(OMPNogroupClause *) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDestroyClause(OMPDestroyClause *) {
+ return true;
+}
+
+template <typename Derived>
template <typename T>
bool RecursiveASTVisitor<Derived>::VisitOMPClauseList(T *Node) {
for (auto *E : Node->varlists()) {
@@ -3146,6 +3167,20 @@ bool RecursiveASTVisitor<Derived>::VisitOMPClauseList(T *Node) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPInclusiveClause(
+ OMPInclusiveClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPExclusiveClause(
+ OMPExclusiveClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPPrivateClause(OMPPrivateClause *C) {
TRY_TO(VisitOMPClauseList(C));
for (auto *E : C->private_copies()) {
@@ -3272,6 +3307,17 @@ RecursiveASTVisitor<Derived>::VisitOMPReductionClause(OMPReductionClause *C) {
for (auto *E : C->reduction_ops()) {
TRY_TO(TraverseStmt(E));
}
+ if (C->getModifier() == OMPC_REDUCTION_inscan) {
+ for (auto *E : C->copy_ops()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->copy_array_temps()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->copy_array_elems()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ }
return true;
}
@@ -3328,6 +3374,12 @@ bool RecursiveASTVisitor<Derived>::VisitOMPFlushClause(OMPFlushClause *C) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDepobjClause(OMPDepobjClause *C) {
+ TRY_TO(TraverseStmt(C->getDepobj()));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPDependClause(OMPDependClause *C) {
TRY_TO(VisitOMPClauseList(C));
return true;
@@ -3426,6 +3478,13 @@ bool RecursiveASTVisitor<Derived>::VisitOMPUseDevicePtrClause(
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPUseDeviceAddrClause(
+ OMPUseDeviceAddrClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPIsDevicePtrClause(
OMPIsDevicePtrClause *C) {
TRY_TO(VisitOMPClauseList(C));
@@ -3442,6 +3501,37 @@ bool RecursiveASTVisitor<Derived>::VisitOMPNontemporalClause(
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPOrderClause(OMPOrderClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDetachClause(OMPDetachClause *C) {
+ TRY_TO(TraverseStmt(C->getEventHandler()));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPUsesAllocatorsClause(
+ OMPUsesAllocatorsClause *C) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ const OMPUsesAllocatorsClause::Data Data = C->getAllocatorData(I);
+ TRY_TO(TraverseStmt(Data.Allocator));
+ TRY_TO(TraverseStmt(Data.AllocatorTraits));
+ }
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPAffinityClause(
+ OMPAffinityClause *C) {
+ TRY_TO(TraverseStmt(C->getModifier()));
+ for (Expr *E : C->varlists())
+ TRY_TO(TraverseStmt(E));
+ return true;
+}
+
// FIXME: look at the following tricky-seeming exprs to see if we
// need to recurse on anything. These are ones that have methods
// returning decls or qualtypes or nestednamespecifier -- though I'm
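
The post-order handling added above (the WalkUpFrom calls guarded by shouldTraversePostOrder) only matters to visitors that opt into post-order traversal. A minimal sketch of such a client, assuming it links against clang's libTooling; RecursiveASTVisitor, shouldTraversePostOrder and buildASTFromCode are existing APIs, while the visitor class name and the sample source string are illustrative:

    #include "clang/AST/RecursiveASTVisitor.h"
    #include "clang/Frontend/ASTUnit.h"
    #include "clang/Tooling/Tooling.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Visits InitListExpr nodes after their children; in post-order mode the
    // Visit callback is reached through the WalkUpFromInitListExpr call added
    // in TraverseSynOrSemInitListExpr above.
    class PostOrderInitListVisitor
        : public RecursiveASTVisitor<PostOrderInitListVisitor> {
    public:
      bool shouldTraversePostOrder() const { return true; }

      bool VisitInitListExpr(InitListExpr *E) {
        llvm::errs() << "init-list with " << E->getNumInits() << " inits\n";
        return true; // keep traversing
      }
    };

    int main() {
      auto AST = tooling::buildASTFromCode("int a[3] = {1, 2, 3};");
      PostOrderInitListVisitor V;
      V.TraverseDecl(AST->getASTContext().getTranslationUnitDecl());
      return 0;
    }
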
diff --git a/contrib/llvm-project/clang/include/clang/AST/Stmt.h b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
index 3aa2745937b1..d3fad58fcf59 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
@@ -14,12 +14,14 @@
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
@@ -98,14 +100,8 @@ protected:
/// The statement class.
unsigned sClass : 8;
-
- /// This bit is set only for the Stmts that are the structured-block of
- /// OpenMP executable directives. Directives that have a structured block
- /// are called "non-standalone" directives.
- /// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
- unsigned IsOMPStructuredBlock : 1;
};
- enum { NumStmtBits = 9 };
+ enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
@@ -315,12 +311,9 @@ protected:
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
- unsigned TypeDependent : 1;
- unsigned ValueDependent : 1;
- unsigned InstantiationDependent : 1;
- unsigned ContainsUnexpandedParameterPack : 1;
+ unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
- enum { NumExprBits = NumStmtBits + 9 };
+ enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
@@ -329,24 +322,27 @@ protected:
unsigned : NumExprBits;
- /// The kind of result that is trail-allocated.
+ /// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
- /// Kind of Result as defined by APValue::Kind
+ /// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
- /// When ResultKind == RSK_Int64. whether the trail-allocated integer is
- /// signed.
+ /// When ResultKind == RSK_Int64, true if the tail-allocated integer is
+ /// unsigned.
unsigned IsUnsigned : 1;
- /// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
- /// integer. 7 bits because it is the minimal number of bit to represent a
- /// value from 0 to 64 (the size of the trail-allocated number).
+ /// When ResultKind == RSK_Int64, the BitWidth of the tail-allocated
+ /// integer. 7 bits because it is the minimal number of bits to represent a
+ /// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
- /// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
- /// destructor on the trail-allocated APValue.
+ /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
+ /// tail-allocated APValue.
unsigned HasCleanup : 1;
+
+ /// True if this ConstantExpr was created for immediate invocation.
+ unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
@@ -431,6 +427,11 @@ protected:
unsigned Opc : 5;
unsigned CanOverflow : 1;
+ /// This is only meaningful for operations on floating point
+ /// types when additional values need to be in trailing storage.
+ /// It is 0 otherwise.
+ unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
@@ -444,8 +445,9 @@ protected:
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
- class ArraySubscriptExprBitfields {
+ class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
+ friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
@@ -529,8 +531,9 @@ protected:
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
- /// types and 0 otherwise.
- unsigned FPFeatures : 3;
+ /// types when additional values need to be in trailing storage.
+ /// It is 0 otherwise.
+ unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
@@ -611,9 +614,6 @@ protected:
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
-
- // Only meaningful for floating point types.
- unsigned FPFeatures : 3;
};
class CXXRewrittenBinaryOperatorBitfields {
@@ -772,8 +772,10 @@ protected:
/// the trait evaluated true or false.
unsigned Value : 1;
- /// The number of arguments to this type trait.
- unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
+ /// The number of arguments to this type trait. According to [implimits]
+ /// 8 bits would be enough, but we require (and test for) at least 16 bits
+ /// to mirror FunctionType.
+ unsigned NumArgs;
};
class DependentScopeDeclRefExprBitfields {
@@ -922,6 +924,28 @@ protected:
SourceLocation NameLoc;
};
+ class LambdaExprBitfields {
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+ friend class LambdaExpr;
+
+ unsigned : NumExprBits;
+
+ /// The default capture kind, which is a value of type
+ /// LambdaCaptureDefault.
+ unsigned CaptureDefault : 2;
+
+ /// Whether this lambda had an explicit parameter list vs. an
+ /// implicit (and empty) parameter list.
+ unsigned ExplicitParams : 1;
+
+ /// Whether this lambda had the result type explicitly specified.
+ unsigned ExplicitResultType : 1;
+
+ /// The number of captures.
+ unsigned NumCaptures : 16;
+ };
+
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -997,7 +1021,7 @@ protected:
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
- ArraySubscriptExprBitfields ArraySubscriptExprBits;
+ ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
@@ -1034,6 +1058,7 @@ protected:
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
+ LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
@@ -1117,7 +1142,6 @@ public:
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
- StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
@@ -1127,11 +1151,6 @@ public:
const char *getStmtClassName() const;
- bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
- void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
- StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
- }
-
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
@@ -1147,9 +1166,7 @@ public:
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
- void dump(SourceManager &SM) const;
- void dump(raw_ostream &OS, SourceManager &SM) const;
- void dump(raw_ostream &OS) const;
+ void dump(raw_ostream &OS, const ASTContext &Context) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
@@ -2260,6 +2277,8 @@ class WhileStmt final : public Stmt,
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
+ SourceLocation LParenLoc, RParenLoc;
+
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
@@ -2270,7 +2289,8 @@ class WhileStmt final : public Stmt,
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
- SourceLocation WL);
+ SourceLocation WL, SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
@@ -2278,7 +2298,8 @@ class WhileStmt final : public Stmt,
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
- Stmt *Body, SourceLocation WL);
+ Stmt *Body, SourceLocation WL,
+ SourceLocation LParenLoc, SourceLocation RParenLoc);
/// Create an empty while statement optionally with storage for
/// a condition variable.
@@ -2342,6 +2363,11 @@ public:
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
@@ -3044,7 +3070,7 @@ public:
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
- return Names[i + NumInputs];
+ return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
@@ -3055,11 +3081,11 @@ public:
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
- return &Exprs[0] + NumInputs;
+ return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
- return &Exprs[0] + NumInputs + NumLabels;
+ return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
@@ -3067,11 +3093,11 @@ public:
}
const_labels_iterator begin_labels() const {
- return &Exprs[0] + NumInputs;
+ return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
- return &Exprs[0] + NumInputs + NumLabels;
+ return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
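
The Stmt.h hunk above drops the SourceManager-based dump() overloads; call sites now pass the ASTContext, or use the argument-free form. A minimal sketch of updating a caller (the wrapper function and variable names are placeholders):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Stmt.h"
    #include "llvm/Support/raw_ostream.h"

    // Before this change: S->dump(llvm::errs(), Ctx.getSourceManager());
    void debugDumpStmt(const clang::Stmt *S, const clang::ASTContext &Ctx) {
      S->dump(llvm::errs(), Ctx); // stream + ASTContext overload
      S->dump();                  // no-argument overload, still available
    }
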
diff --git a/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
index 65f0afece224..bd87eafc9034 100644
--- a/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
@@ -356,6 +356,9 @@ public:
///
class OMPParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if the construct has inner cancel directive.
bool HasCancel;
@@ -381,6 +384,9 @@ class OMPParallelDirective : public OMPExecutableDirective {
SourceLocation(), NumClauses, 1),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -392,11 +398,14 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a N clauses.
///
@@ -406,6 +415,10 @@ public:
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -1258,7 +1271,9 @@ public:
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
-
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if current directive has inner cancel directive.
bool HasCancel;
@@ -1286,6 +1301,9 @@ class OMPForDirective : public OMPLoopDirective {
NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -1299,13 +1317,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
- bool HasCancel);
+ Expr *TaskRedRef, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -1317,6 +1337,10 @@ public:
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -1403,6 +1427,9 @@ public:
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if current directive has inner cancel directive.
bool HasCancel;
@@ -1429,6 +1456,9 @@ class OMPSectionsDirective : public OMPExecutableDirective {
SourceLocation(), NumClauses, 1),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -1440,11 +1470,14 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -1455,6 +1488,10 @@ public:
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -1715,6 +1752,9 @@ public:
class OMPParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if current region has inner cancel directive.
bool HasCancel;
@@ -1743,6 +1783,9 @@ class OMPParallelForDirective : public OMPLoopDirective {
SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -1756,12 +1799,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -1775,6 +1821,10 @@ public:
unsigned CollapsedNum,
EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -1863,6 +1913,10 @@ public:
class OMPParallelMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
+
OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelMasterDirectiveClass,
@@ -1875,6 +1929,9 @@ class OMPParallelMasterDirective : public OMPExecutableDirective {
SourceLocation(), SourceLocation(), NumClauses,
1) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
public:
/// Creates directive with a list of \a Clauses.
///
@@ -1883,10 +1940,12 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
///
static OMPParallelMasterDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -1897,6 +1956,10 @@ public:
static OMPParallelMasterDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterDirectiveClass;
}
@@ -1914,6 +1977,9 @@ public:
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if current directive has inner cancel directive.
bool HasCancel;
@@ -1941,6 +2007,9 @@ class OMPParallelSectionsDirective : public OMPExecutableDirective {
1),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -1952,11 +2021,14 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -1967,6 +2039,10 @@ public:
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -2314,6 +2390,64 @@ public:
}
};
+/// This represents '#pragma omp depobj' directive.
+///
+/// \code
+/// #pragma omp depobj(a) depend(in:x,y)
+/// \endcode
+/// In this example directive '#pragma omp depobj' initializes a depobj object
+/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
+class OMPDepobjDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param NumClauses Number of clauses.
+ ///
+ OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned NumClauses)
+ : OMPExecutableDirective(this, OMPDepobjDirectiveClass,
+ llvm::omp::OMPD_depobj, StartLoc, EndLoc,
+ NumClauses, 0) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param NumClauses Number of clauses.
+ ///
+ explicit OMPDepobjDirective(unsigned NumClauses)
+ : OMPExecutableDirective(this, OMPDepobjDirectiveClass,
+ llvm::omp::OMPD_depobj, SourceLocation(),
+ SourceLocation(), NumClauses, 0) {}
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses.
+ ///
+ static OMPDepobjDirective *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses);
+
+ /// Creates an empty directive with the place for \a NumClauses
+ /// clauses.
+ ///
+ /// \param C AST context.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPDepobjDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp ordered' directive.
///
/// \code
@@ -2747,6 +2881,12 @@ public:
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel = false;
+
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -2769,6 +2909,11 @@ class OMPTargetParallelDirective : public OMPExecutableDirective {
SourceLocation(), SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
+
public:
/// Creates directive with a list of \a Clauses.
///
@@ -2777,10 +2922,14 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
+ /// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTargetParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -2791,6 +2940,13 @@ public:
static OMPTargetParallelDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelDirectiveClass;
}
@@ -2808,6 +2964,9 @@ public:
class OMPTargetParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if current region has inner cancel directive.
bool HasCancel;
@@ -2837,6 +2996,9 @@ class OMPTargetParallelForDirective : public OMPLoopDirective {
SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -2850,12 +3012,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPTargetParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -2869,6 +3034,10 @@ public:
unsigned CollapsedNum,
EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -3070,6 +3239,9 @@ public:
///
class OMPTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel;
+
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -3081,7 +3253,8 @@ class OMPTaskLoopDirective : public OMPLoopDirective {
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopDirectiveClass,
llvm::omp::OMPD_taskloop, StartLoc, EndLoc,
- CollapsedNum, NumClauses) {}
+ CollapsedNum, NumClauses),
+ HasCancel(false) {}
/// Build an empty directive.
///
@@ -3091,7 +3264,11 @@ class OMPTaskLoopDirective : public OMPLoopDirective {
explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTaskLoopDirectiveClass,
llvm::omp::OMPD_taskloop, SourceLocation(),
- SourceLocation(), CollapsedNum, NumClauses) {}
+ SourceLocation(), CollapsedNum, NumClauses),
+ HasCancel(false) {}
+
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
@@ -3103,11 +3280,12 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -3120,6 +3298,9 @@ public:
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskLoopDirectiveClass;
}
@@ -3203,6 +3384,9 @@ public:
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel;
+
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -3214,7 +3398,8 @@ class OMPMasterTaskLoopDirective : public OMPLoopDirective {
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
- CollapsedNum, NumClauses) {}
+ CollapsedNum, NumClauses),
+ HasCancel(false) {}
/// Build an empty directive.
///
@@ -3225,7 +3410,11 @@ class OMPMasterTaskLoopDirective : public OMPLoopDirective {
unsigned NumClauses)
: OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_master_taskloop, SourceLocation(),
- SourceLocation(), CollapsedNum, NumClauses) {}
+ SourceLocation(), CollapsedNum, NumClauses),
+ HasCancel(false) {}
+
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
@@ -3237,11 +3426,12 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param HasCancel true if this directive has inner cancel directive.
///
static OMPMasterTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -3255,6 +3445,9 @@ public:
unsigned CollapsedNum,
EmptyShell);
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
}
@@ -3339,6 +3532,9 @@ public:
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel;
+
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -3351,7 +3547,8 @@ class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
- EndLoc, CollapsedNum, NumClauses) {}
+ EndLoc, CollapsedNum, NumClauses),
+ HasCancel(false) {}
/// Build an empty directive.
///
@@ -3363,7 +3560,11 @@ class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
: OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop,
SourceLocation(), SourceLocation(), CollapsedNum,
- NumClauses) {}
+ NumClauses),
+ HasCancel(false) {}
+
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
@@ -3375,11 +3576,12 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelMasterTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -3393,6 +3595,9 @@ public:
unsigned CollapsedNum,
EmptyShell);
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
}
@@ -3605,6 +3810,9 @@ public:
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
@@ -3636,6 +3844,9 @@ class OMPDistributeParallelForDirective : public OMPLoopDirective {
NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -3649,12 +3860,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -3668,6 +3882,10 @@ public:
unsigned CollapsedNum,
EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -4170,6 +4388,9 @@ public:
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
@@ -4202,6 +4423,9 @@ class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -4215,12 +4439,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
@@ -4232,6 +4459,10 @@ public:
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -4379,6 +4610,9 @@ public:
class OMPTargetTeamsDistributeParallelForDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
+ /// Special reference expression for handling task reduction. Used to store
+ /// the taskgroup descriptor returned by the runtime functions.
+ Expr *TaskRedRef = nullptr;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
@@ -4412,6 +4646,9 @@ class OMPTargetTeamsDistributeParallelForDirective final
SourceLocation(), SourceLocation(), CollapsedNum, NumClauses),
HasCancel(false) {}
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { TaskRedRef = E; }
+
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
@@ -4425,12 +4662,15 @@ public:
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTargetTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
+ bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
@@ -4442,6 +4682,10 @@ public:
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() { return TaskRedRef; }
+ const Expr *getTaskReductionRefExpr() const { return TaskRedRef; }
+
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
@@ -4594,6 +4838,63 @@ public:
}
};
+/// This represents '#pragma omp scan' directive.
+///
+/// \code
+/// #pragma omp scan inclusive(a)
+/// \endcode
+/// In this example directive '#pragma omp scan' has clause 'inclusive' with
+/// list item 'a'.
+class OMPScanDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param NumClauses Number of clauses.
+ ///
+ OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned NumClauses)
+ : OMPExecutableDirective(this, OMPScanDirectiveClass,
+ llvm::omp::OMPD_scan, StartLoc, EndLoc,
+ NumClauses, 0) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param NumClauses Number of clauses.
+ ///
+ explicit OMPScanDirective(unsigned NumClauses)
+ : OMPExecutableDirective(this, OMPScanDirectiveClass,
+ llvm::omp::OMPD_scan, SourceLocation(),
+ SourceLocation(), NumClauses, 0) {}
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses (only a single OMPInclusiveClause or
+ /// OMPExclusiveClause is allowed).
+ ///
+ static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses);
+
+ /// Creates an empty directive with the place for \a NumClauses
+ /// clauses.
+ ///
+ /// \param C AST context.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPScanDirectiveClass;
+ }
+};
+
} // end namespace clang
#endif
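
Many of the directives above gain a task reduction reference expression which, per the comments, stores the taskgroup descriptor returned by the runtime functions. A consumer can query it through the new accessor; a sketch using OMPParallelDirective (the free function is illustrative):

    #include "clang/AST/StmtOpenMP.h"
    #include "llvm/Support/raw_ostream.h"

    // Reports whether a '#pragma omp parallel' carries the taskgroup
    // descriptor used for task reductions; the same accessor is added to the
    // other directives extended above (for, sections, target parallel, ...).
    void reportTaskReduction(const clang::OMPParallelDirective *D) {
      if (const clang::Expr *Ref = D->getTaskReductionRefExpr())
        llvm::errs() << "task reduction descriptor expr at " << Ref << "\n";
      else
        llvm::errs() << "no task reduction descriptor\n";
    }
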
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
index 93f7b62b8aea..51fd8ba51034 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_TEMPLATEBASE_H
#define LLVM_CLANG_AST_TEMPLATEBASE_H
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -81,8 +82,7 @@ public:
/// The template argument is an expression, and we've not resolved it to one
/// of the other forms yet, either because it's dependent or because we're
/// representing a non-canonical template argument (for instance, in a
- /// TemplateSpecializationType). Also used to represent a non-dependent
- /// __uuidof expression (a Microsoft extension).
+ /// TemplateSpecializationType).
Expression,
/// The template argument is actually a parameter pack. Arguments are stored
@@ -236,6 +236,8 @@ public:
/// Determine whether this template argument has no value.
bool isNull() const { return getKind() == Null; }
+ TemplateArgumentDependence getDependence() const;
+
/// Whether this template argument is dependent on a template
/// parameter such that its result can change from one instantiation to
/// another.
@@ -666,11 +668,13 @@ struct alignas(void *) ASTTemplateKWAndArgsInfo {
void initializeFrom(SourceLocation TemplateKWLoc,
const TemplateArgumentListInfo &List,
TemplateArgumentLoc *OutArgArray);
+ // FIXME: The parameter Deps is the result populated by this method; the
+ // caller doesn't need it since it is populated by computeDependence. Remove
+ // it.
void initializeFrom(SourceLocation TemplateKWLoc,
const TemplateArgumentListInfo &List,
- TemplateArgumentLoc *OutArgArray, bool &Dependent,
- bool &InstantiationDependent,
- bool &ContainsUnexpandedParameterPack);
+ TemplateArgumentLoc *OutArgArray,
+ TemplateArgumentDependence &Deps);
void initializeFrom(SourceLocation TemplateKWLoc);
void copyInto(const TemplateArgumentLoc *ArgArray,
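
TemplateArgument::getDependence() above exposes the dependence information as a single TemplateArgumentDependence bitmask from DependenceFlags.h; the boolean convenience queries such as isDependent() remain. A sketch of reading the bitmask directly, assuming the Dependent enumerator name from that header:

    #include "clang/AST/DependenceFlags.h"
    #include "clang/AST/TemplateBase.h"

    // Equivalent in spirit to Arg.isDependent(), but reads the combined
    // dependence bitmask introduced above.
    static bool isDependentArg(const clang::TemplateArgument &Arg) {
      return static_cast<bool>(Arg.getDependence() &
                               clang::TemplateArgumentDependence::Dependent);
    }
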
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
index cbbcbf6af8ab..9bcf2838dcf1 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_TEMPLATENAME_H
#define LLVM_CLANG_AST_TEMPLATENAME_H
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/FoldingSet.h"
@@ -295,6 +296,8 @@ public:
/// the template, including any default template arguments.
TemplateName getNameToSubstitute() const;
+ TemplateNameDependence getDependence() const;
+
/// Determines whether this is a dependent template name.
bool isDependent() const;
@@ -559,7 +562,7 @@ struct PointerLikeTypeTraits<clang::TemplateName> {
}
// No bits are available!
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
};
} // namespace llvm.
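
The move from an anonymous enum to static constexpr int follows the pattern used by llvm::PointerLikeTypeTraits specializations generally. A minimal specialization in that style for a hypothetical type (Widget and its claimed alignment are assumptions, not part of the change above):

    #include "llvm/Support/PointerLikeTypeTraits.h"

    struct alignas(8) Widget {}; // hypothetical type with 8-byte alignment

    namespace llvm {
    template <> struct PointerLikeTypeTraits<Widget *> {
      static void *getAsVoidPointer(Widget *P) { return P; }
      static Widget *getFromVoidPointer(void *P) {
        return static_cast<Widget *>(P);
      }
      // Spelled as 'static constexpr int' rather than an anonymous enum,
      // matching the TemplateName and QualType traits in this diff.
      static constexpr int NumLowBitsAvailable = 3;
    };
    } // namespace llvm
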
diff --git a/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h b/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
index d293ea190aa4..b4cfb5a380d1 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
@@ -22,10 +22,13 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TemplateArgumentVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
namespace clang {
+class APValue;
+
class TextTreeStructure {
raw_ostream &OS;
const bool ShowColors;
@@ -68,7 +71,7 @@ public:
// We need to capture an owning-string in the lambda because the lambda
// is invoked in a deferred manner.
- std::string LabelStr = Label;
+ std::string LabelStr(Label);
auto DumpWithIndent = [this, DoAddChild, LabelStr](bool IsLastChild) {
// Print out the appropriate tree structure and work out the prefix for
// children of this node. For instance:
@@ -139,19 +142,29 @@ class TextNodeDumper
const char *LastLocFilename = "";
unsigned LastLocLine = ~0U;
- const SourceManager *SM;
+ /// \p Context, \p SM, and \p Traits can be null. This is because we want
+ /// to be able to call \p dump() in a debugger without having to pass the
+ /// \p ASTContext to \p dump. Not all parts of the AST dump output will be
+ /// available without the \p ASTContext.
+ const ASTContext *Context = nullptr;
+ const SourceManager *SM = nullptr;
/// The policy to use for printing; can be defaulted.
- PrintingPolicy PrintPolicy;
+ PrintingPolicy PrintPolicy = LangOptions();
- const comments::CommandTraits *Traits;
+ const comments::CommandTraits *Traits = nullptr;
const char *getCommandName(unsigned CommandID);
+ void dumpAPValueChildren(const APValue &Value, QualType Ty,
+ const APValue &(*IdxToChildFun)(const APValue &,
+ unsigned),
+ unsigned NumChildren, StringRef LabelSingular,
+ StringRef LabelPlurial);
+
public:
- TextNodeDumper(raw_ostream &OS, bool ShowColors, const SourceManager *SM,
- const PrintingPolicy &PrintPolicy,
- const comments::CommandTraits *Traits);
+ TextNodeDumper(raw_ostream &OS, const ASTContext &Context, bool ShowColors);
+ TextNodeDumper(raw_ostream &OS, bool ShowColors);
void Visit(const comments::Comment *C, const comments::FullComment *FC);
@@ -176,6 +189,8 @@ public:
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const APValue &Value, QualType Ty);
+
void dumpPointer(const void *Ptr);
void dumpLocation(SourceLocation Loc);
void dumpSourceRange(SourceRange R);
@@ -184,6 +199,7 @@ public:
void dumpBareDeclRef(const Decl *D);
void dumpName(const NamedDecl *ND);
void dumpAccessSpecifier(AccessSpecifier AS);
+ void dumpCleanupObject(const ExprWithCleanups::CleanupObject &C);
void dumpDeclRef(const Decl *D, StringRef Label = {});
@@ -230,6 +246,7 @@ public:
void VisitCaseStmt(const CaseStmt *Node);
void VisitConstantExpr(const ConstantExpr *Node);
void VisitCallExpr(const CallExpr *Node);
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Node);
void VisitCastExpr(const CastExpr *Node);
void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
@@ -257,6 +274,9 @@ public:
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node);
void VisitCXXNewExpr(const CXXNewExpr *Node);
void VisitCXXDeleteExpr(const CXXDeleteExpr *Node);
+ void VisitTypeTraitExpr(const TypeTraitExpr *Node);
+ void VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *Node);
+ void VisitExpressionTraitExpr(const ExpressionTraitExpr *Node);
void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
void VisitExprWithCleanups(const ExprWithCleanups *Node);
void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
@@ -273,6 +293,7 @@ public:
void VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node);
void VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node);
void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
+ void VisitOMPIteratorExpr(const OMPIteratorExpr *Node);
void VisitRValueReferenceType(const ReferenceType *T);
void VisitArrayType(const ArrayType *T);
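
TextNodeDumper is now constructed from a stream and an ASTContext (or a stream alone, with reduced output, as the comment in the hunk explains). A minimal sketch of dumping a single node, assuming TextNodeDumper's Visit(const Stmt *) overload alongside the overloads shown above:

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/TextNodeDumper.h"
    #include "llvm/Support/raw_ostream.h"

    // Dumps one statement node (not its children); child traversal is driven
    // by the node traverser that feeds TextNodeDumper in the AST dumpers.
    void dumpOneNode(const clang::Stmt *S, const clang::ASTContext &Ctx) {
      clang::TextNodeDumper Dumper(llvm::errs(), Ctx, /*ShowColors=*/false);
      Dumper.Visit(S);
    }
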
diff --git a/contrib/llvm-project/clang/include/clang/AST/Type.h b/contrib/llvm-project/clang/include/clang/AST/Type.h
index 0fd5f2aace49..0fc50e0e799f 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Type.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Type.h
@@ -17,6 +17,7 @@
#ifndef LLVM_CLANG_AST_TYPE_H
#define LLVM_CLANG_AST_TYPE_H
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateName.h"
#include "clang/Basic/AddressSpaces.h"
@@ -44,8 +45,8 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
-#include "llvm/Support/type_traits.h"
#include "llvm/Support/TrailingObjects.h"
+#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -86,7 +87,7 @@ namespace llvm {
return static_cast< ::clang::Type*>(P);
}
- enum { NumLowBitsAvailable = clang::TypeAlignmentInBits };
+ static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
};
template<>
@@ -97,7 +98,7 @@ namespace llvm {
return static_cast< ::clang::ExtQuals*>(P);
}
- enum { NumLowBitsAvailable = clang::TypeAlignmentInBits };
+ static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
};
} // namespace llvm
@@ -943,6 +944,12 @@ public:
/// from non-class types (in C++) or all types (in C).
QualType getNonLValueExprType(const ASTContext &Context) const;
+ /// Remove an outer pack expansion type (if any) from this type. Used as part
+ /// of converting the type of a declaration to the type of an expression that
+ /// references that expression. It's meaningless for an expression to have a
+ /// pack expansion type.
+ QualType getNonPackExpansionType() const;
+
/// Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
@@ -1051,7 +1058,7 @@ public:
void dump(const char *s) const;
void dump() const;
- void dump(llvm::raw_ostream &OS) const;
+ void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(getAsOpaquePtr());
@@ -1063,6 +1070,21 @@ public:
/// Return the address space of this type.
inline LangAS getAddressSpace() const;
+ /// Returns true if address space qualifiers overlap with T address space
+ /// qualifiers.
+ /// OpenCL C defines conversion rules for pointers to different address spaces
+ /// and a notion of overlapping address spaces.
+ /// CL1.1 or CL1.2:
+ /// address spaces overlap iff they are the same.
+ /// OpenCL C v2.0 s6.5.5 adds:
+ /// __generic overlaps with any address space except for __constant.
+ bool isAddressSpaceOverlapping(QualType T) const {
+ Qualifiers Q = getQualifiers();
+ Qualifiers TQ = T.getQualifiers();
+ // Address spaces overlap if at least one of them is a superset of another
+ return Q.isAddressSpaceSupersetOf(TQ) || TQ.isAddressSpaceSupersetOf(Q);
+ }
+
/// Returns gc attribute of this type.
inline Qualifiers::GC getObjCGCAttr() const;
@@ -1296,7 +1318,7 @@ struct PointerLikeTypeTraits<clang::QualType> {
}
// Various qualifiers go in low bits.
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
};
} // namespace llvm
@@ -1465,19 +1487,8 @@ private:
/// TypeClass bitfield - Enum that specifies what subclass this belongs to.
unsigned TC : 8;
- /// Whether this type is a dependent type (C++ [temp.dep.type]).
- unsigned Dependent : 1;
-
- /// Whether this type somehow involves a template parameter, even
- /// if the resolution of the type does not depend on a template parameter.
- unsigned InstantiationDependent : 1;
-
- /// Whether this type is a variably-modified type (C99 6.7.5).
- unsigned VariablyModified : 1;
-
- /// Whether this type contains an unexpanded parameter pack
- /// (for C++11 variadic templates).
- unsigned ContainsUnexpandedParameterPack : 1;
+ /// Store information on the type dependency.
+ unsigned Dependence : llvm::BitWidth<TypeDependence>;
/// True if the cache (i.e. the bitfields here starting with
/// 'Cache') is valid.
@@ -1506,7 +1517,7 @@ private:
return CachedLocalOrUnnamed;
}
};
- enum { NumTypeBits = 18 };
+ enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
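For clarity, the new count decomposes as 8 bits for TC, llvm::BitWidth<TypeDependence> bits for the Dependence field, and 6 bits for the bookkeeping fields that follow (CacheValid, the 3-bit CachedLinkage, CachedLocalOrUnnamed, and the from-AST flag). A worked check, assuming the five dependence flags defined in DependenceFlags.h at this revision (illustrative, not part of the patch):

    // 8 (TC) + 5 (UnexpandedPack, Instantiation, Dependent, VariablyModified,
    // Error) + 6 (cache/bookkeeping bits) == 19.
    static_assert(8 + llvm::BitWidth<clang::TypeDependence> + 6 == 19,
                  "illustrative only; changes if dependence flags are added");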
protected:
// These classes allow subclasses to somewhat cleanly pack bitfields
@@ -1556,7 +1567,7 @@ protected:
/// Extra information which affects how the function is called, like
/// regparm and the calling convention.
- unsigned ExtInfo : 12;
+ unsigned ExtInfo : 13;
/// The ref-qualifier associated with a \c FunctionProtoType.
///
@@ -1660,11 +1671,21 @@ protected:
/// The kind of vector, either a generic vector type or some
/// target-specific vector type such as for AltiVec or Neon.
unsigned VecKind : 3;
-
/// The number of elements in the vector.
- unsigned NumElements : 29 - NumTypeBits;
+ uint32_t NumElements;
+ };
+
+ class ConstantMatrixTypeBitfields {
+ friend class ConstantMatrixType;
+
+ unsigned : NumTypeBits;
- enum { MaxNumElements = (1 << (29 - NumTypeBits)) - 1 };
+ /// Number of rows and columns. Using 20 bits allows supporting very large
+    /// matrices, while keeping 24 bits to accommodate NumTypeBits.
+ unsigned NumRows : 20;
+ unsigned NumColumns : 20;
+
+ static constexpr uint32_t MaxElementsPerDimension = (1 << 20) - 1;
};
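Worked arithmetic for the new limit (illustrative, not from the patch): each 20-bit field caps a constant matrix dimension at 2^20 - 1 rows or columns.

    static_assert(((1u << 20) - 1) == 1048575,
                  "MaxElementsPerDimension allows up to 1,048,575 rows/columns");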
class AttributedTypeBitfields {
@@ -1776,46 +1797,47 @@ protected:
TypeWithKeywordBitfields TypeWithKeywordBits;
ElaboratedTypeBitfields ElaboratedTypeBits;
VectorTypeBitfields VectorTypeBits;
+ ConstantMatrixTypeBitfields ConstantMatrixTypeBits;
SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
DependentTemplateSpecializationTypeBitfields
DependentTemplateSpecializationTypeBits;
PackExpansionTypeBitfields PackExpansionTypeBits;
- };
- static_assert(sizeof(TypeBitfields) <= 8,
- "TypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(ArrayTypeBitfields) <= 8,
- "ArrayTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(AttributedTypeBitfields) <= 8,
- "AttributedTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(AutoTypeBitfields) <= 8,
- "AutoTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(BuiltinTypeBitfields) <= 8,
- "BuiltinTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(FunctionTypeBitfields) <= 8,
- "FunctionTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(ObjCObjectTypeBitfields) <= 8,
- "ObjCObjectTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(ReferenceTypeBitfields) <= 8,
- "ReferenceTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(TypeWithKeywordBitfields) <= 8,
- "TypeWithKeywordBitfields is larger than 8 bytes!");
- static_assert(sizeof(ElaboratedTypeBitfields) <= 8,
- "ElaboratedTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(VectorTypeBitfields) <= 8,
- "VectorTypeBitfields is larger than 8 bytes!");
- static_assert(sizeof(SubstTemplateTypeParmPackTypeBitfields) <= 8,
- "SubstTemplateTypeParmPackTypeBitfields is larger"
- " than 8 bytes!");
- static_assert(sizeof(TemplateSpecializationTypeBitfields) <= 8,
- "TemplateSpecializationTypeBitfields is larger"
- " than 8 bytes!");
- static_assert(sizeof(DependentTemplateSpecializationTypeBitfields) <= 8,
- "DependentTemplateSpecializationTypeBitfields is larger"
- " than 8 bytes!");
- static_assert(sizeof(PackExpansionTypeBitfields) <= 8,
- "PackExpansionTypeBitfields is larger than 8 bytes");
+ static_assert(sizeof(TypeBitfields) <= 8,
+ "TypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ArrayTypeBitfields) <= 8,
+ "ArrayTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(AttributedTypeBitfields) <= 8,
+ "AttributedTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(AutoTypeBitfields) <= 8,
+ "AutoTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(BuiltinTypeBitfields) <= 8,
+ "BuiltinTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(FunctionTypeBitfields) <= 8,
+ "FunctionTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ObjCObjectTypeBitfields) <= 8,
+ "ObjCObjectTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ReferenceTypeBitfields) <= 8,
+ "ReferenceTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(TypeWithKeywordBitfields) <= 8,
+ "TypeWithKeywordBitfields is larger than 8 bytes!");
+ static_assert(sizeof(ElaboratedTypeBitfields) <= 8,
+ "ElaboratedTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(VectorTypeBitfields) <= 8,
+ "VectorTypeBitfields is larger than 8 bytes!");
+ static_assert(sizeof(SubstTemplateTypeParmPackTypeBitfields) <= 8,
+ "SubstTemplateTypeParmPackTypeBitfields is larger"
+ " than 8 bytes!");
+ static_assert(sizeof(TemplateSpecializationTypeBitfields) <= 8,
+ "TemplateSpecializationTypeBitfields is larger"
+ " than 8 bytes!");
+ static_assert(sizeof(DependentTemplateSpecializationTypeBitfields) <= 8,
+ "DependentTemplateSpecializationTypeBitfields is larger"
+ " than 8 bytes!");
+ static_assert(sizeof(PackExpansionTypeBitfields) <= 8,
+ "PackExpansionTypeBitfields is larger than 8 bytes");
+ };
private:
template <class T> friend class TypePropertyCache;
@@ -1828,16 +1850,11 @@ private:
protected:
friend class ASTContext;
- Type(TypeClass tc, QualType canon, bool Dependent,
- bool InstantiationDependent, bool VariablyModified,
- bool ContainsUnexpandedParameterPack)
+ Type(TypeClass tc, QualType canon, TypeDependence Dependence)
: ExtQualsTypeCommonBase(this,
canon.isNull() ? QualType(this_(), 0) : canon) {
TypeBits.TC = tc;
- TypeBits.Dependent = Dependent;
- TypeBits.InstantiationDependent = Dependent || InstantiationDependent;
- TypeBits.VariablyModified = VariablyModified;
- TypeBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ TypeBits.Dependence = static_cast<unsigned>(Dependence);
TypeBits.CacheValid = false;
TypeBits.CachedLocalOrUnnamed = false;
TypeBits.CachedLinkage = NoLinkage;
@@ -1847,20 +1864,11 @@ protected:
// silence VC++ warning C4355: 'this' : used in base member initializer list
Type *this_() { return this; }
- void setDependent(bool D = true) {
- TypeBits.Dependent = D;
- if (D)
- TypeBits.InstantiationDependent = true;
+ void setDependence(TypeDependence D) {
+ TypeBits.Dependence = static_cast<unsigned>(D);
}
- void setInstantiationDependent(bool D = true) {
- TypeBits.InstantiationDependent = D; }
-
- void setVariablyModified(bool VM = true) { TypeBits.VariablyModified = VM; }
-
- void setContainsUnexpandedParameterPack(bool PP = true) {
- TypeBits.ContainsUnexpandedParameterPack = PP;
- }
+ void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
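These two helpers replace the four boolean setters removed above; callers now OR dependence flags into the stored mask instead of toggling separate bitfields. For example, the deserialization creators in the TypeProperties.td hunk later in this patch re-add the instantiation-dependence bit like so:

    if (dependent)
      const_cast<Type *>(result.getTypePtr())
          ->addDependence(TypeDependence::DependentInstantiation);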
public:
friend class ASTReader;
@@ -1894,7 +1902,7 @@ public:
///
/// Note that this routine does not specify which
bool containsUnexpandedParameterPack() const {
- return TypeBits.ContainsUnexpandedParameterPack;
+ return getDependence() & TypeDependence::UnexpandedPack;
}
/// Determines if this type would be canonical if it had no further
@@ -1908,6 +1916,15 @@ public:
/// or QualType::getSingleStepDesugaredType(const ASTContext&).
QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
+ /// As an extension, we classify types as one of "sized" or "sizeless";
+ /// every type is one or the other. Standard types are all sized;
+ /// sizeless types are purely an extension.
+ ///
+ /// Sizeless types contain data with no specified size, alignment,
+ /// or layout.
+ bool isSizelessType() const;
+ bool isSizelessBuiltinType() const;
+
/// Types are partitioned into 3 broad categories (C99 6.2.5p1):
/// object types, function types, and incomplete types.
@@ -1997,6 +2014,7 @@ public:
bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
+ bool isBFloat16Type() const;
bool isFloat128Type() const;
bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
@@ -2039,6 +2057,8 @@ public:
bool isComplexIntegerType() const; // GCC _Complex integer type.
bool isVectorType() const; // GCC vector type.
bool isExtVectorType() const; // Extended vector type.
+ bool isMatrixType() const; // Matrix type.
+ bool isConstantMatrixType() const; // Constant matrix type.
bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
bool isObjCObjectPointerType() const; // pointer to ObjC object
bool isObjCRetainableType() const; // ObjC object or block pointer
@@ -2119,6 +2139,7 @@ public:
bool isOCLExtOpaqueType() const; // Any OpenCL extension type
bool isPipeType() const; // OpenCL pipe type
+ bool isExtIntType() const; // Extended Int Type
bool isOpenCLSpecificType() const; // Any OpenCL specific type
/// Determines if this type, which must satisfy
@@ -2126,6 +2147,11 @@ public:
/// than implicitly __strong.
bool isObjCARCImplicitlyUnretainedType() const;
+ /// Check if the type is the CUDA device builtin surface type.
+ bool isCUDADeviceBuiltinSurfaceType() const;
+ /// Check if the type is the CUDA device builtin texture type.
+ bool isCUDADeviceBuiltinTextureType() const;
+
/// Return the implicit lifetime for this type, which must not be dependent.
Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
@@ -2145,16 +2171,27 @@ public:
/// Given that this is a scalar type, classify it.
ScalarTypeKind getScalarTypeKind() const;
+ TypeDependence getDependence() const {
+ return static_cast<TypeDependence>(TypeBits.Dependence);
+ }
+
+  /// Whether this type contains an error.
+ bool containsErrors() const {
+ return getDependence() & TypeDependence::Error;
+ }
+
/// Whether this type is a dependent type, meaning that its definition
/// somehow depends on a template parameter (C++ [temp.dep.type]).
- bool isDependentType() const { return TypeBits.Dependent; }
+ bool isDependentType() const {
+ return getDependence() & TypeDependence::Dependent;
+ }
/// Determine whether this type is an instantiation-dependent type,
/// meaning that the type involves a template parameter (even if the
/// definition does not actually depend on the type substituted for that
/// template parameter).
bool isInstantiationDependentType() const {
- return TypeBits.InstantiationDependent;
+ return getDependence() & TypeDependence::Instantiation;
}
/// Determine whether this type is an undeduced type, meaning that
@@ -2163,7 +2200,9 @@ public:
bool isUndeducedType() const;
/// Whether this type is a variably-modified type (C99 6.7.5).
- bool isVariablyModifiedType() const { return TypeBits.VariablyModified; }
+ bool isVariablyModifiedType() const {
+ return getDependence() & TypeDependence::VariablyModified;
+ }
/// Whether this type involves a variable-length array type
/// with a definite size.
@@ -2432,7 +2471,7 @@ public:
CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
void dump() const;
- void dump(llvm::raw_ostream &OS) const;
+ void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
};
/// This will check for a TypedefType by removing any existing sugar
@@ -2484,10 +2523,9 @@ private:
friend class ASTContext; // ASTContext creates these.
BuiltinType(Kind K)
- : Type(Builtin, QualType(), /*Dependent=*/(K == Dependent),
- /*InstantiationDependent=*/(K == Dependent),
- /*VariablyModified=*/false,
- /*Unexpanded parameter pack=*/false) {
+ : Type(Builtin, QualType(),
+ K == Dependent ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) {
BuiltinTypeBits.Kind = K;
}
@@ -2557,10 +2595,7 @@ class ComplexType : public Type, public llvm::FoldingSetNode {
QualType ElementType;
ComplexType(QualType Element, QualType CanonicalPtr)
- : Type(Complex, CanonicalPtr, Element->isDependentType(),
- Element->isInstantiationDependentType(),
- Element->isVariablyModifiedType(),
- Element->containsUnexpandedParameterPack()),
+ : Type(Complex, CanonicalPtr, Element->getDependence()),
ElementType(Element) {}
public:
@@ -2587,11 +2622,7 @@ class ParenType : public Type, public llvm::FoldingSetNode {
QualType Inner;
ParenType(QualType InnerType, QualType CanonType)
- : Type(Paren, CanonType, InnerType->isDependentType(),
- InnerType->isInstantiationDependentType(),
- InnerType->isVariablyModifiedType(),
- InnerType->containsUnexpandedParameterPack()),
- Inner(InnerType) {}
+ : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}
public:
QualType getInnerType() const { return Inner; }
@@ -2617,31 +2648,12 @@ class PointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
PointerType(QualType Pointee, QualType CanonicalPtr)
- : Type(Pointer, CanonicalPtr, Pointee->isDependentType(),
- Pointee->isInstantiationDependentType(),
- Pointee->isVariablyModifiedType(),
- Pointee->containsUnexpandedParameterPack()),
+ : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
PointeeType(Pointee) {}
public:
QualType getPointeeType() const { return PointeeType; }
- /// Returns true if address spaces of pointers overlap.
- /// OpenCL v2.0 defines conversion rules for pointers to different
- /// address spaces (OpenCLC v2.0 s6.5.5) and notion of overlapping
- /// address spaces.
- /// CL1.1 or CL1.2:
- /// address spaces overlap iff they are they same.
- /// CL2.0 adds:
- /// __generic overlaps with any address space except for __constant.
- bool isAddressSpaceOverlapping(const PointerType &other) const {
- Qualifiers thisQuals = PointeeType.getQualifiers();
- Qualifiers otherQuals = other.getPointeeType().getQualifiers();
- // Address spaces overlap if at least one of them is a superset of another
- return thisQuals.isAddressSpaceSupersetOf(otherQuals) ||
- otherQuals.isAddressSpaceSupersetOf(thisQuals);
- }
-
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -2668,10 +2680,7 @@ protected:
AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
QualType CanonicalPtr)
- : Type(TC, CanonicalPtr, OriginalTy->isDependentType(),
- OriginalTy->isInstantiationDependentType(),
- OriginalTy->isVariablyModifiedType(),
- OriginalTy->containsUnexpandedParameterPack()),
+ : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
public:
@@ -2720,10 +2729,7 @@ class BlockPointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
BlockPointerType(QualType Pointee, QualType CanonicalCls)
- : Type(BlockPointer, CanonicalCls, Pointee->isDependentType(),
- Pointee->isInstantiationDependentType(),
- Pointee->isVariablyModifiedType(),
- Pointee->containsUnexpandedParameterPack()),
+ : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
PointeeType(Pointee) {}
public:
@@ -2753,10 +2759,7 @@ class ReferenceType : public Type, public llvm::FoldingSetNode {
protected:
ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
bool SpelledAsLValue)
- : Type(tc, CanonicalRef, Referencee->isDependentType(),
- Referencee->isInstantiationDependentType(),
- Referencee->isVariablyModifiedType(),
- Referencee->containsUnexpandedParameterPack()),
+ : Type(tc, CanonicalRef, Referencee->getDependence()),
PointeeType(Referencee) {
ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
@@ -2841,13 +2844,9 @@ class MemberPointerType : public Type, public llvm::FoldingSetNode {
MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr)
: Type(MemberPointer, CanonicalPtr,
- Cls->isDependentType() || Pointee->isDependentType(),
- (Cls->isInstantiationDependentType() ||
- Pointee->isInstantiationDependentType()),
- Pointee->isVariablyModifiedType(),
- (Cls->containsUnexpandedParameterPack() ||
- Pointee->containsUnexpandedParameterPack())),
- PointeeType(Pointee), Class(Cls) {}
+ (Cls->getDependence() & ~TypeDependence::VariablyModified) |
+ Pointee->getDependence()),
+ PointeeType(Pointee), Class(Cls) {}
public:
QualType getPointeeType() const { return PointeeType; }
@@ -3270,10 +3269,6 @@ public:
QualType getElementType() const { return ElementType; }
unsigned getNumElements() const { return VectorTypeBits.NumElements; }
- static bool isVectorSizeTooLarge(unsigned NumElements) {
- return NumElements > VectorTypeBitfields::MaxNumElements;
- }
-
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -3417,6 +3412,136 @@ public:
}
};
+/// Represents a matrix type, as defined in the Matrix Types clang extension.
+/// It is created with __attribute__((matrix_type(rows, columns))), where
+/// "rows" specifies the number of rows and "columns" the number of columns.
+class MatrixType : public Type, public llvm::FoldingSetNode {
+protected:
+ friend class ASTContext;
+
+ /// The element type of the matrix.
+ QualType ElementType;
+
+ MatrixType(QualType ElementTy, QualType CanonElementTy);
+
+ MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
+ const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);
+
+public:
+  /// Returns the type of the elements stored in the matrix.
+ QualType getElementType() const { return ElementType; }
+
+  /// Valid element types are the following:
+ /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types
+ /// and _Bool
+ /// * the standard floating types float or double
+ /// * a half-precision floating point type, if one is supported on the target
+ static bool isValidElementType(QualType T) {
+ return T->isDependentType() ||
+ (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantMatrix ||
+ T->getTypeClass() == DependentSizedMatrix;
+ }
+};
+
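As a source-level illustration of what MatrixType models (assuming the matrix extension is enabled, e.g. with clang's -fenable-matrix flag; the names below are placeholders):

    // A matrix with 4 rows and 3 columns of floats. 'M' has a
    // ConstantMatrixType whose element type is 'float'.
    typedef float m4x3_t __attribute__((matrix_type(4, 3)));
    m4x3_t M;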
+/// Represents a concrete matrix type with a constant number of rows and columns.
+class ConstantMatrixType final : public MatrixType {
+protected:
+ friend class ASTContext;
+
+ /// The element type of the matrix.
+ QualType ElementType;
+
+ ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
+ unsigned NColumns, QualType CanonElementType);
+
+ ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows,
+ unsigned NColumns, QualType CanonElementType);
+
+public:
+ /// Returns the number of rows in the matrix.
+ unsigned getNumRows() const { return ConstantMatrixTypeBits.NumRows; }
+
+ /// Returns the number of columns in the matrix.
+ unsigned getNumColumns() const { return ConstantMatrixTypeBits.NumColumns; }
+
+ /// Returns the number of elements required to embed the matrix into a vector.
+ unsigned getNumElementsFlattened() const {
+ return ConstantMatrixTypeBits.NumRows * ConstantMatrixTypeBits.NumColumns;
+ }
+
+ /// Returns true if \p NumElements is a valid matrix dimension.
+ static bool isDimensionValid(uint64_t NumElements) {
+ return NumElements > 0 &&
+ NumElements <= ConstantMatrixTypeBitfields::MaxElementsPerDimension;
+ }
+
+ /// Returns the maximum number of elements per dimension.
+ static unsigned getMaxElementsPerDimension() {
+ return ConstantMatrixTypeBitfields::MaxElementsPerDimension;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getNumRows(), getNumColumns(),
+ getTypeClass());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
+ unsigned NumRows, unsigned NumColumns,
+ TypeClass TypeClass) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ ID.AddInteger(NumRows);
+ ID.AddInteger(NumColumns);
+ ID.AddInteger(TypeClass);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantMatrix;
+ }
+};
+
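Continuing the m4x3_t illustration above, a hypothetical check over the resulting node 'CMT' (assumed to be a const ConstantMatrixType *; the values are worked out from the accessors, not taken from the patch):

    assert(CMT->getNumRows() == 4 && CMT->getNumColumns() == 3);
    assert(CMT->getNumElementsFlattened() == 12);
    // Dimensions are valid from 1 up to and including (1 << 20) - 1.
    assert(ConstantMatrixType::isDimensionValid(1) &&
           !ConstantMatrixType::isDimensionValid(0) &&
           !ConstantMatrixType::isDimensionValid(1ull << 20));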
+/// Represents a matrix type where the element type and the number of rows and
+/// columns are dependent on a template.
+class DependentSizedMatrixType final : public MatrixType {
+ friend class ASTContext;
+
+ const ASTContext &Context;
+ Expr *RowExpr;
+ Expr *ColumnExpr;
+
+ SourceLocation loc;
+
+ DependentSizedMatrixType(const ASTContext &Context, QualType ElementType,
+ QualType CanonicalType, Expr *RowExpr,
+ Expr *ColumnExpr, SourceLocation loc);
+
+public:
+ QualType getElementType() const { return ElementType; }
+ Expr *getRowExpr() const { return RowExpr; }
+ Expr *getColumnExpr() const { return ColumnExpr; }
+ SourceLocation getAttributeLoc() const { return loc; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedMatrix;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
+};
+
/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
/// class of FunctionNoProtoType and FunctionProtoType.
class FunctionType : public Type {
@@ -3533,39 +3658,41 @@ public:
class ExtInfo {
friend class FunctionType;
- // Feel free to rearrange or add bits, but if you go over 12,
- // you'll need to adjust both the Bits field below and
- // Type::FunctionTypeBitfields.
+ // Feel free to rearrange or add bits, but if you go over 16, you'll need to
+ // adjust the Bits field below, and if you add bits, you'll need to adjust
+ // Type::FunctionTypeBitfields::ExtInfo as well.
- // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|
- // |0 .. 4| 5 | 6 | 7 |8 .. 10| 11 |
+ // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
+ // |0 .. 4| 5 | 6 | 7 |8 .. 10| 11 | 12 |
//
// regparm is either 0 (no regparm attribute) or the regparm value+1.
enum { CallConvMask = 0x1F };
enum { NoReturnMask = 0x20 };
enum { ProducesResultMask = 0x40 };
enum { NoCallerSavedRegsMask = 0x80 };
- enum { NoCfCheckMask = 0x800 };
enum {
- RegParmMask = ~(CallConvMask | NoReturnMask | ProducesResultMask |
- NoCallerSavedRegsMask | NoCfCheckMask),
+ RegParmMask = 0x700,
RegParmOffset = 8
- }; // Assumed to be the last field
+ };
+ enum { NoCfCheckMask = 0x800 };
+ enum { CmseNSCallMask = 0x1000 };
uint16_t Bits = CC_C;
ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
- public:
- // Constructor with no defaults. Use this when you know that you
- // have all the elements (when reading an AST file for example).
- ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
- bool producesResult, bool noCallerSavedRegs, bool NoCfCheck) {
- assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
- Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
- (producesResult ? ProducesResultMask : 0) |
- (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
- (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
- (NoCfCheck ? NoCfCheckMask : 0);
+ public:
+ // Constructor with no defaults. Use this when you know that you
+ // have all the elements (when reading an AST file for example).
+ ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
+ bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
+ bool cmseNSCall) {
+ assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
+ Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
+ (producesResult ? ProducesResultMask : 0) |
+ (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
+ (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
+ (NoCfCheck ? NoCfCheckMask : 0) |
+ (cmseNSCall ? CmseNSCallMask : 0);
}
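A worked example of the packing above (values chosen for illustration; the fragment assumes the clang namespace and <cassert>): with regparm(3), the C calling convention, and cmse_nonsecure_call, the regparm field stores value + 1 in bits 8..10 and bit 12 is set.

    FunctionType::ExtInfo EI(/*noReturn=*/false, /*hasRegParm=*/true,
                             /*regParm=*/3, CC_C, /*producesResult=*/false,
                             /*noCallerSavedRegs=*/false, /*NoCfCheck=*/false,
                             /*cmseNSCall=*/true);
    // Internally: Bits == CC_C | ((3 + 1) << 8) | CmseNSCallMask (0x1000).
    assert(EI.getRegParm() == 3 && EI.getCmseNSCall() && !EI.getNoCfCheck());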
// Constructor with all defaults. Use when for example creating a
@@ -3578,9 +3705,10 @@ public:
bool getNoReturn() const { return Bits & NoReturnMask; }
bool getProducesResult() const { return Bits & ProducesResultMask; }
+ bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
- bool getHasRegParm() const { return (Bits >> RegParmOffset) != 0; }
+ bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }
unsigned getRegParm() const {
unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
@@ -3615,6 +3743,13 @@ public:
return ExtInfo(Bits & ~ProducesResultMask);
}
+ ExtInfo withCmseNSCall(bool cmseNSCall) const {
+ if (cmseNSCall)
+ return ExtInfo(Bits | CmseNSCallMask);
+ else
+ return ExtInfo(Bits & ~CmseNSCallMask);
+ }
+
ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
if (noCallerSavedRegs)
return ExtInfo(Bits | NoCallerSavedRegsMask);
@@ -3661,14 +3796,9 @@ public:
};
protected:
- FunctionType(TypeClass tc, QualType res,
- QualType Canonical, bool Dependent,
- bool InstantiationDependent,
- bool VariablyModified, bool ContainsUnexpandedParameterPack,
- ExtInfo Info)
- : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
- ContainsUnexpandedParameterPack),
- ResultType(res) {
+ FunctionType(TypeClass tc, QualType res, QualType Canonical,
+ TypeDependence Dependence, ExtInfo Info)
+ : Type(tc, Canonical, Dependence), ResultType(res) {
FunctionTypeBits.ExtInfo = Info.Bits;
}
@@ -3687,6 +3817,7 @@ public:
/// type.
bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
+ bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
CallingConv getCallConv() const { return getExtInfo().getCC(); }
ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
@@ -3719,9 +3850,10 @@ class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
: FunctionType(FunctionNoProto, Result, Canonical,
- /*Dependent=*/false, /*InstantiationDependent=*/false,
- Result->isVariablyModifiedType(),
- /*ContainsUnexpandedParameterPack=*/false, Info) {}
+ Result->getDependence() &
+ ~(TypeDependence::DependentInstantiation |
+ TypeDependence::UnexpandedPack),
+ Info) {}
public:
// No additional state past what FunctionType provides.
@@ -4213,9 +4345,9 @@ class UnresolvedUsingType : public Type {
UnresolvedUsingTypenameDecl *Decl;
UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
- : Type(UnresolvedUsing, QualType(), true, true, false,
- /*ContainsUnexpandedParameterPack=*/false),
- Decl(const_cast<UnresolvedUsingTypenameDecl*>(D)) {}
+ : Type(UnresolvedUsing, QualType(),
+ TypeDependence::DependentInstantiation),
+ Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
public:
UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
@@ -4244,11 +4376,8 @@ protected:
friend class ASTContext; // ASTContext creates these.
TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can)
- : Type(tc, can, can->isDependentType(),
- can->isInstantiationDependentType(),
- can->isVariablyModifiedType(),
- /*ContainsUnexpandedParameterPack=*/false),
- Decl(const_cast<TypedefNameDecl*>(D)) {
+ : Type(tc, can, can->getDependence() & ~TypeDependence::UnexpandedPack),
+ Decl(const_cast<TypedefNameDecl *>(D)) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
}
@@ -4271,10 +4400,7 @@ class MacroQualifiedType : public Type {
MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
const IdentifierInfo *MacroII)
- : Type(MacroQualified, CanonTy, UnderlyingTy->isDependentType(),
- UnderlyingTy->isInstantiationDependentType(),
- UnderlyingTy->isVariablyModifiedType(),
- UnderlyingTy->containsUnexpandedParameterPack()),
+ : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
assert(isa<AttributedType>(UnderlyingTy) &&
"Expected a macro qualified type to only wrap attributed types.");
@@ -4346,11 +4472,7 @@ class TypeOfType : public Type {
QualType TOType;
TypeOfType(QualType T, QualType can)
- : Type(TypeOf, can, T->isDependentType(),
- T->isInstantiationDependentType(),
- T->isVariablyModifiedType(),
- T->containsUnexpandedParameterPack()),
- TOType(T) {
+ : Type(TypeOf, can, T->getDependence()), TOType(T) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
}
@@ -4559,10 +4681,7 @@ private:
AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
QualType equivalent)
- : Type(Attributed, canon, equivalent->isDependentType(),
- equivalent->isInstantiationDependentType(),
- equivalent->isVariablyModifiedType(),
- equivalent->containsUnexpandedParameterPack()),
+ : Type(Attributed, canon, equivalent->getDependence()),
ModifiedType(modified), EquivalentType(equivalent) {
AttributedTypeBits.AttrKind = attrKind;
}
@@ -4664,18 +4783,16 @@ class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
/// Build a non-canonical type.
TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon)
- : Type(TemplateTypeParm, Canon, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- /*VariablyModified=*/false,
- Canon->containsUnexpandedParameterPack()),
+ : Type(TemplateTypeParm, Canon,
+ TypeDependence::DependentInstantiation |
+ (Canon->getDependence() & TypeDependence::UnexpandedPack)),
TTPDecl(TTPDecl) {}
/// Build the canonical type.
TemplateTypeParmType(unsigned D, unsigned I, bool PP)
: Type(TemplateTypeParm, QualType(this, 0),
- /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- /*VariablyModified=*/false, PP) {
+ TypeDependence::DependentInstantiation |
+ (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)) {
CanTTPTInfo.Depth = D;
CanTTPTInfo.Index = I;
CanTTPTInfo.ParameterPack = PP;
@@ -4732,10 +4849,7 @@ class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
const TemplateTypeParmType *Replaced;
SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
- : Type(SubstTemplateTypeParm, Canon, Canon->isDependentType(),
- Canon->isInstantiationDependentType(),
- Canon->isVariablyModifiedType(),
- Canon->containsUnexpandedParameterPack()),
+ : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()),
Replaced(Param) {}
public:
@@ -4832,23 +4946,16 @@ public:
/// the latter case, it is also a dependent type.
class DeducedType : public Type {
protected:
- DeducedType(TypeClass TC, QualType DeducedAsType, bool IsDependent,
- bool IsInstantiationDependent, bool ContainsParameterPack)
+ DeducedType(TypeClass TC, QualType DeducedAsType,
+ TypeDependence ExtraDependence)
: Type(TC,
// FIXME: Retain the sugared deduced type?
DeducedAsType.isNull() ? QualType(this, 0)
: DeducedAsType.getCanonicalType(),
- IsDependent, IsInstantiationDependent,
- /*VariablyModified=*/false, ContainsParameterPack) {
- if (!DeducedAsType.isNull()) {
- if (DeducedAsType->isDependentType())
- setDependent();
- if (DeducedAsType->isInstantiationDependentType())
- setInstantiationDependent();
- if (DeducedAsType->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
- }
- }
+ ExtraDependence | (DeducedAsType.isNull()
+ ? TypeDependence::None
+ : DeducedAsType->getDependence() &
+ ~TypeDependence::VariablyModified)) {}
public:
bool isSugared() const { return !isCanonicalUnqualified(); }
@@ -4877,7 +4984,7 @@ class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode {
ConceptDecl *TypeConstraintConcept;
AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
- bool IsDeducedAsDependent, bool IsDeducedAsPack, ConceptDecl *CD,
+ TypeDependence ExtraDependence, ConceptDecl *CD,
ArrayRef<TemplateArgument> TypeConstraintArgs);
const TemplateArgument *getArgBuffer() const {
@@ -4948,9 +5055,10 @@ class DeducedTemplateSpecializationType : public DeducedType,
QualType DeducedAsType,
bool IsDeducedAsDependent)
: DeducedType(DeducedTemplateSpecialization, DeducedAsType,
- IsDeducedAsDependent || Template.isDependent(),
- IsDeducedAsDependent || Template.isInstantiationDependent(),
- Template.containsUnexpandedParameterPack()),
+ toTypeDependence(Template.getDependence()) |
+ (IsDeducedAsDependent
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None)),
Template(Template) {}
public:
@@ -5152,10 +5260,8 @@ class InjectedClassNameType : public Type {
QualType InjectedType;
InjectedClassNameType(CXXRecordDecl *D, QualType TST)
- : Type(InjectedClassName, QualType(), /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- /*VariablyModified=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
+ : Type(InjectedClassName, QualType(),
+ TypeDependence::DependentInstantiation),
Decl(D), InjectedType(TST) {
assert(isa<TemplateSpecializationType>(TST));
assert(!TST.hasQualifiers());
@@ -5234,11 +5340,8 @@ enum ElaboratedTypeKeyword {
class TypeWithKeyword : public Type {
protected:
TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
- QualType Canonical, bool Dependent,
- bool InstantiationDependent, bool VariablyModified,
- bool ContainsUnexpandedParameterPack)
- : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
- ContainsUnexpandedParameterPack) {
+ QualType Canonical, TypeDependence Dependence)
+ : Type(tc, Canonical, Dependence) {
TypeWithKeywordBits.Keyword = Keyword;
}
@@ -5302,10 +5405,7 @@ class ElaboratedType final
ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
: TypeWithKeyword(Keyword, Elaborated, CanonType,
- NamedType->isDependentType(),
- NamedType->isInstantiationDependentType(),
- NamedType->isVariablyModifiedType(),
- NamedType->containsUnexpandedParameterPack()),
+ NamedType->getDependence()),
NNS(NNS), NamedType(NamedType) {
ElaboratedTypeBits.HasOwnedTagDecl = false;
if (OwnedTagDecl) {
@@ -5376,10 +5476,9 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
const IdentifierInfo *Name, QualType CanonType)
- : TypeWithKeyword(Keyword, DependentName, CanonType, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- /*VariablyModified=*/false,
- NNS->containsUnexpandedParameterPack()),
+ : TypeWithKeyword(Keyword, DependentName, CanonType,
+ TypeDependence::DependentInstantiation |
+ toTypeDependence(NNS->getDependence())),
NNS(NNS), Name(Name) {}
public:
@@ -5516,10 +5615,9 @@ class PackExpansionType : public Type, public llvm::FoldingSetNode {
PackExpansionType(QualType Pattern, QualType Canon,
Optional<unsigned> NumExpansions)
- : Type(PackExpansion, Canon, /*Dependent=*/Pattern->isDependentType(),
- /*InstantiationDependent=*/true,
- /*VariablyModified=*/Pattern->isVariablyModifiedType(),
- /*ContainsUnexpandedParameterPack=*/false),
+ : Type(PackExpansion, Canon,
+ (Pattern->getDependence() | TypeDependence::Instantiation) &
+ ~TypeDependence::UnexpandedPack),
Pattern(Pattern) {
PackExpansionTypeBits.NumExpansions =
NumExpansions ? *NumExpansions + 1 : 0;
@@ -5658,6 +5756,7 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
static void Profile(llvm::FoldingSetNodeID &ID,
const ObjCTypeParamDecl *OTPDecl,
+ QualType CanonicalType,
ArrayRef<ObjCProtocolDecl *> protocols);
ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
@@ -5738,8 +5837,8 @@ protected:
bool isKindOf);
ObjCObjectType(enum Nonce_ObjCInterface)
- : Type(ObjCInterface, QualType(), false, false, false, false),
- BaseType(QualType(this_(), 0)) {
+ : Type(ObjCInterface, QualType(), TypeDependence::None),
+ BaseType(QualType(this_(), 0)) {
ObjCObjectTypeBits.NumProtocols = 0;
ObjCObjectTypeBits.NumTypeArgs = 0;
ObjCObjectTypeBits.IsKindOf = 0;
@@ -5954,11 +6053,7 @@ class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
ObjCObjectPointerType(QualType Canonical, QualType Pointee)
- : Type(ObjCObjectPointer, Canonical,
- Pointee->isDependentType(),
- Pointee->isInstantiationDependentType(),
- Pointee->isVariablyModifiedType(),
- Pointee->containsUnexpandedParameterPack()),
+ : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
PointeeType(Pointee) {}
public:
@@ -6128,11 +6223,7 @@ class AtomicType : public Type, public llvm::FoldingSetNode {
QualType ValueType;
AtomicType(QualType ValTy, QualType Canonical)
- : Type(Atomic, Canonical, ValTy->isDependentType(),
- ValTy->isInstantiationDependentType(),
- ValTy->isVariablyModifiedType(),
- ValTy->containsUnexpandedParameterPack()),
- ValueType(ValTy) {}
+ : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
public:
/// Gets the type contained by this atomic type, i.e.
@@ -6163,10 +6254,7 @@ class PipeType : public Type, public llvm::FoldingSetNode {
bool isRead;
PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
- : Type(Pipe, CanonicalPtr, elemType->isDependentType(),
- elemType->isInstantiationDependentType(),
- elemType->isVariablyModifiedType(),
- elemType->containsUnexpandedParameterPack()),
+ : Type(Pipe, CanonicalPtr, elemType->getDependence()),
ElementType(elemType), isRead(isRead) {}
public:
@@ -6192,6 +6280,64 @@ public:
bool isReadOnly() const { return isRead; }
};
+/// An integer type with an explicitly specified bit width.
+class ExtIntType final : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+ unsigned IsUnsigned : 1;
+ unsigned NumBits : 24;
+
+protected:
+ ExtIntType(bool isUnsigned, unsigned NumBits);
+
+public:
+ bool isUnsigned() const { return IsUnsigned; }
+ bool isSigned() const { return !IsUnsigned; }
+ unsigned getNumBits() const { return NumBits; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, isUnsigned(), getNumBits());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
+ unsigned NumBits) {
+ ID.AddBoolean(IsUnsigned);
+ ID.AddInteger(NumBits);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; }
+};
+
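Source-level form of the extension this node models (the _ExtInt keyword as of this clang version; C23 later standardized the feature as _BitInt):

    _ExtInt(13) a;           // ExtIntType: signed, getNumBits() == 13
    unsigned _ExtInt(40) b;  // ExtIntType: unsigned, getNumBits() == 40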
+class DependentExtIntType final : public Type, public llvm::FoldingSetNode {
+ friend class ASTContext;
+ const ASTContext &Context;
+ llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
+
+protected:
+ DependentExtIntType(const ASTContext &Context, bool IsUnsigned,
+ Expr *NumBits);
+
+public:
+ bool isUnsigned() const;
+ bool isSigned() const { return !isUnsigned(); }
+ Expr *getNumBitsExpr() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, isUnsigned(), getNumBitsExpr());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool IsUnsigned, Expr *NumBitsExpr);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentExtInt;
+ }
+};
+
/// A qualifier set is used to build a set of qualifiers.
class QualifierCollector : public Qualifiers {
public:
@@ -6611,6 +6757,14 @@ inline bool Type::isExtVectorType() const {
return isa<ExtVectorType>(CanonicalType);
}
+inline bool Type::isMatrixType() const {
+ return isa<MatrixType>(CanonicalType);
+}
+
+inline bool Type::isConstantMatrixType() const {
+ return isa<ConstantMatrixType>(CanonicalType);
+}
+
inline bool Type::isDependentAddressSpaceType() const {
return isa<DependentAddressSpaceType>(CanonicalType);
}
@@ -6711,6 +6865,10 @@ inline bool Type::isPipeType() const {
return isa<PipeType>(CanonicalType);
}
+inline bool Type::isExtIntType() const {
+ return isa<ExtIntType>(CanonicalType);
+}
+
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
inline bool Type::is##Id##Type() const { \
return isSpecificBuiltinType(BuiltinType::Id); \
@@ -6742,9 +6900,9 @@ inline bool Type::isTemplateTypeParmType() const {
}
inline bool Type::isSpecificBuiltinType(unsigned K) const {
- if (const BuiltinType *BT = getAs<BuiltinType>())
- if (BT->getKind() == (BuiltinType::Kind) K)
- return true;
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ return BT->getKind() == static_cast<BuiltinType::Kind>(K);
+ }
return false;
}
@@ -6763,9 +6921,7 @@ inline const BuiltinType *Type::getAsPlaceholderType() const {
inline bool Type::isSpecificPlaceholderType(unsigned K) const {
assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
- if (const auto *BT = dyn_cast<BuiltinType>(this))
- return (BT->getKind() == (BuiltinType::Kind) K);
- return false;
+ return isSpecificBuiltinType(K);
}
inline bool Type::isNonOverloadPlaceholderType() const {
@@ -6775,34 +6931,28 @@ inline bool Type::isNonOverloadPlaceholderType() const {
}
inline bool Type::isVoidType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Void;
- return false;
+ return isSpecificBuiltinType(BuiltinType::Void);
}
inline bool Type::isHalfType() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Half;
// FIXME: Should we allow complex __fp16? Probably not.
- return false;
+ return isSpecificBuiltinType(BuiltinType::Half);
}
inline bool Type::isFloat16Type() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Float16;
- return false;
+ return isSpecificBuiltinType(BuiltinType::Float16);
+}
+
+inline bool Type::isBFloat16Type() const {
+ return isSpecificBuiltinType(BuiltinType::BFloat16);
}
inline bool Type::isFloat128Type() const {
- if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Float128;
- return false;
+ return isSpecificBuiltinType(BuiltinType::Float128);
}
inline bool Type::isNullPtrType() const {
- if (const auto *BT = getAs<BuiltinType>())
- return BT->getKind() == BuiltinType::NullPtr;
- return false;
+ return isSpecificBuiltinType(BuiltinType::NullPtr);
}
bool IsEnumDeclComplete(EnumDecl *);
@@ -6818,7 +6968,7 @@ inline bool Type::isIntegerType() const {
return IsEnumDeclComplete(ET->getDecl()) &&
!IsEnumDeclScoped(ET->getDecl());
}
- return false;
+ return isExtIntType();
}
inline bool Type::isFixedPointType() const {
@@ -6875,7 +7025,8 @@ inline bool Type::isScalarType() const {
isa<BlockPointerType>(CanonicalType) ||
isa<MemberPointerType>(CanonicalType) ||
isa<ComplexType>(CanonicalType) ||
- isa<ObjCObjectPointerType>(CanonicalType);
+ isa<ObjCObjectPointerType>(CanonicalType) ||
+ isExtIntType();
}
inline bool Type::isIntegralOrEnumerationType() const {
@@ -6888,7 +7039,7 @@ inline bool Type::isIntegralOrEnumerationType() const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return IsEnumDeclComplete(ET->getDecl());
- return false;
+ return isExtIntType();
}
inline bool Type::isBooleanType() const {
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
index 3fc53d823c37..72cc8ef098e7 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
@@ -1735,6 +1735,7 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation loc) {
setAttrNameLoc(loc);
+ setAttrOperandParensRange(loc);
setAttrOperandParensRange(SourceRange(loc));
setAttrExprOperand(getTypePtr()->getAddrSpaceExpr());
}
@@ -1774,6 +1775,68 @@ class DependentSizedExtVectorTypeLoc :
DependentSizedExtVectorType> {
};
+struct MatrixTypeLocInfo {
+ SourceLocation AttrLoc;
+ SourceRange OperandParens;
+ Expr *RowOperand;
+ Expr *ColumnOperand;
+};
+
+class MatrixTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, MatrixTypeLoc,
+ MatrixType, MatrixTypeLocInfo> {
+public:
+ /// The location of the attribute name, i.e.
+ /// float __attribute__((matrix_type(4, 2)))
+ /// ^~~~~~~~~~~~~~~~~
+ SourceLocation getAttrNameLoc() const { return getLocalData()->AttrLoc; }
+ void setAttrNameLoc(SourceLocation loc) { getLocalData()->AttrLoc = loc; }
+
+ /// The attribute's row operand, if it has one.
+ /// float __attribute__((matrix_type(4, 2)))
+ /// ^
+ Expr *getAttrRowOperand() const { return getLocalData()->RowOperand; }
+ void setAttrRowOperand(Expr *e) { getLocalData()->RowOperand = e; }
+
+ /// The attribute's column operand, if it has one.
+ /// float __attribute__((matrix_type(4, 2)))
+ /// ^
+ Expr *getAttrColumnOperand() const { return getLocalData()->ColumnOperand; }
+ void setAttrColumnOperand(Expr *e) { getLocalData()->ColumnOperand = e; }
+
+ /// The location of the parentheses around the operand, if there is
+ /// an operand.
+ /// float __attribute__((matrix_type(4, 2)))
+ /// ^ ^
+ SourceRange getAttrOperandParensRange() const {
+ return getLocalData()->OperandParens;
+ }
+ void setAttrOperandParensRange(SourceRange range) {
+ getLocalData()->OperandParens = range;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ SourceRange range(getAttrNameLoc());
+ range.setEnd(getAttrOperandParensRange().getEnd());
+ return range;
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation loc) {
+ setAttrNameLoc(loc);
+ setAttrOperandParensRange(loc);
+ setAttrRowOperand(nullptr);
+ setAttrColumnOperand(nullptr);
+ }
+};
+
+class ConstantMatrixTypeLoc
+ : public InheritingConcreteTypeLoc<MatrixTypeLoc, ConstantMatrixTypeLoc,
+ ConstantMatrixType> {};
+
+class DependentSizedMatrixTypeLoc
+ : public InheritingConcreteTypeLoc<MatrixTypeLoc,
+ DependentSizedMatrixTypeLoc,
+ DependentSizedMatrixType> {};
+
// FIXME: location of the '_Complex' keyword.
class ComplexTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
ComplexTypeLoc,
@@ -2450,6 +2513,12 @@ inline T TypeLoc::getAsAdjusted() const {
}
return Cur.getAs<T>();
}
+class ExtIntTypeLoc final
+ : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, ExtIntTypeLoc,
+ ExtIntType> {};
+class DependentExtIntTypeLoc final
+ : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, DependentExtIntTypeLoc,
+ DependentExtIntType> {};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeLocVisitor.h b/contrib/llvm-project/clang/include/clang/AST/TypeLocVisitor.h
index ec780884e96c..168e9ac532ee 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeLocVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeLocVisitor.h
@@ -13,7 +13,6 @@
#define LLVM_CLANG_AST_TYPELOCVISITOR_H
#include "clang/AST/TypeLoc.h"
-#include "clang/AST/TypeVisitor.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
index 3cf56e5a5629..4540ea0e1952 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
@@ -224,6 +224,41 @@ let Class = DependentSizedExtVectorType in {
}]>;
}
+let Class = MatrixType in {
+ def : Property<"elementType", QualType> {
+ let Read = [{ node->getElementType() }];
+ }
+}
+
+let Class = ConstantMatrixType in {
+ def : Property<"numRows", UInt32> {
+ let Read = [{ node->getNumRows() }];
+ }
+ def : Property<"numColumns", UInt32> {
+ let Read = [{ node->getNumColumns() }];
+ }
+
+ def : Creator<[{
+ return ctx.getConstantMatrixType(elementType, numRows, numColumns);
+ }]>;
+}
+
+let Class = DependentSizedMatrixType in {
+ def : Property<"rows", ExprRef> {
+ let Read = [{ node->getRowExpr() }];
+ }
+ def : Property<"columns", ExprRef> {
+ let Read = [{ node->getColumnExpr() }];
+ }
+ def : Property<"attributeLoc", SourceLocation> {
+ let Read = [{ node->getAttributeLoc() }];
+ }
+
+ def : Creator<[{
+ return ctx.getDependentSizedMatrixType(elementType, rows, columns, attributeLoc);
+ }]>;
+}
+
let Class = FunctionType in {
def : Property<"returnType", QualType> {
let Read = [{ node->getReturnType() }];
@@ -249,13 +284,17 @@ let Class = FunctionType in {
def : Property<"noCfCheck", Bool> {
let Read = [{ node->getExtInfo().getNoCfCheck() }];
}
+ def : Property<"cmseNSCall", Bool> {
+ let Read = [{ node->getExtInfo().getCmseNSCall() }];
+ }
}
let Class = FunctionNoProtoType in {
def : Creator<[{
auto extInfo = FunctionType::ExtInfo(noReturn, hasRegParm, regParm,
callingConvention, producesResult,
- noCallerSavedRegs, noCfCheck);
+ noCallerSavedRegs, noCfCheck,
+ cmseNSCall);
return ctx.getFunctionNoProtoType(returnType, extInfo);
}]>;
}
@@ -288,7 +327,8 @@ let Class = FunctionProtoType in {
def : Creator<[{
auto extInfo = FunctionType::ExtInfo(noReturn, hasRegParm, regParm,
callingConvention, producesResult,
- noCallerSavedRegs, noCfCheck);
+ noCallerSavedRegs, noCfCheck,
+ cmseNSCall);
FunctionProtoType::ExtProtoInfo epi;
epi.ExtInfo = extInfo;
epi.Variadic = variadic;
@@ -453,7 +493,9 @@ let Class = TagType in {
let Class = EnumType in {
def : Creator<[{
QualType result = ctx.getEnumType(cast<EnumDecl>(declaration));
- const_cast<Type*>(result.getTypePtr())->setDependent(dependent);
+ if (dependent)
+ const_cast<Type *>(result.getTypePtr())
+ ->addDependence(TypeDependence::DependentInstantiation);
return result;
}]>;
}
@@ -462,7 +504,9 @@ let Class = RecordType in {
def : Creator<[{
auto record = cast<RecordDecl>(declaration);
QualType result = ctx.getRecordType(record);
- const_cast<Type*>(result.getTypePtr())->setDependent(dependent);
+ if (dependent)
+ const_cast<Type *>(result.getTypePtr())
+ ->addDependence(TypeDependence::DependentInstantiation);
return result;
}]>;
}
@@ -605,7 +649,9 @@ let Class = TemplateSpecializationType in {
templateArguments,
*underlyingType);
}
- const_cast<Type*>(result.getTypePtr())->setDependent(dependent);
+ if (dependent)
+ const_cast<Type *>(result.getTypePtr())
+ ->addDependence(TypeDependence::DependentInstantiation);
return result;
}]>;
}
@@ -822,3 +868,28 @@ let Class = PipeType in {
return ctx.getPipeType(elementType, isReadOnly);
}]>;
}
+
+let Class = ExtIntType in {
+ def : Property<"isUnsigned", Bool> {
+ let Read = [{ node->isUnsigned() }];
+ }
+ def : Property <"numBits", UInt32> {
+ let Read = [{ node->getNumBits() }];
+ }
+
+ def : Creator<[{
+ return ctx.getExtIntType(isUnsigned, numBits);
+ }]>;
+}
+
+let Class = DependentExtIntType in {
+ def : Property<"isUnsigned", Bool> {
+ let Read = [{ node->isUnsigned() }];
+ }
+ def : Property <"numBitsExpr", ExprRef> {
+ let Read = [{ node->getNumBitsExpr() }];
+ }
+ def : Creator<[{
+ return ctx.getDependentExtIntType(isUnsigned, numBitsExpr);
+ }]>;
+}
diff --git a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
index 43c84292c091..241dd13f903e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
@@ -238,6 +238,11 @@ public:
typedef llvm::DenseMap<BaseSubobject, AddressPointLocation>
AddressPointsMapTy;
+ // Mapping between the VTable index and address point index. This is useful
+ // when you don't care about the base subobjects and only want the address
+ // point for a given vtable index.
+ typedef llvm::SmallVector<unsigned, 4> AddressPointsIndexMapTy;
+
private:
// Stores the component indices of the first component of each virtual table in
// the virtual table group. To save a little memory in the common case where
@@ -253,6 +258,9 @@ private:
/// Address points for all vtables.
AddressPointsMapTy AddressPoints;
+ /// Address points for all vtable indices.
+ AddressPointsIndexMapTy AddressPointIndices;
+
public:
VTableLayout(ArrayRef<size_t> VTableIndices,
ArrayRef<VTableComponent> VTableComponents,
@@ -277,6 +285,10 @@ public:
return AddressPoints;
}
+ const AddressPointsIndexMapTy &getAddressPointIndices() const {
+ return AddressPointIndices;
+ }
+
size_t getNumVTables() const {
if (VTableIndices.empty())
return 1;
@@ -342,6 +354,9 @@ public:
}
bool IsMicrosoftABI;
+
+ /// Determine whether this function should be assigned a vtable slot.
+ static bool hasVtableSlot(const CXXMethodDecl *MD);
};
class ItaniumVTableContext : public VTableContextBase {
@@ -371,7 +386,17 @@ private:
void computeVTableRelatedInformation(const CXXRecordDecl *RD) override;
public:
- ItaniumVTableContext(ASTContext &Context);
+ enum VTableComponentLayout {
+ /// Components in the vtable are pointers to other structs/functions.
+ Pointer,
+
+ /// Components in the vtable are relative offsets between the vtable and the
+ /// other structs/functions.
+ Relative,
+ };
+
+ ItaniumVTableContext(ASTContext &Context,
+ VTableComponentLayout ComponentLayout = Pointer);
~ItaniumVTableContext() override;
const VTableLayout &getVTableLayout(const CXXRecordDecl *RD) {
@@ -402,6 +427,16 @@ public:
static bool classof(const VTableContextBase *VT) {
return !VT->isMicrosoft();
}
+
+ VTableComponentLayout getVTableComponentLayout() const {
+ return ComponentLayout;
+ }
+
+ bool isPointerLayout() const { return ComponentLayout == Pointer; }
+ bool isRelativeLayout() const { return ComponentLayout == Relative; }
+
+private:
+ VTableComponentLayout ComponentLayout;
};
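A sketch of how the new layout parameter might be consumed (the caller and the variable 'Ctx' are assumed, not shown in this patch):

    // 'Ctx' is an ASTContext assumed to be in scope.
    ItaniumVTableContext VTables(Ctx, ItaniumVTableContext::Relative);
    if (VTables.isRelativeLayout()) {
      // Emit vtable components as offsets relative to the vtable rather than
      // as raw pointers.
    }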
/// Holds information about the inheritance path to a virtual base or function
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
index f8160d552c0d..0af98438ab52 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -182,10 +182,9 @@ public:
///
/// @{
template <typename T> void match(const T &Node, ASTContext &Context) {
- match(clang::ast_type_traits::DynTypedNode::create(Node), Context);
+ match(clang::DynTypedNode::create(Node), Context);
}
- void match(const clang::ast_type_traits::DynTypedNode &Node,
- ASTContext &Context);
+ void match(const clang::DynTypedNode &Node, ASTContext &Context);
/// @}
/// Finds all matches in the given AST.
@@ -242,9 +241,8 @@ SmallVector<BoundNodes, 1>
match(MatcherT Matcher, const NodeT &Node, ASTContext &Context);
template <typename MatcherT>
-SmallVector<BoundNodes, 1>
-match(MatcherT Matcher, const ast_type_traits::DynTypedNode &Node,
- ASTContext &Context);
+SmallVector<BoundNodes, 1> match(MatcherT Matcher, const DynTypedNode &Node,
+ ASTContext &Context);
/// @}
/// Returns the results of matching \p Matcher on the translation unit of
@@ -283,9 +281,8 @@ public:
}
template <typename MatcherT>
-SmallVector<BoundNodes, 1>
-match(MatcherT Matcher, const ast_type_traits::DynTypedNode &Node,
- ASTContext &Context) {
+SmallVector<BoundNodes, 1> match(MatcherT Matcher, const DynTypedNode &Node,
+ ASTContext &Context) {
internal::CollectMatchesCallback Callback;
MatchFinder Finder;
Finder.addMatcher(Matcher, &Callback);
@@ -296,7 +293,7 @@ match(MatcherT Matcher, const ast_type_traits::DynTypedNode &Node,
template <typename MatcherT, typename NodeT>
SmallVector<BoundNodes, 1>
match(MatcherT Matcher, const NodeT &Node, ASTContext &Context) {
- return match(Matcher, ast_type_traits::DynTypedNode::create(Node), Context);
+ return match(Matcher, DynTypedNode::create(Node), Context);
}
template <typename MatcherT>
@@ -310,8 +307,8 @@ match(MatcherT Matcher, ASTContext &Context) {
}
inline SmallVector<BoundNodes, 1>
-matchDynamic(internal::DynTypedMatcher Matcher,
- const ast_type_traits::DynTypedNode &Node, ASTContext &Context) {
+matchDynamic(internal::DynTypedMatcher Matcher, const DynTypedNode &Node,
+ ASTContext &Context) {
internal::CollectMatchesCallback Callback;
MatchFinder Finder;
Finder.addDynamicMatcher(Matcher, &Callback);
@@ -323,8 +320,7 @@ template <typename NodeT>
SmallVector<BoundNodes, 1> matchDynamic(internal::DynTypedMatcher Matcher,
const NodeT &Node,
ASTContext &Context) {
- return matchDynamic(Matcher, ast_type_traits::DynTypedNode::create(Node),
- Context);
+ return matchDynamic(Matcher, DynTypedNode::create(Node), Context);
}
inline SmallVector<BoundNodes, 1>
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
index 9a5888b7572b..643419743a11 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -47,6 +47,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
@@ -59,6 +60,7 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -71,6 +73,7 @@
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
@@ -114,7 +117,7 @@ public:
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
- /// type of \c clang::ast_type_traits::DynTypedNode
+ /// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
@@ -280,9 +283,10 @@ AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
-AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
- AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
- std::string, RegExp) {
+AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
+ TypeLoc),
+ RegExp) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
if (ExpansionLoc.isInvalid()) {
@@ -295,8 +299,27 @@ AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
}
auto Filename = FileEntry->getName();
- llvm::Regex RE(RegExp);
- return RE.match(Filename);
+ return RegExp->match(Filename);
+}
+
+/// Matches statements that are (transitively) expanded from the named macro.
+/// Does not match if only part of the statement is expanded from that macro or
+/// if different parts of the statement are expanded from different
+/// appearances of the macro.
+///
+/// FIXME: Change to be a polymorphic matcher that works on any syntactic
+/// node. There's nothing `Stmt`-specific about it.
+AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
+  // Verifies that the statement's beginning and ending are both expanded from
+ // the same instance of the given macro.
+ auto& Context = Finder->getASTContext();
+ llvm::Optional<SourceLocation> B =
+ internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
+ if (!B) return false;
+ llvm::Optional<SourceLocation> E =
+ internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
+ if (!E) return false;
+ return *B == *E;
}
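A minimal usage sketch of isExpandedFromMacro; the macro name and binding id are invented:

    using namespace clang::ast_matchers;

    // Statements whose begin and end locations both come from one
    // expansion of a macro named CHECK.
    auto FromCheckMacro = stmt(isExpandedFromMacro("CHECK")).bind("checked");
    // Typically registered via MatchFinder::addMatcher(FromCheckMacro, &Callback).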
/// Matches declarations.
@@ -526,52 +549,72 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl,
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
-/// Matches public C++ declarations.
+/// Matches public C++ declarations and C++ base specifiers that specify public
+/// inheritance.
///
-/// Given
+/// Examples:
/// \code
/// class C {
-/// public: int a;
+/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
-/// fieldDecl(isPublic())
-/// matches 'int a;'
-AST_MATCHER(Decl, isPublic) {
- return Node.getAccess() == AS_public;
+///
+/// \code
+/// class Base {};
+/// class Derived1 : public Base {}; // matches 'Base'
+/// struct Derived2 : Base {}; // matches 'Base'
+/// \endcode
+AST_POLYMORPHIC_MATCHER(isPublic,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
+ CXXBaseSpecifier)) {
+ return getAccessSpecifier(Node) == AS_public;
}
-/// Matches protected C++ declarations.
+/// Matches protected C++ declarations and C++ base specifiers that specify
+/// protected inheritance.
///
-/// Given
+/// Examples:
/// \code
/// class C {
/// public: int a;
-/// protected: int b;
+/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
-/// fieldDecl(isProtected())
-/// matches 'int b;'
-AST_MATCHER(Decl, isProtected) {
- return Node.getAccess() == AS_protected;
+///
+/// \code
+/// class Base {};
+/// class Derived : protected Base {}; // matches 'Base'
+/// \endcode
+AST_POLYMORPHIC_MATCHER(isProtected,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
+ CXXBaseSpecifier)) {
+ return getAccessSpecifier(Node) == AS_protected;
}
-/// Matches private C++ declarations.
+/// Matches private C++ declarations and C++ base specifiers that specify private
+/// inheritance.
///
-/// Given
+/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
-/// private: int c;
+/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
-/// fieldDecl(isPrivate())
-/// matches 'int c;'
-AST_MATCHER(Decl, isPrivate) {
- return Node.getAccess() == AS_private;
+///
+/// \code
+/// struct Base {};
+/// struct Derived1 : private Base {}; // matches 'Base'
+/// class Derived2 : Base {}; // matches 'Base'
+/// \endcode
+AST_POLYMORPHIC_MATCHER(isPrivate,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
+ CXXBaseSpecifier)) {
+ return getAccessSpecifier(Node) == AS_private;
}
/// Matches non-static data members that are bit-fields.
@@ -701,13 +744,13 @@ AST_POLYMORPHIC_MATCHER_P(
/// \endcode
/// The matcher
/// \code
-/// traverse(ast_type_traits::TK_IgnoreImplicitCastsAndParentheses,
+/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
-internal::Matcher<T> traverse(ast_type_traits::TraversalKind TK,
+internal::Matcher<T> traverse(TraversalKind TK,
const internal::Matcher<T> &InnerMatcher) {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
@@ -717,8 +760,7 @@ internal::Matcher<T> traverse(ast_type_traits::TraversalKind TK,
template <typename T>
internal::BindableMatcher<T>
-traverse(ast_type_traits::TraversalKind TK,
- const internal::BindableMatcher<T> &InnerMatcher) {
+traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
return internal::BindableMatcher<T>(
internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
@@ -728,7 +770,7 @@ traverse(ast_type_traits::TraversalKind TK,
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
-traverse(ast_type_traits::TraversalKind TK,
+traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
@@ -738,9 +780,8 @@ template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
-traverse(ast_type_traits::TraversalKind TK,
- const internal::ArgumentAdaptingMatcherFuncAdaptor<
- ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
+traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
+ ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
@@ -750,10 +791,8 @@ template <template <typename T, typename P1> class MatcherT, typename P1,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
-traverse(
- ast_type_traits::TraversalKind TK,
- const internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>
- &InnerMatcher) {
+traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
+ MatcherT, P1, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
TK, InnerMatcher);
@@ -763,10 +802,8 @@ template <template <typename T, typename P1, typename P2> class MatcherT,
typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
-traverse(
- ast_type_traits::TraversalKind TK,
- const internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>
- &InnerMatcher) {
+traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
+ MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
TK, InnerMatcher);
@@ -1194,6 +1231,20 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
+/// Matches tag declarations.
+///
+/// Example matches X, Z, U, S, E
+/// \code
+/// class X;
+/// template<class T> class Z {};
+/// struct S {};
+/// union U {};
+/// enum E {
+/// A, B, C
+/// };
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
+
/// Matches method declarations.
///
/// Example matches y
@@ -1823,6 +1874,22 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
+/// Matches noexcept expressions.
+///
+/// Given
+/// \code
+/// bool a() noexcept;
+/// bool b() noexcept(true);
+/// bool c() noexcept(false);
+/// bool d() noexcept(noexcept(a()));
+/// bool e = noexcept(b()) || noexcept(c());
+/// \endcode
+/// cxxNoexceptExpr()
+/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
+/// It does not match the noexcept specifier in the declarations of a, b, c or d.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
+ cxxNoexceptExpr;
+
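A short sketch of cxxNoexceptExpr in matcher code; the binding id is invented:

    using namespace clang::ast_matchers;

    // noexcept(...) operator expressions whose operand is a call expression.
    auto NoexceptOfCall = cxxNoexceptExpr(has(callExpr().bind("operand")));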
/// Matches array subscript expressions.
///
/// Given
@@ -2225,6 +2292,10 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
+/// Matches fixed point literals
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
+ fixedPointLiteral;
+
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
@@ -2541,13 +2612,11 @@ extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
-/// Matches any node regardless of the submatchers.
-///
-/// However, \c optionally will generate a result binding for each matching
-/// submatcher.
+/// Matches any node regardless of the submatcher.
///
-/// Useful when additional information which may or may not present about a
-/// main matching node is desired.
+/// However, \c optionally will retain any bindings generated by the submatcher.
+/// Useful when additional information that may or may not be present about a
+/// main matching node is desired.
///
/// For example, in:
/// \code
@@ -2567,9 +2636,7 @@ extern const internal::VariadicOperatorMatcherFunc<
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
-extern const internal::VariadicOperatorMatcherFunc<
- 1, std::numeric_limits<unsigned>::max()>
- optionally;
+extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
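With the arity change, optionally wraps exactly one submatcher; a sketch mirroring the documentation example (names invented):

    using namespace clang::ast_matchers;

    // Match every class named Foo; if it also has a field named bar,
    // bind that field, otherwise still report the class.
    auto M = cxxRecordDecl(
        hasName("Foo"),
        optionally(has(fieldDecl(hasName("bar")).bind("bar"))));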
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
@@ -2616,7 +2683,7 @@ AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
-inline internal::Matcher<Stmt> alignOfExpr(
+inline internal::BindableMatcher<Stmt> alignOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
@@ -2625,7 +2692,7 @@ inline internal::Matcher<Stmt> alignOfExpr(
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
-inline internal::Matcher<Stmt> sizeOfExpr(
+inline internal::BindableMatcher<Stmt> sizeOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(ofKind(UETT_SizeOf), InnerMatcher)));
@@ -2646,8 +2713,9 @@ inline internal::Matcher<Stmt> sizeOfExpr(
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
-inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
- return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
+inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
+ return internal::Matcher<NamedDecl>(
+ new internal::HasNameMatcher({std::string(Name)}));
}
/// Matches NamedDecl nodes that have any of the specified names.
@@ -2680,11 +2748,9 @@ extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
-AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
- assert(!RegExp.empty());
+AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
std::string FullNameString = "::" + Node.getQualifiedNameAsString();
- llvm::Regex RE(RegExp);
- return RE.match(FullNameString);
+ return RegExp->match(FullNameString);
}
/// Matches overloaded operator names.
@@ -2707,14 +2773,30 @@ AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
- internal::HasOverloadedOperatorNameMatcher, StringRef,
+ internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
return internal::PolymorphicMatcherWithParam1<
- internal::HasOverloadedOperatorNameMatcher, StringRef,
- AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
+ internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
+ {std::string(Name)});
}
+/// Matches overloaded operator names.
+///
+/// Matches overloaded operator names specified in strings without the
+/// "operator" prefix: e.g. "<<".
+///
+/// hasAnyOverloadedOperatorName("+", "-")
+/// Is equivalent to
+/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
+extern const internal::VariadicFunction<
+ internal::PolymorphicMatcherWithParam1<
+ internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
+ StringRef, internal::hasAnyOverloadedOperatorNameFunc>
+ hasAnyOverloadedOperatorName;
+
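A usage sketch of the variadic form (operator spellings chosen arbitrarily):

    using namespace clang::ast_matchers;

    // Declarations of overloaded operator+ or operator-, and calls to them.
    auto ArithmeticDecls = functionDecl(hasAnyOverloadedOperatorName("+", "-"));
    auto ArithmeticCalls =
        cxxOperatorCallExpr(hasAnyOverloadedOperatorName("+", "-"));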
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
@@ -2776,6 +2858,46 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
+/// Matches C++ classes that have a direct or indirect base matching \p
+/// BaseSpecMatcher.
+///
+/// Example:
+/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
+/// \code
+///   class Foo {};
+///   class Bar : Foo {};
+///   class Baz : Bar {};
+///   class SpecialBase {};
+///   class Proxy : SpecialBase {};  // matches Proxy
+///   class IndirectlyDerived : Proxy {};  // matches IndirectlyDerived
+/// \endcode
+///
+// FIXME: Refactor this and isDerivedFrom to reuse implementation.
+AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
+ BaseSpecMatcher) {
+ return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
+}
+
+/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
+///
+/// Example:
+/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
+/// \code
+///   class Foo {};
+///   class Bar : Foo {};
+///   class Baz : Bar {};
+///   class SpecialBase {};
+/// class Proxy : SpecialBase {}; // matches Proxy
+/// class IndirectlyDerived : Proxy {}; // doesn't match
+/// \endcode
+AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
+ BaseSpecMatcher) {
+ return Node.hasDefinition() &&
+ llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) {
+ return BaseSpecMatcher.matches(Base, Finder, Builder);
+ });
+}
+
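A sketch contrasting the two base matchers; the base class name is the one used in the documentation above:

    using namespace clang::ast_matchers;

    // Transitive lookup: any class with SpecialBase somewhere in its hierarchy.
    auto AnyDepth = cxxRecordDecl(
        hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))));
    // Direct lookup only: SpecialBase must appear in the base-specifier list.
    auto DirectOnly = cxxRecordDecl(
        hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))));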
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
@@ -3269,11 +3391,9 @@ extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
-AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
- assert(!RegExp.empty());
+AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
std::string SelectorString = Node.getSelector().getAsString();
- llvm::Regex RE(RegExp);
- return RE.match(SelectorString);
+ return RegExp->match(SelectorString);
}
/// Matches when the selector is the empty selector
@@ -3406,9 +3526,19 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
/// class Y { friend class X; };
/// \endcode
///
-/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
+/// Example matches class Derived
+/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
+/// \code
+/// class Base {};
+/// class Derived : Base {};
+/// \endcode
+///
+/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
+/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
- hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
+ hasType,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
+ CXXBaseSpecifier),
internal::Matcher<Decl>, InnerMatcher, 1) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
@@ -4194,6 +4324,34 @@ AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
return Matched;
}
+/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
+/// list. The parameter list could be that of either a block, function, or
+/// objc-method.
+///
+/// Given
+///
+/// \code
+/// void f(int a, int b, int c) {
+/// }
+/// \endcode
+///
+/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
+///
+/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
+AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
+ const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
+
+ if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
+ return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
+ if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
+ return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
+ if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
+ return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
+
+ return false;
+}
+
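A sketch of isAtPosition combined with a type restriction (the extra pointerType() constraint is only for illustration):

    using namespace clang::ast_matchers;

    // The second parameter (index 1) of a function, block or ObjC method,
    // further narrowed to pointer-typed parameters.
    auto SecondPtrParam =
        parmVarDecl(isAtPosition(1), hasType(pointerType())).bind("p");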
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
@@ -4532,7 +4690,7 @@ AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
// they're ever reused.
internal::NotEqualsBoundNodePredicate Predicate;
Predicate.ID = ID;
- Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
+ Predicate.Node = DynTypedNode::create(Node);
return Builder->removeBindings(Predicate);
}
@@ -4716,6 +4874,19 @@ AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
return Name == Node.getOpcodeStr(Node.getOpcode());
}
+/// Matches operator expressions (binary or unary) that have any of the
+/// specified names.
+///
+/// hasAnyOperatorName("+", "-")
+/// Is equivalent to
+/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
+extern const internal::VariadicFunction<
+ internal::PolymorphicMatcherWithParam1<
+ internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
+ StringRef, internal::hasAnyOperatorNameFunc>
+ hasAnyOperatorName;
+
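A sketch of the variadic operator-name matcher on both supported node kinds (spellings chosen arbitrarily):

    using namespace clang::ast_matchers;

    auto AddOrSub = binaryOperator(hasAnyOperatorName("+", "-"));
    auto Negation = unaryOperator(hasAnyOperatorName("-"));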
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
@@ -4728,7 +4899,7 @@ AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
-/// void x() { S s1, s2; s1 = s2; })
+/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
@@ -4736,6 +4907,26 @@ AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
return Node.isAssignmentOp();
}
+/// Matches comparison operators.
+///
+/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
+/// \code
+/// if (a == b)
+/// a += b;
+/// \endcode
+///
+/// Example 2: matches s1 < s2
+/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
+/// \code
+/// struct S { bool operator<(const S& other); };
+/// void x(S s1, S s2) { bool b1 = s1 < s2; }
+/// \endcode
+AST_POLYMORPHIC_MATCHER(isComparisonOperator,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
+ CXXOperatorCallExpr)) {
+ return Node.isComparisonOp();
+}
+
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
@@ -4773,6 +4964,23 @@ inline internal::Matcher<BinaryOperator> hasEitherOperand(
return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
+/// Matches if both matchers match with opposite sides of the binary operator.
+///
+/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
+///                                              integerLiteral(equals(2))))
+/// \code
+/// 1 + 2 // Match
+/// 2 + 1 // Match
+/// 1 + 1 // No match
+/// 2 + 2 // No match
+/// \endcode
+inline internal::Matcher<BinaryOperator>
+hasOperands(const internal::Matcher<Expr> &Matcher1,
+ const internal::Matcher<Expr> &Matcher2) {
+ return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
+ allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
+}
+
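A sketch combining hasOperands with isComparisonOperator; the binding id is invented:

    using namespace clang::ast_matchers;

    // Comparisons against the literal 0 on either side, e.g. `x == 0` or `0 == x`.
    auto ZeroCompare = binaryOperator(
        isComparisonOperator(),
        hasOperands(integerLiteral(equals(0)), expr().bind("other")));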
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
@@ -4821,7 +5029,7 @@ AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter
-/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
+/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
@@ -4845,42 +5053,58 @@ AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
-/// Matches RecordDecl object that are spelled with "struct."
+/// Matches TagDecl object that are spelled with "struct."
///
-/// Example matches S, but not C or U.
+/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
+/// enum E {};
/// \endcode
-AST_MATCHER(RecordDecl, isStruct) {
+AST_MATCHER(TagDecl, isStruct) {
return Node.isStruct();
}
-/// Matches RecordDecl object that are spelled with "union."
+/// Matches TagDecl object that are spelled with "union."
///
-/// Example matches U, but not C or S.
+/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
+/// enum E {};
/// \endcode
-AST_MATCHER(RecordDecl, isUnion) {
+AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion();
}
-/// Matches RecordDecl object that are spelled with "class."
+/// Matches TagDecl object that are spelled with "class."
///
-/// Example matches C, but not S or U.
+/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
+/// enum E {};
/// \endcode
-AST_MATCHER(RecordDecl, isClass) {
+AST_MATCHER(TagDecl, isClass) {
return Node.isClass();
}
+/// Matches TagDecl object that are spelled with "enum."
+///
+/// Example matches E, but not C, S or U.
+/// \code
+/// struct S {};
+/// class C {};
+/// union U {};
+/// enum E {};
+/// \endcode
+AST_MATCHER(TagDecl, isEnum) {
+ return Node.isEnum();
+}
+
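A sketch of tagDecl together with the tag-kind narrowers (binding id invented):

    using namespace clang::ast_matchers;

    auto AnyTag     = tagDecl().bind("tag");
    auto OnlyEnums  = tagDecl(isEnum());
    auto OnlyUnions = tagDecl(isUnion());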
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
@@ -5020,17 +5244,28 @@ AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
return Matched;
}
-/// Matches if the given method declaration is virtual.
+/// Matches declarations of virtual methods and C++ base specifers that specify
+/// virtual inheritance.
///
-/// Given
+/// Example:
/// \code
/// class A {
/// public:
-/// virtual void x();
+/// virtual void x(); // matches x
/// };
/// \endcode
-/// matches A::x
-AST_MATCHER(CXXMethodDecl, isVirtual) {
+///
+/// Example:
+/// \code
+/// class Base {};
+/// class DirectlyDerived : virtual Base {}; // matches Base
+/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
+/// \endcode
+///
+/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
+AST_POLYMORPHIC_MATCHER(isVirtual,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
+ CXXBaseSpecifier)) {
return Node.isVirtual();
}
@@ -5982,6 +6217,21 @@ extern const AstTypeMatcher<EnumType> enumType;
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
+/// Matches C++17 deduced template specialization types, e.g. deduced class
+/// template types.
+///
+/// Given
+/// \code
+/// template <typename T>
+/// class C { public: C(T); };
+///
+/// C c(123);
+/// \endcode
+/// \c deducedTemplateSpecializationType() matches the type in the declaration
+/// of the variable \c c.
+extern const AstTypeMatcher<DeducedTemplateSpecializationType>
+ deducedTemplateSpecializationType;
+
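A sketch of the new type matcher in a variable-declaration context (binding id invented):

    using namespace clang::ast_matchers;

    // Variables whose type is deduced via class template argument deduction,
    // e.g. `C c(123);` from the documentation example.
    auto CtadVar = varDecl(hasType(deducedTemplateSpecializationType())).bind("v");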
/// Matches types nodes representing unary type transformations.
///
/// Given:
@@ -6652,8 +6902,7 @@ AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
InnerMatcher) {
const auto &Parents = Finder->getASTContext().getParents(Node);
- llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
- Parents.end());
+ llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
while(!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
@@ -6735,6 +6984,35 @@ AST_MATCHER(CXXNewExpr, isArray) {
return Node.isArray();
}
+/// Matches placement new expression arguments.
+///
+/// Given:
+/// \code
+/// MyClass *p1 = new (Storage, 16) MyClass();
+/// \endcode
+/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
+/// matches the expression 'new (Storage, 16) MyClass()'.
+AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
+ internal::Matcher<Expr>, InnerMatcher) {
+ return Node.getNumPlacementArgs() > Index &&
+ InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
+}
+
+/// Matches any placement new expression arguments.
+///
+/// Given:
+/// \code
+/// MyClass *p1 = new (Storage) MyClass();
+/// \endcode
+/// cxxNewExpr(hasAnyPlacementArg(anything()))
+///   matches the expression 'new (Storage) MyClass()'.
+AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
+ InnerMatcher) {
+ return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
+ return InnerMatcher.matches(*Arg, Finder, Builder);
+ });
+}
+
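A sketch of both placement-argument matchers, using the documentation's example values:

    using namespace clang::ast_matchers;

    // Placement new whose second placement argument is the literal 16.
    auto AlignedPlacement =
        cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))));
    // Any placement new expression at all.
    auto AnyPlacement = cxxNewExpr(hasAnyPlacementArg(anything()));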
/// Matches array new expressions with a given array size.
///
/// Given:
@@ -6865,19 +7143,6 @@ AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
return Node.isStandaloneDirective();
}
-/// Matches the Stmt AST node that is marked as being the structured-block
-/// of an OpenMP executable directive.
-///
-/// Given
-///
-/// \code
-/// #pragma omp parallel
-/// {}
-/// \endcode
-///
-/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
-AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
-
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
@@ -6925,10 +7190,12 @@ AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
-/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
+/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
+/// ``default(firstprivate)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
@@ -6940,11 +7207,12 @@ extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
- return Node.getDefaultKind() == OMPC_DEFAULT_none;
+ return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
@@ -6955,11 +7223,30 @@ AST_MATCHER(OMPDefaultClause, isNoneKind) {
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
- return Node.getDefaultKind() == OMPC_DEFAULT_shared;
+ return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
+}
+
+/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
+/// specified.
+///
+/// Given
+///
+/// \code
+/// #pragma omp parallel
+/// #pragma omp parallel default(none)
+/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(firstprivate)
+/// \endcode
+///
+/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
+/// ``default(firstprivate)``.
+AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
+ return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
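A sketch tying the default-clause matchers to a directive:

    using namespace clang::ast_matchers;

    // OpenMP directives carrying default(firstprivate).
    auto FirstPrivateDefault = ompExecutableDirective(
        hasAnyClause(ompDefaultClause(isFirstPrivateKind())));
    // Directives whose default clause is anything other than default(none).
    auto NotNone = ompExecutableDirective(
        hasAnyClause(ompDefaultClause(unless(isNoneKind()))));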
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
@@ -6981,7 +7268,7 @@ AST_MATCHER(OMPDefaultClause, isSharedKind) {
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
- return isAllowedClauseForDirective(
+ return llvm::omp::isAllowedClauseForDirective(
Node.getDirectiveKind(), CKind,
Finder->getASTContext().getLangOpts().OpenMP);
}
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index c4b449fa9434..3992850c992d 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -40,7 +40,6 @@
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
@@ -61,11 +60,13 @@
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Regex.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
+#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
@@ -130,6 +131,9 @@ inline QualType getUnderlyingType(const FriendDecl &Node) {
return TSI->getType();
return QualType();
}
+inline QualType getUnderlyingType(const CXXBaseSpecifier &Node) {
+ return Node.getType();
+}
/// Unifies obtaining the FunctionProtoType pointer from both
/// FunctionProtoType and FunctionDecl nodes.
@@ -142,14 +146,23 @@ inline const FunctionProtoType *getFunctionProtoType(const FunctionDecl &Node) {
return Node.getType()->getAs<FunctionProtoType>();
}
+/// Unifies obtaining the access specifier from Decl and CXXBaseSpecifier nodes.
+inline clang::AccessSpecifier getAccessSpecifier(const Decl &Node) {
+ return Node.getAccess();
+}
+
+inline clang::AccessSpecifier getAccessSpecifier(const CXXBaseSpecifier &Node) {
+ return Node.getAccessSpecifier();
+}
+
/// Internal version of BoundNodes. Holds all the bound nodes.
class BoundNodesMap {
public:
/// Adds \c Node to the map with key \c ID.
///
/// The node's base type should be in NodeBaseType or it will be inaccessible.
- void addNode(StringRef ID, const ast_type_traits::DynTypedNode& DynNode) {
- NodeMap[ID] = DynNode;
+ void addNode(StringRef ID, const DynTypedNode &DynNode) {
+ NodeMap[std::string(ID)] = DynNode;
}
/// Returns the AST node bound to \c ID.
@@ -165,10 +178,10 @@ public:
return It->second.get<T>();
}
- ast_type_traits::DynTypedNode getNode(StringRef ID) const {
+ DynTypedNode getNode(StringRef ID) const {
IDToNodeMap::const_iterator It = NodeMap.find(ID);
if (It == NodeMap.end()) {
- return ast_type_traits::DynTypedNode();
+ return DynTypedNode();
}
return It->second;
}
@@ -183,8 +196,7 @@ public:
/// Note that we're using std::map here, as for memoization:
/// - we need a comparison operator
/// - we need an assignment operator
- using IDToNodeMap =
- std::map<std::string, ast_type_traits::DynTypedNode, std::less<>>;
+ using IDToNodeMap = std::map<std::string, DynTypedNode, std::less<>>;
const IDToNodeMap &getMap() const {
return NodeMap;
@@ -223,7 +235,7 @@ public:
};
/// Add a binding from an id to a node.
- void setBinding(StringRef Id, const ast_type_traits::DynTypedNode &DynNode) {
+ void setBinding(StringRef Id, const DynTypedNode &DynNode) {
if (Bindings.empty())
Bindings.emplace_back();
for (BoundNodesMap &Binding : Bindings)
@@ -280,11 +292,10 @@ public:
///
/// May bind \p DynNode to an ID via \p Builder, or recurse into
/// the AST via \p Finder.
- virtual bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ virtual bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const = 0;
- virtual llvm::Optional<ast_type_traits::TraversalKind> TraversalKind() const {
+ virtual llvm::Optional<clang::TraversalKind> TraversalKind() const {
return llvm::None;
}
};
@@ -307,8 +318,7 @@ public:
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const = 0;
- bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
return matches(DynNode.getUnchecked<T>(), Finder, Builder);
}
@@ -347,7 +357,7 @@ public:
/// Takes ownership of the provided implementation pointer.
template <typename T>
DynTypedMatcher(MatcherInterface<T> *Implementation)
- : SupportedKind(ast_type_traits::ASTNodeKind::getFromNodeKind<T>()),
+ : SupportedKind(ASTNodeKind::getFromNodeKind<T>()),
RestrictKind(SupportedKind), Implementation(Implementation) {}
/// Construct from a variadic function.
@@ -375,40 +385,44 @@ public:
};
static DynTypedMatcher
- constructVariadic(VariadicOperator Op,
- ast_type_traits::ASTNodeKind SupportedKind,
+ constructVariadic(VariadicOperator Op, ASTNodeKind SupportedKind,
std::vector<DynTypedMatcher> InnerMatchers);
static DynTypedMatcher
constructRestrictedWrapper(const DynTypedMatcher &InnerMatcher,
- ast_type_traits::ASTNodeKind RestrictKind);
+ ASTNodeKind RestrictKind);
/// Get a "true" matcher for \p NodeKind.
///
/// It only checks that the node is of the right kind.
- static DynTypedMatcher trueMatcher(ast_type_traits::ASTNodeKind NodeKind);
+ static DynTypedMatcher trueMatcher(ASTNodeKind NodeKind);
void setAllowBind(bool AB) { AllowBind = AB; }
/// Check whether this matcher could ever match a node of kind \p Kind.
/// \return \c false if this matcher will never match such a node. Otherwise,
/// return \c true.
- bool canMatchNodesOfKind(ast_type_traits::ASTNodeKind Kind) const;
+ bool canMatchNodesOfKind(ASTNodeKind Kind) const;
/// Return a matcher that points to the same implementation, but
/// restricts the node types for \p Kind.
- DynTypedMatcher dynCastTo(const ast_type_traits::ASTNodeKind Kind) const;
+ DynTypedMatcher dynCastTo(const ASTNodeKind Kind) const;
+
+  /// Return a matcher that points to the same implementation, but sets the
+ /// traversal kind.
+ ///
+ /// If the traversal kind is already set, then \c TK overrides it.
+ DynTypedMatcher withTraversalKind(TraversalKind TK);
/// Returns true if the matcher matches the given \c DynNode.
- bool matches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder) const;
+ bool matches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const;
/// Same as matches(), but skips the kind check.
///
/// It is faster, but the caller must ensure the node is valid for the
/// kind of this matcher.
- bool matchesNoKindCheck(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool matchesNoKindCheck(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const;
/// Bind the specified \p ID to the matcher.
@@ -423,7 +437,7 @@ public:
/// include both in the ID to make it unique.
///
/// \c MatcherIDType supports operator< and provides strict weak ordering.
- using MatcherIDType = std::pair<ast_type_traits::ASTNodeKind, uint64_t>;
+ using MatcherIDType = std::pair<ASTNodeKind, uint64_t>;
MatcherIDType getID() const {
/// FIXME: Document the requirements this imposes on matcher
/// implementations (no new() implementation_ during a Matches()).
@@ -435,9 +449,7 @@ public:
///
/// \c matches() will always return false unless the node passed is of this
/// or a derived type.
- ast_type_traits::ASTNodeKind getSupportedKind() const {
- return SupportedKind;
- }
+ ASTNodeKind getSupportedKind() const { return SupportedKind; }
/// Returns \c true if the passed \c DynTypedMatcher can be converted
/// to a \c Matcher<T>.
@@ -445,9 +457,9 @@ public:
/// This method verifies that the underlying matcher in \c Other can process
/// nodes of types T.
template <typename T> bool canConvertTo() const {
- return canConvertTo(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ return canConvertTo(ASTNodeKind::getFromNodeKind<T>());
}
- bool canConvertTo(ast_type_traits::ASTNodeKind To) const;
+ bool canConvertTo(ASTNodeKind To) const;
/// Construct a \c Matcher<T> interface around the dynamic matcher.
///
@@ -465,37 +477,31 @@ public:
/// If it is not compatible, then this matcher will never match anything.
template <typename T> Matcher<T> unconditionalConvertTo() const;
+ /// Returns the \c TraversalKind respected by calls to `match()`, if any.
+ ///
+ /// Most matchers will not have a traversal kind set, instead relying on the
+ /// surrounding context. For those, \c llvm::None is returned.
+ llvm::Optional<clang::TraversalKind> getTraversalKind() const {
+ return Implementation->TraversalKind();
+ }
+
private:
- DynTypedMatcher(ast_type_traits::ASTNodeKind SupportedKind,
- ast_type_traits::ASTNodeKind RestrictKind,
- IntrusiveRefCntPtr<DynMatcherInterface> Implementation)
- : SupportedKind(SupportedKind), RestrictKind(RestrictKind),
- Implementation(std::move(Implementation)) {}
+ DynTypedMatcher(ASTNodeKind SupportedKind, ASTNodeKind RestrictKind,
+ IntrusiveRefCntPtr<DynMatcherInterface> Implementation)
+ : SupportedKind(SupportedKind), RestrictKind(RestrictKind),
+ Implementation(std::move(Implementation)) {}
bool AllowBind = false;
- ast_type_traits::ASTNodeKind SupportedKind;
+ ASTNodeKind SupportedKind;
/// A potentially stricter node kind.
///
/// It allows performing implicit and dynamic casts of matchers without
/// needing to change \c Implementation.
- ast_type_traits::ASTNodeKind RestrictKind;
+ ASTNodeKind RestrictKind;
IntrusiveRefCntPtr<DynMatcherInterface> Implementation;
};
-/// Wrapper base class for a wrapping matcher.
-///
-/// This is just a container for a DynTypedMatcher that can be used as a base
-/// class for another matcher.
-template <typename T>
-class WrapperMatcherInterface : public MatcherInterface<T> {
-protected:
- explicit WrapperMatcherInterface(DynTypedMatcher &&InnerMatcher)
- : InnerMatcher(std::move(InnerMatcher)) {}
-
- const DynTypedMatcher InnerMatcher;
-};
-
/// Wrapper of a MatcherInterface<T> *that allows copying.
///
/// A Matcher<Base> can be used anywhere a Matcher<Derived> is
@@ -516,11 +522,11 @@ public:
/// Requires \c T to be derived from \c From.
template <typename From>
Matcher(const Matcher<From> &Other,
- typename std::enable_if<std::is_base_of<From, T>::value &&
- !std::is_same<From, T>::value>::type * = nullptr)
+ std::enable_if_t<std::is_base_of<From, T>::value &&
+ !std::is_same<From, T>::value> * = nullptr)
: Implementation(restrictMatcher(Other.Implementation)) {
assert(Implementation.getSupportedKind().isSame(
- ast_type_traits::ASTNodeKind::getFromNodeKind<T>()));
+ ASTNodeKind::getFromNodeKind<T>()));
}
/// Implicitly converts \c Matcher<Type> to \c Matcher<QualType>.
@@ -528,9 +534,8 @@ public:
/// The resulting matcher is not strict, i.e. ignores qualifiers.
template <typename TypeT>
Matcher(const Matcher<TypeT> &Other,
- typename std::enable_if<
- std::is_same<T, QualType>::value &&
- std::is_same<TypeT, Type>::value>::type* = nullptr)
+ std::enable_if_t<std::is_same<T, QualType>::value &&
+ std::is_same<TypeT, Type>::value> * = nullptr)
: Implementation(new TypeToQualType<TypeT>(Other)) {}
/// Convert \c this into a \c Matcher<T> by applying dyn_cast<> to the
@@ -546,8 +551,7 @@ public:
bool matches(const T &Node,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
- return Implementation.matches(ast_type_traits::DynTypedNode::create(Node),
- Finder, Builder);
+ return Implementation.matches(DynTypedNode::create(Node), Finder, Builder);
}
/// Returns an ID that uniquely identifies the matcher.
@@ -568,17 +572,19 @@ public:
/// does only matches in the absence of qualifiers, or not, i.e. simply
/// ignores any qualifiers.
template <typename TypeT>
- class TypeToQualType : public WrapperMatcherInterface<QualType> {
+ class TypeToQualType : public MatcherInterface<QualType> {
+ const DynTypedMatcher InnerMatcher;
+
public:
TypeToQualType(const Matcher<TypeT> &InnerMatcher)
- : TypeToQualType::WrapperMatcherInterface(InnerMatcher) {}
+ : InnerMatcher(InnerMatcher) {}
bool matches(const QualType &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
if (Node.isNull())
return false;
- return this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(*Node), Finder, Builder);
+ return this->InnerMatcher.matches(DynTypedNode::create(*Node), Finder,
+ Builder);
}
};
@@ -590,13 +596,13 @@ private:
friend class DynTypedMatcher;
static DynTypedMatcher restrictMatcher(const DynTypedMatcher &Other) {
- return Other.dynCastTo(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ return Other.dynCastTo(ASTNodeKind::getFromNodeKind<T>());
}
explicit Matcher(const DynTypedMatcher &Implementation)
: Implementation(restrictMatcher(Implementation)) {
- assert(this->Implementation.getSupportedKind()
- .isSame(ast_type_traits::ASTNodeKind::getFromNodeKind<T>()));
+ assert(this->Implementation.getSupportedKind().isSame(
+ ASTNodeKind::getFromNodeKind<T>()));
}
DynTypedMatcher Implementation;
@@ -616,9 +622,8 @@ inline Matcher<T> makeMatcher(MatcherInterface<T> *Implementation) {
template <>
inline Matcher<QualType> DynTypedMatcher::convertTo<QualType>() const {
assert(canConvertTo<QualType>());
- const ast_type_traits::ASTNodeKind SourceKind = getSupportedKind();
- if (SourceKind.isSame(
- ast_type_traits::ASTNodeKind::getFromNodeKind<Type>())) {
+ const ASTNodeKind SourceKind = getSupportedKind();
+ if (SourceKind.isSame(ASTNodeKind::getFromNodeKind<Type>())) {
// We support implicit conversion from Matcher<Type> to Matcher<QualType>
return unconditionalConvertTo<Type>();
}
@@ -681,12 +686,12 @@ class HasOverloadedOperatorNameMatcher : public SingleNodeMatcherInterface<T> {
static_assert(std::is_same<T, CXXOperatorCallExpr>::value ||
std::is_base_of<FunctionDecl, T>::value,
"unsupported class for matcher");
- static_assert(std::is_same<ArgT, StringRef>::value,
- "argument type must be StringRef");
+ static_assert(std::is_same<ArgT, std::vector<std::string>>::value,
+ "argument type must be std::vector<std::string>");
public:
- explicit HasOverloadedOperatorNameMatcher(const StringRef Name)
- : SingleNodeMatcherInterface<T>(), Name(Name) {}
+ explicit HasOverloadedOperatorNameMatcher(std::vector<std::string> Names)
+ : SingleNodeMatcherInterface<T>(), Names(std::move(Names)) {}
bool matchesNode(const T &Node) const override {
return matchesSpecialized(Node);
@@ -698,17 +703,18 @@ private:
/// so this function returns true if the call is to an operator of the given
/// name.
bool matchesSpecialized(const CXXOperatorCallExpr &Node) const {
- return getOperatorSpelling(Node.getOperator()) == Name;
+ return llvm::is_contained(Names, getOperatorSpelling(Node.getOperator()));
}
/// Returns true only if CXXMethodDecl represents an overloaded
/// operator and has the given operator name.
bool matchesSpecialized(const FunctionDecl &Node) const {
return Node.isOverloadedOperator() &&
- getOperatorSpelling(Node.getOverloadedOperator()) == Name;
+ llvm::is_contained(
+ Names, getOperatorSpelling(Node.getOverloadedOperator()));
}
- std::string Name;
+ const std::vector<std::string> Names;
};
/// Matches named declarations with a specific name.
@@ -760,13 +766,15 @@ Matcher<ObjCMessageExpr> hasAnySelectorFunc(
/// Type argument DeclMatcherT is required by PolymorphicMatcherWithParam1 but
/// not actually used.
template <typename T, typename DeclMatcherT>
-class HasDeclarationMatcher : public WrapperMatcherInterface<T> {
+class HasDeclarationMatcher : public MatcherInterface<T> {
static_assert(std::is_same<DeclMatcherT, Matcher<Decl>>::value,
"instantiated with wrong types");
+ const DynTypedMatcher InnerMatcher;
+
public:
explicit HasDeclarationMatcher(const Matcher<Decl> &InnerMatcher)
- : HasDeclarationMatcher::WrapperMatcherInterface(InnerMatcher) {}
+ : InnerMatcher(InnerMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
@@ -920,9 +928,8 @@ private:
/// is \c NULL.
bool matchesDecl(const Decl *Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
- return Node != nullptr &&
- this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(*Node), Finder, Builder);
+ return Node != nullptr && this->InnerMatcher.matches(
+ DynTypedNode::create(*Node), Finder, Builder);
}
};
@@ -1004,8 +1011,8 @@ public:
template <typename T>
bool matchesChildOf(const T &Node, const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder,
- ast_type_traits::TraversalKind Traverse, BindKind Bind) {
+ BoundNodesTreeBuilder *Builder, TraversalKind Traverse,
+ BindKind Bind) {
static_assert(std::is_base_of<Decl, T>::value ||
std::is_base_of<Stmt, T>::value ||
std::is_base_of<NestedNameSpecifier, T>::value ||
@@ -1013,8 +1020,8 @@ public:
std::is_base_of<TypeLoc, T>::value ||
std::is_base_of<QualType, T>::value,
"unsupported type for recursive matching");
- return matchesChildOf(ast_type_traits::DynTypedNode::create(Node),
- getASTContext(), Matcher, Builder, Traverse, Bind);
+ return matchesChildOf(DynTypedNode::create(Node), getASTContext(), Matcher,
+ Builder, Traverse, Bind);
}
template <typename T>
@@ -1029,8 +1036,8 @@ public:
std::is_base_of<TypeLoc, T>::value ||
std::is_base_of<QualType, T>::value,
"unsupported type for recursive matching");
- return matchesDescendantOf(ast_type_traits::DynTypedNode::create(Node),
- getASTContext(), Matcher, Builder, Bind);
+ return matchesDescendantOf(DynTypedNode::create(Node), getASTContext(),
+ Matcher, Builder, Bind);
}
// FIXME: Implement support for BindKind.
@@ -1044,27 +1051,24 @@ public:
std::is_base_of<Stmt, T>::value ||
std::is_base_of<TypeLoc, T>::value,
"type not allowed for recursive matching");
- return matchesAncestorOf(ast_type_traits::DynTypedNode::create(Node),
- getASTContext(), Matcher, Builder, MatchMode);
+ return matchesAncestorOf(DynTypedNode::create(Node), getASTContext(),
+ Matcher, Builder, MatchMode);
}
virtual ASTContext &getASTContext() const = 0;
protected:
- virtual bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
+ virtual bool matchesChildOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
- ast_type_traits::TraversalKind Traverse,
- BindKind Bind) = 0;
+ TraversalKind Traverse, BindKind Bind) = 0;
- virtual bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ virtual bool matchesDescendantOf(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
BindKind Bind) = 0;
- virtual bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ virtual bool matchesAncestorOf(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) = 0;
@@ -1181,43 +1185,40 @@ struct ArgumentAdaptingMatcherFunc {
}
};
-template <typename T>
-class TraversalMatcher : public WrapperMatcherInterface<T> {
- ast_type_traits::TraversalKind Traversal;
+template <typename T> class TraversalMatcher : public MatcherInterface<T> {
+ const DynTypedMatcher InnerMatcher;
+ clang::TraversalKind Traversal;
public:
- explicit TraversalMatcher(ast_type_traits::TraversalKind TK,
- const Matcher<T> &ChildMatcher)
- : TraversalMatcher::WrapperMatcherInterface(ChildMatcher), Traversal(TK) {
- }
+ explicit TraversalMatcher(clang::TraversalKind TK,
+ const Matcher<T> &InnerMatcher)
+ : InnerMatcher(InnerMatcher), Traversal(TK) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
- return this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(Node), Finder, Builder);
+ return this->InnerMatcher.matches(DynTypedNode::create(Node), Finder,
+ Builder);
}
- llvm::Optional<ast_type_traits::TraversalKind>
- TraversalKind() const override {
+ llvm::Optional<clang::TraversalKind> TraversalKind() const override {
return Traversal;
}
};
template <typename MatcherType> class TraversalWrapper {
public:
- TraversalWrapper(ast_type_traits::TraversalKind TK,
- const MatcherType &InnerMatcher)
+ TraversalWrapper(TraversalKind TK, const MatcherType &InnerMatcher)
: TK(TK), InnerMatcher(InnerMatcher) {}
template <typename T> operator Matcher<T>() const {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
- ast_type_traits::ASTNodeKind::getFromNodeKind<T>())
+ ASTNodeKind::getFromNodeKind<T>())
.template unconditionalConvertTo<T>();
}
private:
- ast_type_traits::TraversalKind TK;
+ TraversalKind TK;
MatcherType InnerMatcher;
};
@@ -1300,8 +1301,7 @@ public:
template <typename T>
operator Matcher<T>() const {
- return DynTypedMatcher::trueMatcher(
- ast_type_traits::ASTNodeKind::getFromNodeKind<T>())
+ return DynTypedMatcher::trueMatcher(ASTNodeKind::getFromNodeKind<T>())
.template unconditionalConvertTo<T>();
}
};
@@ -1341,15 +1341,17 @@ public:
///
/// ChildT must be an AST base type.
template <typename T, typename ChildT>
-class HasMatcher : public WrapperMatcherInterface<T> {
+class HasMatcher : public MatcherInterface<T> {
+ const DynTypedMatcher InnerMatcher;
+
public:
- explicit HasMatcher(const Matcher<ChildT> &ChildMatcher)
- : HasMatcher::WrapperMatcherInterface(ChildMatcher) {}
+ explicit HasMatcher(const Matcher<ChildT> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
return Finder->matchesChildOf(Node, this->InnerMatcher, Builder,
- ast_type_traits::TraversalKind::TK_AsIs,
+ TraversalKind::TK_AsIs,
ASTMatchFinder::BK_First);
}
};
@@ -1360,19 +1362,21 @@ public:
/// As opposed to the HasMatcher, the ForEachMatcher will produce a match
/// for each child that matches.
template <typename T, typename ChildT>
-class ForEachMatcher : public WrapperMatcherInterface<T> {
+class ForEachMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<ChildT>::value,
"for each only accepts base type matcher");
- public:
- explicit ForEachMatcher(const Matcher<ChildT> &ChildMatcher)
- : ForEachMatcher::WrapperMatcherInterface(ChildMatcher) {}
+ const DynTypedMatcher InnerMatcher;
- bool matches(const T& Node, ASTMatchFinder* Finder,
- BoundNodesTreeBuilder* Builder) const override {
+public:
+ explicit ForEachMatcher(const Matcher<ChildT> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ bool matches(const T &Node, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
return Finder->matchesChildOf(
Node, this->InnerMatcher, Builder,
- ast_type_traits::TraversalKind::TK_IgnoreImplicitCastsAndParentheses,
+ TraversalKind::TK_IgnoreImplicitCastsAndParentheses,
ASTMatchFinder::BK_All);
}
};
@@ -1393,7 +1397,7 @@ public:
template <typename T> operator Matcher<T>() const {
return DynTypedMatcher::constructVariadic(
- Op, ast_type_traits::ASTNodeKind::getFromNodeKind<T>(),
+ Op, ASTNodeKind::getFromNodeKind<T>(),
getMatchers<T>(std::index_sequence_for<Ps...>()))
.template unconditionalConvertTo<T>();
}
@@ -1449,10 +1453,9 @@ BindableMatcher<T> makeAllOfComposite(
std::vector<DynTypedMatcher> DynMatchers(PI(InnerMatchers.begin()),
PI(InnerMatchers.end()));
return BindableMatcher<T>(
- DynTypedMatcher::constructVariadic(
- DynTypedMatcher::VO_AllOf,
- ast_type_traits::ASTNodeKind::getFromNodeKind<T>(),
- std::move(DynMatchers))
+ DynTypedMatcher::constructVariadic(DynTypedMatcher::VO_AllOf,
+ ASTNodeKind::getFromNodeKind<T>(),
+ std::move(DynMatchers))
.template unconditionalConvertTo<T>());
}
@@ -1474,17 +1477,19 @@ BindableMatcher<T> makeDynCastAllOfComposite(
///
/// DescendantT must be an AST base type.
template <typename T, typename DescendantT>
-class HasDescendantMatcher : public WrapperMatcherInterface<T> {
+class HasDescendantMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<DescendantT>::value,
"has descendant only accepts base type matcher");
+ const DynTypedMatcher DescendantMatcher;
+
public:
explicit HasDescendantMatcher(const Matcher<DescendantT> &DescendantMatcher)
- : HasDescendantMatcher::WrapperMatcherInterface(DescendantMatcher) {}
+ : DescendantMatcher(DescendantMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
- return Finder->matchesDescendantOf(Node, this->InnerMatcher, Builder,
+ return Finder->matchesDescendantOf(Node, this->DescendantMatcher, Builder,
ASTMatchFinder::BK_First);
}
};
@@ -1494,17 +1499,19 @@ public:
///
/// \c ParentT must be an AST base type.
template <typename T, typename ParentT>
-class HasParentMatcher : public WrapperMatcherInterface<T> {
+class HasParentMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<ParentT>::value,
"has parent only accepts base type matcher");
+ const DynTypedMatcher ParentMatcher;
+
public:
explicit HasParentMatcher(const Matcher<ParentT> &ParentMatcher)
- : HasParentMatcher::WrapperMatcherInterface(ParentMatcher) {}
+ : ParentMatcher(ParentMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
- return Finder->matchesAncestorOf(Node, this->InnerMatcher, Builder,
+ return Finder->matchesAncestorOf(Node, this->ParentMatcher, Builder,
ASTMatchFinder::AMM_ParentOnly);
}
};
@@ -1514,17 +1521,19 @@ public:
///
/// \c AncestorT must be an AST base type.
template <typename T, typename AncestorT>
-class HasAncestorMatcher : public WrapperMatcherInterface<T> {
+class HasAncestorMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<AncestorT>::value,
"has ancestor only accepts base type matcher");
+ const DynTypedMatcher AncestorMatcher;
+
public:
explicit HasAncestorMatcher(const Matcher<AncestorT> &AncestorMatcher)
- : HasAncestorMatcher::WrapperMatcherInterface(AncestorMatcher) {}
+ : AncestorMatcher(AncestorMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
- return Finder->matchesAncestorOf(Node, this->InnerMatcher, Builder,
+ return Finder->matchesAncestorOf(Node, this->AncestorMatcher, Builder,
ASTMatchFinder::AMM_All);
}
};
@@ -1536,18 +1545,20 @@ public:
/// As opposed to HasDescendantMatcher, ForEachDescendantMatcher will match
/// for each descendant node that matches instead of only for the first.
template <typename T, typename DescendantT>
-class ForEachDescendantMatcher : public WrapperMatcherInterface<T> {
+class ForEachDescendantMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<DescendantT>::value,
"for each descendant only accepts base type matcher");
+ const DynTypedMatcher DescendantMatcher;
+
public:
explicit ForEachDescendantMatcher(
const Matcher<DescendantT> &DescendantMatcher)
- : ForEachDescendantMatcher::WrapperMatcherInterface(DescendantMatcher) {}
+ : DescendantMatcher(DescendantMatcher) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
- return Finder->matchesDescendantOf(Node, this->InnerMatcher, Builder,
+ return Finder->matchesDescendantOf(Node, this->DescendantMatcher, Builder,
ASTMatchFinder::BK_All);
}
};
@@ -1640,10 +1651,12 @@ public:
/// Matches nodes of type \c TLoc for which the inner
/// \c Matcher<T> matches.
template <typename TLoc, typename T>
-class LocMatcher : public WrapperMatcherInterface<TLoc> {
+class LocMatcher : public MatcherInterface<TLoc> {
+ const DynTypedMatcher InnerMatcher;
+
public:
explicit LocMatcher(const Matcher<T> &InnerMatcher)
- : LocMatcher::WrapperMatcherInterface(InnerMatcher) {}
+ : InnerMatcher(InnerMatcher) {}
bool matches(const TLoc &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
@@ -1653,9 +1666,8 @@ public:
}
private:
- static ast_type_traits::DynTypedNode
- extract(const NestedNameSpecifierLoc &Loc) {
- return ast_type_traits::DynTypedNode::create(*Loc.getNestedNameSpecifier());
+ static DynTypedNode extract(const NestedNameSpecifierLoc &Loc) {
+ return DynTypedNode::create(*Loc.getNestedNameSpecifier());
}
};
@@ -1663,38 +1675,40 @@ private:
/// \c QualType.
///
/// Used to implement the \c loc() matcher.
-class TypeLocTypeMatcher : public WrapperMatcherInterface<TypeLoc> {
+class TypeLocTypeMatcher : public MatcherInterface<TypeLoc> {
+ const DynTypedMatcher InnerMatcher;
+
public:
explicit TypeLocTypeMatcher(const Matcher<QualType> &InnerMatcher)
- : TypeLocTypeMatcher::WrapperMatcherInterface(InnerMatcher) {}
+ : InnerMatcher(InnerMatcher) {}
bool matches(const TypeLoc &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
if (!Node)
return false;
- return this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(Node.getType()), Finder, Builder);
+ return this->InnerMatcher.matches(DynTypedNode::create(Node.getType()),
+ Finder, Builder);
}
};
/// Matches nodes of type \c T for which the inner matcher matches on
/// another node of type \c T that can be reached using a given traverse
/// function.
-template <typename T>
-class TypeTraverseMatcher : public WrapperMatcherInterface<T> {
+template <typename T> class TypeTraverseMatcher : public MatcherInterface<T> {
+ const DynTypedMatcher InnerMatcher;
+
public:
explicit TypeTraverseMatcher(const Matcher<QualType> &InnerMatcher,
QualType (T::*TraverseFunction)() const)
- : TypeTraverseMatcher::WrapperMatcherInterface(InnerMatcher),
- TraverseFunction(TraverseFunction) {}
+ : InnerMatcher(InnerMatcher), TraverseFunction(TraverseFunction) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
QualType NextNode = (Node.*TraverseFunction)();
if (NextNode.isNull())
return false;
- return this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(NextNode), Finder, Builder);
+ return this->InnerMatcher.matches(DynTypedNode::create(NextNode), Finder,
+ Builder);
}
private:
@@ -1705,20 +1719,21 @@ private:
/// matcher matches on another node of type \c T that can be reached using a
/// given traverse function.
template <typename T>
-class TypeLocTraverseMatcher : public WrapperMatcherInterface<T> {
+class TypeLocTraverseMatcher : public MatcherInterface<T> {
+ const DynTypedMatcher InnerMatcher;
+
public:
explicit TypeLocTraverseMatcher(const Matcher<TypeLoc> &InnerMatcher,
TypeLoc (T::*TraverseFunction)() const)
- : TypeLocTraverseMatcher::WrapperMatcherInterface(InnerMatcher),
- TraverseFunction(TraverseFunction) {}
+ : InnerMatcher(InnerMatcher), TraverseFunction(TraverseFunction) {}
bool matches(const T &Node, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
TypeLoc NextNode = (Node.*TraverseFunction)();
if (!NextNode)
return false;
- return this->InnerMatcher.matches(
- ast_type_traits::DynTypedNode::create(NextNode), Finder, Builder);
+ return this->InnerMatcher.matches(DynTypedNode::create(NextNode), Finder,
+ Builder);
}
private:
@@ -1818,7 +1833,7 @@ struct NotEqualsBoundNodePredicate {
}
std::string ID;
- ast_type_traits::DynTypedNode Node;
+ DynTypedNode Node;
};
template <typename Ty>
@@ -1872,6 +1887,72 @@ CompoundStmtMatcher<StmtExpr>::get(const StmtExpr &Node) {
return Node.getSubStmt();
}
+/// If \p Loc is (transitively) expanded from macro \p MacroName, returns the
+/// location (in the chain of expansions) at which \p MacroName was
+/// expanded. Since the macro may have been expanded inside a series of
+/// expansions, that location may itself be a MacroID.
+llvm::Optional<SourceLocation>
+getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
+ const ASTContext &Context);
+
+/// Matches overloaded operators with a specific name.
+///
+/// The type argument ArgT is not used by this matcher but is used by
+/// PolymorphicMatcherWithParam1 and should be std::vector<std::string>.
+template <typename T, typename ArgT = std::vector<std::string>>
+class HasAnyOperatorNameMatcher : public SingleNodeMatcherInterface<T> {
+ static_assert(std::is_same<T, BinaryOperator>::value ||
+ std::is_same<T, UnaryOperator>::value,
+ "Matcher only supports `BinaryOperator` and `UnaryOperator`");
+ static_assert(std::is_same<ArgT, std::vector<std::string>>::value,
+ "Matcher ArgT must be std::vector<std::string>");
+
+public:
+ explicit HasAnyOperatorNameMatcher(std::vector<std::string> Names)
+ : SingleNodeMatcherInterface<T>(), Names(std::move(Names)) {}
+
+ bool matchesNode(const T &Node) const override {
+ StringRef OpName = getOpName(Node);
+ return llvm::any_of(
+ Names, [&](const std::string &Name) { return Name == OpName; });
+ }
+
+private:
+ static StringRef getOpName(const UnaryOperator &Node) {
+ return Node.getOpcodeStr(Node.getOpcode());
+ }
+ static StringRef getOpName(const BinaryOperator &Node) {
+ return Node.getOpcodeStr();
+ }
+
+ const std::vector<std::string> Names;
+};
+
+using HasOpNameMatcher =
+ PolymorphicMatcherWithParam1<HasAnyOperatorNameMatcher,
+ std::vector<std::string>,
+ void(TypeList<BinaryOperator, UnaryOperator>)>;
+
+HasOpNameMatcher hasAnyOperatorNameFunc(ArrayRef<const StringRef *> NameRefs);
+
+using HasOverloadOpNameMatcher = PolymorphicMatcherWithParam1<
+ HasOverloadedOperatorNameMatcher, std::vector<std::string>,
+ void(TypeList<CXXOperatorCallExpr, FunctionDecl>)>;
+
+HasOverloadOpNameMatcher
+hasAnyOverloadedOperatorNameFunc(ArrayRef<const StringRef *> NameRefs);
+
+/// Returns true if \p Node has a base specifier matching \p BaseSpec.
+///
+/// A class is not considered to be derived from itself.
+bool matchesAnyBase(const CXXRecordDecl &Node,
+ const Matcher<CXXBaseSpecifier> &BaseSpecMatcher,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder);
+
+std::shared_ptr<llvm::Regex> createAndVerifyRegex(StringRef Regex,
+ llvm::Regex::RegexFlags Flags,
+ StringRef MatcherID);
+
} // namespace internal
} // namespace ast_matchers
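The HasAnyOperatorNameMatcher / hasAnyOperatorNameFunc / hasAnyOverloadedOperatorNameFunc additions above back the variadic operator-name matchers in the public ASTMatchers.h layer. A minimal usage sketch, assuming the public spellings hasAnyOperatorName and hasAnyOverloadedOperatorName that these helpers feed; the variable names and bind ids are illustrative only:

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// One matcher instead of anyOf(hasOperatorName("+"), hasOperatorName("-")).
static const StatementMatcher AddOrSub =
    binaryOperator(hasAnyOperatorName("+", "-")).bind("op");

// The overloaded-operator variant works the same way for operator calls.
static const StatementMatcher StreamOp =
    cxxOperatorCallExpr(hasAnyOverloadedOperatorName("<<", ">>")).bind("call");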
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersMacros.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
index 1d96ba6231cf..45e8b1a88b81 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
@@ -134,9 +134,8 @@
class matcher_##DefineMatcher##OverloadId##Matcher \
: public ::clang::ast_matchers::internal::MatcherInterface<Type> { \
public: \
- explicit matcher_##DefineMatcher##OverloadId##Matcher( \
- ParamType const &A##Param) \
- : Param(A##Param) {} \
+ explicit matcher_##DefineMatcher##OverloadId##Matcher(ParamType A##Param) \
+ : Param(std::move(A##Param)) {} \
bool matches(const Type &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
::clang::ast_matchers::internal::BoundNodesTreeBuilder \
@@ -147,12 +146,13 @@
}; \
} \
inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
- ParamType const &Param) { \
+ ParamType Param) { \
return ::clang::ast_matchers::internal::makeMatcher( \
- new internal::matcher_##DefineMatcher##OverloadId##Matcher(Param)); \
+ new internal::matcher_##DefineMatcher##OverloadId##Matcher( \
+ std::move(Param))); \
} \
- typedef ::clang::ast_matchers::internal::Matcher<Type>( \
- &DefineMatcher##_Type##OverloadId)(ParamType const &Param); \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
+ &DefineMatcher##_Type##OverloadId)(ParamType Param); \
inline bool internal::matcher_##DefineMatcher##OverloadId##Matcher::matches( \
const Type &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
@@ -183,9 +183,9 @@
class matcher_##DefineMatcher##OverloadId##Matcher \
: public ::clang::ast_matchers::internal::MatcherInterface<Type> { \
public: \
- matcher_##DefineMatcher##OverloadId##Matcher(ParamType1 const &A##Param1, \
- ParamType2 const &A##Param2) \
- : Param1(A##Param1), Param2(A##Param2) {} \
+ matcher_##DefineMatcher##OverloadId##Matcher(ParamType1 A##Param1, \
+ ParamType2 A##Param2) \
+ : Param1(std::move(A##Param1)), Param2(std::move(A##Param2)) {} \
bool matches(const Type &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
::clang::ast_matchers::internal::BoundNodesTreeBuilder \
@@ -197,14 +197,14 @@
}; \
} \
inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
- ParamType1 const &Param1, ParamType2 const &Param2) { \
+ ParamType1 Param1, ParamType2 Param2) { \
return ::clang::ast_matchers::internal::makeMatcher( \
- new internal::matcher_##DefineMatcher##OverloadId##Matcher(Param1, \
- Param2)); \
+ new internal::matcher_##DefineMatcher##OverloadId##Matcher( \
+ std::move(Param1), std::move(Param2))); \
} \
- typedef ::clang::ast_matchers::internal::Matcher<Type>( \
- &DefineMatcher##_Type##OverloadId)(ParamType1 const &Param1, \
- ParamType2 const &Param2); \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
+ &DefineMatcher##_Type##OverloadId)(ParamType1 Param1, \
+ ParamType2 Param2); \
inline bool internal::matcher_##DefineMatcher##OverloadId##Matcher::matches( \
const Type &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
@@ -272,9 +272,8 @@
class matcher_##DefineMatcher##OverloadId##Matcher \
: public ::clang::ast_matchers::internal::MatcherInterface<NodeType> { \
public: \
- explicit matcher_##DefineMatcher##OverloadId##Matcher( \
- ParamType const &A##Param) \
- : Param(A##Param) {} \
+ explicit matcher_##DefineMatcher##OverloadId##Matcher(ParamType A##Param) \
+ : Param(std::move(A##Param)) {} \
bool matches(const NodeType &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
::clang::ast_matchers::internal::BoundNodesTreeBuilder \
@@ -287,15 +286,14 @@
inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
ReturnTypesF> \
- DefineMatcher(ParamType const &Param) { \
+ DefineMatcher(ParamType Param) { \
return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
- ReturnTypesF>(Param); \
+ ReturnTypesF>(std::move(Param)); \
} \
typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
- ReturnTypesF>(&DefineMatcher##_Type##OverloadId)( \
- ParamType const &Param); \
+ ReturnTypesF> (&DefineMatcher##_Type##OverloadId)(ParamType Param); \
template <typename NodeType, typename ParamT> \
bool internal:: \
matcher_##DefineMatcher##OverloadId##Matcher<NodeType, ParamT>::matches( \
@@ -325,9 +323,9 @@
class matcher_##DefineMatcher##OverloadId##Matcher \
: public ::clang::ast_matchers::internal::MatcherInterface<NodeType> { \
public: \
- matcher_##DefineMatcher##OverloadId##Matcher(ParamType1 const &A##Param1, \
- ParamType2 const &A##Param2) \
- : Param1(A##Param1), Param2(A##Param2) {} \
+ matcher_##DefineMatcher##OverloadId##Matcher(ParamType1 A##Param1, \
+ ParamType2 A##Param2) \
+ : Param1(std::move(A##Param1)), Param2(std::move(A##Param2)) {} \
bool matches(const NodeType &Node, \
::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
::clang::ast_matchers::internal::BoundNodesTreeBuilder \
@@ -341,15 +339,15 @@
inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
ParamType2, ReturnTypesF> \
- DefineMatcher(ParamType1 const &Param1, ParamType2 const &Param2) { \
+ DefineMatcher(ParamType1 Param1, ParamType2 Param2) { \
return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
- ParamType2, ReturnTypesF>(Param1, Param2); \
+ ParamType2, ReturnTypesF>(std::move(Param1), std::move(Param2)); \
} \
typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
- ParamType2, ReturnTypesF>(&DefineMatcher##_Type##OverloadId)( \
- ParamType1 const &Param1, ParamType2 const &Param2); \
+ ParamType2, ReturnTypesF> (&DefineMatcher##_Type##OverloadId)( \
+ ParamType1 Param1, ParamType2 Param2); \
template <typename NodeType, typename ParamT1, typename ParamT2> \
bool internal::matcher_##DefineMatcher##OverloadId##Matcher< \
NodeType, ParamT1, ParamT2>:: \
@@ -440,4 +438,122 @@
ReturnTypesF>::Func MatcherName##Loc; \
AST_TYPE_TRAVERSE_MATCHER(MatcherName, FunctionName##Type, ReturnTypesF)
+/// AST_MATCHER_REGEX(Type, DefineMatcher, Param) { ... }
+/// defines a function named DefineMatcher() that takes a regular expression
+/// string parameter and an optional RegexFlags parameter and returns a
+/// Matcher<Type> object.
+///
+/// The code between the curly braces has access to the following variables:
+///
+/// Node: the AST node being matched; its type is Type.
+/// Param: a pointer to an \ref llvm::Regex object
+/// Finder: an ASTMatchFinder*.
+/// Builder: a BoundNodesTreeBuilder*.
+///
+/// The code should return true if 'Node' matches.
+#define AST_MATCHER_REGEX(Type, DefineMatcher, Param) \
+ AST_MATCHER_REGEX_OVERLOAD(Type, DefineMatcher, Param, 0)
+
+#define AST_MATCHER_REGEX_OVERLOAD(Type, DefineMatcher, Param, OverloadId) \
+ namespace internal { \
+ class matcher_##DefineMatcher##OverloadId##Matcher \
+ : public ::clang::ast_matchers::internal::MatcherInterface<Type> { \
+ public: \
+ explicit matcher_##DefineMatcher##OverloadId##Matcher( \
+ std::shared_ptr<llvm::Regex> RE) \
+ : Param(std::move(RE)) {} \
+ bool matches(const Type &Node, \
+ ::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
+ ::clang::ast_matchers::internal::BoundNodesTreeBuilder \
+ *Builder) const override; \
+ \
+ private: \
+ std::shared_ptr<llvm::Regex> const Param; \
+ }; \
+ } \
+ inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
+ llvm::StringRef Param, llvm::Regex::RegexFlags RegexFlags) { \
+ return ::clang::ast_matchers::internal::makeMatcher( \
+ new internal::matcher_##DefineMatcher##OverloadId##Matcher( \
+ ::clang::ast_matchers::internal::createAndVerifyRegex( \
+ Param, RegexFlags, #DefineMatcher))); \
+ } \
+ inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
+ llvm::StringRef Param) { \
+ return DefineMatcher(Param, llvm::Regex::NoFlags); \
+ } \
+ \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
+ &DefineMatcher##_Type##OverloadId##Flags)(llvm::StringRef, \
+ llvm::Regex::RegexFlags); \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
+ &DefineMatcher##_Type##OverloadId)(llvm::StringRef); \
+ inline bool internal::matcher_##DefineMatcher##OverloadId##Matcher::matches( \
+ const Type &Node, \
+ ::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
+ ::clang::ast_matchers::internal::BoundNodesTreeBuilder *Builder) const
+
+/// AST_POLYMORPHIC_MATCHER_REGEX(DefineMatcher, ReturnTypesF, Param) { ... }
+/// defines a function named DefineMatcher() that takes a regular expression
+/// string parameter and an optional RegexFlags parameter, and that is
+/// polymorphic in the return type.
+///
+/// The variables are the same as for
+/// AST_MATCHER_REGEX, with the addition of NodeType, which specifies the node
+/// type of the matcher Matcher<NodeType> returned by the function matcher().
+#define AST_POLYMORPHIC_MATCHER_REGEX(DefineMatcher, ReturnTypesF, Param) \
+ AST_POLYMORPHIC_MATCHER_REGEX_OVERLOAD(DefineMatcher, ReturnTypesF, Param, 0)
+
+#define AST_POLYMORPHIC_MATCHER_REGEX_OVERLOAD(DefineMatcher, ReturnTypesF, \
+ Param, OverloadId) \
+ namespace internal { \
+ template <typename NodeType, typename ParamT> \
+ class matcher_##DefineMatcher##OverloadId##Matcher \
+ : public ::clang::ast_matchers::internal::MatcherInterface<NodeType> { \
+ public: \
+ explicit matcher_##DefineMatcher##OverloadId##Matcher( \
+ std::shared_ptr<llvm::Regex> RE) \
+ : Param(std::move(RE)) {} \
+ bool matches(const NodeType &Node, \
+ ::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
+ ::clang::ast_matchers::internal::BoundNodesTreeBuilder \
+ *Builder) const override; \
+ \
+ private: \
+ std::shared_ptr<llvm::Regex> const Param; \
+ }; \
+ } \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, \
+ std::shared_ptr<llvm::Regex>, ReturnTypesF> \
+ DefineMatcher(llvm::StringRef Param, llvm::Regex::RegexFlags RegexFlags) { \
+ return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, \
+ std::shared_ptr<llvm::Regex>, ReturnTypesF>( \
+ ::clang::ast_matchers::internal::createAndVerifyRegex( \
+ Param, RegexFlags, #DefineMatcher)); \
+ } \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, \
+ std::shared_ptr<llvm::Regex>, ReturnTypesF> \
+ DefineMatcher(llvm::StringRef Param) { \
+ return DefineMatcher(Param, llvm::Regex::NoFlags); \
+ } \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, \
+ std::shared_ptr<llvm::Regex>, ReturnTypesF> ( \
+ &DefineMatcher##_Type##OverloadId##Flags)( \
+ llvm::StringRef Param, llvm::Regex::RegexFlags RegexFlags); \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, \
+ std::shared_ptr<llvm::Regex>, ReturnTypesF> ( \
+ &DefineMatcher##_Type##OverloadId)(llvm::StringRef Param); \
+ template <typename NodeType, typename ParamT> \
+ bool internal:: \
+ matcher_##DefineMatcher##OverloadId##Matcher<NodeType, ParamT>::matches( \
+ const NodeType &Node, \
+ ::clang::ast_matchers::internal::ASTMatchFinder *Finder, \
+ ::clang::ast_matchers::internal::BoundNodesTreeBuilder *Builder) \
+ const
+
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERSMACROS_H
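The AST_MATCHER_REGEX macros above generate two DefineMatcher() overloads, one taking only the pattern and one additionally taking llvm::Regex::RegexFlags, both routed through createAndVerifyRegex(). A sketch of defining a matcher with it; matchesQualifiedName is a hypothetical name, not part of this header, and the definition sits inside namespace clang::ast_matchers like the built-in matchers do:

#include "clang/ASTMatchers/ASTMatchers.h"

namespace clang {
namespace ast_matchers {

// The third macro argument (RegExp) names the stored
// std::shared_ptr<llvm::Regex> that the matcher body uses.
AST_MATCHER_REGEX(NamedDecl, matchesQualifiedName, RegExp) {
  std::string FullName = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullName);
}

} // namespace ast_matchers
} // namespace clang

// The flags overload comes for free, e.g.:
//   namedDecl(matchesQualifiedName("::std::.*", llvm::Regex::IgnoreCase))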
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
index 7dd304797c4f..f095dcdd60b0 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
@@ -65,6 +65,7 @@ public:
ET_RegistryNotBindable = 4,
ET_RegistryAmbiguousOverload = 5,
ET_RegistryValueNotFound = 6,
+ ET_RegistryUnknownEnumWithReplace = 7,
ET_ParserStringError = 100,
ET_ParserNoOpenParen = 101,
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
index 511472a4157c..e47b42a4f38c 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
@@ -1,4 +1,5 @@
-//===--- VariantValue.h - Polymorphic value type -*- C++ -*-===/
+//===--- VariantValue.h - Polymorphic value type ----------------*- C++ -*-===//
+//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -43,11 +44,10 @@ class ArgKind {
ArgKind(Kind K) : K(K) { assert(K != AK_Matcher); }
/// Constructor for matcher types.
- ArgKind(ast_type_traits::ASTNodeKind MatcherKind)
- : K(AK_Matcher), MatcherKind(MatcherKind) {}
+ ArgKind(ASTNodeKind MatcherKind) : K(AK_Matcher), MatcherKind(MatcherKind) {}
Kind getArgKind() const { return K; }
- ast_type_traits::ASTNodeKind getMatcherKind() const {
+ ASTNodeKind getMatcherKind() const {
assert(K == AK_Matcher);
return MatcherKind;
}
@@ -71,7 +71,7 @@ class ArgKind {
private:
Kind K;
- ast_type_traits::ASTNodeKind MatcherKind;
+ ASTNodeKind MatcherKind;
};
using ast_matchers::internal::DynTypedMatcher;
@@ -93,7 +93,7 @@ class VariantMatcher {
/// Methods that depend on T from hasTypedMatcher/getTypedMatcher.
class MatcherOps {
public:
- MatcherOps(ast_type_traits::ASTNodeKind NodeKind) : NodeKind(NodeKind) {}
+ MatcherOps(ASTNodeKind NodeKind) : NodeKind(NodeKind) {}
bool canConstructFrom(const DynTypedMatcher &Matcher,
bool &IsExactMatch) const;
@@ -114,7 +114,7 @@ class VariantMatcher {
~MatcherOps() = default;
private:
- ast_type_traits::ASTNodeKind NodeKind;
+ ASTNodeKind NodeKind;
};
/// Payload interface to be specialized by each matcher type.
@@ -127,7 +127,7 @@ class VariantMatcher {
virtual std::string getTypeAsString() const = 0;
virtual llvm::Optional<DynTypedMatcher>
getTypedMatcher(const MatcherOps &Ops) const = 0;
- virtual bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
+ virtual bool isConvertibleTo(ASTNodeKind Kind,
unsigned *Specificity) const = 0;
};
@@ -184,8 +184,7 @@ public:
///
/// \param Specificity value corresponding to the "specificity" of the
/// conversion.
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const {
if (Value)
return Value->isConvertibleTo(Kind, Specificity);
return false;
@@ -223,8 +222,7 @@ private:
template <typename T>
struct VariantMatcher::TypedMatcherOps final : VariantMatcher::MatcherOps {
- TypedMatcherOps()
- : MatcherOps(ast_type_traits::ASTNodeKind::getFromNodeKind<T>()) {}
+ TypedMatcherOps() : MatcherOps(ASTNodeKind::getFromNodeKind<T>()) {}
typedef ast_matchers::internal::Matcher<T> MatcherT;
DynTypedMatcher
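The VariantValue.h hunks above (like the DynTypedNode changes earlier in ASTMatchersInternal.h) track the promotion of ast_type_traits::ASTNodeKind into the top-level clang namespace; the old qualified spelling appears to remain available as a compatibility alias. A short sketch of the shorter spelling:

#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Decl.h"

// ASTNodeKind now lives directly in namespace clang.
bool functionDeclIsADecl() {
  clang::ASTNodeKind DeclKind =
      clang::ASTNodeKind::getFromNodeKind<clang::Decl>();
  clang::ASTNodeKind FnKind =
      clang::ASTNodeKind::getFromNodeKind<clang::FunctionDecl>();
  return DeclKind.isBaseOf(FnKind); // true: FunctionDecl derives from Decl
}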
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/GtestMatchers.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/GtestMatchers.h
new file mode 100644
index 000000000000..4f8addcf744a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/GtestMatchers.h
@@ -0,0 +1,45 @@
+//===- GtestMatchers.h - AST Matchers for GTest -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements matchers specific to structures in the Googletest
+// (gtest) framework.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ASTMATCHERS_GTESTMATCHERS_H
+#define LLVM_CLANG_ASTMATCHERS_GTESTMATCHERS_H
+
+#include "clang/AST/Stmt.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+
+namespace clang {
+namespace ast_matchers {
+
+/// Gtest's comparison operations.
+enum class GtestCmp {
+ Eq,
+ Ne,
+ Ge,
+ Gt,
+ Le,
+ Lt,
+};
+
+/// Matcher for gtest's ASSERT_... macros.
+internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right);
+
+/// Matcher for gtest's EXPECT_... macros.
+internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right);
+
+} // namespace ast_matchers
+} // namespace clang
+
+#endif // LLVM_CLANG_ASTMATCHERS_GTESTMATCHERS_H
+
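A sketch of composing the new gtest matchers with ordinary statement matchers; the zero-literal constraint and the bind id are illustrative only:

#include "clang/ASTMatchers/GtestMatchers.h"

using namespace clang::ast_matchers;

// Matches EXPECT_EQ(<anything>, 0) in gtest-based test code.
static const auto ExpectEqZero =
    gtestExpect(GtestCmp::Eq, expr(), integerLiteral(equals(0))).bind("expect");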
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
index 061c98137da2..95a661138df4 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
@@ -167,9 +167,7 @@ public:
}
/// Releases the memory held by the dominator tree.
- virtual void releaseMemory() {
- DT.releaseMemory();
- }
+ virtual void releaseMemory() { DT.reset(); }
/// Converts the dominator tree to human readable form.
virtual void print(raw_ostream &OS, const llvm::Module* M= nullptr) const {
@@ -351,7 +349,7 @@ ClangCFGPostDomReverseChildrenGetter::Get(
///
template <> struct GraphTraits<clang::DomTreeNode *> {
using NodeRef = ::clang::DomTreeNode *;
- using ChildIteratorType = ::clang::DomTreeNode::iterator;
+ using ChildIteratorType = ::clang::DomTreeNode::const_iterator;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/LiveVariables.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/LiveVariables.h
index a46c35ee5b30..2e7dd5d81678 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/LiveVariables.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/LiveVariables.h
@@ -70,8 +70,8 @@ public:
~LiveVariables() override;
/// Compute the liveness information for a given CFG.
- static LiveVariables *computeLiveness(AnalysisDeclContext &analysisContext,
- bool killAtAssign);
+ static std::unique_ptr<LiveVariables>
+ computeLiveness(AnalysisDeclContext &analysisContext, bool killAtAssign);
/// Return true if a variable is live at the end of a
/// specified block.
@@ -97,7 +97,8 @@ public:
void runOnAllBlocks(Observer &obs);
- static LiveVariables *create(AnalysisDeclContext &analysisContext) {
+ static std::unique_ptr<LiveVariables>
+ create(AnalysisDeclContext &analysisContext) {
return computeLiveness(analysisContext, true);
}
@@ -110,7 +111,8 @@ private:
class RelaxedLiveVariables : public LiveVariables {
public:
- static LiveVariables *create(AnalysisDeclContext &analysisContext) {
+ static std::unique_ptr<LiveVariables>
+ create(AnalysisDeclContext &analysisContext) {
return computeLiveness(analysisContext, false);
}
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
index 08fda0982df4..100029894560 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
@@ -108,7 +108,8 @@ public:
// Used by AnalysisDeclContext to construct this object.
static const void *getTag();
- static PostOrderCFGView *create(AnalysisDeclContext &analysisContext);
+ static std::unique_ptr<PostOrderCFGView>
+ create(AnalysisDeclContext &analysisContext);
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index 18659aa4e5bb..0d3dda1256fb 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -108,8 +108,10 @@ public:
/// \param LockName -- A StringRef name for the lock expression, to be printed
/// in the error message.
/// \param Loc -- The SourceLocation of the Unlock
+ /// \param LocPreviousUnlock -- If valid, the location of a previous Unlock.
virtual void handleUnmatchedUnlock(StringRef Kind, Name LockName,
- SourceLocation Loc) {}
+ SourceLocation Loc,
+ SourceLocation LocPreviousUnlock) {}
/// Warn about an unlock function call that attempts to unlock a lock with
/// the incorrect lock kind. For instance, a shared lock being unlocked
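handleUnmatchedUnlock() gains a LocPreviousUnlock parameter, so handler subclasses need their override signature updated. A minimal sketch; the class name and diagnostic text are illustrative:

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/Support/raw_ostream.h"

class DoubleUnlockLogger : public clang::threadSafety::ThreadSafetyHandler {
public:
  void handleUnmatchedUnlock(llvm::StringRef Kind, Name LockName,
                             clang::SourceLocation Loc,
                             clang::SourceLocation LocPreviousUnlock) override {
    // A valid LocPreviousUnlock distinguishes a double unlock from a plain
    // unmatched unlock.
    if (LocPreviousUnlock.isValid())
      llvm::errs() << "double unlock of " << LockName << "\n";
  }
};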
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UninitializedValues.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UninitializedValues.h
index 479be1fec048..a2b37deddcec 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UninitializedValues.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UninitializedValues.h
@@ -110,6 +110,10 @@ public:
virtual void handleUseOfUninitVariable(const VarDecl *vd,
const UninitUse &use) {}
+ /// Called when the uninitialized variable is used as a const reference argument.
+ virtual void handleConstRefUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) {}
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
index 9faa78cde89c..d12582f4f329 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
@@ -1,4 +1,4 @@
-// AnalysisDeclContext.h - Analysis context for Path Sens analysis -*- C++ -*-//
+//===- AnalysisDeclContext.h - Context for path sensitivity -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines AnalysisDeclContext, a class that manages the analysis
-// context data for path sensitive analysis.
+/// \file
+/// This file defines AnalysisDeclContext, a class that manages the analysis
+/// context data for context sensitive and path sensitive analysis.
+/// It also defines the helper classes to model entering, leaving or inlining
+/// function calls.
//
//===----------------------------------------------------------------------===//
@@ -64,14 +67,14 @@ public:
// which creates the analysis object given an AnalysisDeclContext.
};
-/// AnalysisDeclContext contains the context data for the function or method
-/// under analysis.
+/// AnalysisDeclContext contains the context data for the function, method
+/// or block under analysis.
class AnalysisDeclContext {
- /// Backpoint to the AnalysisManager object that created this
- /// AnalysisDeclContext. This may be null.
- AnalysisDeclContextManager *Manager;
+ // Backpoint to the AnalysisManager object that created this
+ // AnalysisDeclContext. This may be null.
+ AnalysisDeclContextManager *ADCMgr;
- const Decl * const D;
+ const Decl *const D;
std::unique_ptr<CFG> cfg, completeCFG;
std::unique_ptr<CFGStmtMap> cfgStmtMap;
@@ -86,45 +89,36 @@ class AnalysisDeclContext {
llvm::BumpPtrAllocator A;
- llvm::DenseMap<const BlockDecl *,void *> *ReferencedBlockVars = nullptr;
+ llvm::DenseMap<const BlockDecl *, void *> *ReferencedBlockVars = nullptr;
void *ManagedAnalyses = nullptr;
public:
- AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *D);
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr, const Decl *D);
- AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *D,
- const CFG::BuildOptions &BuildOptions);
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr, const Decl *D,
+ const CFG::BuildOptions &BuildOptions);
~AnalysisDeclContext();
ASTContext &getASTContext() const { return D->getASTContext(); }
+
const Decl *getDecl() const { return D; }
- /// Return the AnalysisDeclContextManager (if any) that created
- /// this AnalysisDeclContext.
- AnalysisDeclContextManager *getManager() const {
- return Manager;
- }
+ AnalysisDeclContextManager *getManager() const { return ADCMgr; }
- /// Return the build options used to construct the CFG.
- CFG::BuildOptions &getCFGBuildOptions() {
- return cfgBuildOptions;
- }
+ CFG::BuildOptions &getCFGBuildOptions() { return cfgBuildOptions; }
const CFG::BuildOptions &getCFGBuildOptions() const {
return cfgBuildOptions;
}
- /// getAddEHEdges - Return true iff we are adding exceptional edges from
- /// callExprs. If this is false, then try/catch statements and blocks
- /// reachable from them can appear to be dead in the CFG, analysis passes must
- /// cope with that.
+ /// \returns Whether we are adding exception handling edges from CallExprs.
+ /// If this is false, then try/catch statements and blocks reachable from them
+ /// can appear to be dead in the CFG, analysis passes must cope with that.
bool getAddEHEdges() const { return cfgBuildOptions.AddEHEdges; }
bool getUseUnoptimizedCFG() const {
- return !cfgBuildOptions.PruneTriviallyFalseEdges;
+ return !cfgBuildOptions.PruneTriviallyFalseEdges;
}
bool getAddImplicitDtors() const { return cfgBuildOptions.AddImplicitDtors; }
bool getAddInitializers() const { return cfgBuildOptions.AddInitializers; }
@@ -132,25 +126,25 @@ public:
void registerForcedBlockExpression(const Stmt *stmt);
const CFGBlock *getBlockForRegisteredExpression(const Stmt *stmt);
- /// Get the body of the Declaration.
+ /// \returns The body of the stored Decl \c D.
Stmt *getBody() const;
- /// Get the body of the Declaration.
+ /// \copydoc AnalysisDeclContext::getBody()
/// \param[out] IsAutosynthesized Specifies if the body is auto-generated
/// by the BodyFarm.
Stmt *getBody(bool &IsAutosynthesized) const;
- /// Checks if the body of the Decl is generated by the BodyFarm.
+ /// \returns Whether the body of the Decl \c D is generated by the BodyFarm.
///
- /// Note, the lookup is not free. We are going to call getBody behind
+ /// \note The lookup is not free. We are going to call getBody behind
/// the scenes.
/// \sa getBody
bool isBodyAutosynthesized() const;
- /// Checks if the body of the Decl is generated by the BodyFarm from a
- /// model file.
+ /// \returns Whether the body of the Decl \c D is generated by the BodyFarm
+ /// from a model file.
///
- /// Note, the lookup is not free. We are going to call getBody behind
+ /// \note The lookup is not free. We are going to call getBody behind
/// the scenes.
/// \sa getBody
bool isBodyAutosynthesizedFromModelFile() const;
@@ -161,61 +155,64 @@ public:
CFGReverseBlockReachabilityAnalysis *getCFGReachablityAnalysis();
- /// Return a version of the CFG without any edges pruned.
+ /// \returns A version of the CFG without any edges pruned.
CFG *getUnoptimizedCFG();
void dumpCFG(bool ShowColors);
- /// Returns true if we have built a CFG for this analysis context.
- /// Note that this doesn't correspond to whether or not a valid CFG exists, it
+ /// \returns Whether we have built a CFG for this analysis context.
+ ///
+ /// \note This doesn't correspond to whether or not a valid CFG exists, it
/// corresponds to whether we *attempted* to build one.
bool isCFGBuilt() const { return builtCFG; }
ParentMap &getParentMap();
- using referenced_decls_iterator = const VarDecl * const *;
+ using referenced_decls_iterator = const VarDecl *const *;
llvm::iterator_range<referenced_decls_iterator>
getReferencedBlockVars(const BlockDecl *BD);
- /// Return the ImplicitParamDecl* associated with 'self' if this
- /// AnalysisDeclContext wraps an ObjCMethodDecl. Returns NULL otherwise.
+ /// \returns The ImplicitParamDecl associated with \c self if this
+ /// AnalysisDeclContext wraps an ObjCMethodDecl or nullptr otherwise.
const ImplicitParamDecl *getSelfDecl() const;
- const StackFrameContext *getStackFrame(LocationContext const *Parent,
+ /// \copydoc LocationContextManager::getStackFrame()
+ const StackFrameContext *getStackFrame(LocationContext const *ParentLC,
const Stmt *S, const CFGBlock *Blk,
- unsigned BlockCount, unsigned Idx);
+ unsigned BlockCount, unsigned Index);
+ /// \copydoc LocationContextManager::getBlockInvocationContext()
const BlockInvocationContext *
- getBlockInvocationContext(const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData);
-
- /// Return the specified analysis object, lazily running the analysis if
- /// necessary. Return NULL if the analysis could not run.
- template <typename T>
- T *getAnalysis() {
+ getBlockInvocationContext(const LocationContext *ParentLC,
+ const BlockDecl *BD, const void *Data);
+
+ /// \returns The specified analysis object, lazily running the analysis if
+ /// necessary or nullptr if the analysis could not run.
+ template <typename T> T *getAnalysis() {
const void *tag = T::getTag();
- ManagedAnalysis *&data = getAnalysisImpl(tag);
- if (!data) {
+ std::unique_ptr<ManagedAnalysis> &data = getAnalysisImpl(tag);
+ if (!data)
data = T::create(*this);
- }
- return static_cast<T *>(data);
+ return static_cast<T *>(data.get());
}
- /// Returns true if the root namespace of the given declaration is the 'std'
- /// C++ namespace.
+ /// \returns Whether the root namespace of \p D is the \c std C++ namespace.
static bool isInStdNamespace(const Decl *D);
private:
- ManagedAnalysis *&getAnalysisImpl(const void* tag);
+ std::unique_ptr<ManagedAnalysis> &getAnalysisImpl(const void *tag);
LocationContextManager &getLocationContextManager();
};
+/// It wraps the AnalysisDeclContext to represent both the call stack (with
+/// the help of StackFrameContext) and, inside function calls, the
+/// BlockInvocationContext. It is needed for context sensitive analysis to
+/// model entering, leaving or inlining function calls.
class LocationContext : public llvm::FoldingSetNode {
public:
- enum ContextKind { StackFrame, Scope, Block };
+ enum ContextKind { StackFrame, Block };
private:
ContextKind Kind;
@@ -229,8 +226,7 @@ private:
protected:
LocationContext(ContextKind k, AnalysisDeclContext *ctx,
- const LocationContext *parent,
- int64_t ID)
+ const LocationContext *parent, int64_t ID)
: Kind(k), Ctx(ctx), Parent(parent), ID(ID) {}
public:
@@ -238,9 +234,7 @@ public:
ContextKind getKind() const { return Kind; }
- int64_t getID() const {
- return ID;
- }
+ int64_t getID() const { return ID; }
AnalysisDeclContext *getAnalysisDeclContext() const { return Ctx; }
@@ -248,58 +242,61 @@ public:
bool isParentOf(const LocationContext *LC) const;
- const Decl *getDecl() const { return getAnalysisDeclContext()->getDecl(); }
+ const Decl *getDecl() const { return Ctx->getDecl(); }
- CFG *getCFG() const { return getAnalysisDeclContext()->getCFG(); }
+ CFG *getCFG() const { return Ctx->getCFG(); }
- template <typename T>
- T *getAnalysis() const {
- return getAnalysisDeclContext()->getAnalysis<T>();
- }
+ template <typename T> T *getAnalysis() const { return Ctx->getAnalysis<T>(); }
- const ParentMap &getParentMap() const {
- return getAnalysisDeclContext()->getParentMap();
- }
+ const ParentMap &getParentMap() const { return Ctx->getParentMap(); }
- const ImplicitParamDecl *getSelfDecl() const {
- return Ctx->getSelfDecl();
- }
+ /// \copydoc AnalysisDeclContext::getSelfDecl()
+ const ImplicitParamDecl *getSelfDecl() const { return Ctx->getSelfDecl(); }
const StackFrameContext *getStackFrame() const;
- /// Return true if the current LocationContext has no caller context.
+ /// \returns Whether the current LocationContext has no caller context.
virtual bool inTopFrame() const;
virtual void Profile(llvm::FoldingSetNodeID &ID) = 0;
- void dumpStack(
- raw_ostream &Out, const char *NL = "\n",
- std::function<void(const LocationContext *)> printMoreInfoPerContext =
- [](const LocationContext *) {}) const;
+ /// Prints out the call stack.
+ ///
+ /// \param Out The out stream.
+ LLVM_DUMP_METHOD void dumpStack(raw_ostream &Out) const;
+ /// Prints out the call stack in \c json format.
+ ///
+ /// \param Out The out stream.
+ /// \param NL The newline.
+ /// \param Space The space count for indentation.
+ /// \param IsDot Whether the output format is \c dot.
+ /// \param printMoreInfoPerContext
+ /// A callback to print more information for each context, for example:
+ /// \code
+ /// [&](const LocationContext *LC) { LC->dump(); }
+ /// \endcode
void printJson(
raw_ostream &Out, const char *NL = "\n", unsigned int Space = 0,
bool IsDot = false,
std::function<void(const LocationContext *)> printMoreInfoPerContext =
[](const LocationContext *) {}) const;
- void dump() const;
+ LLVM_DUMP_METHOD void dump() const;
-public:
- static void ProfileCommon(llvm::FoldingSetNodeID &ID,
- ContextKind ck,
+ static void ProfileCommon(llvm::FoldingSetNodeID &ID, ContextKind ck,
AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const void *data);
+ const LocationContext *parent, const void *data);
};
+/// It represents a stack frame of the call stack (based on CallEvent).
class StackFrameContext : public LocationContext {
friend class LocationContextManager;
- // The callsite where this stack frame is established.
+ // The call site where this stack frame is established.
const Stmt *CallSite;
- // The parent block of the callsite.
+ // The parent block of the call site.
const CFGBlock *Block;
// The number of times the 'Block' has been visited.
@@ -307,14 +304,14 @@ class StackFrameContext : public LocationContext {
// called multiple times in a loop.
const unsigned BlockCount;
- // The index of the callsite in the CFGBlock.
+ // The index of the call site in the CFGBlock.
const unsigned Index;
- StackFrameContext(AnalysisDeclContext *ctx, const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk, unsigned blockCount,
- unsigned idx, int64_t ID)
- : LocationContext(StackFrame, ctx, parent, ID), CallSite(s), Block(blk),
- BlockCount(blockCount), Index(idx) {}
+ StackFrameContext(AnalysisDeclContext *ADC, const LocationContext *ParentLC,
+ const Stmt *S, const CFGBlock *Block, unsigned BlockCount,
+ unsigned Index, int64_t ID)
+ : LocationContext(StackFrame, ADC, ParentLC, ID), CallSite(S),
+ Block(Block), BlockCount(BlockCount), Index(Index) {}
public:
~StackFrameContext() override = default;
@@ -323,117 +320,100 @@ public:
const CFGBlock *getCallSiteBlock() const { return Block; }
- /// Return true if the current LocationContext has no caller context.
- bool inTopFrame() const override { return getParent() == nullptr; }
+ bool inTopFrame() const override { return getParent() == nullptr; }
unsigned getIndex() const { return Index; }
+ CFGElement getCallSiteCFGElement() const { return (*Block)[Index]; }
+
void Profile(llvm::FoldingSetNodeID &ID) override;
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
- const LocationContext *parent, const Stmt *s,
- const CFGBlock *blk, unsigned blockCount, unsigned idx) {
- ProfileCommon(ID, StackFrame, ctx, parent, s);
- ID.AddPointer(blk);
- ID.AddInteger(blockCount);
- ID.AddInteger(idx);
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ADC,
+ const LocationContext *ParentLC, const Stmt *S,
+ const CFGBlock *Block, unsigned BlockCount,
+ unsigned Index) {
+ ProfileCommon(ID, StackFrame, ADC, ParentLC, S);
+ ID.AddPointer(Block);
+ ID.AddInteger(BlockCount);
+ ID.AddInteger(Index);
}
- static bool classof(const LocationContext *Ctx) {
- return Ctx->getKind() == StackFrame;
- }
-};
-
-class ScopeContext : public LocationContext {
- friend class LocationContextManager;
-
- const Stmt *Enter;
-
- ScopeContext(AnalysisDeclContext *ctx, const LocationContext *parent,
- const Stmt *s, int64_t ID)
- : LocationContext(Scope, ctx, parent, ID), Enter(s) {}
-
-public:
- ~ScopeContext() override = default;
-
- void Profile(llvm::FoldingSetNodeID &ID) override;
-
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
- const LocationContext *parent, const Stmt *s) {
- ProfileCommon(ID, Scope, ctx, parent, s);
- }
-
- static bool classof(const LocationContext *Ctx) {
- return Ctx->getKind() == Scope;
+ static bool classof(const LocationContext *LC) {
+ return LC->getKind() == StackFrame;
}
};
+/// It represents a block invocation (based on BlockCall).
class BlockInvocationContext : public LocationContext {
friend class LocationContextManager;
const BlockDecl *BD;
// FIXME: Come up with a more type-safe way to model context-sensitivity.
- const void *ContextData;
+ const void *Data;
- BlockInvocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent, const BlockDecl *bd,
- const void *contextData, int64_t ID)
- : LocationContext(Block, ctx, parent, ID), BD(bd),
- ContextData(contextData) {}
+ BlockInvocationContext(AnalysisDeclContext *ADC,
+ const LocationContext *ParentLC, const BlockDecl *BD,
+ const void *Data, int64_t ID)
+ : LocationContext(Block, ADC, ParentLC, ID), BD(BD), Data(Data) {}
public:
~BlockInvocationContext() override = default;
const BlockDecl *getBlockDecl() const { return BD; }
- const void *getContextData() const { return ContextData; }
+ const void *getData() const { return Data; }
void Profile(llvm::FoldingSetNodeID &ID) override;
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
- const LocationContext *parent, const BlockDecl *bd,
- const void *contextData) {
- ProfileCommon(ID, Block, ctx, parent, bd);
- ID.AddPointer(contextData);
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ADC,
+ const LocationContext *ParentLC, const BlockDecl *BD,
+ const void *Data) {
+ ProfileCommon(ID, Block, ADC, ParentLC, BD);
+ ID.AddPointer(Data);
}
- static bool classof(const LocationContext *Ctx) {
- return Ctx->getKind() == Block;
+ static bool classof(const LocationContext *LC) {
+ return LC->getKind() == Block;
}
};
class LocationContextManager {
llvm::FoldingSet<LocationContext> Contexts;
- /// ID used for generating a new location context.
+ // ID used for generating a new location context.
int64_t NewID = 0;
public:
~LocationContextManager();
- const StackFrameContext *getStackFrame(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk,
- unsigned blockCount, unsigned idx);
-
- const ScopeContext *getScope(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const Stmt *s);
-
+ /// Obtain a context of the call stack using its parent context.
+ ///
+ /// \param ADC The AnalysisDeclContext.
+ /// \param ParentLC The parent context of this newly created context.
+ /// \param S The call.
+ /// \param Block The basic block.
+ /// \param BlockCount The current count of entering into \p Block.
+ /// \param Index The index of the call site \p S in \p Block.
+ /// \returns The context for \p ADC with parent context \p ParentLC.
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *ADC,
+ const LocationContext *ParentLC,
+ const Stmt *S, const CFGBlock *Block,
+ unsigned BlockCount, unsigned Index);
+
+ /// Obtain a context of the block invocation using its parent context.
+ ///
+ /// \param ADC The AnalysisDeclContext.
+ /// \param ParentLC The parent context of this newly created context.
+ /// \param BD The BlockDecl.
+ /// \param Data The raw data to store as part of the context.
const BlockInvocationContext *
- getBlockInvocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData);
+ getBlockInvocationContext(AnalysisDeclContext *ADC,
+ const LocationContext *ParentLC,
+ const BlockDecl *BD, const void *Data);
/// Discard all previously created LocationContext objects.
void clear();
-private:
- template <typename LOC, typename DATA>
- const LOC *getLocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const DATA *d);
};
class AnalysisDeclContextManager {
@@ -441,36 +421,31 @@ class AnalysisDeclContextManager {
llvm::DenseMap<const Decl *, std::unique_ptr<AnalysisDeclContext>>;
ContextMap Contexts;
- LocationContextManager LocContexts;
+ LocationContextManager LocCtxMgr;
CFG::BuildOptions cfgBuildOptions;
- /// Pointer to an interface that can provide function bodies for
- /// declarations from external source.
+ // Pointer to an interface that can provide function bodies for
+ // declarations from external source.
std::unique_ptr<CodeInjector> Injector;
- /// A factory for creating and caching implementations for common
- /// methods during the analysis.
+ // A factory for creating and caching implementations for common
+ // methods during the analysis.
BodyFarm FunctionBodyFarm;
- /// Flag to indicate whether or not bodies should be synthesized
- /// for well-known functions.
+ // Flag to indicate whether or not bodies should be synthesized
+ // for well-known functions.
bool SynthesizeBodies;
public:
- AnalysisDeclContextManager(ASTContext &ASTCtx, bool useUnoptimizedCFG = false,
- bool addImplicitDtors = false,
- bool addInitializers = false,
- bool addTemporaryDtors = false,
- bool addLifetime = false,
- bool addLoopExit = false,
- bool addScopes = false,
- bool synthesizeBodies = false,
- bool addStaticInitBranches = false,
- bool addCXXNewAllocator = true,
- bool addRichCXXConstructors = true,
- bool markElidedCXXConstructors = true,
- bool addVirtualBaseBranches = true,
- CodeInjector *injector = nullptr);
+ AnalysisDeclContextManager(
+ ASTContext &ASTCtx, bool useUnoptimizedCFG = false,
+ bool addImplicitDtors = false, bool addInitializers = false,
+ bool addTemporaryDtors = false, bool addLifetime = false,
+ bool addLoopExit = false, bool addScopes = false,
+ bool synthesizeBodies = false, bool addStaticInitBranches = false,
+ bool addCXXNewAllocator = true, bool addRichCXXConstructors = true,
+ bool markElidedCXXConstructors = true, bool addVirtualBaseBranches = true,
+ CodeInjector *injector = nullptr);
AnalysisDeclContext *getContext(const Decl *D);
@@ -478,37 +453,27 @@ public:
return !cfgBuildOptions.PruneTriviallyFalseEdges;
}
- CFG::BuildOptions &getCFGBuildOptions() {
- return cfgBuildOptions;
- }
+ CFG::BuildOptions &getCFGBuildOptions() { return cfgBuildOptions; }
- /// Return true if faux bodies should be synthesized for well-known
- /// functions.
+ /// \returns Whether faux bodies should be synthesized for known functions.
bool synthesizeBodies() const { return SynthesizeBodies; }
- const StackFrameContext *getStackFrame(AnalysisDeclContext *Ctx,
- const LocationContext *Parent,
- const Stmt *S, const CFGBlock *Blk,
- unsigned BlockCount, unsigned Idx) {
- return LocContexts.getStackFrame(Ctx, Parent, S, Blk, BlockCount, Idx);
- }
-
- // Get the top level stack frame.
+ /// Obtain the beginning context of the analysis.
+ ///
+ /// \returns The top level stack frame for \p D.
const StackFrameContext *getStackFrame(const Decl *D) {
- return LocContexts.getStackFrame(getContext(D), nullptr, nullptr, nullptr,
- 0, 0);
+ return LocCtxMgr.getStackFrame(getContext(D), nullptr, nullptr, nullptr, 0,
+ 0);
}
- // Get a stack frame with parent.
- StackFrameContext const *getStackFrame(const Decl *D,
+ /// \copydoc LocationContextManager::getStackFrame()
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *ADC,
const LocationContext *Parent,
- const Stmt *S, const CFGBlock *Blk,
- unsigned BlockCount, unsigned Idx) {
- return LocContexts.getStackFrame(getContext(D), Parent, S, Blk, BlockCount,
- Idx);
+ const Stmt *S, const CFGBlock *Block,
+ unsigned BlockCount, unsigned Index) {
+ return LocCtxMgr.getStackFrame(ADC, Parent, S, Block, BlockCount, Index);
}
- /// Get a reference to {@code BodyFarm} instance.
BodyFarm &getBodyFarm();
/// Discard all previously created AnalysisDeclContexts.
@@ -517,9 +482,7 @@ public:
private:
friend class AnalysisDeclContext;
- LocationContextManager &getLocationContextManager() {
- return LocContexts;
- }
+ LocationContextManager &getLocationContextManager() { return LocCtxMgr; }
};
} // namespace clang
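With ManagedAnalysis storage switched to std::unique_ptr, getAnalysis<T>() expects T::create() to return std::unique_ptr<T> (as LiveVariables and PostOrderCFGView now do) while still handing callers a raw, non-owning pointer. A sketch, assuming FD is a FunctionDecl that has a body:

#include "clang/AST/Decl.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"

// The view is created lazily on first request and owned by the context.
unsigned countBlocksPostOrder(const clang::FunctionDecl *FD) {
  clang::AnalysisDeclContext ADC(/*Mgr=*/nullptr, FD);
  unsigned N = 0;
  if (auto *POV = ADC.getAnalysis<clang::PostOrderCFGView>())
    for (const clang::CFGBlock *B : *POV) {
      (void)B;
      ++N;
    }
  return N;
}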
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
index 97a94d299e64..16371eb1da18 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
@@ -41,6 +41,9 @@ public:
/// An implicit or explicit C++ constructor call
Constructor,
+ /// A C++ inherited constructor produced by a "using T::T" directive
+ InheritedConstructor,
+
/// A C++ allocation function call (operator `new`), via C++ new-expression
Allocator,
@@ -84,6 +87,9 @@ public:
AnyCall(const CXXConstructExpr *NE)
: E(NE), D(NE->getConstructor()), K(Constructor) {}
+ AnyCall(const CXXInheritedCtorInitExpr *CIE)
+ : E(CIE), D(CIE->getConstructor()), K(InheritedConstructor) {}
+
AnyCall(const CXXDestructorDecl *D) : E(nullptr), D(D), K(Destructor) {}
AnyCall(const CXXConstructorDecl *D) : E(nullptr), D(D), K(Constructor) {}
@@ -114,6 +120,8 @@ public:
return AnyCall(CXDE);
} else if (const auto *CXCE = dyn_cast<CXXConstructExpr>(E)) {
return AnyCall(CXCE);
+ } else if (const auto *CXCIE = dyn_cast<CXXInheritedCtorInitExpr>(E)) {
+ return AnyCall(CXCIE);
} else {
return None;
}
@@ -169,6 +177,7 @@ public:
return cast<CallExpr>(E)->getCallReturnType(Ctx);
case Destructor:
case Constructor:
+ case InheritedConstructor:
case Allocator:
case Deallocator:
return cast<FunctionDecl>(D)->getReturnType();
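The new InheritedConstructor kind lets generic call handling built on AnyCall recognize constructor calls produced by a "using Base::Base;" declaration. A sketch; E is whatever expression the caller already has:

#include "clang/Analysis/AnyCall.h"

// True if E is a call-like expression invoking an inherited constructor
// (a CXXInheritedCtorInitExpr).
bool isInheritedCtorCall(const clang::Expr *E) {
  if (llvm::Optional<clang::AnyCall> C = clang::AnyCall::forExpr(E))
    return C->getKind() == clang::AnyCall::InheritedConstructor;
  return false;
}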
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
index 93de3178e661..43fb523c863a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
@@ -624,10 +624,10 @@ class CFGBlock {
template <bool IsOtherConst> friend class ElementRefImpl;
using CFGBlockPtr =
- typename std::conditional<IsConst, const CFGBlock *, CFGBlock *>::type;
+ std::conditional_t<IsConst, const CFGBlock *, CFGBlock *>;
- using CFGElementPtr = typename std::conditional<IsConst, const CFGElement *,
- CFGElement *>::type;
+ using CFGElementPtr =
+ std::conditional_t<IsConst, const CFGElement *, CFGElement *>;
protected:
CFGBlockPtr Parent;
@@ -675,15 +675,14 @@ class CFGBlock {
friend class ElementRefIterator;
using CFGBlockRef =
- typename std::conditional<IsConst, const CFGBlock *, CFGBlock *>::type;
+ std::conditional_t<IsConst, const CFGBlock *, CFGBlock *>;
- using UnderlayingIteratorTy = typename std::conditional<
+ using UnderlayingIteratorTy = std::conditional_t<
IsConst,
- typename std::conditional<IsReverse,
- ElementList::const_reverse_iterator,
- ElementList::const_iterator>::type,
- typename std::conditional<IsReverse, ElementList::reverse_iterator,
- ElementList::iterator>::type>::type;
+ std::conditional_t<IsReverse, ElementList::const_reverse_iterator,
+ ElementList::const_iterator>,
+ std::conditional_t<IsReverse, ElementList::reverse_iterator,
+ ElementList::iterator>>;
using IteratorTraits = typename std::iterator_traits<UnderlayingIteratorTy>;
using ElementRef = typename CFGBlock::ElementRefImpl<IsConst>;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
index dae2b58ffc10..6f7159330f5d 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include <memory>
namespace clang {
@@ -65,6 +66,11 @@ public:
/// Determine if a declaration should be included in the graph.
static bool includeInGraph(const Decl *D);
+ /// Determine if a declaration should be included in the graph for the
+ /// purposes of being a callee. This is similar to includeInGraph except
+ /// it permits declarations, not just definitions.
+ static bool includeCalleeInGraph(const Decl *D);
+
/// Lookup the node for the given declaration.
CallGraphNode *getNode(const Decl *) const;
@@ -136,14 +142,23 @@ public:
private:
/// Add the given declaration to the call graph.
void addNodeForDecl(Decl *D, bool IsGlobal);
-
- /// Allocate a new node in the graph.
- CallGraphNode *allocateNewNode(Decl *);
};
class CallGraphNode {
public:
- using CallRecord = CallGraphNode *;
+ struct CallRecord {
+ CallGraphNode *Callee;
+ Expr *CallExpr;
+
+ CallRecord() = default;
+
+ CallRecord(CallGraphNode *Callee_, Expr *CallExpr_)
+ : Callee(Callee_), CallExpr(CallExpr_) {}
+
+ // The call destination is the only important data here,
+ // so allow transparent unwrapping into it.
+ operator CallGraphNode *() const { return Callee; }
+ };
private:
/// The function/method declaration.
@@ -164,24 +179,67 @@ public:
const_iterator begin() const { return CalledFunctions.begin(); }
const_iterator end() const { return CalledFunctions.end(); }
+ /// Iterator access to callees/children of the node.
+ llvm::iterator_range<iterator> callees() {
+ return llvm::make_range(begin(), end());
+ }
+ llvm::iterator_range<const_iterator> callees() const {
+ return llvm::make_range(begin(), end());
+ }
+
bool empty() const { return CalledFunctions.empty(); }
unsigned size() const { return CalledFunctions.size(); }
- void addCallee(CallGraphNode *N) {
- CalledFunctions.push_back(N);
- }
+ void addCallee(CallRecord Call) { CalledFunctions.push_back(Call); }
Decl *getDecl() const { return FD; }
+ FunctionDecl *getDefinition() const {
+ return getDecl()->getAsFunction()->getDefinition();
+ }
+
void print(raw_ostream &os) const;
void dump() const;
};
+// NOTE: we are comparing based on the callee only. So different call records
+// (with different call expressions) to the same callee will compare equal!
+inline bool operator==(const CallGraphNode::CallRecord &LHS,
+ const CallGraphNode::CallRecord &RHS) {
+ return LHS.Callee == RHS.Callee;
+}
+
} // namespace clang
-// Graph traits for iteration, viewing.
namespace llvm {
+// Specialize DenseMapInfo for clang::CallGraphNode::CallRecord.
+template <> struct DenseMapInfo<clang::CallGraphNode::CallRecord> {
+ static inline clang::CallGraphNode::CallRecord getEmptyKey() {
+ return clang::CallGraphNode::CallRecord(
+ DenseMapInfo<clang::CallGraphNode *>::getEmptyKey(),
+ DenseMapInfo<clang::Expr *>::getEmptyKey());
+ }
+
+ static inline clang::CallGraphNode::CallRecord getTombstoneKey() {
+ return clang::CallGraphNode::CallRecord(
+ DenseMapInfo<clang::CallGraphNode *>::getTombstoneKey(),
+ DenseMapInfo<clang::Expr *>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CallGraphNode::CallRecord &Val) {
+ // NOTE: we are comparing based on the callee only.
+ // Different call records with the same callee will compare equal!
+ return DenseMapInfo<clang::CallGraphNode *>::getHashValue(Val.Callee);
+ }
+
+ static bool isEqual(const clang::CallGraphNode::CallRecord &LHS,
+ const clang::CallGraphNode::CallRecord &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Graph traits for iteration, viewing.
template <> struct GraphTraits<clang::CallGraphNode*> {
using NodeType = clang::CallGraphNode;
using NodeRef = clang::CallGraphNode *;
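Both the ``operator==`` above and the ``DenseMapInfo`` specialization deliberately hash and compare only the callee, so containers keyed on ``CallRecord`` keep at most one edge per callee regardless of which call expression produced it. A minimal standalone sketch of that contract, using stand-in ``Node``/``Expr`` types rather than the clang classes:

.. code-block:: c++

  #include <cassert>
  #include <cstddef>
  #include <functional>
  #include <unordered_set>

  struct Node {};
  struct Expr {};

  // Mirrors CallGraphNode::CallRecord: equality and hashing look only at the
  // callee, never at the call expression.
  struct CallRecord {
    Node *Callee;
    Expr *Call;
    bool operator==(const CallRecord &RHS) const { return Callee == RHS.Callee; }
  };

  struct CallRecordHash {
    std::size_t operator()(const CallRecord &R) const {
      return std::hash<Node *>()(R.Callee);
    }
  };

  int main() {
    Node N;
    Expr E1, E2;
    std::unordered_set<CallRecord, CallRecordHash> Edges;
    Edges.insert(CallRecord{&N, &E1});
    Edges.insert(CallRecord{&N, &E2}); // same callee, different call expression
    assert(Edges.size() == 1);         // deduplicated by callee only
  }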
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
index f1564f9fe740..4fa5c8b454a0 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
@@ -110,6 +110,9 @@ public:
ConstructionContextItem(const CXXConstructExpr *CE, unsigned Index)
: Data(CE), Kind(ArgumentKind), Index(Index) {}
+ ConstructionContextItem(const CXXInheritedCtorInitExpr *CE, unsigned Index)
+ : Data(CE), Kind(ArgumentKind), Index(Index) {}
+
ConstructionContextItem(const ObjCMessageExpr *ME, unsigned Index)
: Data(ME), Kind(ArgumentKind), Index(Index) {}
@@ -117,7 +120,7 @@ public:
ConstructionContextItem(const Expr *E, unsigned Index)
: Data(E), Kind(ArgumentKind), Index(Index) {
assert(isa<CallExpr>(E) || isa<CXXConstructExpr>(E) ||
- isa<ObjCMessageExpr>(E));
+ isa<CXXInheritedCtorInitExpr>(E) || isa<ObjCMessageExpr>(E));
}
ConstructionContextItem(const CXXCtorInitializer *Init)
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
index 709753339eb5..68d935c6a400 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
@@ -168,4 +168,4 @@ protected:
};
} // end namespace clang
-#endif
+#endif // LLVM_CLANG_ANALYSES_DATAFLOW_VALUES
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
new file mode 100644
index 000000000000..90095735ad3d
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
@@ -0,0 +1,94 @@
+//===- DataflowWorklist.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A simple and reusable worklist for flow-sensitive analyses.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
+
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/PriorityQueue.h"
+
+namespace clang {
+/// A worklist implementation where the enqueued blocks will be dequeued based
+/// on the order defined by 'Comp'.
+template <typename Comp, unsigned QueueSize> class DataflowWorklistBase {
+ llvm::BitVector EnqueuedBlocks;
+ PostOrderCFGView *POV;
+ llvm::PriorityQueue<const CFGBlock *,
+ SmallVector<const CFGBlock *, QueueSize>, Comp>
+ WorkList;
+
+public:
+ DataflowWorklistBase(const CFG &Cfg, PostOrderCFGView *POV, Comp C)
+ : EnqueuedBlocks(Cfg.getNumBlockIDs()), POV(POV), WorkList(C) {}
+
+ const PostOrderCFGView *getCFGView() const { return POV; }
+
+ void enqueueBlock(const CFGBlock *Block) {
+ if (Block && !EnqueuedBlocks[Block->getBlockID()]) {
+ EnqueuedBlocks[Block->getBlockID()] = true;
+ WorkList.push(Block);
+ }
+ }
+
+ const CFGBlock *dequeue() {
+ if (WorkList.empty())
+ return nullptr;
+ const CFGBlock *B = WorkList.top();
+ WorkList.pop();
+ EnqueuedBlocks[B->getBlockID()] = false;
+ return B;
+ }
+};
+
+struct ReversePostOrderCompare {
+ PostOrderCFGView::BlockOrderCompare Cmp;
+ bool operator()(const CFGBlock *lhs, const CFGBlock *rhs) const {
+ return Cmp(rhs, lhs);
+ }
+};
+
+/// A worklist implementation for forward dataflow analysis. The enqueued
+/// blocks will be dequeued in reverse post order. The worklist cannot contain
+/// the same block multiple times at once.
+struct ForwardDataflowWorklist
+ : DataflowWorklistBase<ReversePostOrderCompare, 20> {
+ ForwardDataflowWorklist(const CFG &Cfg, AnalysisDeclContext &Ctx)
+ : DataflowWorklistBase(
+ Cfg, Ctx.getAnalysis<PostOrderCFGView>(),
+ ReversePostOrderCompare{
+ Ctx.getAnalysis<PostOrderCFGView>()->getComparator()}) {}
+
+ void enqueueSuccessors(const CFGBlock *Block) {
+ for (auto B : Block->succs())
+ enqueueBlock(B);
+ }
+};
+
+/// A worklist implementation for backward dataflow analysis. The enqueued
+/// blocks will be dequeued in post order. The worklist cannot contain the same
+/// block multiple times at once.
+struct BackwardDataflowWorklist
+ : DataflowWorklistBase<PostOrderCFGView::BlockOrderCompare, 20> {
+ BackwardDataflowWorklist(const CFG &Cfg, AnalysisDeclContext &Ctx)
+ : DataflowWorklistBase(
+ Cfg, Ctx.getAnalysis<PostOrderCFGView>(),
+ Ctx.getAnalysis<PostOrderCFGView>()->getComparator()) {}
+
+ void enqueuePredecessors(const CFGBlock *Block) {
+ for (auto B : Block->preds())
+ enqueueBlock(B);
+ }
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
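The worklist above pairs a priority queue ordered by (reverse) post order with a bit vector that keeps a block from being enqueued twice. A sketch of the same pattern with plain integer block IDs standing in for ``CFGBlock`` pointers (illustration only, not the clang API):

.. code-block:: c++

  #include <cassert>
  #include <functional>
  #include <queue>
  #include <vector>

  // A tiny analogue of DataflowWorklistBase: the comparator decides dequeue
  // order and EnqueuedBlocks deduplicates pending blocks.
  class Worklist {
    std::vector<bool> EnqueuedBlocks;
    std::priority_queue<int, std::vector<int>, std::function<bool(int, int)>>
        Queue;

  public:
    Worklist(unsigned NumBlocks, std::function<bool(int, int)> Cmp)
        : EnqueuedBlocks(NumBlocks, false), Queue(Cmp) {}

    void enqueue(int Block) {
      if (!EnqueuedBlocks[Block]) {
        EnqueuedBlocks[Block] = true;
        Queue.push(Block);
      }
    }

    int dequeue() { // returns -1 when empty, mirroring the nullptr return above
      if (Queue.empty())
        return -1;
      int B = Queue.top();
      Queue.pop();
      EnqueuedBlocks[B] = false;
      return B;
    }
  };

  int main() {
    // Pretend lower IDs come earlier in reverse post order, so a forward
    // analysis wants the smallest ID first.
    Worklist WL(4, [](int L, int R) { return L > R; });
    WL.enqueue(2);
    WL.enqueue(0);
    WL.enqueue(2); // ignored: already pending
    assert(WL.dequeue() == 0);
    assert(WL.dequeue() == 2);
    assert(WL.dequeue() == -1);
  }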
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
index 6730057cf0ad..c4b042a51bb5 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
@@ -561,7 +561,7 @@ public:
void setCallee(const CallEnter &CE, const SourceManager &SM);
bool hasCallStackMessage() { return !CallStackMessage.empty(); }
- void setCallStackMessage(StringRef st) { CallStackMessage = st; }
+ void setCallStackMessage(StringRef st) { CallStackMessage = std::string(st); }
PathDiagnosticLocation getLocation() const override { return callEnter; }
@@ -806,7 +806,7 @@ public:
meta_iterator meta_begin() const { return OtherDesc.begin(); }
meta_iterator meta_end() const { return OtherDesc.end(); }
- void addMeta(StringRef s) { OtherDesc.push_back(s); }
+ void addMeta(StringRef s) { OtherDesc.push_back(std::string(s)); }
const FilesToLineNumsMap &getExecutedLines() const {
return *ExecutedLines;
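These ``std::string(st)`` spellings construct the string explicitly, presumably in preparation for ``StringRef``'s conversion to ``std::string`` no longer being implicit. A minimal illustration of the pattern, assuming LLVM's ``StringRef`` header is available:

.. code-block:: c++

  #include "llvm/ADT/StringRef.h"
  #include <string>

  void store(std::string &Out, llvm::StringRef In) {
    // Direct-initialize the std::string instead of relying on an implicit
    // StringRef -> std::string conversion.
    Out = std::string(In);
  }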
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
index 7d387587dc29..b98a07436e94 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -35,35 +35,95 @@
//
// - IsFP is true for vectors of floating-point elements.
//
+// - IsBF is true for vectors of brain float elements.
//===----------------------------------------------------------------------===//
#ifndef SVE_VECTOR_TYPE
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
SVE_TYPE(Name, Id, SingletonId)
#endif
#ifndef SVE_PREDICATE_TYPE
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)\
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
SVE_TYPE(Name, Id, SingletonId)
#endif
//===- Vector point types -----------------------------------------------===//
-SVE_VECTOR_TYPE("__SVInt8_t", SveInt8, SveInt8Ty, SveElSInt8, 8, true, false)
-SVE_VECTOR_TYPE("__SVInt16_t", SveInt16, SveInt16Ty, SveElSInt16, 16, true, false)
-SVE_VECTOR_TYPE("__SVInt32_t", SveInt32, SveInt32Ty, SveElSInt32, 32, true, false)
-SVE_VECTOR_TYPE("__SVInt64_t", SveInt64, SveInt64Ty, SveElSInt64, 64, true, false)
-SVE_VECTOR_TYPE("__SVUint8_t", SveUint8, SveUint8Ty, SveElUInt8, 8, false, false)
-SVE_VECTOR_TYPE("__SVUint16_t", SveUint16, SveUint16Ty, SveElUInt16, 16, false, false)
-SVE_VECTOR_TYPE("__SVUint32_t", SveUint32, SveUint32Ty, SveElUInt32, 32, false, false)
-SVE_VECTOR_TYPE("__SVUint64_t", SveUint64, SveUint64Ty, SveElUInt64, 64, false, false)
+SVE_VECTOR_TYPE("__SVInt8_t", "__SVInt8_t", SveInt8, SveInt8Ty, 16, 8, true, false, false)
+SVE_VECTOR_TYPE("__SVInt16_t", "__SVInt16_t", SveInt16, SveInt16Ty, 8, 16, true, false, false)
+SVE_VECTOR_TYPE("__SVInt32_t", "__SVInt32_t", SveInt32, SveInt32Ty, 4, 32, true, false, false)
+SVE_VECTOR_TYPE("__SVInt64_t", "__SVInt64_t", SveInt64, SveInt64Ty, 2, 64, true, false, false)
-SVE_VECTOR_TYPE("__SVFloat16_t", SveFloat16, SveFloat16Ty, SveElHalf, 16, true, true)
-SVE_VECTOR_TYPE("__SVFloat32_t", SveFloat32, SveFloat32Ty, SveElFloat, 32, true, true)
-SVE_VECTOR_TYPE("__SVFloat64_t", SveFloat64, SveFloat64Ty, SveElDouble, 64, true, true)
+SVE_VECTOR_TYPE("__SVUint8_t", "__SVUint8_t", SveUint8, SveUint8Ty, 16, 8, false, false, false)
+SVE_VECTOR_TYPE("__SVUint16_t", "__SVUint16_t", SveUint16, SveUint16Ty, 8, 16, false, false, false)
+SVE_VECTOR_TYPE("__SVUint32_t", "__SVUint32_t", SveUint32, SveUint32Ty, 4, 32, false, false, false)
+SVE_VECTOR_TYPE("__SVUint64_t", "__SVUint64_t", SveUint64, SveUint64Ty, 2, 64, false, false, false)
-SVE_PREDICATE_TYPE("__SVBool_t", SveBool, SveBoolTy, SveElBool)
+SVE_VECTOR_TYPE("__SVFloat16_t", "__SVFloat16_t", SveFloat16, SveFloat16Ty, 8, 16, true, true, false)
+SVE_VECTOR_TYPE("__SVFloat32_t", "__SVFloat32_t", SveFloat32, SveFloat32Ty, 4, 32, true, true, false)
+SVE_VECTOR_TYPE("__SVFloat64_t", "__SVFloat64_t", SveFloat64, SveFloat64Ty, 2, 64, true, true, false)
+
+SVE_VECTOR_TYPE("__SVBFloat16_t", "__SVBFloat16_t", SveBFloat16, SveBFloat16Ty, 8, 16, true, false, true)
+
+//
+// x2
+//
+SVE_VECTOR_TYPE("__clang_svint8x2_t", "svint8x2_t", SveInt8x2, SveInt8x2Ty, 32, 8, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint16x2_t", "svint16x2_t", SveInt16x2, SveInt16x2Ty, 16, 16, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint32x2_t", "svint32x2_t", SveInt32x2, SveInt32x2Ty, 8, 32, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint64x2_t", "svint64x2_t", SveInt64x2, SveInt64x2Ty, 4, 64, true, false, false)
+
+SVE_VECTOR_TYPE("__clang_svuint8x2_t", "svuint8x2_t", SveUint8x2, SveUint8x2Ty, 32, 8, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x2_t", "svuint16x2_t", SveUint16x2, SveUint16x2Ty, 16, 16, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x2_t", "svuint32x2_t", SveUint32x2, SveUint32x2Ty, 8, 32, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x2_t", "svuint64x2_t", SveUint64x2, SveUint64x2Ty, 4, 64, false, false, false)
+
+SVE_VECTOR_TYPE("__clang_svfloat16x2_t", "svfloat16x2_t", SveFloat16x2, SveFloat16x2Ty, 16, 16, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat32x2_t", "svfloat32x2_t", SveFloat32x2, SveFloat32x2Ty, 8, 32, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat64x2_t", "svfloat64x2_t", SveFloat64x2, SveFloat64x2Ty, 4, 64, true, true, false)
+
+SVE_VECTOR_TYPE("__clang_svbfloat16x2_t", "svbfloat16x2_t", SveBFloat16x2, SveBFloat16x2Ty, 16, 16, true, false, true)
+//
+// x3
+//
+SVE_VECTOR_TYPE("__clang_svint8x3_t", "svint8x3_t", SveInt8x3, SveInt8x3Ty, 48, 8, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint16x3_t", "svint16x3_t", SveInt16x3, SveInt16x3Ty, 24, 16, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint32x3_t", "svint32x3_t", SveInt32x3, SveInt32x3Ty, 12, 32, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint64x3_t", "svint64x3_t", SveInt64x3, SveInt64x3Ty, 6, 64, true, false, false)
+
+SVE_VECTOR_TYPE("__clang_svuint8x3_t", "svuint8x3_t", SveUint8x3, SveUint8x3Ty, 48, 8, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x3_t", "svuint16x3_t", SveUint16x3, SveUint16x3Ty, 24, 16, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x3_t", "svuint32x3_t", SveUint32x3, SveUint32x3Ty, 12, 32, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x3_t", "svuint64x3_t", SveUint64x3, SveUint64x3Ty, 6, 64, false, false, false)
+
+SVE_VECTOR_TYPE("__clang_svfloat16x3_t", "svfloat16x3_t", SveFloat16x3, SveFloat16x3Ty, 24, 16, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat32x3_t", "svfloat32x3_t", SveFloat32x3, SveFloat32x3Ty, 12, 32, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat64x3_t", "svfloat64x3_t", SveFloat64x3, SveFloat64x3Ty, 6, 64, true, true, false)
+
+SVE_VECTOR_TYPE("__clang_svbfloat16x3_t", "svbfloat16x3_t", SveBFloat16x3, SveBFloat16x3Ty, 24, 16, true, false, true)
+//
+// x4
+//
+SVE_VECTOR_TYPE("__clang_svint8x4_t", "svint8x4_t", SveInt8x4, SveInt8x4Ty, 64, 8, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint16x4_t", "svint16x4_t", SveInt16x4, SveInt16x4Ty, 32, 16, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint32x4_t", "svint32x4_t", SveInt32x4, SveInt32x4Ty, 16, 32, true, false, false)
+SVE_VECTOR_TYPE("__clang_svint64x4_t", "svint64x4_t", SveInt64x4, SveInt64x4Ty, 8, 64, true, false, false)
+
+SVE_VECTOR_TYPE("__clang_svuint8x4_t", "svuint8x4_t", SveUint8x4, SveUint8x4Ty, 64, 8, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x4_t", "svuint16x4_t", SveUint16x4, SveUint16x4Ty, 32, 16, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x4_t", "svuint32x4_t", SveUint32x4, SveUint32x4Ty, 16, 32, false, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x4_t", "svuint64x4_t", SveUint64x4, SveUint64x4Ty, 8, 64, false, false, false)
+
+SVE_VECTOR_TYPE("__clang_svfloat16x4_t", "svfloat16x4_t", SveFloat16x4, SveFloat16x4Ty, 32, 16, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat32x4_t", "svfloat32x4_t", SveFloat32x4, SveFloat32x4Ty, 16, 32, true, true, false)
+SVE_VECTOR_TYPE("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, SveFloat64x4Ty, 8, 64, true, true, false)
+
+SVE_VECTOR_TYPE("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 32, 16, true, false, true)
+
+SVE_PREDICATE_TYPE("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16)
#undef SVE_VECTOR_TYPE
#undef SVE_PREDICATE_TYPE
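The widened ``SVE_VECTOR_TYPE``/``SVE_PREDICATE_TYPE`` signatures are consumed by X-macro expansion; a hypothetical consumer collecting the new fields could look like the following sketch (the ``SveTypeInfo`` struct and the choice of fields are illustrative only):

.. code-block:: c++

  struct SveTypeInfo {
    const char *Name;
    const char *MangledName;
    unsigned NumEls;
    unsigned ElBits;
    bool IsSigned, IsFP, IsBF;
  };

  // Predicate types are ignored in this sketch.
  #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)
  #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
                          IsSigned, IsFP, IsBF)                               \
    {Name, MangledName, NumEls, ElBits, IsSigned, IsFP, IsBF},

  static const SveTypeInfo SveVectorTypes[] = {
  #include "clang/Basic/AArch64SVEACLETypes.def"
  };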
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attr.td b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
index 763b5b993e9a..bc4a380545af 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
@@ -174,12 +174,31 @@ class IdentifierArgument<string name, bit opt = 0> : Argument<name, opt>;
class IntArgument<string name, bit opt = 0> : Argument<name, opt>;
class StringArgument<string name, bit opt = 0> : Argument<name, opt>;
class ExprArgument<string name, bit opt = 0> : Argument<name, opt>;
-class FunctionArgument<string name, bit opt = 0, bit fake = 0> : Argument<name,
- opt,
- fake>;
-class NamedArgument<string name, bit opt = 0, bit fake = 0> : Argument<name,
- opt,
- fake>;
+class DeclArgument<DeclNode kind, string name, bit opt = 0, bit fake = 0>
+ : Argument<name, opt, fake> {
+ DeclNode Kind = kind;
+}
+
+// An argument of an OMPDeclareVariantAttr that represents the `match`
+// clause of the declare variant by keeping the information (incl. nesting) in
+// an OMPTraitInfo object.
+//
+// With some exceptions, the `match(<context-selector>)` clause looks roughly
+// as follows:
+// context-selector := list<selector-set>
+// selector-set := <kind>={list<selector>}
+// selector := <kind>([score(<const-expr>):] list<trait>)
+// trait := <kind>
+//
+// The structure of an OMPTraitInfo object is a tree as defined below:
+//
+// OMPTraitInfo := {list<OMPTraitSet>}
+// OMPTraitSet := {Kind, list<OMPTraitSelector>}
+// OMPTraitSelector := {Kind, Expr, list<OMPTraitProperty>}
+// OMPTraitProperty := {Kind}
+//
+class OMPTraitInfoArgument<string name> : Argument<name, 0>;
+
class TypeArgument<string name, bit opt = 0> : Argument<name, opt>;
class UnsignedArgument<string name, bit opt = 0> : Argument<name, opt>;
class VariadicUnsignedArgument<string name> : Argument<name, 1>;
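In source, the grammar sketched in the ``OMPTraitInfoArgument`` comment above corresponds to a ``match`` clause such as the following hypothetical example (compiled with ``-fopenmp``); each selector set, selector, and trait maps to one ``OMPTraitSet``, ``OMPTraitSelector``, and ``OMPTraitProperty`` respectively:

.. code-block:: c++

  int fast(int x); // variant implementation

  // One OMPTraitSet for `implementation` (selector `vendor`, trait `llvm`)
  // and one for `device` (selector `kind`, trait `cpu`).
  #pragma omp declare variant(fast) \
      match(implementation = {vendor(llvm)}, device = {kind(cpu)})
  int base(int x) { return x; }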
@@ -238,7 +257,6 @@ class VariadicEnumArgument<string name, string type, list<string> values,
class Spelling<string name, string variety> {
string Name = name;
string Variety = variety;
- bit KnownToGCC;
}
class GNU<string name> : Spelling<name, "GNU">;
@@ -258,11 +276,11 @@ class Pragma<string namespace, string name> : Spelling<name, "Pragma"> {
string Namespace = namespace;
}
-// The GCC spelling implies GNU<name> and CXX11<"gnu", name> and also sets
-// KnownToGCC to 1. This spelling should be used for any GCC-compatible
+// The GCC spelling implies GNU<name>, CXX11<"gnu", name>, and optionally,
+// C2x<"gnu", name>. This spelling should be used for any GCC-compatible
// attributes.
-class GCC<string name> : Spelling<name, "GCC"> {
- let KnownToGCC = 1;
+class GCC<string name, bit allowInC = 1> : Spelling<name, "GCC"> {
+ bit AllowInC = allowInC;
}
// The Clang spelling implies GNU<name>, CXX11<"clang", name>, and optionally,
@@ -291,6 +309,7 @@ class SubjectList<list<AttrSubject> subjects, SubjectDiag diag = WarnDiag,
}
class LangOpt<string name, code customCode = [{}]> {
+ // The language option to test; ignored when custom code is supplied.
string Name = name;
// A custom predicate, written as an expression evaluated in a context with
@@ -300,17 +319,16 @@ class LangOpt<string name, code customCode = [{}]> {
def MicrosoftExt : LangOpt<"MicrosoftExt">;
def Borland : LangOpt<"Borland">;
def CUDA : LangOpt<"CUDA">;
-def HIP : LangOpt<"HIP">;
def SYCL : LangOpt<"SYCLIsDevice">;
-def COnly : LangOpt<"COnly", "!LangOpts.CPlusPlus">;
+def COnly : LangOpt<"", "!LangOpts.CPlusPlus">;
def CPlusPlus : LangOpt<"CPlusPlus">;
def OpenCL : LangOpt<"OpenCL">;
def RenderScript : LangOpt<"RenderScript">;
def ObjC : LangOpt<"ObjC">;
def BlocksSupported : LangOpt<"Blocks">;
def ObjCAutoRefCount : LangOpt<"ObjCAutoRefCount">;
-def ObjCNonFragileRuntime : LangOpt<"ObjCNonFragileRuntime",
- "LangOpts.ObjCRuntime.allowsClassStubs()">;
+def ObjCNonFragileRuntime
+ : LangOpt<"", "LangOpts.ObjCRuntime.allowsClassStubs()">;
// Language option for CMSE extensions
def Cmse : LangOpt<"Cmse">;
@@ -337,6 +355,8 @@ class TargetArch<list<string> arches> : TargetSpec {
let Arches = arches;
}
def TargetARM : TargetArch<["arm", "thumb", "armeb", "thumbeb"]>;
+def TargetAArch64 : TargetArch<["aarch64"]>;
+def TargetAnyArm : TargetArch<!listconcat(TargetARM.Arches, TargetAArch64.Arches)>;
def TargetAVR : TargetArch<["avr"]>;
def TargetBPF : TargetArch<["bpfel", "bpfeb"]>;
def TargetMips32 : TargetArch<["mips", "mipsel"]>;
@@ -420,6 +440,7 @@ def SubjectMatcherForEnumConstant : AttrSubjectMatcherRule<"enum_constant",
def SubjectMatcherForVar : AttrSubjectMatcherRule<"variable", [Var], [
AttrSubjectMatcherSubRule<"is_thread_local", [TLSVar]>,
AttrSubjectMatcherSubRule<"is_global", [GlobalVar]>,
+ AttrSubjectMatcherSubRule<"is_local", [LocalVar]>,
AttrSubjectMatcherSubRule<"is_parameter", [ParmVar]>,
// unless(is_parameter)
AttrSubjectMatcherSubRule<"is_parameter", [NonParmVar], 1>
@@ -489,6 +510,8 @@ class Attr {
bit ASTNode = 1;
// Set to true for attributes which have a handler in Sema.
bit SemaHandler = 1;
+ // Set to true if this attribute doesn't need custom handling in Sema.
+ bit SimpleHandler = 0;
// Set to true for attributes that are completely ignored.
bit Ignored = 0;
// Set to true if the attribute's parsing does not match its semantic
@@ -581,7 +604,7 @@ class IgnoredAttr : Attr {
//
def AbiTag : Attr {
- let Spellings = [GCC<"abi_tag">];
+ let Spellings = [GCC<"abi_tag", /*AllowInC*/0>];
let Args = [VariadicStringArgument<"Tags">];
let Subjects = SubjectList<[Struct, Var, Function, Namespace], ErrorDiag>;
let MeaningfulToClassTemplateDefinition = 1;
@@ -601,11 +624,11 @@ def Alias : Attr {
let Documentation = [Undocumented];
}
-def ArmMveAlias : InheritableAttr, TargetSpecificAttr<TargetARM> {
- let Spellings = [Clang<"__clang_arm_mve_alias">];
+def ArmBuiltinAlias : InheritableAttr, TargetSpecificAttr<TargetAnyArm> {
+ let Spellings = [Clang<"__clang_arm_builtin_alias">];
let Args = [IdentifierArgument<"BuiltinName">];
let Subjects = SubjectList<[Function], ErrorDiag>;
- let Documentation = [ArmMveAliasDocs];
+ let Documentation = [ArmBuiltinAliasDocs];
}
def Aligned : InheritableAttr {
@@ -655,8 +678,9 @@ def AlwaysInline : InheritableAttr {
def Artificial : InheritableAttr {
let Spellings = [GCC<"artificial">];
- let Subjects = SubjectList<[InlineFunction], WarnDiag>;
+ let Subjects = SubjectList<[InlineFunction]>;
let Documentation = [ArtificialDocs];
+ let SimpleHandler = 1;
}
def XRayInstrument : InheritableAttr {
@@ -668,6 +692,7 @@ def XRayInstrument : InheritableAttr {
Accessor<"neverXRayInstrument",
[Clang<"xray_never_instrument">]>];
let Documentation = [XRayDocs];
+ let SimpleHandler = 1;
}
def XRayLogArgs : InheritableAttr {
@@ -924,15 +949,29 @@ def OSConsumesThis : InheritableAttr {
let Spellings = [Clang<"os_consumes_this">];
let Subjects = SubjectList<[NonStaticCXXMethod]>;
let Documentation = [RetainBehaviorDocs];
+ let SimpleHandler = 1;
}
def Cleanup : InheritableAttr {
let Spellings = [GCC<"cleanup">];
- let Args = [FunctionArgument<"FunctionDecl">];
+ let Args = [DeclArgument<Function, "FunctionDecl">];
let Subjects = SubjectList<[LocalVar]>;
let Documentation = [Undocumented];
}
+def CmseNSEntry : InheritableAttr, TargetSpecificAttr<TargetARM> {
+ let Spellings = [GNU<"cmse_nonsecure_entry">];
+ let Subjects = SubjectList<[Function]>;
+ let LangOpts = [Cmse];
+ let Documentation = [ArmCmseNSEntryDocs];
+}
+
+def CmseNSCall : TypeAttr, TargetSpecificAttr<TargetARM> {
+ let Spellings = [GNU<"cmse_nonsecure_call">];
+ let LangOpts = [Cmse];
+ let Documentation = [ArmCmseNSCallDocs];
+}
+
def Cold : InheritableAttr {
let Spellings = [GCC<"cold">];
let Subjects = SubjectList<[Function]>;
@@ -948,6 +987,7 @@ def Common : InheritableAttr {
def Const : InheritableAttr {
let Spellings = [GCC<"const">, GCC<"__const">];
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def ConstInit : InheritableAttr {
@@ -959,6 +999,7 @@ def ConstInit : InheritableAttr {
let Accessors = [Accessor<"isConstinit", [Keyword<"constinit">]>];
let Documentation = [ConstInitDocs];
let LangOpts = [CPlusPlus];
+ let SimpleHandler = 1;
}
def Constructor : InheritableAttr {
@@ -1008,28 +1049,25 @@ def CUDADevice : InheritableAttr {
let Documentation = [Undocumented];
}
-def HIPPinnedShadow : InheritableAttr {
- let Spellings = [GNU<"hip_pinned_shadow">, Declspec<"__hip_pinned_shadow__">];
- let Subjects = SubjectList<[Var]>;
- let LangOpts = [HIP];
- let Documentation = [HIPPinnedShadowDocs];
-}
-
def CUDADeviceBuiltin : IgnoredAttr {
let Spellings = [GNU<"device_builtin">, Declspec<"__device_builtin__">];
let LangOpts = [CUDA];
}
-def CUDADeviceBuiltinSurfaceType : IgnoredAttr {
+def CUDADeviceBuiltinSurfaceType : InheritableAttr {
let Spellings = [GNU<"device_builtin_surface_type">,
Declspec<"__device_builtin_surface_type__">];
let LangOpts = [CUDA];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let Documentation = [CUDADeviceBuiltinSurfaceTypeDocs];
}
-def CUDADeviceBuiltinTextureType : IgnoredAttr {
+def CUDADeviceBuiltinTextureType : InheritableAttr {
let Spellings = [GNU<"device_builtin_texture_type">,
Declspec<"__device_builtin_texture_type__">];
let LangOpts = [CUDA];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let Documentation = [CUDADeviceBuiltinTextureTypeDocs];
}
def CUDAGlobal : InheritableAttr {
@@ -1089,6 +1127,7 @@ def CXX11NoReturn : InheritableAttr {
let Spellings = [CXX11<"", "noreturn", 200809>];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [CXX11NoReturnDocs];
+ let SimpleHandler = 1;
}
// Similar to CUDA, OpenCL attributes do not receive a [[]] spelling because
@@ -1097,6 +1136,7 @@ def OpenCLKernel : InheritableAttr {
let Spellings = [Keyword<"__kernel">, Keyword<"kernel">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def OpenCLUnrollHint : InheritableAttr {
@@ -1166,6 +1206,7 @@ def RenderScriptKernel : Attr {
let Subjects = SubjectList<[Function]>;
let Documentation = [RenderScriptKernelAttributeDocs];
let LangOpts = [RenderScript];
+ let SimpleHandler = 1;
}
def Deprecated : InheritableAttr {
@@ -1190,6 +1231,7 @@ def EmptyBases : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
let Spellings = [Declspec<"empty_bases">];
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [EmptyBasesDocs];
+ let SimpleHandler = 1;
}
def AllocSize : InheritableAttr {
@@ -1232,6 +1274,11 @@ def FallThrough : StmtAttr {
let Documentation = [FallthroughDocs];
}
+def NoMerge : StmtAttr {
+ let Spellings = [Clang<"nomerge">];
+ let Documentation = [NoMergeDocs];
+}
+
def FastCall : DeclOrTypeAttr {
let Spellings = [GCC<"fastcall">, Keyword<"__fastcall">,
Keyword<"_fastcall">];
@@ -1261,6 +1308,7 @@ def FlagEnum : InheritableAttr {
let Spellings = [Clang<"flag_enum">];
let Subjects = SubjectList<[Enum]>;
let Documentation = [FlagEnumDocs];
+ let SimpleHandler = 1;
}
def EnumExtensibility : InheritableAttr {
@@ -1275,6 +1323,7 @@ def Flatten : InheritableAttr {
let Spellings = [GCC<"flatten">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [FlattenDocs];
+ let SimpleHandler = 1;
}
def Format : InheritableAttr {
@@ -1320,6 +1369,7 @@ def IBAction : InheritableAttr {
// of the compiler. However, this node needs to exist in the AST because
// external tools rely on it.
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def IBOutlet : InheritableAttr {
@@ -1360,6 +1410,7 @@ def LifetimeBound : DeclOrTypeAttr {
let Subjects = SubjectList<[ParmVar, ImplicitObjectParameter], ErrorDiag>;
let Documentation = [LifetimeBoundDocs];
let LangOpts = [CPlusPlus];
+ let SimpleHandler = 1;
}
def TrivialABI : InheritableAttr {
@@ -1369,6 +1420,7 @@ def TrivialABI : InheritableAttr {
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [TrivialABIDocs];
let LangOpts = [CPlusPlus];
+ let SimpleHandler = 1;
}
def MaxFieldAlignment : InheritableAttr {
@@ -1383,6 +1435,7 @@ def MayAlias : InheritableAttr {
// FIXME: this is a type attribute in GCC, but a declaration attribute here.
let Spellings = [GCC<"may_alias">];
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def MIGServerRoutine : InheritableAttr {
@@ -1479,16 +1532,23 @@ def NeonVectorType : TypeAttr {
let ASTNode = 0;
}
+def ArmMveStrictPolymorphism : TypeAttr, TargetSpecificAttr<TargetARM> {
+ let Spellings = [Clang<"__clang_arm_mve_strict_polymorphism">];
+ let Documentation = [ArmMveStrictPolymorphismDocs];
+}
+
def NoUniqueAddress : InheritableAttr, TargetSpecificAttr<TargetItaniumCXXABI> {
let Spellings = [CXX11<"", "no_unique_address", 201803>];
let Subjects = SubjectList<[NonBitField], ErrorDiag>;
let Documentation = [NoUniqueAddressDocs];
+ let SimpleHandler = 1;
}
def ReturnsTwice : InheritableAttr {
let Spellings = [GCC<"returns_twice">];
let Subjects = SubjectList<[Function]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def DisableTailCalls : InheritableAttr {
@@ -1501,12 +1561,14 @@ def NoAlias : InheritableAttr {
let Spellings = [Declspec<"noalias">];
let Subjects = SubjectList<[Function]>;
let Documentation = [NoAliasDocs];
+ let SimpleHandler = 1;
}
def NoCommon : InheritableAttr {
let Spellings = [GCC<"nocommon">];
let Subjects = SubjectList<[Var]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def NoDebug : InheritableAttr {
@@ -1519,30 +1581,35 @@ def NoDuplicate : InheritableAttr {
let Spellings = [Clang<"noduplicate">];
let Subjects = SubjectList<[Function]>;
let Documentation = [NoDuplicateDocs];
+ let SimpleHandler = 1;
}
def Convergent : InheritableAttr {
let Spellings = [Clang<"convergent">];
let Subjects = SubjectList<[Function]>;
let Documentation = [ConvergentDocs];
+ let SimpleHandler = 1;
}
def NoInline : InheritableAttr {
let Spellings = [GCC<"noinline">, Declspec<"noinline">];
let Subjects = SubjectList<[Function]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def NoMips16 : InheritableAttr, TargetSpecificAttr<TargetMips32> {
let Spellings = [GCC<"nomips16">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def NoMicroMips : InheritableAttr, TargetSpecificAttr<TargetMips32> {
let Spellings = [GCC<"nomicromips">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [MicroMipsDocs];
+ let SimpleHandler = 1;
}
def RISCVInterrupt : InheritableAttr, TargetSpecificAttr<TargetRISCV> {
@@ -1637,6 +1704,7 @@ def NoSplitStack : InheritableAttr {
let Spellings = [GCC<"no_split_stack">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [NoSplitStackDocs];
+ let SimpleHandler = 1;
}
def NonNull : InheritableParamAttr {
@@ -1734,6 +1802,7 @@ def NoInstrumentFunction : InheritableAttr {
let Spellings = [GCC<"no_instrument_function">];
let Subjects = SubjectList<[Function]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def NotTailCalled : InheritableAttr {
@@ -1746,6 +1815,7 @@ def NoStackProtector : InheritableAttr {
let Spellings = [Clang<"no_stack_protector">];
let Subjects = SubjectList<[Function]>;
let Documentation = [NoStackProtectorDocs];
+ let SimpleHandler = 1;
}
def NoThrow : InheritableAttr {
@@ -1808,6 +1878,7 @@ def NSConsumesSelf : InheritableAttr {
let Spellings = [Clang<"ns_consumes_self">];
let Subjects = SubjectList<[ObjCMethod]>;
let Documentation = [RetainBehaviorDocs];
+ let SimpleHandler = 1;
}
def NSConsumed : InheritableParamAttr {
@@ -1820,6 +1891,7 @@ def ObjCException : InheritableAttr {
let Spellings = [Clang<"objc_exception">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def ObjCMethodFamily : InheritableAttr {
@@ -1864,6 +1936,7 @@ def ObjCRootClass : InheritableAttr {
let Spellings = [Clang<"objc_root_class">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def ObjCNonLazyClass : Attr {
@@ -1871,12 +1944,14 @@ def ObjCNonLazyClass : Attr {
let Subjects = SubjectList<[ObjCInterface, ObjCImpl], ErrorDiag>;
let LangOpts = [ObjC];
let Documentation = [ObjCNonLazyClassDocs];
+ let SimpleHandler = 1;
}
def ObjCSubclassingRestricted : InheritableAttr {
let Spellings = [Clang<"objc_subclassing_restricted">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [ObjCSubclassingRestrictedDocs];
+ let SimpleHandler = 1;
}
def ObjCExplicitProtocolImpl : InheritableAttr {
@@ -1900,7 +1975,7 @@ def ObjCDirect : Attr {
def ObjCDirectMembers : Attr {
let Spellings = [Clang<"objc_direct_members">];
- let Subjects = SubjectList<[ObjCImpl, ObjCCategory], ErrorDiag>;
+ let Subjects = SubjectList<[ObjCImpl, ObjCInterface, ObjCCategory], ErrorDiag>;
let LangOpts = [ObjC];
let Documentation = [ObjCDirectMembersDocs];
}
@@ -1916,6 +1991,7 @@ def ObjCRuntimeVisible : Attr {
let Spellings = [Clang<"objc_runtime_visible">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [ObjCRuntimeVisibleDocs];
+ let SimpleHandler = 1;
}
def ObjCClassStub : Attr {
@@ -1923,6 +1999,7 @@ def ObjCClassStub : Attr {
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [ObjCClassStubDocs];
let LangOpts = [ObjCNonFragileRuntime];
+ let SimpleHandler = 1;
}
def ObjCBoxable : Attr {
@@ -1941,6 +2018,7 @@ def Overloadable : Attr {
let Spellings = [Clang<"overloadable">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [OverloadableDocs];
+ let SimpleHandler = 1;
}
def Override : InheritableAttr {
@@ -1998,6 +2076,7 @@ def AArch64VectorPcs: DeclOrTypeAttr {
def Pure : InheritableAttr {
let Spellings = [GCC<"pure">];
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def Regparm : TypeAttr {
@@ -2033,7 +2112,7 @@ def WorkGroupSizeHint : InheritableAttr {
}
def InitPriority : InheritableAttr {
- let Spellings = [GCC<"init_priority">];
+ let Spellings = [GCC<"init_priority", /*AllowInC*/0>];
let Args = [UnsignedArgument<"Priority">];
let Subjects = SubjectList<[Var], ErrorDiag>;
let Documentation = [Undocumented];
@@ -2306,7 +2385,7 @@ def DiagnoseIf : InheritableAttr {
["error", "warning"],
["DT_Error", "DT_Warning"]>,
BoolArgument<"ArgDependent", 0, /*fake*/ 1>,
- NamedArgument<"Parent", 0, /*fake*/ 1>];
+ DeclArgument<Named, "Parent", 0, /*fake*/ 1>];
let InheritEvenIfAlreadyPresent = 1;
let LateParsed = 1;
let AdditionalMembers = [{
@@ -2321,6 +2400,7 @@ def ArcWeakrefUnavailable : InheritableAttr {
let Spellings = [Clang<"objc_arc_weak_reference_unavailable">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def ObjCGC : TypeAttr {
@@ -2339,6 +2419,7 @@ def ObjCRequiresPropertyDefs : InheritableAttr {
let Spellings = [Clang<"objc_requires_property_definitions">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def Unused : InheritableAttr {
@@ -2353,11 +2434,13 @@ def Used : InheritableAttr {
let Spellings = [GCC<"used">];
let Subjects = SubjectList<[NonLocalVar, Function, ObjCMethod]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def Uuid : InheritableAttr {
let Spellings = [Declspec<"uuid">, Microsoft<"uuid">];
- let Args = [StringArgument<"Guid">];
+ let Args = [StringArgument<"Guid">,
+ DeclArgument<MSGuid, "GuidDecl", 0, /*fake=*/1>];
let Subjects = SubjectList<[Record, Enum]>;
// FIXME: Allow expressing logical AND for LangOpts. Our condition should be:
// CPlusPlus && (MicrosoftExt || Borland)
@@ -2381,6 +2464,15 @@ def VecTypeHint : InheritableAttr {
let Documentation = [Undocumented];
}
+def MatrixType : TypeAttr {
+ let Spellings = [Clang<"matrix_type">];
+ let Subjects = SubjectList<[TypedefName], ErrorDiag>;
+ let Args = [ExprArgument<"NumRows">, ExprArgument<"NumColumns">];
+ let Documentation = [Undocumented];
+ let ASTNode = 0;
+ let PragmaAttributeSupport = 0;
+}
+
def Visibility : InheritableAttr {
let Clone = 0;
let Spellings = [GCC<"visibility">];
@@ -2414,6 +2506,7 @@ def WarnUnused : InheritableAttr {
let Spellings = [GCC<"warn_unused">];
let Subjects = SubjectList<[Record]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def WarnUnusedResult : InheritableAttr {
@@ -2436,6 +2529,7 @@ def Weak : InheritableAttr {
let Spellings = [GCC<"weak">];
let Subjects = SubjectList<[Var, Function, CXXRecord]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def WeakImport : InheritableAttr {
@@ -2455,6 +2549,7 @@ def LTOVisibilityPublic : InheritableAttr {
let Spellings = [Clang<"lto_visibility_public">];
let Subjects = SubjectList<[Record]>;
let Documentation = [LTOVisibilityDocs];
+ let SimpleHandler = 1;
}
def AnyX86Interrupt : InheritableAttr, TargetSpecificAttr<TargetAnyX86> {
@@ -2471,6 +2566,7 @@ def AnyX86NoCallerSavedRegisters : InheritableAttr,
TargetSpecificAttr<TargetAnyX86> {
let Spellings = [GCC<"no_caller_saved_registers">];
let Documentation = [AnyX86NoCallerSavedRegistersDocs];
+ let SimpleHandler = 1;
}
def AnyX86NoCfCheck : DeclOrTypeAttr, TargetSpecificAttr<TargetAnyX86>{
@@ -2522,6 +2618,7 @@ def CFICanonicalJumpTable : InheritableAttr {
let Spellings = [Clang<"cfi_canonical_jump_table">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [CFICanonicalJumpTableDocs];
+ let SimpleHandler = 1;
}
// C/C++ Thread safety attributes (e.g. for deadlock, data race checking)
@@ -2534,6 +2631,7 @@ def GuardedVar : InheritableAttr {
let Spellings = [Clang<"guarded_var", 0>];
let Subjects = SubjectList<[Field, SharedVar]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def PtGuardedVar : InheritableAttr {
@@ -2553,6 +2651,7 @@ def ScopedLockable : InheritableAttr {
let Spellings = [Clang<"scoped_lockable", 0>];
let Subjects = SubjectList<[Record]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def Capability : InheritableAttr {
@@ -2562,10 +2661,6 @@ def Capability : InheritableAttr {
let Accessors = [Accessor<"isShared",
[Clang<"shared_capability", 0>]>];
let Documentation = [Undocumented];
- let AdditionalMembers = [{
- bool isMutex() const { return getName().equals_lower("mutex"); }
- bool isRole() const { return getName().equals_lower("role"); }
- }];
}
def AssertCapability : InheritableAttr {
@@ -2653,6 +2748,7 @@ def NoThreadSafetyAnalysis : InheritableAttr {
let Spellings = [Clang<"no_thread_safety_analysis">];
let Subjects = SubjectList<[Function]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def GuardedBy : InheritableAttr {
@@ -2789,6 +2885,7 @@ def ConsumableAutoCast : InheritableAttr {
let Spellings = [Clang<"consumable_auto_cast_state", 0>];
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def ConsumableSetOnRead : InheritableAttr {
@@ -2798,6 +2895,7 @@ def ConsumableSetOnRead : InheritableAttr {
let Spellings = [Clang<"consumable_set_state_on_read", 0>];
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def CallableWhen : InheritableAttr {
@@ -2904,6 +3002,7 @@ def MSNoVTable : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
let Spellings = [Declspec<"novtable">];
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [MSNoVTableDocs];
+ let SimpleHandler = 1;
}
def : IgnoredAttr {
@@ -2929,6 +3028,7 @@ def MSStruct : InheritableAttr {
let Spellings = [GCC<"ms_struct">];
let Subjects = SubjectList<[Record]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def DLLExport : InheritableAttr, TargetSpecificAttr<TargetWindows> {
@@ -2977,6 +3077,7 @@ def DLLImportStaticLocal : InheritableAttr, TargetSpecificAttr<TargetWindows> {
def SelectAny : InheritableAttr {
let Spellings = [Declspec<"selectany">, GCC<"selectany">];
let Documentation = [SelectAnyDocs];
+ let SimpleHandler = 1;
}
def Thread : Attr {
@@ -3108,57 +3209,14 @@ def LoopHint : Attr {
llvm_unreachable("Unhandled LoopHint option.");
}
- void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const {
- unsigned SpellingIndex = getAttributeSpellingListIndex();
- // For "#pragma unroll" and "#pragma nounroll" the string "unroll" or
- // "nounroll" is already emitted as the pragma name.
- if (SpellingIndex == Pragma_nounroll || SpellingIndex == Pragma_nounroll_and_jam)
- return;
- else if (SpellingIndex == Pragma_unroll || SpellingIndex == Pragma_unroll_and_jam) {
- OS << ' ' << getValueString(Policy);
- return;
- }
-
- assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
- OS << ' ' << getOptionName(option) << getValueString(Policy);
- }
+ void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const;
// Return a string containing the loop hint argument including the
// enclosing parentheses.
- std::string getValueString(const PrintingPolicy &Policy) const {
- std::string ValueName;
- llvm::raw_string_ostream OS(ValueName);
- OS << "(";
- if (state == Numeric)
- value->printPretty(OS, nullptr, Policy);
- else if (state == Enable)
- OS << "enable";
- else if (state == Full)
- OS << "full";
- else if (state == AssumeSafety)
- OS << "assume_safety";
- else
- OS << "disable";
- OS << ")";
- return OS.str();
- }
+ std::string getValueString(const PrintingPolicy &Policy) const;
// Return a string suitable for identifying this attribute in diagnostics.
- std::string getDiagnosticName(const PrintingPolicy &Policy) const {
- unsigned SpellingIndex = getAttributeSpellingListIndex();
- if (SpellingIndex == Pragma_nounroll)
- return "#pragma nounroll";
- else if (SpellingIndex == Pragma_unroll)
- return "#pragma unroll" + (option == UnrollCount ? getValueString(Policy) : "");
- else if (SpellingIndex == Pragma_nounroll_and_jam)
- return "#pragma nounroll_and_jam";
- else if (SpellingIndex == Pragma_unroll_and_jam)
- return "#pragma unroll_and_jam" +
- (option == UnrollAndJamCount ? getValueString(Policy) : "");
-
- assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
- return getOptionName(option) + getValueString(Policy);
- }
+ std::string getDiagnosticName(const PrintingPolicy &Policy) const;
}];
let Documentation = [LoopHintDocs, UnrollHintDocs];
@@ -3189,8 +3247,13 @@ def OMPCaptureKind : Attr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Args = [UnsignedArgument<"CaptureKind">];
+ let Args = [UnsignedArgument<"CaptureKindVal">];
let Documentation = [Undocumented];
+ let AdditionalMembers = [{
+ llvm::omp::Clause getCaptureKind() const {
+ return static_cast<llvm::omp::Clause>(getCaptureKindVal());
+ }
+ }];
}
def OMPReferencedVar : Attr {
@@ -3218,53 +3281,7 @@ def OMPDeclareSimdDecl : Attr {
];
let AdditionalMembers = [{
void printPrettyPragma(raw_ostream & OS, const PrintingPolicy &Policy)
- const {
- if (getBranchState() != BS_Undefined)
- OS << ' ' << ConvertBranchStateTyToStr(getBranchState());
- if (auto *E = getSimdlen()) {
- OS << " simdlen(";
- E->printPretty(OS, nullptr, Policy);
- OS << ")";
- }
- if (uniforms_size() > 0) {
- OS << " uniform";
- StringRef Sep = "(";
- for (auto *E : uniforms()) {
- OS << Sep;
- E->printPretty(OS, nullptr, Policy);
- Sep = ", ";
- }
- OS << ")";
- }
- alignments_iterator NI = alignments_begin();
- for (auto *E : aligneds()) {
- OS << " aligned(";
- E->printPretty(OS, nullptr, Policy);
- if (*NI) {
- OS << ": ";
- (*NI)->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
- ++NI;
- }
- steps_iterator I = steps_begin();
- modifiers_iterator MI = modifiers_begin();
- for (auto *E : linears()) {
- OS << " linear(";
- if (*MI != OMPC_LINEAR_unknown)
- OS << getOpenMPSimpleClauseTypeName(OMPC_linear, *MI) << "(";
- E->printPretty(OS, nullptr, Policy);
- if (*MI != OMPC_LINEAR_unknown)
- OS << ")";
- if (*I) {
- OS << ": ";
- (*I)->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
- ++I;
- ++MI;
- }
- }
+ const;
}];
}
@@ -3282,30 +3299,10 @@ def OMPDeclareTargetDecl : InheritableAttr {
[ "DT_Host", "DT_NoHost", "DT_Any" ]>
];
let AdditionalMembers = [{
- void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const {
- // Use fake syntax because it is for testing and debugging purpose only.
- if (getDevType() != DT_Any)
- OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")";
- if (getMapType() != MT_To)
- OS << ' ' << ConvertMapTypeTyToStr(getMapType());
- }
+ void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const;
static llvm::Optional<MapTypeTy>
- isDeclareTargetDeclaration(const ValueDecl *VD) {
- if (!VD->hasAttrs())
- return llvm::None;
- if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getMapType();
-
- return llvm::None;
- }
- static llvm::Optional<DevTypeTy> getDeviceType(const ValueDecl *VD) {
- if (!VD->hasAttrs())
- return llvm::None;
- if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getDevType();
-
- return llvm::None;
- }
+ isDeclareTargetDeclaration(const ValueDecl *VD);
+ static llvm::Optional<DevTypeTy> getDeviceType(const ValueDecl *VD);
}];
}
@@ -3316,14 +3313,16 @@ def OMPAllocateDecl : InheritableAttr {
let Args = [
EnumArgument<"AllocatorType", "AllocatorTypeTy",
[
- "omp_default_mem_alloc", "omp_large_cap_mem_alloc",
- "omp_const_mem_alloc", "omp_high_bw_mem_alloc",
- "omp_low_lat_mem_alloc", "omp_cgroup_mem_alloc",
- "omp_pteam_mem_alloc", "omp_thread_mem_alloc", ""
+ "omp_null_allocator", "omp_default_mem_alloc",
+ "omp_large_cap_mem_alloc", "omp_const_mem_alloc",
+ "omp_high_bw_mem_alloc", "omp_low_lat_mem_alloc",
+ "omp_cgroup_mem_alloc", "omp_pteam_mem_alloc",
+ "omp_thread_mem_alloc", ""
],
[
- "OMPDefaultMemAlloc", "OMPLargeCapMemAlloc",
- "OMPConstMemAlloc", "OMPHighBWMemAlloc", "OMPLowLatMemAlloc",
+ "OMPNullMemAlloc", "OMPDefaultMemAlloc",
+ "OMPLargeCapMemAlloc", "OMPConstMemAlloc",
+ "OMPHighBWMemAlloc", "OMPLowLatMemAlloc",
"OMPCGroupMemAlloc", "OMPPTeamMemAlloc", "OMPThreadMemAlloc",
"OMPUserDefinedMemAlloc"
]>,
@@ -3341,89 +3340,12 @@ def OMPDeclareVariant : InheritableAttr {
let Documentation = [OMPDeclareVariantDocs];
let Args = [
ExprArgument<"VariantFuncRef">,
- VariadicExprArgument<"Scores">,
- VariadicUnsignedArgument<"CtxSelectorSets">,
- VariadicUnsignedArgument<"CtxSelectors">,
- VariadicStringArgument<"ImplVendors">,
- VariadicStringArgument<"DeviceKinds">
+ OMPTraitInfoArgument<"TraitInfos">,
];
let AdditionalMembers = [{
- void printScore(raw_ostream & OS, const PrintingPolicy &Policy, unsigned I) const {
- if (const Expr *E = *std::next(scores_begin(), I)) {
- OS << "score(";
- E->printPretty(OS, nullptr, Policy);
- OS << "):";
- }
- }
+ OMPTraitInfo &getTraitInfo() { return *traitInfos; }
void printPrettyPragma(raw_ostream & OS, const PrintingPolicy &Policy)
- const {
- if (const Expr *E = getVariantFuncRef()) {
- OS << "(";
- E->printPretty(OS, nullptr, Policy);
- OS << ")";
- }
- // TODO: add printing of real context selectors.
- OS << " match(";
- int Used[OMP_CTX_SET_unknown] = {0};
- for (unsigned I = 0, E = ctxSelectorSets_size(); I < E; ++I) {
- auto CtxSet = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(ctxSelectorSets_begin(), I));
- if (Used[CtxSet])
- continue;
- if (I > 0)
- OS << ",";
- switch (CtxSet) {
- case OMP_CTX_SET_implementation:
- OS << "implementation={";
- break;
- case OMP_CTX_SET_device:
- OS << "device={";
- break;
- case OMP_CTX_SET_unknown:
- llvm_unreachable("Unknown context selector set.");
- }
- Used[CtxSet] = 1;
- for (unsigned K = I, EK = ctxSelectors_size(); K < EK; ++K) {
- auto CtxSetK = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(ctxSelectorSets_begin(), K));
- if (CtxSet != CtxSetK)
- continue;
- if (K != I)
- OS << ",";
- auto Ctx = static_cast<OpenMPContextSelectorKind>(
- *std::next(ctxSelectors_begin(), K));
- switch (Ctx) {
- case OMP_CTX_vendor:
- assert(CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- OS << "vendor(";
- printScore(OS, Policy, K);
- if (implVendors_size() > 0) {
- OS << *implVendors(). begin();
- for (StringRef VendorName : llvm::drop_begin(implVendors(), 1))
- OS << ", " << VendorName;
- }
- OS << ")";
- break;
- case OMP_CTX_kind:
- assert(CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- OS << "kind(";
- if (deviceKinds_size() > 0) {
- OS << *deviceKinds().begin();
- for (StringRef KindName : llvm::drop_begin(deviceKinds(), 1))
- OS << ", " << KindName;
- }
- OS << ")";
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector.");
- }
- }
- OS << "}";
- }
- OS << ")";
- }
+ const;
}];
}
@@ -3438,12 +3360,14 @@ def ExcludeFromExplicitInstantiation : InheritableAttr {
let Subjects = SubjectList<[Var, Function, CXXRecord]>;
let Documentation = [ExcludeFromExplicitInstantiationDocs];
let MeaningfulToClassTemplateDefinition = 1;
+ let SimpleHandler = 1;
}
def Reinitializes : InheritableAttr {
let Spellings = [Clang<"reinitializes", 0>];
let Subjects = SubjectList<[NonStaticNonConstCXXMethod], ErrorDiag>;
let Documentation = [ReinitializesDocs];
+ let SimpleHandler = 1;
}
def NoDestroy : InheritableAttr {
@@ -3473,9 +3397,16 @@ def NoSpeculativeLoadHardening : InheritableAttr {
def Uninitialized : InheritableAttr {
let Spellings = [Clang<"uninitialized", 0>];
let Subjects = SubjectList<[LocalVar]>;
+ let PragmaAttributeSupport = 1;
let Documentation = [UninitializedDocs];
}
+def LoaderUninitialized : Attr {
+ let Spellings = [Clang<"loader_uninitialized">];
+ let Subjects = SubjectList<[GlobalVar]>;
+ let Documentation = [LoaderUninitializedDocs];
+}
+
def ObjCExternallyRetained : InheritableAttr {
let LangOpts = [ObjCAutoRefCount];
let Spellings = [Clang<"objc_externally_retained">];
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index 7976d08a5258..3cba3a3d96f9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -140,7 +140,8 @@ def NoEscapeDocs : Documentation {
the compiler that the pointer cannot escape: that is, no reference to the object
the pointer points to that is derived from the parameter value will survive
after the function returns. Users are responsible for making sure parameters
-annotated with ``noescape`` do not actuallly escape.
+annotated with ``noescape`` do not actually escape. Calling ``free()`` on such
+a parameter does not constitute an escape.
For example:
@@ -260,6 +261,7 @@ The ``sycl_kernel`` attribute specifies that a function template will be used
to outline device code and to generate an OpenCL kernel.
Here is a code example of the SYCL program, which demonstrates the compiler's
outlining job:
+
.. code-block:: c++
int foo(int x) { return ++x; }
@@ -282,27 +284,29 @@ compilation of functions for the device part can be found in the SYCL 1.2.1
specification Section 6.4.
To show the compiler the entry point to the "device part" of the code, the SYCL
runtime can use the ``sycl_kernel`` attribute in the following way:
+
.. code-block:: c++
-namespace cl {
-namespace sycl {
-class handler {
- template <typename KernelName, typename KernelType/*, ...*/>
- __attribute__((sycl_kernel)) void sycl_kernel_function(KernelType KernelFuncObj) {
- // ...
- KernelFuncObj();
- }
- template <typename KernelName, typename KernelType, int Dims>
- void parallel_for(range<Dims> NumWorkItems, KernelType KernelFunc) {
-#ifdef __SYCL_DEVICE_ONLY__
- sycl_kernel_function<KernelName, KernelType, Dims>(KernelFunc);
-#else
- // Host implementation
-#endif
- }
-};
-} // namespace sycl
-} // namespace cl
+ namespace cl {
+ namespace sycl {
+ class handler {
+ template <typename KernelName, typename KernelType/*, ...*/>
+ __attribute__((sycl_kernel)) void sycl_kernel_function(KernelType KernelFuncObj) {
+ // ...
+ KernelFuncObj();
+ }
+
+ template <typename KernelName, typename KernelType, int Dims>
+ void parallel_for(range<Dims> NumWorkItems, KernelType KernelFunc) {
+ #ifdef __SYCL_DEVICE_ONLY__
+ sycl_kernel_function<KernelName, KernelType, Dims>(KernelFunc);
+ #else
+ // Host implementation
+ #endif
+ }
+ };
+ } // namespace sycl
+ } // namespace cl
The compiler will also generate an OpenCL kernel using the function marked with
the ``sycl_kernel`` attribute.
@@ -320,7 +324,7 @@ function marked with the ``sycl_kernel`` attribute:
compiler uses function object type fields to generate OpenCL kernel
parameters.
- The function must return void. The compiler reuses the body of marked functions to
- generate the OpenCL kernel body, and the OpenCL kernel must return `void`.
+ generate the OpenCL kernel body, and the OpenCL kernel must return ``void``.
The SYCL kernel in the previous code sample meets these expectations.
}];
@@ -346,6 +350,20 @@ that appears to be capable of returning to its caller.
}];
}
+def NoMergeDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+If a statement is marked ``nomerge`` and contains call expressions, those call
+expressions inside the statement will not be merged during optimization. This
+attribute can be used to prevent the optimizer from obscuring the source
+location of certain calls. For example, it will prevent tail merging of otherwise
+identical code sequences that raise an exception or terminate the program. Tail
+merging normally reduces the precision of source location information, making
+stack traces less useful for debugging. This attribute gives the user control
+over the tradeoff between code size and debug information precision.
+ }];
+}
+
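A short usage sketch (illustration only, not taken from the patch): placing the attribute on otherwise identical error-handling calls keeps each call site distinct in the generated code.

.. code-block:: c++

  [[noreturn]] void fatal(const char *Msg);

  void check(int A, int B) {
    if (A < 0)
      [[clang::nomerge]] fatal("A out of range"); // keeps its own call site
    if (B < 0)
      [[clang::nomerge]] fatal("B out of range"); // not merged with the above
  }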
def AssertCapabilityDocs : Documentation {
let Category = DocCatFunction;
let Heading = "assert_capability, assert_shared_capability";
@@ -481,7 +499,7 @@ parameter.
Note that this attribute merely informs the compiler that a function always
returns a sufficiently aligned pointer. It does not cause the compiler to
emit code to enforce that alignment. The behavior is undefined if the returned
-poitner is not sufficiently aligned.
+pointer is not sufficiently aligned.
}];
}
@@ -939,11 +957,11 @@ The behavior of a function with respect to reference counting for Foundation
convention (e.g. functions starting with "get" are assumed to return at
``+0``).
-It can be overriden using a family of the following attributes. In
+It can be overridden using a family of the following attributes. In
Objective-C, the annotation ``__attribute__((ns_returns_retained))`` applied to
a function communicates that the object is returned at ``+1``, and the caller
is responsible for freeing it.
-Similiarly, the annotation ``__attribute__((ns_returns_not_retained))``
+Similarly, the annotation ``__attribute__((ns_returns_not_retained))``
specifies that the object is returned at ``+0`` and the ownership remains with
the callee.
The annotation ``__attribute__((ns_consumes_self))`` specifies that
@@ -1159,7 +1177,7 @@ def ObjCRuntimeNameDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
By default, the Objective-C interface or protocol identifier is used
-in the metadata name for that object. The `objc_runtime_name`
+in the metadata name for that object. The ``objc_runtime_name``
attribute allows annotated interfaces or protocols to use the
specified string argument in the object's metadata name instead of the
default name.
@@ -1287,7 +1305,7 @@ correspond to different platforms. For most platforms, the availability
attribute with the platform corresponding to the target platform will be used;
any others will be ignored. However, the availability for ``watchOS`` and
``tvOS`` can be implicitly inferred from an ``iOS`` availability attribute.
-Any explicit availability attributes for those platforms are still prefered over
+Any explicit availability attributes for those platforms are still preferred over
the implicitly inferred availability attributes. If no availability attribute
specifies availability for the current target platform, the availability
attributes are ignored. Supported platforms are:
@@ -1395,7 +1413,7 @@ pragma rather than using the inferred ``iOS`` availability from the declaration:
void getsThePragmaTVOSAvailabilityAttribute(void) __attribute__((availability(iOS,introduced=11.0)));
#pragma clang attribute pop
-The compiler is also able to apply implicly inferred attributes from a pragma
+The compiler is also able to apply implicitly inferred attributes from a pragma
as well. For example, when targeting ``tvOS``, the function below will receive
a ``tvOS`` availability attribute that is implicitly inferred from the ``iOS``
availability attribute applied by the pragma:
@@ -1563,7 +1581,7 @@ expression are discarded under suspicious circumstances. A diagnostic is
generated when a function or its return type is marked with ``[[nodiscard]]``
(or ``__attribute__((warn_unused_result))``) and the function call appears as a
potentially-evaluated discarded-value expression that is not explicitly cast to
-`void`.
+``void``.
A string literal may optionally be provided to the attribute, which will be
reproduced in any resulting diagnostics. Redeclarations using different forms
@@ -1713,7 +1731,7 @@ def BPFPreserveAccessIndexDocs : Documentation {
Clang supports the ``__attribute__((preserve_access_index))``
attribute for the BPF target. This attribute may be attached to a
struct or union declaration, where if -g is specified, it enables
-preserving struct or union member access debuginfo indicies of this
+preserving struct or union member access debuginfo indices of this
struct or union, similar to clang ``__builtin_preserve_access_index()``.
}];
}
@@ -1729,7 +1747,7 @@ directly as an interrupt service routine.
By default, the compiler will produce a function prologue and epilogue suitable for
an interrupt service routine that handles an External Interrupt Controller (eic)
-generated interrupt. This behaviour can be explicitly requested with the "eic"
+generated interrupt. This behavior can be explicitly requested with the "eic"
argument.
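A brief sketch of the default and the explicit ``"eic"`` forms described above
(handler names are hypothetical; a MIPS target is assumed):

.. code-block:: c++

  // Prologue/epilogue for an EIC-generated interrupt, requested explicitly.
  void __attribute__((interrupt("eic"))) eic_handler(void) {}

  // Same effect: the eic behavior is the default when no argument is given.
  void __attribute__((interrupt)) default_handler(void) {}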
Otherwise, for use with vectored interrupt mode, the argument passed should be
@@ -1767,7 +1785,7 @@ Clang supports the GNU style ``__attribute__((micromips))`` and
may be attached to a function definition and instructs the backend to generate
or not to generate microMIPS code for that function.
-These attributes override the `-mmicromips` and `-mno-micromips` options
+These attributes override the ``-mmicromips`` and ``-mno-micromips`` options
on the command line.
}];
}
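A short sketch of the per-function override described above (function names
are hypothetical; a MIPS target is assumed):

.. code-block:: c++

  // Emit microMIPS code for f even under -mno-micromips.
  void __attribute__((micromips)) f(void) {}

  // Emit standard MIPS code for g even under -mmicromips.
  void __attribute__((nomicromips)) g(void) {}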
@@ -2764,11 +2782,13 @@ The ``trivial_abi`` attribute can be applied to a C++ class, struct, or union.
It instructs the compiler to pass and return the type using the C ABI for the
underlying type when the type would otherwise be considered non-trivial for the
purpose of calls.
-A class annotated with `trivial_abi` can have non-trivial destructors or copy/move constructors without automatically becoming non-trivial for the purposes of calls. For example:
+A class annotated with ``trivial_abi`` can have non-trivial destructors or
+copy/move constructors without automatically becoming non-trivial for the
+purposes of calls. For example:
.. code-block:: c++
- // A is trivial for the purposes of calls because `trivial_abi` makes the
+ // A is trivial for the purposes of calls because ``trivial_abi`` makes the
// user-provided special functions trivial.
struct __attribute__((trivial_abi)) A {
~A();
@@ -2790,6 +2810,7 @@ destroy the object before returning.
Attribute ``trivial_abi`` has no effect in the following cases:
- The class directly declares a virtual base or virtual methods.
+- Copy constructors and move constructors of the class are all deleted.
- The class has a base class that is non-trivial for the purposes of calls.
- The class has a non-static data member whose type is non-trivial for the purposes of calls, which includes:
@@ -3296,15 +3317,15 @@ def OMPDeclareSimdDocs : Documentation {
let Category = DocCatFunction;
let Heading = "#pragma omp declare simd";
let Content = [{
-The `declare simd` construct can be applied to a function to enable the creation
+The ``declare simd`` construct can be applied to a function to enable the creation
of one or more versions that can process multiple arguments using SIMD
-instructions from a single invocation in a SIMD loop. The `declare simd`
-directive is a declarative directive. There may be multiple `declare simd`
-directives for a function. The use of a `declare simd` construct on a function
+instructions from a single invocation in a SIMD loop. The ``declare simd``
+directive is a declarative directive. There may be multiple ``declare simd``
+directives for a function. The use of a ``declare simd`` construct on a function
enables the creation of SIMD versions of the associated function that can be
used to process multiple arguments from a single invocation from a SIMD loop
concurrently.
-The syntax of the `declare simd` construct is as follows:
+The syntax of the ``declare simd`` construct is as follows:
.. code-block:: none
@@ -3331,7 +3352,7 @@ def OMPDeclareTargetDocs : Documentation {
let Category = DocCatFunction;
let Heading = "#pragma omp declare target";
let Content = [{
-The `declare target` directive specifies that variables and functions are mapped
+The ``declare target`` directive specifies that variables and functions are mapped
to a device for OpenMP offload mechanism.
The syntax of the declare target directive is as follows:
@@ -3369,10 +3390,10 @@ def OMPDeclareVariantDocs : Documentation {
let Category = DocCatFunction;
let Heading = "#pragma omp declare variant";
let Content = [{
-The `declare variant` directive declares a specialized variant of a base
- function and specifies the context in which that specialized variant is used.
- The declare variant directive is a declarative directive.
-The syntax of the `declare variant` construct is as follows:
+The ``declare variant`` directive declares a specialized variant of a base
+function and specifies the context in which that specialized variant is used.
+The declare variant directive is a declarative directive.
+The syntax of the ``declare variant`` construct is as follows:
.. code-block:: none
@@ -3387,8 +3408,23 @@ where clause is one of the following:
match(context-selector-specification)
-and where `variant-func-id` is the name of a function variant that is either a
- base language identifier or, for C++, a template-id.
+and where ``variant-func-id`` is the name of a function variant that is either a
+base language identifier or, for C++, a template-id.
+
+Clang provides the following context selector extensions, used via
+``implementation={extension(EXTENSION)}``:
+
+ .. code-block:: none
+
+ match_all
+ match_any
+ match_none
+
+The match extensions change when the *entire* context selector is considered a
+match for an OpenMP context. The default is ``all``; with ``none``, no trait in
+the selector is allowed to be in the OpenMP context; with ``any``, a single
+trait in both the selector and the OpenMP context is sufficient. Only a single
+match extension trait is allowed per context selector.
}];
}
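As a hedged sketch of the extension selectors described above (function names
are hypothetical; ``-fopenmp`` is assumed), ``match_any`` lets a single
matching trait carry the whole selector:

.. code-block:: c++

  int fast(void);

  // With match_any, matching vendor(llvm) alone is enough for the whole
  // selector to match, even when the device kind does not.
  #pragma omp declare variant(fast) match(implementation={vendor(llvm), extension(match_any)}, device={kind(gpu)})
  int base(void);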
@@ -3473,7 +3509,7 @@ def NoThrowDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
Clang supports the GNU style ``__attribute__((nothrow))`` and Microsoft style
-``__declspec(nothrow)`` attribute as an equivalent of `noexcept` on function
+``__declspec(nothrow)`` attribute as an equivalent of ``noexcept`` on function
declarations. This attribute informs the compiler that the annotated function
does not throw an exception. This prevents exception-unwinding. This attribute
is particularly useful on functions in the C Standard Library that are
@@ -3676,7 +3712,7 @@ using the Swift calling convention for a function or function pointer.
The lowering for the Swift calling convention, as described by the Swift
ABI documentation, occurs in multiple phases. The first, "high-level"
phase breaks down the formal parameters and results into innately direct
-and indirect components, adds implicit paraameters for the generic
+and indirect components, adds implicit parameters for the generic
signature, and assigns the context and error ABI treatments to parameters
where applicable. The second phase breaks down the direct parameters
and results from the first phase and assigns them to registers or the
@@ -3718,7 +3754,7 @@ of the first phase, as follows:
``swiftcall`` does not support variadic arguments or unprototyped functions.
The parameter ABI treatment attributes are aspects of the function type.
-A function type which which applies an ABI treatment attribute to a
+A function type which applies an ABI treatment attribute to a
parameter is a different type from an otherwise-identical function type
that does not. A single parameter may not have multiple ABI treatment
attributes.
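A minimal sketch of a function type combining the calling convention with one
parameter ABI treatment (the function name is hypothetical, and the
``swift_context`` spelling is an assumption not shown in this hunk):

.. code-block:: c++

  // The attribute on the parameter is part of the function type; an
  // otherwise-identical declaration without it names a different type.
  void callee(int value, void *ctx __attribute__((swift_context)))
      __attribute__((swiftcall));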
@@ -3847,7 +3883,7 @@ with different ABI versions supported. For example, a newer version of a class
could have a different set of data members and thus have a different size. Using
the ``abi_tag`` attribute, it is possible to have different mangled names for
a global variable of the class type. Therefore, the old code could keep using
-the old manged name and the new code will use the new mangled name with tags.
+the old mangled name and the new code will use the new mangled name with tags.
}];
}
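A small sketch of the versioning use case described above (type and tag names
are hypothetical):

.. code-block:: c++

  // The tag participates in mangling, so globals of this type get symbol
  // names distinct from those built against the untagged version.
  struct __attribute__((abi_tag("v2"))) Widget {
    long extra_member;  // layout change motivating the new tag
  };

  Widget global_widget;  // mangled name carries the "v2" tag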
@@ -3873,10 +3909,10 @@ that have a hot path and a cold path. The hot path is usually a small piece
of code that doesn't use many registers. The cold path might need to call out to
another function and therefore only needs to preserve the caller-saved
registers, which haven't already been saved by the caller. The
-`preserve_most` calling convention is very similar to the ``cold`` calling
+``preserve_most`` calling convention is very similar to the ``cold`` calling
convention in terms of caller/callee-saved registers, but they are used for
different types of function calls. ``coldcc`` is for function calls that are
-rarely executed, whereas `preserve_most` function calls are intended to be
+rarely executed, whereas ``preserve_most`` function calls are intended to be
on the hot path and definitely executed a lot. Furthermore ``preserve_most``
doesn't prevent the inliner from inlining the function call.
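A brief sketch of the intended use described above (function names are
hypothetical; a target that supports the convention is assumed):

.. code-block:: c++

  // The callee preserves most registers, so this frequently executed call
  // clobbers very little in its caller.
  void update_counters(int delta) __attribute__((preserve_most));

  void hot_path(int delta) {
    update_counters(delta);  // surrounding values can stay in registers
  }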
@@ -3930,11 +3966,11 @@ Consider the function declaration for a hypothetical function ``f``:
void f(void) __attribute__((deprecated("message", "replacement")));
-When spelled as `__attribute__((deprecated))`, the deprecated attribute can have
+When spelled as ``__attribute__((deprecated))``, the deprecated attribute can have
two optional string arguments. The first one is the message to display when
emitting the warning; the second one enables the compiler to provide a Fix-It
to replace the deprecated name with a new name. Otherwise, when spelled as
-`[[gnu::deprecated]] or [[deprecated]]`, the attribute can have one optional
+``[[gnu::deprecated]]`` or ``[[deprecated]]``, the attribute can have one optional
string argument which is the message to display when emitting the warning.
}];
}
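A short sketch of both spellings described above (function names are
hypothetical):

.. code-block:: c++

  void new_api(void);

  // GNU spelling: message plus a Fix-It replacement name.
  void old_api(void) __attribute__((deprecated("use new_api instead", "new_api")));

  // Standard spelling: a single optional message.
  [[deprecated("use new_api instead")]] void older_api(void);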
@@ -4001,9 +4037,9 @@ takes precedence over the command line option ``-fpatchable-function-entry=N,M``
def TransparentUnionDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
-This attribute can be applied to a union to change the behaviour of calls to
+This attribute can be applied to a union to change the behavior of calls to
functions that have an argument with a transparent union type. The compiler
-behaviour is changed in the following manner:
+behavior is changed in the following manner:
- A value whose type is any member of the transparent union can be passed as an
argument without the need to cast that value.
@@ -4033,7 +4069,7 @@ initialized classes. A non-lazy class will be initialized eagerly when the
Objective-C runtime is loaded. This is required for certain system classes which
have instances allocated in non-standard ways, such as the classes for blocks
and constant strings. Adding this attribute is essentially equivalent to
-providing a trivial `+load` method but avoids the (fairly small) load-time
+providing a trivial ``+load`` method but avoids the (fairly small) load-time
overheads associated with defining and calling such a method.
}];
}
@@ -4073,8 +4109,8 @@ ways:
including calling the ``+initialize`` method if present.
- The implicit ``_cmd`` parameter containing the method's selector is still defined.
- In order to minimize code-size costs, the implementation will not emit a reference
- to the selector if the parameter is unused within the method.
+ In order to minimize code-size costs, the implementation will not emit a reference
+ to the selector if the parameter is unused within the method.
Symbols for direct method implementations are implicitly given hidden
visibility, meaning that they can only be called within the same linkage unit.
@@ -4115,7 +4151,7 @@ documentation for more information.
def ObjCDirectMembersDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
-The ``objc_direct_members`` attribute can be placed on an Objective-C
+The ``objc_direct_members`` attribute can be placed on an Objective-C
``@interface`` or ``@implementation`` to mark that methods declared
therein should be considered direct by default. See the documentation
for ``objc_direct`` for more information about direct methods.
@@ -4124,9 +4160,7 @@ When ``objc_direct_members`` is placed on an ``@interface`` block, every
method in the block is considered to be declared as direct. This includes any
implicit method declarations introduced by property declarations. If the method
redeclares a non-direct method, the declaration is ill-formed, exactly as if the
-method was annotated with the ``objc_direct`` attribute. ``objc_direct_members``
-cannot be placed on the primary interface of a class, only on category or class
-extension interfaces.
+method was annotated with the ``objc_direct`` attribute.
When ``objc_direct_members`` is placed on an ``@implementation`` block,
methods defined in the block are considered to be declared as direct unless
@@ -4329,7 +4363,7 @@ with this attribute. This is because previously constructed subobjects need to
be destroyed if an exception gets thrown before the initialization of the
complete object is complete. For instance:
-.. code-block::c++
+.. code-block:: c++
void f() {
try {
@@ -4340,7 +4374,7 @@ complete object is complete. For instance:
}
}
-Here, if the construction of `array[9]` fails with an exception, `array[0..8]`
+Here, if the construction of ``array[9]`` fails with an exception, ``array[0..8]``
will be destroyed, so the element's destructor needs to be accessible.
}];
}
@@ -4357,6 +4391,29 @@ it rather documents the programmer's intent.
}];
}
+def LoaderUninitializedDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``loader_uninitialized`` attribute can be placed on global variables to
+indicate that the variable does not need to be zero initialized by the loader.
+On most targets, zero-initialization does not incur any additional cost.
+For example, most general purpose operating systems deliberately ensure
+that all memory is properly initialized in order to avoid leaking privileged
+information from the kernel or other programs. However, some targets
+do not make this guarantee, and on these targets, avoiding an unnecessary
+zero-initialization can have a significant impact on load times and/or code
+size.
+
+A declaration with this attribute is a non-tentative definition just as if it
+provided an initializer. Variables with this attribute are considered to be
+uninitialized in the same sense as a local variable, and programs must
+write to them before reading from them. If the variable's type is a C++ class
+type with a non-trivial default constructor, or an array thereof, this attribute
+only suppresses the static zero-initialization of the variable, not the dynamic
+initialization provided by executing the default constructor.
+ }];
+}
+
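A minimal sketch of the added attribute in use (the variable name is
hypothetical; the GNU spelling shown here is assumed):

.. code-block:: c++

  // Not zero-filled by the loader; every element must be written before it
  // is read.
  __attribute__((loader_uninitialized)) int scratch_buffer[4096];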
def CallbackDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -4366,7 +4423,7 @@ arguments, are identified by their parameter name or position (starting with
1!) in the annotated function. The first position in the attribute identifies
the callback callee; the following positions describe its arguments.
The callback callee is required to be callable with the number, and order, of
-the specified arguments. The index `0`, or the identifier `this`, is used to
+the specified arguments. The index ``0``, or the identifier ``this``, is used to
represent an implicit "this" pointer in class methods. If there is no implicit
"this" pointer it shall not be referenced. The index '-1', or the name "__",
represents an unknown callback callee argument. This can be a value which is
@@ -4383,13 +4440,13 @@ position, it is undefined if that parameter is used for anything other than the
actual callback. Inspected, captured, or modified parameters shall not be
listed in the ``callback`` metadata.
-Example encodings for the callback performed by `pthread_create` are shown
+Example encodings for the callback performed by ``pthread_create`` are shown
below. The explicit attribute annotation indicates that the third parameter
-(`start_routine`) is called zero or more times by the `pthread_create` function,
-and that the fourth parameter (`arg`) is passed along. Note that the callback
-behavior of `pthread_create` is automatically recognized by Clang. In addition,
-the declarations of `__kmpc_fork_teams` and `__kmpc_fork_call`, generated for
-`#pragma omp target teams` and `#pragma omp parallel`, respectively, are also
+(``start_routine``) is called zero or more times by the ``pthread_create`` function,
+and that the fourth parameter (``arg``) is passed along. Note that the callback
+behavior of ``pthread_create`` is automatically recognized by Clang. In addition,
+the declarations of ``__kmpc_fork_teams`` and ``__kmpc_fork_call``, generated for
+``#pragma omp target teams`` and ``#pragma omp parallel``, respectively, are also
automatically recognized as broker functions. Further functions might be added
in the future.
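A hedged sketch mirroring the encoding described above, using a hypothetical
broker rather than the real ``pthread_create`` declaration:

.. code-block:: c++

  // Parameter 3 (start_routine) is the callback callee and parameter 4 (arg)
  // is forwarded to it; parameters may also be named instead of numbered.
  __attribute__((callback(3, 4)))
  int spawn_thread(void *thread, const void *attr,
                   void *(*start_routine)(void *), void *arg);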
@@ -4514,7 +4571,7 @@ When applied to the definition of a function, method, or block, every parameter
of the function with implicit strong retainable object pointer type is
considered externally-retained, and becomes ``const``. By explicitly annotating
a parameter with ``__strong``, you can opt back into the default
-non-externally-retained behaviour for that parameter. For instance,
+non-externally-retained behavior for that parameter. For instance,
``first_param`` is externally-retained below, but not ``second_param``:
.. code-block:: objc
@@ -4536,7 +4593,7 @@ def MIGConventionDocs : Documentation {
The Mach Interface Generator release-on-success convention dictates
functions that follow it to only release arguments passed to them when they
return "success" (a ``kern_return_t`` error code that indicates that
-no errors have occured). Otherwise the release is performed by the MIG client
+no errors have occurred). Otherwise the release is performed by the MIG client
that called the function. The annotation ``__attribute__((mig_server_routine))``
is applied in order to specify which functions are expected to follow the
convention. This allows the Static Analyzer to find bugs caused by violations of
@@ -4588,15 +4645,25 @@ only call one function.
}];
}
-def HIPPinnedShadowDocs : Documentation {
+def CUDADeviceBuiltinSurfaceTypeDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+The ``device_builtin_surface_type`` attribute can be applied to a class
+template when declaring the surface reference. A surface reference variable
+could be accessed on the host side and, on the device side, might be translated
+into an internal surface object, which is established through surface bind and
+unbind runtime APIs.
+ }];
+}
+
+def CUDADeviceBuiltinTextureTypeDocs : Documentation {
let Category = DocCatType;
let Content = [{
-The GNU style attribute __attribute__((hip_pinned_shadow)) or MSVC style attribute
-__declspec(hip_pinned_shadow) can be added to the definition of a global variable
-to indicate it is a HIP pinned shadow variable. A HIP pinned shadow variable can
-be accessed on both device side and host side. It has external linkage and is
-not initialized on device side. It has internal linkage and is initialized by
-the initializer on host side.
+The ``device_builtin_texture_type`` attribute can be applied to a class
+template when declaring the texture reference. A texture reference variable
+could be accessed on the host side and, on the device side, might be translated
+into an internal texture object, which is established through texture bind and
+unbind runtime APIs.
}];
}
@@ -4609,7 +4676,7 @@ def LifetimeOwnerDocs : Documentation {
The attribute ``[[gsl::Owner(T)]]`` applies to structs and classes that own an
object of type ``T``:
-.. code-block:: c++
+.. code::
class [[gsl::Owner(int)]] IntOwner {
private:
@@ -4635,7 +4702,7 @@ def LifetimePointerDocs : Documentation {
The attribute ``[[gsl::Pointer(T)]]`` applies to structs and classes that behave
like pointers to an object of type ``T``:
-.. code-block:: c++
+.. code::
class [[gsl::Pointer(int)]] IntPointer {
private:
@@ -4668,11 +4735,11 @@ When the Owner's lifetime ends, it will consider the Pointer to be dangling.
}];
}
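A condensed sketch of how the two annotations interact (class names are
hypothetical; the exact diagnostic emitted may vary):

.. code-block:: c++

  class [[gsl::Owner(int)]] IntBox { int value = 0; };

  class [[gsl::Pointer(int)]] IntView {
  public:
    IntView(const IntBox &box);  // views into an Owner
  };

  IntView make_view() {
    IntBox box;
    return IntView(box);  // box dies here, so the analysis can flag the
                          // returned Pointer as dangling
  }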
-def ArmMveAliasDocs : Documentation {
+def ArmBuiltinAliasDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
-This attribute is used in the implementation of the ACLE intrinsics
-for the Arm MVE instruction set. It allows the intrinsic functions to
+This attribute is used in the implementation of the ACLE intrinsics.
+It allows the intrinsic functions to
be declared using the names defined in ACLE, and still be recognized
as clang builtins equivalent to the underlying name. For example,
``arm_mve.h`` declares the function ``vaddq_u32`` with
@@ -4683,8 +4750,8 @@ recognized as that clang builtin, and in the latter case, the choice
of which builtin to identify the function as can be deferred until
after overload resolution.
-This attribute can only be used to set up the aliases for the MVE
-intrinsic functions; it is intended for use only inside ``arm_mve.h``,
+This attribute can only be used to set up the aliases for certain Arm
+intrinsic functions; it is intended for use only inside ``arm_*.h``
and is not a general mechanism for declaring arbitrary aliases for
clang builtin functions.
}];
@@ -4694,7 +4761,7 @@ def NoBuiltinDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
.. Note:: This attribute is not yet fully implemented, it is validated but has
-no effect on the generated code.
+ no effect on the generated code.
The ``__attribute__((no_builtin))`` is similar to the ``-fno-builtin`` flag
except it is specific to the body of a function. The attribute may also be
@@ -4765,7 +4832,7 @@ def UseHandleDocs : Documentation {
let Category = HandleDocs;
let Content = [{
A function taking a handle by value might close the handle. If a function
-parameter is annotated with `use_handle` it is assumed to not to change
+parameter is annotated with ``use_handle``, it is assumed not to change
the state of the handle. It is also assumed to require an open handle to work with.
.. code-block:: c++
@@ -4779,7 +4846,7 @@ the state of the handle. It is also assumed to require an open handle to work wi
def ReleaseHandleDocs : Documentation {
let Category = HandleDocs;
let Content = [{
-If a function parameter is annotated with `release_handle` it is assumed to
+If a function parameter is annotated with ``release_handle`` it is assumed to
close the handle. It is also assumed to require an open handle to work with.
.. code-block:: c++
@@ -4787,3 +4854,70 @@ close the handle. It is also assumed to require an open handle to work with.
zx_status_t zx_handle_close(zx_handle_t handle [[clang::release_handle]]);
}];
}
+
+def ArmMveStrictPolymorphismDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+This attribute is used in the implementation of the ACLE intrinsics for the Arm
+MVE instruction set. It is used to define the vector types used by the MVE
+intrinsics.
+
+Its effect is to modify the behavior of a vector type with respect to function
+overloading. If a candidate function for overload resolution has a parameter
+type with this attribute, then the selection of that candidate function will be
+disallowed if the actual argument can only be converted via a lax vector
+conversion. The aim is to prevent spurious ambiguity in ARM MVE polymorphic
+intrinsics.
+
+.. code-block:: c++
+
+ void overloaded(uint16x8_t vector, uint16_t scalar);
+ void overloaded(int32x4_t vector, int32_t scalar);
+ uint16x8_t myVector;
+ uint16_t myScalar;
+
+ // myScalar is promoted to int32_t as a side effect of the addition,
+ // so if lax vector conversions are considered for myVector, then
+ // the two overloads are equally good (one argument conversion
+ // each). But if the vector has the __clang_arm_mve_strict_polymorphism
+ // attribute, only the uint16x8_t,uint16_t overload will match.
+ overloaded(myVector, myScalar + 1);
+
+However, this attribute does not prohibit lax vector conversions in contexts
+other than overloading.
+
+.. code-block:: c++
+
+ uint16x8_t function();
+
+ // This is still permitted with lax vector conversion enabled, even
+ // if the vector types have __clang_arm_mve_strict_polymorphism
+ int32x4_t result = function();
+
+ }];
+}
+
+def ArmCmseNSCallDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+This attribute declares a non-secure function type. When compiling for secure
+state, a call to such a function would switch from secure to non-secure state.
+All non-secure function calls must happen only through a function pointer, and
+a non-secure function type should only be used as a base type of a pointer.
+See `ARMv8-M Security Extensions: Requirements on Development
+Tools - Engineering Specification Documentation
+<https://developer.arm.com/docs/ecm0359818/latest/>`_ for more information.
+ }];
+}
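A minimal sketch of the pointer-only usage described above (the
``cmse_nonsecure_call`` spelling and ``-mcmse`` are assumptions; the
identifiers are hypothetical):

.. code-block:: c++

  // Non-secure function type, used only through a pointer.
  typedef void __attribute__((cmse_nonsecure_call)) ns_callback_t(int);

  void notify_nonsecure(ns_callback_t *cb, int status) {
    cb(status);  // the call switches from secure to non-secure state
  }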
+
+def ArmCmseNSEntryDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This attribute declares a function that can be called from non-secure state, or
+from secure state. Entering from and returning to non-secure state would switch
+to and from secure state, respectively, and prevent flow of information
+to non-secure state, except via return values. See `ARMv8-M Security Extensions:
+Requirements on Development Tools - Engineering Specification Documentation
+<https://developer.arm.com/docs/ecm0359818/latest/>`_ for more information.
+ }];
+}
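A minimal sketch of an entry function as described above (the
``cmse_nonsecure_entry`` spelling and ``-mcmse`` are assumptions; the
identifier is hypothetical):

.. code-block:: c++

  // Callable from non-secure state; on return, information other than the
  // return value is prevented from flowing back to non-secure state.
  int __attribute__((cmse_nonsecure_entry)) secure_counter_read(void);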
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
index 545e7e9a2b47..f4a5db84aa9f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
@@ -134,6 +134,11 @@ public:
const IdentifierInfo *getScopeName() const { return ScopeName; }
SourceLocation getScopeLoc() const { return ScopeLoc; }
+ /// Gets the normalized full name, which consists of both scope and name and
+ /// with surrounding underscores removed as appropriate (e.g.
+ /// __gnu__::__attr__ will be normalized to gnu::attr).
+ std::string getNormalizedFullName() const;
+
bool isDeclspecAttribute() const { return SyntaxUsed == AS_Declspec; }
bool isMicrosoftAttribute() const { return SyntaxUsed == AS_Microsoft; }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
index 1a6c85ce2dd3..1416a64543a4 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
@@ -36,6 +36,7 @@
// a -> __builtin_va_list
// A -> "reference" to __builtin_va_list
// V -> Vector, followed by the number of elements and the base type.
+// q -> Scalable vector, followed by the number of elements and the base type.
// E -> ext_vector, followed by the number of elements and the base type.
// X -> _Complex, followed by the base type.
// Y -> ptrdiff_t
@@ -64,6 +65,7 @@
// & -> reference (optionally followed by an address space number)
// C -> const
// D -> volatile
+// R -> restrict
// The third value provided to the macro specifies information about attributes
// of the function. These must be kept in sync with the predicates in the
@@ -322,6 +324,9 @@ BUILTIN(__builtin_truncf, "ff", "Fnc")
BUILTIN(__builtin_truncl, "LdLd", "Fnc")
BUILTIN(__builtin_truncf16, "hh", "Fnc")
+// Access to floating point environment
+BUILTIN(__builtin_flt_rounds, "i", "n")
+
// C99 complex builtins
BUILTIN(__builtin_cabs, "dXd", "Fne")
BUILTIN(__builtin_cabsf, "fXf", "Fne")
@@ -471,7 +476,7 @@ BUILTIN(__builtin___NSStringMakeConstantString, "FC*cC*", "nc")
BUILTIN(__builtin_va_start, "vA.", "nt")
BUILTIN(__builtin_va_end, "vA", "n")
BUILTIN(__builtin_va_copy, "vAA", "n")
-BUILTIN(__builtin_stdarg_start, "vA.", "n")
+BUILTIN(__builtin_stdarg_start, "vA.", "nt")
BUILTIN(__builtin_assume_aligned, "v*vC*z.", "nc")
BUILTIN(__builtin_bcmp, "ivC*vC*z", "Fn")
BUILTIN(__builtin_bcopy, "vv*v*z", "n")
@@ -480,6 +485,7 @@ BUILTIN(__builtin_fprintf, "iP*cC*.", "Fp:1:")
BUILTIN(__builtin_memchr, "v*vC*iz", "nF")
BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF")
BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF")
+BUILTIN(__builtin_memcpy_inline, "vv*vC*Iz", "nt")
BUILTIN(__builtin_memmove, "v*v*vC*z", "nF")
BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF")
BUILTIN(__builtin_memset, "v*v*iz", "nF")
@@ -515,7 +521,6 @@ BUILTIN(__builtin_return_address, "v*IUi", "n")
BUILTIN(__builtin_extract_return_addr, "v*v*", "n")
BUILTIN(__builtin_frame_address, "v*IUi", "n")
BUILTIN(__builtin___clear_cache, "vc*c*", "n")
-BUILTIN(__builtin_flt_rounds, "i", "nc")
BUILTIN(__builtin_setjmp, "iv**", "j")
BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
@@ -562,6 +567,7 @@ BUILTIN(__builtin___vprintf_chk, "iicC*a", "FP:1:")
BUILTIN(__builtin_unpredictable, "LiLi" , "nc")
BUILTIN(__builtin_expect, "LiLiLi" , "nc")
+BUILTIN(__builtin_expect_with_probability, "LiLiLid", "nc")
BUILTIN(__builtin_prefetch, "vvC*.", "nc")
BUILTIN(__builtin_readcyclecounter, "ULLi", "n")
BUILTIN(__builtin_trap, "v", "nr")
@@ -573,6 +579,10 @@ BUILTIN(__builtin_alloca, "v*z" , "Fn")
BUILTIN(__builtin_alloca_with_align, "v*zIz", "Fn")
BUILTIN(__builtin_call_with_static_chain, "v.", "nt")
+BUILTIN(__builtin_matrix_transpose, "v.", "nFt")
+BUILTIN(__builtin_matrix_column_major_load, "v.", "nFt")
+BUILTIN(__builtin_matrix_column_major_store, "v.", "nFt")
+
// "Overloaded" Atomic operator builtins. These are overloaded to support data
// types of i8, i16, i32, i64, and i128. The front-end sees calls to the
// non-suffixed version of these (which has a bogus type) and transforms them to
@@ -722,7 +732,7 @@ ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
BUILTIN(__c11_atomic_thread_fence, "vi", "n")
BUILTIN(__c11_atomic_signal_fence, "vi", "n")
-BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
+BUILTIN(__c11_atomic_is_lock_free, "bz", "n")
// GNU atomic builtins.
ATOMIC_BUILTIN(__atomic_load, "v.", "t")
@@ -751,8 +761,8 @@ BUILTIN(__atomic_test_and_set, "bvD*i", "n")
BUILTIN(__atomic_clear, "vvD*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
-BUILTIN(__atomic_always_lock_free, "izvCD*", "n")
-BUILTIN(__atomic_is_lock_free, "izvCD*", "n")
+BUILTIN(__atomic_always_lock_free, "bzvCD*", "n")
+BUILTIN(__atomic_is_lock_free, "bzvCD*", "n")
// OpenCL 2.0 atomic builtins.
ATOMIC_BUILTIN(__opencl_atomic_init, "v.", "t")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 9b3a0f96798f..042a86368559 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -33,6 +33,10 @@ BUILTIN(__builtin_amdgcn_workitem_id_x, "Ui", "nc")
BUILTIN(__builtin_amdgcn_workitem_id_y, "Ui", "nc")
BUILTIN(__builtin_amdgcn_workitem_id_z, "Ui", "nc")
+BUILTIN(__builtin_amdgcn_workgroup_size_x, "Us", "nc")
+BUILTIN(__builtin_amdgcn_workgroup_size_y, "Us", "nc")
+BUILTIN(__builtin_amdgcn_workgroup_size_z, "Us", "nc")
+
BUILTIN(__builtin_amdgcn_mbcnt_hi, "UiUiUi", "nc")
BUILTIN(__builtin_amdgcn_mbcnt_lo, "UiUiUi", "nc")
@@ -40,6 +44,7 @@ BUILTIN(__builtin_amdgcn_mbcnt_lo, "UiUiUi", "nc")
// Instruction builtins.
//===----------------------------------------------------------------------===//
BUILTIN(__builtin_amdgcn_s_getreg, "UiIi", "n")
+BUILTIN(__builtin_amdgcn_s_setreg, "vIiUi", "n")
BUILTIN(__builtin_amdgcn_s_getpc, "LUi", "n")
BUILTIN(__builtin_amdgcn_s_waitcnt, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_sendmsg, "vIiUi", "n")
@@ -53,6 +58,13 @@ BUILTIN(__builtin_amdgcn_ds_gws_barrier, "vUiUi", "n")
BUILTIN(__builtin_amdgcn_ds_gws_sema_v, "vUi", "n")
BUILTIN(__builtin_amdgcn_ds_gws_sema_br, "vUiUi", "n")
BUILTIN(__builtin_amdgcn_ds_gws_sema_p, "vUi", "n")
+BUILTIN(__builtin_amdgcn_fence, "vUicC*", "n")
+
+BUILTIN(__builtin_amdgcn_atomic_inc32, "UZiUZiD*UZiUicC*", "n")
+BUILTIN(__builtin_amdgcn_atomic_inc64, "UWiUWiD*UWiUicC*", "n")
+
+BUILTIN(__builtin_amdgcn_atomic_dec32, "UZiUZiD*UZiUicC*", "n")
+BUILTIN(__builtin_amdgcn_atomic_dec64, "UWiUWiD*UWiUicC*", "n")
// FIXME: Need to disallow constant address space.
BUILTIN(__builtin_amdgcn_div_scale, "dddbb*", "n")
@@ -65,6 +77,8 @@ BUILTIN(__builtin_amdgcn_trig_preop, "ddi", "nc")
BUILTIN(__builtin_amdgcn_trig_preopf, "ffi", "nc")
BUILTIN(__builtin_amdgcn_rcp, "dd", "nc")
BUILTIN(__builtin_amdgcn_rcpf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_sqrt, "dd", "nc")
+BUILTIN(__builtin_amdgcn_sqrtf, "ff", "nc")
BUILTIN(__builtin_amdgcn_rsq, "dd", "nc")
BUILTIN(__builtin_amdgcn_rsqf, "ff", "nc")
BUILTIN(__builtin_amdgcn_rsq_clamp, "dd", "nc")
@@ -150,6 +164,7 @@ BUILTIN(__builtin_amdgcn_interp_mov, "fUiUiUiUi", "nc")
TARGET_BUILTIN(__builtin_amdgcn_div_fixuph, "hhhh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_rcph, "hh", "nc", "16-bit-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sqrth, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_rsqh, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_sinh, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_cosh, "hh", "nc", "16-bit-insts")
@@ -212,5 +227,30 @@ BUILTIN(__builtin_r600_read_tidig_z, "Ui", "nc")
BUILTIN(__builtin_r600_recipsqrt_ieee, "dd", "nc")
BUILTIN(__builtin_r600_recipsqrt_ieeef, "ff", "nc")
+//===----------------------------------------------------------------------===//
+// MFMA builtins.
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x1f32, "V32fffV32fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x1f32, "V16fffV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_4x4x1f32, "V4fffV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x2f32, "V16fffV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x4f32, "V4fffV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x4f16, "V32fV4hV4hV32fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x4f16, "V16fV4hV4hV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_4x4x4f16, "V4fV4hV4hV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x8f16, "V16fV4hV4hV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x16f16, "V4fV4hV4hV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_32x32x4i8, "V32iiiV32iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_16x16x4i8, "V16iiiV16iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_4x4x4i8, "V4iiiV4iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_32x32x8i8, "V16iiiV16iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_16x16x16i8, "V4iiiV4iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x2bf16, "V32fV2sV2sV32fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x2bf16, "V16fV2sV2sV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_4x4x2bf16, "V4fV2sV2sV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x4bf16, "V16fV2sV2sV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x8bf16, "V4fV2sV2sV4fIiIiIi", "nc", "mai-insts")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
index 848abb44ad36..be20c24aa28a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
@@ -202,6 +202,8 @@ BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
// aren't included from both here and BuiltinsAArch64.def.)
#include "clang/Basic/arm_mve_builtins.inc"
+#include "clang/Basic/arm_cde_builtins.inc"
+
// MSVC
LANGBUILTIN(__emit, "vIUiC", "", ALL_MS_LANGUAGES)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsBPF.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsBPF.def
index bd96b9ef531b..237e9dc8784b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsBPF.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsBPF.def
@@ -20,5 +20,8 @@
// Get record field information.
TARGET_BUILTIN(__builtin_preserve_field_info, "Ui.", "t", "")
+// Get BTF type id.
+TARGET_BUILTIN(__builtin_btf_type_id, "Ui.", "t", "")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
index 18029af56ff7..28aa222166f5 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -13,1805 +13,125 @@
// The format of this database matches clang/Basic/Builtins.def.
-// The builtins below are not autogenerated from iset.py.
-// Make sure you do not overwrite these.
-
-BUILTIN(__builtin_brev_ldd, "v*LLi*CLLi*iC", "")
-BUILTIN(__builtin_brev_ldw, "v*i*Ci*iC", "")
-BUILTIN(__builtin_brev_ldh, "v*s*Cs*iC", "")
-BUILTIN(__builtin_brev_lduh, "v*Us*CUs*iC", "")
-BUILTIN(__builtin_brev_ldb, "v*Sc*CSc*iC", "")
-BUILTIN(__builtin_brev_ldub, "v*Uc*CUc*iC", "")
-BUILTIN(__builtin_circ_ldd, "LLi*LLi*LLi*iIi", "")
-BUILTIN(__builtin_circ_ldw, "i*i*i*iIi", "")
-BUILTIN(__builtin_circ_ldh, "s*s*s*iIi", "")
-BUILTIN(__builtin_circ_lduh, "Us*Us*Us*iIi", "")
-BUILTIN(__builtin_circ_ldb, "c*c*c*iIi", "")
-BUILTIN(__builtin_circ_ldub, "Uc*Uc*Uc*iIi", "")
-BUILTIN(__builtin_brev_std, "LLi*CLLi*LLiiC", "")
-BUILTIN(__builtin_brev_stw, "i*Ci*iiC", "")
-BUILTIN(__builtin_brev_sth, "s*Cs*iiC", "")
-BUILTIN(__builtin_brev_sthhi, "s*Cs*iiC", "")
-BUILTIN(__builtin_brev_stb, "c*Cc*iiC", "")
-BUILTIN(__builtin_circ_std, "LLi*LLi*LLiiIi", "")
-BUILTIN(__builtin_circ_stw, "i*i*iiIi", "")
-BUILTIN(__builtin_circ_sth, "s*s*iiIi", "")
-BUILTIN(__builtin_circ_sthhi, "s*s*iiIi", "")
-BUILTIN(__builtin_circ_stb, "c*c*iiIi", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrub_pci, "iv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrb_pci, "iv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadruh_pci, "iv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrh_pci, "iv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadri_pci, "iv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrd_pci, "LLiv*IiivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrub_pcr, "iv*ivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrb_pcr, "iv*ivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadruh_pcr, "iv*ivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrh_pcr, "iv*ivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadri_pcr, "iv*ivC*", "")
-BUILTIN(__builtin_HEXAGON_L2_loadrd_pcr, "LLiv*ivC*", "")
-
-BUILTIN(__builtin_HEXAGON_S2_storerb_pci, "vv*IiiivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerh_pci, "vv*IiiivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerf_pci, "vv*IiiivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storeri_pci, "vv*IiiivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerd_pci, "vv*IiiLLivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerb_pcr, "vv*iivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerh_pcr, "vv*iivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerf_pcr, "vv*iivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storeri_pcr, "vv*iivC*", "")
-BUILTIN(__builtin_HEXAGON_S2_storerd_pcr, "vv*iLLivC*", "")
-
-BUILTIN(__builtin_HEXAGON_prefetch,"vv*","")
-BUILTIN(__builtin_HEXAGON_Y2_dccleana,"vv*","")
-BUILTIN(__builtin_HEXAGON_Y2_dccleaninva,"vv*","")
-BUILTIN(__builtin_HEXAGON_Y2_dcinva,"vv*","")
-BUILTIN(__builtin_HEXAGON_Y2_dczeroa,"vv*","")
-BUILTIN(__builtin_HEXAGON_Y4_l2fetch,"vv*Ui","")
-BUILTIN(__builtin_HEXAGON_Y5_l2fetch,"vv*LLUi","")
-
-BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstoreq,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorenq,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentq,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentnq,"vV16iv*V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstoreq_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorenq_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentq_128B,"vV32iv*V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentnq_128B,"vV32iv*V32i","")
-
-BUILTIN(__builtin_HEXAGON_V6_vgathermw,"vv*iiV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermw_128B,"vv*iiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermh,"vv*iiV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermh_128B,"vv*iiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhw,"vv*iiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhw_128B,"vv*iiV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermwq,"vv*V16iiiV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermwq_128B,"vv*V32iiiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhq,"vv*V16iiiV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhq_128B,"vv*V32iiiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhwq,"vv*V16iiiV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgathermhwq_128B,"vv*V32iiiV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermw,"viiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermw_128B,"viiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermh,"viiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermh_128B,"viiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermw_add,"viiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermw_add_128B,"viiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermh_add,"viiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermh_add_128B,"viiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermwq,"vV16iiiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermwq_128B,"vV32iiiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhq,"vV16iiiV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhq_128B,"vV32iiiV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhw,"viiV32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhw_128B,"viiV64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhwq,"vV16iiiV32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhwq_128B,"vV32iiiV64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add,"viiV32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add_128B,"viiV64iV32i","")
-
-// ---------------------------------------------------------------------
-// Auto-generated definitions.
-
-// V5 Scalar Instructions.
-
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_vsatwh,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax,"iiiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpysu_up,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M4_cmpyi_whc,"iLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax,"iiiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_shuffoh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_sfmax,"fff","")
-BUILTIN(__builtin_HEXAGON_A2_vabswsat,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_combineri,"LLiiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M4_vpmpyh_acc,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_notp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_C4_or_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_brevp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_pmpyw_acc,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_cl1,"ii","")
-BUILTIN(__builtin_HEXAGON_C4_cmplte,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vaddws,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_maxup,"ULLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_vcmphgti,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_S2_interleave,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_abssat,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtu,"iii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtp,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_cmphgtui,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgti,"iiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyi,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2uw_chop,"id","")
-BUILTIN(__builtin_HEXAGON_A4_cmpheq,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vrcnegh,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_extractup,"LLiLLiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S4_ntstbit_r,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_w2sf,"fi","")
-BUILTIN(__builtin_HEXAGON_C2_not,"ii","")
-BUILTIN(__builtin_HEXAGON_C2_tfrpr,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbgt,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_rcmpneqi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_subacc,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_orp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_up,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_vh,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_vw,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbgtu,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpbeq_any,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbgti,"iiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_addsp,"LLiiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_vxsubaddw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpheqi,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_S4_vxsubaddh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_pmpyw,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_vsathb,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_pxorf,"iiii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgei,"iiIi","")
-BUILTIN(__builtin_HEXAGON_A2_vsubub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_vrminuw,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_F2_sffma,"ffff","")
-BUILTIN(__builtin_HEXAGON_A2_absp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_all8,"ii","")
-BUILTIN(__builtin_HEXAGON_A4_vrminuh,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_F2_sffma_lib,"ffff","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_bitsset,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpysip,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpysin,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_boundscheck,"iiLLi","")
-BUILTIN(__builtin_HEXAGON_M5_vrmpybuu,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C4_fastcorner9,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp,"iLLii","")
-BUILTIN(__builtin_HEXAGON_A2_neg,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_subsat,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_vnavgh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_ud2df,"dLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vnavgw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S4_subi_lsr_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vzxthw,"LLii","")
-BUILTIN(__builtin_HEXAGON_F2_sfadd,"fff","")
-BUILTIN(__builtin_HEXAGON_A2_sub,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2su_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2su_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_insert,"iiiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_packhl,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpwgti,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_A2_vavguwr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_svsubhs,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_and_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_d2df,"dLLi","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtui,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vconj,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_vxsubaddhr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_clbp,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_deinterleave,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_any8,"ii","")
-BUILTIN(__builtin_HEXAGON_S2_togglebit_r,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_togglebit_i,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_uw2sf,"fi","")
-BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_cmacs_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmacs_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_clrbit_r,"iii","")
-BUILTIN(__builtin_HEXAGON_C4_or_andn,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpwgtui,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_vrmaxh,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpbeq,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vcmphgt,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vnavgwcr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vavgwcr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_vrmaxw,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_vnavghr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_cmpyi_wh,"iLLii","")
-BUILTIN(__builtin_HEXAGON_A2_tfrsi,"iIi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_svnavgh,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmphgtui,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_svavgh,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_combine_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_up,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_combine_hh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_negsat,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A4_bitsplit,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A2_vabshsat,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyui,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_C2_cmplt,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_cmacr_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M4_or_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_M4_mpyrr_addi,"iUIiii","")
-BUILTIN(__builtin_HEXAGON_S4_or_andi,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_mpyrr_addr,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_F2_sffixupn,"fff","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vadduhs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vsubuhs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_xorp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_tfrpcp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_zxtb,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_zxth,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_vnavgwr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_or_xor,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M5_vmacbsu,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_sffms_lib,"ffff","")
-BUILTIN(__builtin_HEXAGON_C4_cmpneqi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_M4_and_xor,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_sat,"iLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A2_addsat,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_svavghs,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_bitsclri,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vradduh,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_addp_c,"LLiLLiLLiv*","")
-BUILTIN(__builtin_HEXAGON_C2_xor,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2ud_chop,"LLid","")
-BUILTIN(__builtin_HEXAGON_C4_or_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_S4_vxaddsubhr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_vsathub,"iLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2sf,"fd","")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vavgwr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax,"iiiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_sxth,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_sxtb,"ii","")
-BUILTIN(__builtin_HEXAGON_C4_or_orn,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_sxtw,"LLii","")
-BUILTIN(__builtin_HEXAGON_M2_vabsdiffh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyl_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_cl1p,"iLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vabsdiffw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_andnp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_vmux,"LLiiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_parityp,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_sfcmpeq,"iff","")
-BUILTIN(__builtin_HEXAGON_A2_vaddb_map,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpheq,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_clbnorm,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S4_subaddi,"iiIii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_tstbit_r,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_vrcrotate,"LLiLLiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_tstbit_i,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_extractu_rp,"iiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_or_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0,"Uiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_w2df,"di","")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpeqi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vcnegh,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpweqi,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_xor_xacc,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vavgubr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0,"Uiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_cl0p,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_valignib,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_sffixupd,"fff","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_ct1,"ii","")
-BUILTIN(__builtin_HEXAGON_S2_ct0,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_ntstbit_i,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_sffixupr,"ff","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_xor,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A2_vcmphgtu,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_andn,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_addaddi,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_rcmpeqi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_M4_xor_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_round_ri,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_max,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_round_rr,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_combineii,"LLiIiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_combineir,"LLiIii","")
-BUILTIN(__builtin_HEXAGON_C4_and_orn,"iiii","")
-BUILTIN(__builtin_HEXAGON_M5_vmacbuu,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A4_rcmpeq,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_cmpyr_whc,"iLLii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vzxtbh,"LLii","")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_combinew,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S4_ori_asl_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_C4_nbitsset,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0,"Uiii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_modwrapu,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_rcmpneq,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_F2_sfimm_p,"fUIi","")
-BUILTIN(__builtin_HEXAGON_F2_sfimm_n,"fUIi","")
-BUILTIN(__builtin_HEXAGON_M4_cmpyr_wh,"iLLii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_vavgub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_d2sf,"fLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vavguh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbeqi,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_sfcmpuo,"iff","")
-BUILTIN(__builtin_HEXAGON_A2_vavguw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_minu,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_or_andn,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_minp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_or_andix,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_sfcmpge,"iff","")
-BUILTIN(__builtin_HEXAGON_F2_sfmin,"fff","")
-BUILTIN(__builtin_HEXAGON_F2_sfcmpgt,"iff","")
-BUILTIN(__builtin_HEXAGON_M4_vpmpyh,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A2_roundsat,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_ct1p,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S4_extract_rp,"iiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_C4_cmplteui,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S4_addi_lsr_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_tfrcpp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_cmphgti,"iiIi","")
-BUILTIN(__builtin_HEXAGON_A4_vrminh,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A4_vrminw,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A4_cmphgtu,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_insertp_rp,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vnavghcr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_subi_asl_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vsubws,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_sath,"ii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_xor,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_satb,"ii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpltu,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_insertp,"LLiLLiLLiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_extractup_rp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_vxaddsubw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_vxaddsubh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_asrh,"ii","")
-BUILTIN(__builtin_HEXAGON_S4_extractp_rp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_C2_or,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_xor,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_add,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vsububs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A2_vraddub_acc,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_sfinvsqrta,"ff","")
-BUILTIN(__builtin_HEXAGON_S2_ct0p,"iLLi","")
-BUILTIN(__builtin_HEXAGON_A2_svaddh,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_vcrotate,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_aslh,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_vsplatrh,"LLii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_vsplatrb,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_C2_muxri,"iiIii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_pxfer_map,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1,"Uiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0,"Uiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_vaddw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vaddh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpeqp,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_mpyri_addi,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_not,"ii","")
-BUILTIN(__builtin_HEXAGON_S4_andi_lsr_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_macsip,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_tfrcrr,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_macsin,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_C2_orn,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_and_andn,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_sfmpy,"fff","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_vw,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_M4_and_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_vh,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_C2_mask,"LLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1_sat,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpbgt,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M5_vrmacbsu,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax,"iiiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vrsadub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_tfrrcr,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_F2_dfcmpge,"idd","")
-BUILTIN(__builtin_HEXAGON_M2_accii,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_A5_vaddhubs,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxb,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_vsxthw,"LLii","")
-BUILTIN(__builtin_HEXAGON_S4_andi_asl_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_xor,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgt,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2d_chop,"LLid","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2w,"if","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_F2_sfclass,"ifUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M4_xor_andn,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_addasl_rrri,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_M5_vdmpybsu,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_addi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_A2_addp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_clbpnorm,"iLLi","")
-BUILTIN(__builtin_HEXAGON_A4_round_rr_sat,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_nacci,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_shuffeh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw,"if","")
-BUILTIN(__builtin_HEXAGON_A2_vsubh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud,"LLif","")
-BUILTIN(__builtin_HEXAGON_A2_vsubw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpwgt,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_xor_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw_chop,"if","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_vw,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_vh,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_svsubuhs,"iii","")
-BUILTIN(__builtin_HEXAGON_M5_vmpybsu,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_C4_and_and,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_subp_c,"LLiLLiLLiv*","")
-BUILTIN(__builtin_HEXAGON_A2_vsubhs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_vitpack,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vavguhr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_vsplicerb,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_C4_nbitsclr,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_cmpys_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpys_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_F2_dfcmpuo,"idd","")
-BUILTIN(__builtin_HEXAGON_S2_shuffob,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_and,"iii","")
-BUILTIN(__builtin_HEXAGON_S5_popcountp,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S4_extractp,"LLiLLiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_cl0,"ii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpbgti,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C4_cmpneq,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_S2_clb,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_maci,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxuh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_bitspliti,"LLiiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vrmac_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2d,"LLif","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_dfimm_n,"dUIi","")
-BUILTIN(__builtin_HEXAGON_A4_cmphgt,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_dfimm_p,"dUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_mpyri_addr_u2,"iiUIii","")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M5_vrmacbuu,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vspliceib,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cnacs_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_cnacs_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A2_maxu,"Uiii","")
-BUILTIN(__builtin_HEXAGON_A2_maxp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_andir,"iiIi","")
-BUILTIN(__builtin_HEXAGON_F2_sfrecipa,"fff","")
-BUILTIN(__builtin_HEXAGON_A2_combineii,"LLiIiIi","")
-BUILTIN(__builtin_HEXAGON_A4_orn,"iii","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbgtui,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpbeqi,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_or,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_dfcmpeq,"idd","")
-BUILTIN(__builtin_HEXAGON_C2_cmpeq,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_tfrp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_C4_and_andn,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_satuh,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_satub,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_S4_or_ori,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_tfrih,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_tfril,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M4_mpyri_addr,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_vtrunehb,"iLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vabsw,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vabsh,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_sfsub,"fff","")
-BUILTIN(__builtin_HEXAGON_C2_muxii,"iiIiIi","")
-BUILTIN(__builtin_HEXAGON_C2_muxir,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_A2_swiz,"ii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vraddub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_tlbmatch,"iLLii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2w_chop,"id","")
-BUILTIN(__builtin_HEXAGON_A2_and,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_S4_extract,"iiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vcmpweq,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_acci,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_ud2sf,"fLLi","")
-BUILTIN(__builtin_HEXAGON_A2_tfr,"ii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_subri,"iIii","")
-BUILTIN(__builtin_HEXAGON_A4_vrmaxuw,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M5_vmpybuu,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A4_vrmaxuh,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_vw,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_vavgw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_brev,"ii","")
-BUILTIN(__builtin_HEXAGON_A2_vavgh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_clrbit_i,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_vh,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_naccii,"iiiIi","")
-BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs,"iLLi","")
-BUILTIN(__builtin_HEXAGON_S2_vtrunewh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M4_mac_up_s1_sat,"iiii","")
-BUILTIN(__builtin_HEXAGON_S4_vrcrotate_acc,"LLiLLiLLiiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_uw2df,"di","")
-BUILTIN(__builtin_HEXAGON_A2_vaddubs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_orir,"iiIi","")
-BUILTIN(__builtin_HEXAGON_A2_andp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_lfsp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_min,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpysmi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun,"iLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2df,"df","")
-BUILTIN(__builtin_HEXAGON_S2_vtrunohb,"iLLi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2d_chop,"LLif","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2w,"id","")
-BUILTIN(__builtin_HEXAGON_S5_asrhub_sat,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2d,"LLid","")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_svadduhs,"iii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2w_chop,"if","")
-BUILTIN(__builtin_HEXAGON_S2_svsathub,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_setbit_r,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vavghr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_F2_sffma_sc,"ffffi","")
-BUILTIN(__builtin_HEXAGON_F2_dfclass,"idUIi","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2ud,"LLid","")
-BUILTIN(__builtin_HEXAGON_F2_conv_df2uw,"id","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_C4_cmpltei,"iiIi","")
-BUILTIN(__builtin_HEXAGON_C4_cmplteu,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vsubb_map,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_A2_minup,"ULLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_valignrb,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vaddub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_combine_lh,"iii","")
-BUILTIN(__builtin_HEXAGON_M5_vdmacbsu,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_combine_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_addpsat,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_svaddhs,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_ori_lsr_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_vminw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vminh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vminb,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_S4_lsli,"iIii","")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw,"LLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_vraddh,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_tfrrp,"ii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_vtrunowh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_abs,"ii","")
-BUILTIN(__builtin_HEXAGON_A4_cmpbeq,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_negp,"LLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl,"iii","")
-BUILTIN(__builtin_HEXAGON_S2_vsatwuh,"iLLi","")
-BUILTIN(__builtin_HEXAGON_F2_dfcmpgt,"idd","")
-BUILTIN(__builtin_HEXAGON_S2_svsathb,"ii","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtup,"iLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A4_cround_ri,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S4_clbpaddi,"iLLiIi","")
-BUILTIN(__builtin_HEXAGON_A4_cround_rr,"iii","")
-BUILTIN(__builtin_HEXAGON_C2_mux,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0,"ULLiii","")
-BUILTIN(__builtin_HEXAGON_S2_shuffeb,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vminuw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vaddhs,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_insert_rp,"iiiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vminuh,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vminub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_extractu,"iiUIiUIi","")
-BUILTIN(__builtin_HEXAGON_A2_svsubh,"iii","")
-BUILTIN(__builtin_HEXAGON_S4_clbaddi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_F2_sffms,"ffff","")
-BUILTIN(__builtin_HEXAGON_S2_vsxtbh,"LLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A2_subp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S4_parity,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_S4_addi_asl_ri,"iUIiiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_cmpheqi,"iiIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_xor,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud_chop,"LLif","")
-BUILTIN(__builtin_HEXAGON_C2_cmpgeui,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and,"LLiLLiLLii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_M4_nac_up_s1_sat,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_A4_round_ri_sat,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_vavghcr,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M2_cmaci_s0,"LLiLLiii","")
-BUILTIN(__builtin_HEXAGON_S2_setbit_i,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_andn,"iii","")
-BUILTIN(__builtin_HEXAGON_M5_vrmpybsu,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S2_vrndpackwh,"iLLi","")
-BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r,"LLiLLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_vmaxuw,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_C2_bitsclr,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_xor_xacc,"iiii","")
-BUILTIN(__builtin_HEXAGON_A4_vcmpbgtui,"iLLiUIi","")
-BUILTIN(__builtin_HEXAGON_A4_ornp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A2_tfrpi,"LLiIi","")
-BUILTIN(__builtin_HEXAGON_C4_and_or,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1,"iiii","")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0,"iiii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll,"iii","")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh,"iii","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s1,"LLiii","")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s0,"LLiii","")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_C4_nbitsclri,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc,"LLiLLiLLiUIi","")
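Each removed entry's second argument is a prototype string in the encoding used by clang's Builtins*.def files: the leading type is the return type and the rest are parameters, with 'i' = int, 'LLi' = long long, a 'U' prefix for unsigned, 'f' = float, 'd' = double, 'v*' = void pointer, and an 'I' prefix marking a parameter that must be an integer constant expression. A small illustrative sketch, spelling out a few of the signatures above with hypothetical *_shape names (the real __builtin_HEXAGON_* functions are declared implicitly by the compiler, never by the user):

/* "LLiLLiLLi"   e.g. __builtin_HEXAGON_A2_vadduhs */
long long vadduhs_shape(long long rss, long long rtt);

/* "iiUIi"       e.g. __builtin_HEXAGON_S2_tstbit_i; the 'I' means the
 * second argument must be a compile-time integer constant               */
int tstbit_i_shape(int rs, unsigned int bit);

/* "fff"         e.g. __builtin_HEXAGON_F2_sfmpy */
float sfmpy_shape(float rs, float rt);

/* "LLiLLiLLiv*" e.g. __builtin_HEXAGON_A4_addp_c; the trailing 'v*' is a
 * void-pointer parameter                                                 */
long long addp_c_shape(long long rss, long long rtt, void *p);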
-
-// V55 Scalar Instructions.
-
-BUILTIN(__builtin_HEXAGON_A5_ACS,"LLiLLiLLiLLi","")
-
-// V60 Scalar Instructions.
-
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p_and,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r_xacc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r_and,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r_acc,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p_xacc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p,"LLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p_nac,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p_acc,"LLiLLiLLiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r_or,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r,"iiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_r_nac,"iiiUIi","")
-BUILTIN(__builtin_HEXAGON_S6_rol_i_p_or,"LLiLLiLLiUIi","")
-
-// V62 Scalar Instructions.
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+#pragma push_macro("V67")
+#define V67 "v67"
+#pragma push_macro("V66")
+#define V66 "v66|" V67
+#pragma push_macro("V65")
+#define V65 "v65|" V66
+#pragma push_macro("V62")
+#define V62 "v62|" V65
+#pragma push_macro("V60")
+#define V60 "v60|" V62
+#pragma push_macro("V55")
+#define V55 "v55|" V60
+#pragma push_macro("V5")
+#define V5 "v5|" V55
+
+#pragma push_macro("HVXV67")
+#define HVXV67 "hvxv67"
+#pragma push_macro("HVXV66")
+#define HVXV66 "hvxv66|" HVXV67
+#pragma push_macro("HVXV65")
+#define HVXV65 "hvxv65|" HVXV66
+#pragma push_macro("HVXV62")
+#define HVXV62 "hvxv62|" HVXV65
+#pragma push_macro("HVXV60")
+#define HVXV60 "hvxv60|" HVXV62
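The block added above does two things: it lets includers that only define the old three-argument BUILTIN macro keep working (TARGET_BUILTIN falls back to BUILTIN and drops its FEATURE argument), and it builds per-architecture feature strings by string-literal concatenation, so each Vnn macro is an '|'-joined list of that version and everything newer (V62 expands to "v62|v65|v66|v67", HVXV60 to "hvxv60|hvxv62|hvxv65|hvxv66|hvxv67"). A minimal standalone sketch of the same pattern; the printing consumer and the two entries in main() are hypothetical, only the macro shapes are taken from the hunk above:

#include <stdio.h>

/* Feature strings chain via string-literal concatenation, newest last.  */
#define V67 "v67"
#define V66 "v66|" V67
#define V65 "v65|" V66
#define V62 "v62|" V65                /* "v62|v65|v66|v67" */

/* This includer only understands the old three-argument form...         */
#define BUILTIN(ID, TYPE, ATTRS) printf("%s has prototype %s\n", #ID, TYPE);

/* ...so the fallback maps TARGET_BUILTIN onto BUILTIN, discarding the
 * FEATURE string, exactly as in the diff above.                          */
#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif

int main(void) {
  /* Hypothetical .def-style entries written in the new form.            */
  TARGET_BUILTIN(__builtin_HEXAGON_M2_mnaci, "iiii", "", V66)
  TARGET_BUILTIN(__builtin_HEXAGON_F2_dfadd, "ddd", "", V66)
  printf("V62 expands to \"%s\"\n", V62);
  return 0;
}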
-BUILTIN(__builtin_HEXAGON_S6_vtrunehb_ppp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_V6_ldntnt0,"V16ii","")
-BUILTIN(__builtin_HEXAGON_M6_vabsdiffub,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S6_vtrunohb_ppp,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_M6_vabsdiffb,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_A6_vminub_RdP,"LLiLLiLLi","")
-BUILTIN(__builtin_HEXAGON_S6_vsplatrbp,"LLii","")
-
-// V65 Scalar Instructions.
-
-BUILTIN(__builtin_HEXAGON_A6_vcmpbeq_notany,"iLLiLLi","")
-
-// V66 Scalar Instructions.
-
-BUILTIN(__builtin_HEXAGON_F2_dfsub,"ddd","")
-BUILTIN(__builtin_HEXAGON_F2_dfadd,"ddd","")
-BUILTIN(__builtin_HEXAGON_M2_mnaci,"iiii","")
-BUILTIN(__builtin_HEXAGON_S2_mask,"iUIiUIi","")
-
-// V60 HVX Instructions.
-
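The V60 HVX entries that follow come in pairs: the base builtin operates on 64-byte HVX vectors, spelled 'V16i' (a vector of 16 ints) in the prototype string, and its _128B twin is the 128-byte-vector variant, spelled 'V32i' (vector-register pairs appear as 'V32i'/'V64i'). A small sketch with hypothetical *_shape names showing what one such pair of prototype strings amounts to:

typedef int hvx64_t  __attribute__((__vector_size__(64)));   /* "V16i" */
typedef int hvx128_t __attribute__((__vector_size__(128)));  /* "V32i" */

/* "V16iV16iV16i"  e.g. __builtin_HEXAGON_V6_vaddw      */
hvx64_t  vaddw_shape(hvx64_t vu, hvx64_t vv);

/* "V32iV32iV32i"  e.g. __builtin_HEXAGON_V6_vaddw_128B */
hvx128_t vaddw_128B_shape(hvx128_t vu, hvx128_t vv);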
-BUILTIN(__builtin_HEXAGON_V6_veqb_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vminub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vminub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslw_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaslw_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsathub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsathub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddh_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddh_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusi,"V32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_128B,"V64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat,"V16iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_128B,"V32iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc,"V32iV32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc_128B,"V64iV64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgubrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgubrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundhb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundhb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vsububsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsububsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmux,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmux_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhus,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhus_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackeb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackeb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavghrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavghrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vtran2x2_map,"V16iV16iv*i","")
-BUILTIN(__builtin_HEXAGON_V6_vtran2x2_map_128B,"V32iV32iv*i","")
-BUILTIN(__builtin_HEXAGON_V6_vdelta,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdelta_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyhb,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vpackob,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackob_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrw_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrw_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_pred_or,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_or_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_lo,"V16iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_lo_128B,"V32iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubb_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubb_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwh,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwb,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldu0,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldu0_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlalignb,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlalignb_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsh,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsh_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_and_n,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_and_n_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsb,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsb_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundwuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundwuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffh,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffh_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vnormamth,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnormamth_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vavguh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavguh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrwv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrwv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrhv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrhv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat,"V16iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_128B,"V32iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vzh,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vzh_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoeh,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoeh_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuuv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuuv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwhsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwhsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vminuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vminuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vror,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vror_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsh_sat,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsh_sat_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_or_n,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_or_n_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealb,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealb_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybusv,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybusv_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vzb,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vzb_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhubsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhubsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoeb,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshufoeb_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyb,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyb_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabusv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabusv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_and,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_and_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vswap,"V32iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vswap_128B,"V64iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslw,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaslw_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffvdd,"V32iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffvdd_128B,"V64iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddb_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddb_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackub,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackub_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh,"V32iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_128B,"V64iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyieoh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyieoh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_extractw,"iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_extractw_128B,"iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vavgwrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgwrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyub,"V32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyub_128B,"V64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuh,"V32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuh_128B,"V64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackob,"V32iV32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackob_128B,"V64iV64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahb,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahb_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vandqrt,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandqrt_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vxor,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vxor_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc,"V32iV32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc_128B,"V64iV64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhw,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhw_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealb4w,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealb4w_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybv,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybv_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffob,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffob_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc,"V32iV32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc_128B,"V64iV64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vnormamtw,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnormamtw_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackuh,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackuh_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackoh,"V32iV32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackoh_128B,"V64iV64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyubv,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyubv_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhss,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhss_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_hi,"V16iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_hi_128B,"V32iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_veqw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdsaduh,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdsaduh_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubw_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubw_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyih,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyih_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybus,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybus_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc,"V32iV32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc_128B,"V64iV64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc,"V32iV32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc_128B,"V64iV64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vabsw,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsw_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrw,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrw_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vabsh,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsh_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrh,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrh_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_valignb,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_valignb_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubhq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackoh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackoh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vcombine,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vcombine_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaslhv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslhv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vinsertwr,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vinsertwr_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubh_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubh_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffb,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffb_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vand,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vand_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhv,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhv_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc,"V16iV16iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B,"V32iV32iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc,"V32iV32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc_128B,"V64iV64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vcl0h,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vcl0h_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrsadubi,"V32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrsadubi_128B,"V64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vshufeh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshufeh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyewuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubh,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubh_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwh,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwh_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_ld0,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_ld0_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vpopcounth,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpopcounth_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldnt0,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldnt0_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackeh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackeh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyh,"V32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyh_128B,"V64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vminh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vminh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_scalar2,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_pred_scalar2_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdealh,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealh_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslh,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaslh_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vor,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vor_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiowh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiowh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc,"V16iV16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc_128B,"V32iV32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandvrt,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandvrt_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhw,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhw_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vcl0w,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vcl0w_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyihb,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyihb_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpybus,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpybus_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vd0,"V16i","")
-BUILTIN(__builtin_HEXAGON_V6_vd0_128B,"V32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpybus_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybus,"V32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybus_128B,"V64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vassign,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vassign_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtub_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackb,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackb_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackh,"V32iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vunpackh_128B,"V64iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlalignbi,"V16iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vlalignbi_128B,"V32iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vsatwh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsatwh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrdelta,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrdelta_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundwh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundwh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddw_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddw_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqh_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_valignbi,"V16iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_valignbi_128B,"V32iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddwsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_and,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_and_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffeb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vshuffeb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqw_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgth_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vnot,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnot_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtb_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_or,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtuw_or_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslwv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslwv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsw_sat,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsw_sat_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundhub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vroundhub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc,"V16iV16iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc_128B,"V32iV32iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabus,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabus_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vassignp,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vassignp_128B,"V64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsububh,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsububh_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplatw,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplatw_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhnq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhnq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_pred_not,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_not_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc,"V32iV32iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc_128B,"V64iV64iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdealvdd,"V32iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdealvdd_128B,"V64iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vavgw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vgtw_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhw,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhw_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhq,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhq_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubi,"V32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_128B,"V64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vminw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vminw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_xor,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_pred_xor_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_xor,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_veqb_xor_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavguhrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavguhrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubwsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhw,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhw_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc,"V32iV32iV32iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc_128B,"V64iV64iV64iiUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vasrw,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrw_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrh,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrh_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhv,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsdiffw_128B,"V32iV32iV32i","")
-
-// V62 HVX Instructions.
-
-BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddclbh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddclbh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64,"V32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64_128B,"V64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsatuwuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsatuwuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_shuffeqh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_shuffeqh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_shuffeqw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_shuffeqw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldcnpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldcnpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubcarry,"V16iV16iV16iv*","")
-BUILTIN(__builtin_HEXAGON_V6_vsubcarry_128B,"V32iV32iV32iv*","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhbsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrhbsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vminb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vminb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrb,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlsrb_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwhi,"V32iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwhi_128B,"V64iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_ldtp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci,"V16iV16iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci_128B,"V32iV32iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_ldpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vandvnqv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vandvnqv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplatb,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplatb_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplath,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_lvsplath_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm,"V32iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm_128B,"V64iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldnpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldnpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhb,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhb_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtnp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtnp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vrounduhub,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrounduhub_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldcp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldcp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vadduwsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduwsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldtnpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldtnpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vandnqrt,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vandnqrt_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vmaxb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vandvqv,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vandvqv_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddcarry,"V16iV16iV16iv*","")
-BUILTIN(__builtin_HEXAGON_V6_vaddcarry_128B,"V32iV32iV32iv*","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvbi,"V16iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvbi_128B,"V32iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuwsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_ldnp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldnp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrounduwuh,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrounduwuh_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2,"V16ii","")
-BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2_128B,"V32ii","")
-BUILTIN(__builtin_HEXAGON_V6_ldp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddclbw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddclbw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldcpnt0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldcpnt0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv_128B,"V64iV64iV64i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwub,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_ldcnp0,"V16iii","")
-BUILTIN(__builtin_HEXAGON_V6_ldcnp0_128B,"V32iii","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci,"V32iV32iV16iV16iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci_128B,"V64iV64iV32iV32iUIi","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbsat,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsubbsat_128B,"V32iV32iV32i","")
-
-// V65 HVX Instructions.
-
-BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt,"V32iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_128B,"V64iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahhsat,"V16iV16iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpahhsat_128B,"V32iV32iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vavguwrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavguwrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vnavgb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasrh_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasrh_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat,"V16iV16iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat_128B,"V32iV32iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc,"V32iV32iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc_128B,"V64iV64iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_acc,"V32iV32iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B,"V64iV64iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vavgb,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgb_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vaslh_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vaslh_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vavguw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavguw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vlut4,"V16iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vlut4_128B,"V32iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt,"V32iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_128B,"V64iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat,"V16iV16iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat_128B,"V32iV32iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vasruhubsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasruhubsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhe,"V16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_128B,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc,"V32iV32iV16iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","")
-BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat,"V16iV16iV16ii","")
-BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat_128B,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc,"V32iV32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc_128B,"V64iV64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqw,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqw_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqh,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqh_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqb,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vprefixqb_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsb,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsb_128B,"V32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgbrnd,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vavgbrnd_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdd0,"V32i","")
-BUILTIN(__builtin_HEXAGON_V6_vdd0_128B,"V64i","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuu,"V32iV32ii","")
-BUILTIN(__builtin_HEXAGON_V6_vmpabuu_128B,"V64iV64ii","")
-BUILTIN(__builtin_HEXAGON_V6_vabsb_sat,"V16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vabsb_sat_128B,"V32iV32i","")
-
-// V66 HVX Instructions.
-
-BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat,"V16iV16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat_128B,"V32iV32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vasr_into,"V32iV32iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vasr_into_128B,"V64iV64iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vsatdw,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vsatdw_128B,"V32iV32iV32i","")
-BUILTIN(__builtin_HEXAGON_V6_vrotr,"V16iV16iV16i","")
-BUILTIN(__builtin_HEXAGON_V6_vrotr_128B,"V32iV32iV32i","")
+// The builtins below are not autogenerated from iset.py.
+// Make sure you do not overwrite these.
+TARGET_BUILTIN(__builtin_SI_to_SXTHI_asrh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_brev_ldd, "v*LLi*CLLi*iC", "", V5)
+TARGET_BUILTIN(__builtin_brev_ldw, "v*i*Ci*iC", "", V5)
+TARGET_BUILTIN(__builtin_brev_ldh, "v*s*Cs*iC", "", V5)
+TARGET_BUILTIN(__builtin_brev_lduh, "v*Us*CUs*iC", "", V5)
+TARGET_BUILTIN(__builtin_brev_ldb, "v*Sc*CSc*iC", "", V5)
+TARGET_BUILTIN(__builtin_brev_ldub, "v*Uc*CUc*iC", "", V5)
+TARGET_BUILTIN(__builtin_circ_ldd, "LLi*LLi*LLi*iIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_ldw, "i*i*i*iIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_ldh, "s*s*s*iIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_lduh, "Us*Us*Us*iIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_ldb, "c*c*c*iIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_ldub, "Uc*Uc*Uc*iIi", "", V5)
+TARGET_BUILTIN(__builtin_brev_std, "LLi*CLLi*LLiiC", "", V5)
+TARGET_BUILTIN(__builtin_brev_stw, "i*Ci*iiC", "", V5)
+TARGET_BUILTIN(__builtin_brev_sth, "s*Cs*iiC", "", V5)
+TARGET_BUILTIN(__builtin_brev_sthhi, "s*Cs*iiC", "", V5)
+TARGET_BUILTIN(__builtin_brev_stb, "c*Cc*iiC", "", V5)
+TARGET_BUILTIN(__builtin_circ_std, "LLi*LLi*LLiiIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_stw, "i*i*iiIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_sth, "s*s*iiIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_sthhi, "s*s*iiIi", "", V5)
+TARGET_BUILTIN(__builtin_circ_stb, "c*c*iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrub_pci, "iv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrb_pci, "iv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadruh_pci, "iv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrh_pci, "iv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadri_pci, "iv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrd_pci, "LLiv*IiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrub_pcr, "iv*ivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrb_pcr, "iv*ivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadruh_pcr, "iv*ivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrh_pcr, "iv*ivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadri_pcr, "iv*ivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_L2_loadrd_pcr, "LLiv*ivC*", "", V5)
+
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerb_pci, "vv*IiiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerh_pci, "vv*IiiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerf_pci, "vv*IiiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storeri_pci, "vv*IiiivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerd_pci, "vv*IiiLLivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerb_pcr, "vv*iivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerh_pcr, "vv*iivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerf_pcr, "vv*iivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storeri_pcr, "vv*iivC*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_storerd_pcr, "vv*iLLivC*", "", V5)
+
+TARGET_BUILTIN(__builtin_HEXAGON_prefetch,"vv*","", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A6_vminub_RdP,"LLiLLiLLi","", V62)
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstoreq,"vV64bv*V16i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorenq,"vV64bv*V16i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentq,"vV64bv*V16i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentnq,"vV64bv*V16i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstoreq_128B,"vV128bv*V32i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorenq_128B,"vV128bv*V32i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentq_128B,"vV128bv*V32i","", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaskedstorentnq_128B,"vV128bv*V32i","", HVXV60)
+
+
+// These are only valid on v65
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt,"V32iV16iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_128B,"V64iV32iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_acc,"V32iV32iV16iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B,"V64iV64iV32iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt,"V32iV16iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_128B,"V64iV32iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc,"V32iV32iV16iLLi","", "hvxv65")
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","", "hvxv65")
+
+#include "clang/Basic/BuiltinsHexagonDep.def"
+
+#pragma pop_macro("HVXV60")
+#pragma pop_macro("HVXV62")
+#pragma pop_macro("HVXV65")
+#pragma pop_macro("HVXV66")
+#pragma pop_macro("HVXV67")
+
+#pragma pop_macro("V5")
+#pragma pop_macro("V55")
+#pragma pop_macro("V60")
+#pragma pop_macro("V62")
+#pragma pop_macro("V65")
+#pragma pop_macro("V66")
+#pragma pop_macro("V67")
#undef BUILTIN
+#undef TARGET_BUILTIN
+
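The hunk above converts the hand-maintained Hexagon entries from BUILTIN to TARGET_BUILTIN, which carries an extra required-feature argument (V5, HVXV60, "hvxv65", ...), and moves the autogenerated tables into the new BuiltinsHexagonDep.def pulled in just before the pop_macro/undef block. These .def files are X-macro tables: a consumer defines BUILTIN/TARGET_BUILTIN, includes the file, and lets the preprocessor expand each line into a record. The sketch below only illustrates that pattern; the row layout, the plain "v5" feature strings, and the inlined rows standing in for the #include are illustrative assumptions, not clang's actual consumer code.

    // Minimal, self-contained sketch (not clang's exact code) of how a
    // TARGET_BUILTIN table like the ones in this diff is consumed.
    // Assumption: the fourth argument is written here as a plain string;
    // in BuiltinsHexagon.def it is usually a macro such as V5 or HVXV60
    // that the file push/pops around the table, as the pop_macro lines
    // above suggest.
    #include <cstdio>

    struct BuiltinRow {
      const char *Name;    // e.g. "__builtin_HEXAGON_C2_cmpeq"
      const char *TypeSig; // encoded signature, e.g. "iii" = int(int, int)
      const char *Attrs;   // attribute string (empty for these builtins)
      const char *Feature; // required target feature
    };

    static const BuiltinRow HexagonBuiltins[] = {
    #define BUILTIN(ID, TYPE, ATTRS) {#ID, TYPE, ATTRS, ""},
    #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) {#ID, TYPE, ATTRS, FEATURE},
      // A real consumer would write: #include "clang/Basic/BuiltinsHexagon.def"
      // Two rows copied from the diff stand in for that include here:
      TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeq, "iii", "", "v5")
      TARGET_BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "", "v5")
    #undef TARGET_BUILTIN
    #undef BUILTIN
    };

    int main() {
      // Each expanded row gives the builtin's name, its encoded signature,
      // and the feature that must be enabled for the builtin to be legal.
      for (const BuiltinRow &B : HexagonBuiltins)
        std::printf("%-40s sig=%-12s feature=%s\n", B.Name, B.TypeSig, B.Feature);
      return 0;
    }

Defining the macros before the include (and #undef-ing them afterwards, as the tail of the hunk above does) is what lets the same .def file generate different artifacts, e.g. a name table in one translation unit and a feature-check table in another.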
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def
new file mode 100644
index 000000000000..b694e4c35d3b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def
@@ -0,0 +1,1721 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Automatically generated file, do not edit!
+//===----------------------------------------------------------------------===//
+
+
+// V5 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsset, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsset, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgti, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgei, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmplt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpltu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpltei, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplte, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_and, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_or, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_xor, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_andn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_not, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_orn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_orn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_orn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_any8, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_all8, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxii, "iiIiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxir, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxri, "iiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpbeq, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeqi, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeq_any, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgtui, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbeq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbeqi, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgti, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpheqi, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgtui, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpheq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgtu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpheqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgti, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgtui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpweqi, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpwgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpwgtui, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_boundscheck, "iiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_tlbmatch, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_tfrpr, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_tfrrp, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_fastcorner9, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_macsip, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_macsin, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysu_up, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mac_up_s1_sat, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_nac_up_s1_sat, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyui, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_accii, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_nacci, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_naccii, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_subacc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addr, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr_u2, "iiUIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addi, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addi, "iUIiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2su_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2su_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybuu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbuu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybsu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbsu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybuu, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybsu, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbuu, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbsu, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmpybsu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmacbsu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_wh, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_wh, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_whc, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_whc, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate_acc, "LLiLLiLLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate, "LLiLLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vcnegh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vrcnegh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw_acc, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh_acc, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_add, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_max, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_min, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrsi, "iIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiIiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfril, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrih, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_and, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_or, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_not, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_xor_xacc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_and_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_and_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_and_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_and_xor, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_or_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_or_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_or_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_or_xor, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subri, "iIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_andir, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_orir, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_roundsat, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddb_map, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A5_vaddhubs, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vraddh, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubb_map, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfadd, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfsub, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmpy, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_sc, "ffffi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_lib, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms_lib, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpeq, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpgt, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpge, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpuo, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmax, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmin, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfclass, "ifUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_p, "fUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_n, "fUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupn, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupd, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupr, "ff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpeq, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpgt, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpge, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpuo, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfclass, "idUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_p, "dUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_n, "dUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2df, "df", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2sf, "fd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2sf, "fi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2df, "di", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2sf, "fi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2df, "di", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2sf, "fLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2df, "dLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2sf, "fLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2df, "dLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw_chop, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w_chop, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud_chop, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d_chop, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw_chop, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w_chop, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud_chop, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d_chop, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_lsli, "iIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insert, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_bitspliti, "LLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_bitsplit, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extract, "iiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractu, "iiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp, "LLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractup, "LLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insert_rp, "iiiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extract_rp, "iiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insertp_rp, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp_rp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_sat, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vrndpackwh, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathb, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_popcountp, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_parity, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_clbaddi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_clbpnorm, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_clbpaddi, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_brevp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dcfetch, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dczeroa, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dccleana, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dccleaninva, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dcinva, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y4_l2fetch, "vv*i", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y5_l2fetch, "vv*LLi", "", V5)
+
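The entries above follow Clang's TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) convention: TYPE is a prototype string read left to right with the return type first ('i' int, 'LLi' long long, 'Ui'/'ULLi' their unsigned forms, 'f' float, 'd' double, 'v' void, '*' pointer to the preceding type, and an 'I' prefix marking an operand that must be an integer constant expression), while FEATURE names the minimum architecture level (V5, V60, ... or an HVX level). As a minimal usage sketch, assuming a Hexagon cross compiler is available (for example clang --target=hexagon) and with illustrative function names of my own:

/* Not part of the patch: a hand-written example of calling two of the
   scalar builtins declared above. */
#include <stdint.h>

static int32_t saturating_add(int32_t a, int32_t b) {
  /* A2_addsat is declared as "iii": int return, two int operands. */
  return __builtin_HEXAGON_A2_addsat(a, b);
}

static int64_t packed_halfword_add(int64_t a, int64_t b) {
  /* A2_vaddh is declared as "LLiLLiLLi": a 64-bit register pair in and
     out, treated as four 16-bit lanes. */
  return __builtin_HEXAGON_A2_vaddh(a, b);
}
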
+// V60 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r, "iiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p, "LLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_acc, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_acc, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_nac, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_nac, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_xacc, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_xacc, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_and, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_or, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_and, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_or, "LLiLLiLLiUIi", "", V60)
+
+// V62 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_M6_vabsdiffb, "LLiLLiLLi", "", V62)
+TARGET_BUILTIN(__builtin_HEXAGON_M6_vabsdiffub, "LLiLLiLLi", "", V62)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_vsplatrbp, "LLii", "", V62)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_vtrunehb_ppp, "LLiLLiLLi", "", V62)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_vtrunohb_ppp, "LLiLLiLLi", "", V62)
+
+// V65 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_A6_vcmpbeq_notany, "iLLiLLi", "", V65)
+
+// V66 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mnaci, "iiii", "", V66)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfadd, "ddd", "", V66)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfsub, "ddd", "", V66)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_mask, "iUIiUIi", "", V66)
+
+// V67 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiw, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiw_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiwc, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiwc_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_vdmpy, "LLiLLiLLi", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_M7_vdmpy_acc, "LLiLLiLLiLLi", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrw, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrwc, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiw, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiwc, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrw_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrwc_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiw_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiwc_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_ri, "LLiLLiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_rr, "LLiLLii", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_clip, "iiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_vclip, "LLiLLiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmax, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmin, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyfix, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyll, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpylh, "dddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyhh, "dddd", "", V67)
+
+// V60 HVX Instructions.
+
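For the HVX entries that follow, the prototype letters extend to vectors: V16i is a 16-element vector of int (one 64-byte HVX register), V32i and V64i cover the 128-byte and register-pair forms used by the _128B variants, and V64b/V128b are the boolean predicate vectors seen in the vS32b_*qpred entries. A minimal sketch of driving one of these builtins through a generic Clang vector type, assuming 64-byte HVX mode (clang --target=hexagon -mhvx -mhvx-length=64b); the typedef and function name are my own:

/* Not part of the patch: illustrative use of an HVX builtin from this
   list. */
typedef int hvx_vec __attribute__((__vector_size__(64))); /* matches V16i */

static hvx_vec add_word_lanes(hvx_vec a, hvx_vec b) {
  /* V6_vaddw is declared below as "V16iV16iV16i": one 64-byte vector
     result, two 64-byte vector operands, added as 32-bit lanes. */
  return __builtin_HEXAGON_V6_vaddw(a, b);
}
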
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi, "V16iV16iV16iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi, "V16iV16iV16iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vror, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vror_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob, "V32iV32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob_128B, "V64iV64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh, "V32iV32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh_128B, "V64iV64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat, "V16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_128B, "V32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc, "V16iV16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat, "V16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "V32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "V16iV16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgubrnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgubrnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguhrnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguhrnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgwrnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgwrnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vd0, "V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vd0_128B, "V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vand, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vand_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vor, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vor_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt, "V16iV64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_128B, "V32iV128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc, "V16iV16iV64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc_128B, "V32iV32iV128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt, "V64bV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_128B, "V128bV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc, "V64bV64bV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc_128B, "V128bV128bV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not, "V64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not_128B, "V128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2, "V64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2_128B, "V128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap, "V32iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap_128B, "V64iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsathub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsathub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatwh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatwh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffeb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffeb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffob, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffob_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffvdd, "V32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffvdd_128B, "V64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd, "V32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd_128B, "V64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeb, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeb_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw, "iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw_128B, "iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw, "V16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw_128B, "V32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp_128B, "V64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc, "V16iV16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "V32iV32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh, "V32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_128B, "V64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc, "V32iV32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "V64iV64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_hi, "V16iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_hi_128B, "V32iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lo, "V16iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lo_128B, "V32iV64i", "", HVXV60)
+
+// V62 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb, "V16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb_128B, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarry, "V16iV16iV16iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarry_128B, "V32iV32iV32iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry, "V16iV16iV16iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry_128B, "V32iV32iV32iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64, "V32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64_128B, "V64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_128B, "V64iV64ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc_128B, "V64iV64iV64ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub, "V16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_128B, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt, "V16iV64bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_128B, "V32iV128bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc, "V16iV16iV64bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc_128B, "V32iV32iV128bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv, "V16iV64bV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv_128B, "V32iV128bV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvnqv, "V16iV64bV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvnqv_128B, "V32iV128bV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2, "V64bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2_128B, "V128bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw, "V64bV64bV64b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw_128B, "V128bV128bV128b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh, "V64bV64bV64b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh_128B, "V128bV128bV128b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxb, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxb_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminb, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminb_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatuwuh, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatuwuh_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath, "V16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath_128B, "V32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb, "V16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb_128B, "V32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi, "V16iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi_128B, "V32iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci, "V16iV16iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "V32iV32iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi, "V32iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi_128B, "V64iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci, "V32iV32iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "V64iV64iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm, "V32iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm_128B, "V64iV32iV32ii", "", HVXV62)
+
+// V65 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_acc, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_acc_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_acc, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_acc_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgb, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgb_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgbrnd, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgbrnd_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdd0, "V32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdd0_128B, "V64i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb, "V16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_128B, "V32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat, "V16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat_128B, "V32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu, "V32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_128B, "V64iV64ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc_128B, "V64iV64iV64ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc, "V32iV32iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc_128B, "V64iV64iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahhsat, "V16iV16iV16iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat, "V16iV16iV16iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat, "V16iV16iV16iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4, "V16iV16iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4_128B, "V32iV32iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe, "V16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_128B, "V32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw, "vv*iiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw_128B, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh, "vv*iiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh_128B, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw_128B, "vv*iiV64i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq, "vv*V64biiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq_128B, "vv*V128biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq, "vv*V64biiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq_128B, "vv*V128biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq, "vv*V64biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq_128B, "vv*V128biiV64i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_add, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_add_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq, "vV64biiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq_128B, "vV128biiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhq, "vV64biiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhq_128B, "vV128biiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw, "viiV32iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_128B, "viiV64iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq, "vV64biiV32iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq_128B, "vV128biiV64iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add, "viiV32iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add_128B, "viiV64iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb_128B, "V32iV128b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh_128B, "V32iV128b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw_128B, "V32iV128b", "", HVXV65)
+
+// V66 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr, "V16iV16iV16i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr_128B, "V32iV32iV32i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into, "V32iV32iV16iV16i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into_128B, "V64iV64iV32iV32i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat, "V16iV16iV16iV64b", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat_128B, "V32iV32iV32iV128b", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw, "V16iV16iV16i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw_128B, "V32iV32iV32i", "", HVXV66)
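The prototype strings in the listing above use clang's builtin type encoding: "V16i" is a vector of 16 ints (64 bytes, the single-vector HVX length), the _128B variants use "V32i" (128 bytes), a "V32i"/"V64i" return slot denotes a vector pair, and "V64b"/"V128b" are the predicate types. A minimal usage sketch under those assumptions, compiled for a Hexagon target with HVX enabled; the typedef names are illustrative rather than required by this header:

/* Build, e.g.: clang --target=hexagon -mhvx -mhvx-length=64b -c vaddhw.c */
typedef int HVX_Vector     __attribute__((__vector_size__(64)));  /* "V16i" */
typedef int HVX_VectorPair __attribute__((__vector_size__(128))); /* "V32i" */

/* "V32iV16iV16i": widening halfword add of two single vectors into a pair. */
HVX_VectorPair widen_add_halfwords(HVX_Vector a, HVX_Vector b) {
  return __builtin_HEXAGON_V6_vaddhw(a, b);
}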
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
new file mode 100644
index 000000000000..9478a1b3fd14
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
@@ -0,0 +1,206 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Automatically generated file, do not edit!
+//===----------------------------------------------------------------------===//
+
+CUSTOM_BUILTIN_MAPPING(M2_mpysmi, 0)
+CUSTOM_BUILTIN_MAPPING(M2_dpmpyss_s0, 0)
+CUSTOM_BUILTIN_MAPPING(M2_dpmpyuu_s0, 0)
+CUSTOM_BUILTIN_MAPPING(M2_mpyi, 0)
+CUSTOM_BUILTIN_MAPPING(M2_mpyui, 0)
+CUSTOM_BUILTIN_MAPPING(A2_add, 0)
+CUSTOM_BUILTIN_MAPPING(A2_sub, 0)
+CUSTOM_BUILTIN_MAPPING(A2_addi, 0)
+CUSTOM_BUILTIN_MAPPING(A2_addp, 0)
+CUSTOM_BUILTIN_MAPPING(A2_subp, 0)
+CUSTOM_BUILTIN_MAPPING(A2_neg, 0)
+CUSTOM_BUILTIN_MAPPING(A2_zxtb, 0)
+CUSTOM_BUILTIN_MAPPING(A2_sxtb, 0)
+CUSTOM_BUILTIN_MAPPING(A2_zxth, 0)
+CUSTOM_BUILTIN_MAPPING(A2_sxth, 0)
+CUSTOM_BUILTIN_MAPPING(A2_and, 0)
+CUSTOM_BUILTIN_MAPPING(A2_or, 0)
+CUSTOM_BUILTIN_MAPPING(A2_xor, 0)
+CUSTOM_BUILTIN_MAPPING(A2_not, 0)
+CUSTOM_BUILTIN_MAPPING(A2_subri, 0)
+CUSTOM_BUILTIN_MAPPING(A2_andir, 0)
+CUSTOM_BUILTIN_MAPPING(A2_orir, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asr_i_r, 0)
+CUSTOM_BUILTIN_MAPPING(S2_lsr_i_r, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asl_i_r, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asr_i_p, 0)
+CUSTOM_BUILTIN_MAPPING(S2_lsr_i_p, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asl_i_p, 0)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandqrt, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandqrt_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandqrt_acc, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandqrt_acc_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandvrt, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandvrt_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqh_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_not, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_not_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_n, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_n_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_n, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_n_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vmux, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vmux_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vswap, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vswap_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarry, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarry_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarry, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarry_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandnqrt, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandvqv, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandvqv_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandvnqv, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandvnqv_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqb, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqb_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vprefixqw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat_128B, 128)
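This new .def follows the usual X-macro pattern: a consumer defines CUSTOM_BUILTIN_MAPPING, includes the file, and receives one record per builtin; judging from the entries, the second argument encodes the HVX vector length in bytes (0 for the scalar core builtins). A consumption sketch under that assumption — the struct and table names are hypothetical, not clang's:

#include <stddef.h>

struct HexagonCustomDep {
  const char *Name;     /* builtin suffix, e.g. "V6_vaddcarry_128B" */
  unsigned VectorBytes; /* 0 = scalar builtin, 64 or 128 = HVX mode  */
};

static const struct HexagonCustomDep CustomDepTable[] = {
#define CUSTOM_BUILTIN_MAPPING(x, size) { #x, size },
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
};

static const size_t NumCustomDeps =
    sizeof(CustomDepTable) / sizeof(CustomDepTable[0]);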
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsMips.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsMips.def
index 9ac75b7a174e..2aca4cb226bc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsMips.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsMips.def
@@ -635,6 +635,9 @@ BUILTIN(__builtin_msa_ld_h, "V8Ssv*Ii", "nc")
BUILTIN(__builtin_msa_ld_w, "V4Siv*Ii", "nc")
BUILTIN(__builtin_msa_ld_d, "V2SLLiv*Ii", "nc")
+BUILTIN(__builtin_msa_ldr_d, "V2SLLiv*Ii", "nc")
+BUILTIN(__builtin_msa_ldr_w, "V4Siv*Ii", "nc")
+
BUILTIN(__builtin_msa_ldi_b, "V16cIi", "nc")
BUILTIN(__builtin_msa_ldi_h, "V8sIi", "nc")
BUILTIN(__builtin_msa_ldi_w, "V4iIi", "nc")
@@ -857,6 +860,9 @@ BUILTIN(__builtin_msa_st_h, "vV8Ssv*Ii", "nc")
BUILTIN(__builtin_msa_st_w, "vV4Siv*Ii", "nc")
BUILTIN(__builtin_msa_st_d, "vV2SLLiv*Ii", "nc")
+BUILTIN(__builtin_msa_str_d, "vV2SLLiv*Ii", "nc")
+BUILTIN(__builtin_msa_str_w, "vV4Siv*Ii", "nc")
+
BUILTIN(__builtin_msa_subs_s_b, "V16ScV16ScV16Sc", "nc")
BUILTIN(__builtin_msa_subs_s_h, "V8SsV8SsV8Ss", "nc")
BUILTIN(__builtin_msa_subs_s_w, "V4SiV4SiV4Si", "nc")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
index 70be6182c7ac..759c91290a60 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -20,7 +20,9 @@
#pragma push_macro("SM_70")
#pragma push_macro("SM_72")
#pragma push_macro("SM_75")
-#define SM_75 "sm_75"
+#pragma push_macro("SM_80")
+#define SM_80 "sm_80"
+#define SM_75 "sm_75|" SM_80
#define SM_72 "sm_72|" SM_75
#define SM_70 "sm_70|" SM_72
@@ -31,7 +33,11 @@
#pragma push_macro("PTX61")
#pragma push_macro("PTX63")
#pragma push_macro("PTX64")
-#define PTX64 "ptx64"
+#pragma push_macro("PTX65")
+#pragma push_macro("PTX70")
+#define PTX70 "ptx70"
+#define PTX65 "ptx65|" PTX70
+#define PTX64 "ptx64|" PTX65
#define PTX63 "ptx63|" PTX64
#define PTX61 "ptx61|" PTX63
#define PTX60 "ptx60|" PTX61
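Each of these macros appends the next-newer feature string, so a guard such as SM_70 or PTX64 automatically covers every later architecture or PTX version, including the newly added sm_80 and ptx70. A standalone sketch of the expansion; the defines are copied from the hunk above and the program only prints the resulting feature string:

#include <stdio.h>

#define SM_80 "sm_80"
#define SM_75 "sm_75|" SM_80
#define SM_72 "sm_72|" SM_75
#define SM_70 "sm_70|" SM_72

int main(void) {
  /* Adjacent string literals concatenate: prints sm_70|sm_72|sm_75|sm_80 */
  printf("%s\n", SM_70);
  return 0;
}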
@@ -721,7 +727,10 @@ TARGET_BUILTIN(__imma_m8n8k32_st_c_i32, "vi*iC*UiIi", "", AND(SM_75,PTX63))
#pragma pop_macro("SM_70")
#pragma pop_macro("SM_72")
#pragma pop_macro("SM_75")
+#pragma pop_macro("SM_80")
#pragma pop_macro("PTX60")
#pragma pop_macro("PTX61")
#pragma pop_macro("PTX63")
#pragma pop_macro("PTX64")
+#pragma pop_macro("PTX65")
+#pragma pop_macro("PTX70")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
index 314e1cc05907..6b291e6b0806 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
@@ -298,6 +298,44 @@ BUILTIN(__builtin_altivec_vrldmi, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
BUILTIN(__builtin_altivec_vrlwnm, "V4UiV4UiV4Ui", "")
BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "")
+// P10 Vector Parallel Bits built-ins.
+BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "")
+BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "")
+
+// P10 Vector Centrifuge built-in.
+BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "")
+
+// P10 Vector Gather Every N-th Bit built-in.
+BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "")
+
+// P10 Vector Clear Bytes built-ins.
+BUILTIN(__builtin_altivec_vclrlb, "V16cV16cUi", "")
+BUILTIN(__builtin_altivec_vclrrb, "V16cV16cUi", "")
+
+// P10 Vector Count Leading / Trailing Zeroes under bit Mask built-ins.
+BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "")
+BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "")
+
+// P10 Vector Shift built-ins.
+BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "")
+BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "")
+
+// P10 Vector Insert built-ins.
+BUILTIN(__builtin_altivec_vinsblx, "V16UcV16UcULLiULLi", "")
+BUILTIN(__builtin_altivec_vinsbrx, "V16UcV16UcULLiULLi", "")
+BUILTIN(__builtin_altivec_vinshlx, "V8UsV8UsULLiULLi", "")
+BUILTIN(__builtin_altivec_vinshrx, "V8UsV8UsULLiULLi", "")
+BUILTIN(__builtin_altivec_vinswlx, "V4UiV4UiULLiULLi", "")
+BUILTIN(__builtin_altivec_vinswrx, "V4UiV4UiULLiULLi", "")
+BUILTIN(__builtin_altivec_vinsdlx, "V2ULLiV2ULLiULLiULLi", "")
+BUILTIN(__builtin_altivec_vinsdrx, "V2ULLiV2ULLiULLiULLi", "")
+BUILTIN(__builtin_altivec_vinsbvlx, "V16UcV16UcULLiV16Uc", "")
+BUILTIN(__builtin_altivec_vinsbvrx, "V16UcV16UcULLiV16Uc", "")
+BUILTIN(__builtin_altivec_vinshvlx, "V8UsV8UsULLiV8Us", "")
+BUILTIN(__builtin_altivec_vinshvrx, "V8UsV8UsULLiV8Us", "")
+BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiULLiV4Ui", "")
+BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiULLiV4Ui", "")
+
// VSX built-ins.
BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")
@@ -391,6 +429,11 @@ BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "")
BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "")
BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "")
+BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "")
+BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "")
+BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "")
+BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "")
+
// vector Insert/Extract exponent/significand builtins
BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "")
BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "")
@@ -422,6 +465,19 @@ BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "")
BUILTIN(__builtin_vsx_xxpermdi, "v.", "t")
BUILTIN(__builtin_vsx_xxsldwi, "v.", "t")
+BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "")
+
+BUILTIN(__builtin_vsx_xvtlsbb, "iV16Ucb", "")
+
+// P10 Vector Permute Extended built-in.
+BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "")
+
+// P10 Vector Blend built-ins.
+BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "")
+BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "")
+BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "")
+BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
+
// Float 128 built-ins
BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "")
BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "")
@@ -470,6 +526,11 @@ BUILTIN(__builtin_divweu, "UiUiUi", "")
BUILTIN(__builtin_divde, "SLLiSLLiSLLi", "")
BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "")
BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "")
+BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "")
+BUILTIN(__builtin_pextd, "ULLiULLiULLi", "")
+BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "")
+BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "")
+BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "")
// Vector int128 (un)pack
BUILTIN(__builtin_unpack_vector_int128, "ULLiV1LLLii", "")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def
new file mode 100644
index 000000000000..2839ca992d98
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def
@@ -0,0 +1,20 @@
+//===--- BuiltinsSVE.def - SVE Builtin function database --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SVE-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+#define GET_SVE_BUILTINS
+#include "clang/Basic/arm_sve_builtins.inc"
+#undef GET_SVE_BUILTINS
+
+#undef BUILTIN
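As its header comment says, this file only emits data: whatever includes it must first define BUILTIN, which here takes the same three arguments as entries in clang/Basic/Builtins.def (identifier, type string, attribute string). A minimal consumption sketch; the record type and array name are illustrative, not clang's actual declarations:

struct SVEBuiltinRecord {
  const char *Name;    /* builtin identifier */
  const char *TypeStr; /* encoded prototype  */
  const char *Attrs;   /* attribute string   */
};

static const struct SVEBuiltinRecord SVEBuiltins[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
#include "clang/Basic/BuiltinsSVE.def"
};
/* The .def itself #undefs BUILTIN after the include, so no cleanup is needed here. */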
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 38a2441b5fd4..ecee7782920f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -25,10 +25,6 @@
BUILTIN(__builtin_wasm_memory_size, "zIi", "n")
BUILTIN(__builtin_wasm_memory_grow, "zIiz", "n")
-// Bulk memory builtins
-TARGET_BUILTIN(__builtin_wasm_memory_init, "vIUiIUiv*UiUi", "", "bulk-memory")
-TARGET_BUILTIN(__builtin_wasm_data_drop, "vIUi", "", "bulk-memory")
-
// Thread-local storage
TARGET_BUILTIN(__builtin_wasm_tls_size, "z", "nc", "bulk-memory")
TARGET_BUILTIN(__builtin_wasm_tls_align, "z", "nc", "bulk-memory")
@@ -45,9 +41,9 @@ TARGET_BUILTIN(__builtin_wasm_throw, "vIUiv*", "r", "exception-handling")
TARGET_BUILTIN(__builtin_wasm_rethrow_in_catch, "v", "r", "exception-handling")
// Atomic wait and notify.
-BUILTIN(__builtin_wasm_atomic_wait_i32, "ii*iLLi", "n")
-BUILTIN(__builtin_wasm_atomic_wait_i64, "iLLi*LLiLLi", "n")
-BUILTIN(__builtin_wasm_atomic_notify, "Uii*Ui", "n")
+TARGET_BUILTIN(__builtin_wasm_atomic_wait_i32, "ii*iLLi", "n", "atomics")
+TARGET_BUILTIN(__builtin_wasm_atomic_wait_i64, "iLLi*LLiLLi", "n", "atomics")
+TARGET_BUILTIN(__builtin_wasm_atomic_notify, "Uii*Ui", "n", "atomics")
// Trapping fp-to-int conversions
BUILTIN(__builtin_wasm_trunc_s_i32_f32, "if", "nc")
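With the hunk above, the atomic wait/notify builtins become TARGET_BUILTINs gated on the "atomics" feature, so they are only accepted when that feature is enabled for the compilation. A small usage sketch under the assumption that -matomics is how the feature is switched on:

#include <stdint.h>

/* Build, e.g.: clang --target=wasm32 -matomics -c wait.c
 * Prototype "ii*iLLi": int (int *, int, long long). */
int wait_for_value(int *addr, int expected, int64_t timeout_ns) {
  return __builtin_wasm_atomic_wait_i32(addr, expected, timeout_ns);
}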
@@ -70,23 +66,23 @@ TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i64_f64, "LLid", "nc", "nontrappi
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64_f64, "LLid", "nc", "nontrapping-fptoint")
// SIMD builtins
-TARGET_BUILTIN(__builtin_wasm_swizzle_v8x16, "V16cV16cV16c", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_swizzle_v8x16, "V16cV16cV16c", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i8x16, "iV16cIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16cIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16cIi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i16x8, "iV8sIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i16x8, "iV8sIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i16x8, "iV8sIi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extract_lane_i32x4, "iV4iIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_i64x2, "LLiV2LLiIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_i64x2, "LLiV2LLiIi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extract_lane_f32x4, "fV4fIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_f64x2, "dV2dIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_f64x2, "dV2dIi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_replace_lane_i8x16, "V16cV16cIii", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_replace_lane_i16x8, "V8sV8sIii", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_replace_lane_i32x4, "V4iV4iIii", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_replace_lane_f32x4, "V4fV4fIif", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i8x16, "V16cV16cV16c", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i8x16, "V16cV16cV16c", "nc", "simd128")
@@ -98,10 +94,28 @@ TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i8x16, "V16cV16cV16c", "nc", "simd1
TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i16x8, "V8sV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_avgr_u_i8x16, "V16cV16cV16c", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8sV8sV8s", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_i8x16, "V16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_i16x8, "V8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_i32x4, "V4iV4i", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_min_s_i8x16, "V16cV16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_u_i8x16, "V16cV16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_s_i8x16, "V16cV16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_u_i8x16, "V16cV16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_s_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_u_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_s_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_u_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_s_i32x4, "V4iV4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_u_i32x4, "V4iV4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_s_i32x4, "V4iV4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_u_i32x4, "V4iV4iV4i", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_avgr_u_i8x16, "V16cV16cV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_bitselect, "V4iV4iV4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_shuffle_v8x16, "V16cV16cV16cIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_any_true_i8x16, "iV16c", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_any_true_i16x8, "iV8s", "nc", "simd128")
@@ -112,28 +126,43 @@ TARGET_BUILTIN(__builtin_wasm_all_true_i16x8, "iV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i32x4, "iV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i64x2, "iV2LLi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i8x16, "iV16c", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i16x8, "iV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i32x4, "iV4i", "nc", "simd128")
+
TARGET_BUILTIN(__builtin_wasm_abs_f32x4, "V4fV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_f32x4, "V4fV4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_max_f32x4, "V4fV4fV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_pmin_f32x4, "V4fV4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmax_f32x4, "V4fV4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmin_f64x2, "V2dV2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmax_f64x2, "V2dV2dV2d", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_ceil_f32x4, "V4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_floor_f32x4, "V4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_trunc_f32x4, "V4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_nearest_f32x4, "V4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_ceil_f64x2, "V2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_floor_f64x2, "V2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_trunc_f64x2, "V2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_nearest_f64x2, "V2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_dot_s_i32x4_i16x8, "V4iV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_sqrt_f32x4, "V4fV4f", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_sqrt_f64x2, "V2dV2d", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_sqrt_f32x4, "V4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_sqrt_f64x2, "V2dV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_qfma_f32x4, "V4fV4fV4fV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_qfms_f32x4, "V4fV4fV4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_qfma_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_qfms_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128")
TARGET_BUILTIN(__builtin_wasm_qfma_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128")
TARGET_BUILTIN(__builtin_wasm_qfms_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i32x4_f32x4, "V4iV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i32x4_f32x4, "V4iV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i64x2_f64x2, "V2LLiV2d", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64x2_f64x2, "V2LLiV2d", "nc", "unimplemented-simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_s_i8x16_i16x8, "V16cV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16cV8sV8s", "nc", "simd128")
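
In these entries the second field encodes the signature (for example "V16cV16cV16c" is a function returning a vector of 16 chars that takes two such vectors, and an "Ii" parameter is an int that must be a compile-time constant), the third field is the attribute string ("n" nothrow, "c" const), and the final TARGET_BUILTIN field names the target feature that must be enabled. A minimal usage sketch, assuming a WebAssembly target compiled with -msimd128; the typedef name is illustrative:

    typedef char i8x16 __attribute__((vector_size(16)));  // matches the "V16c" code

    i8x16 lane_min_s(i8x16 a, i8x16 b) {
      return __builtin_wasm_min_s_i8x16(a, b);  // lane-wise signed minimum, added above
    }
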
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
index d6aa46de02f9..35fb98352ec2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
@@ -1900,6 +1900,13 @@ TARGET_BUILTIN(__builtin_ia32_invpcid, "vUiv*", "nc", "invpcid")
TARGET_BUILTIN(__builtin_ia32_enqcmd, "Ucv*vC*", "n", "enqcmd")
TARGET_BUILTIN(__builtin_ia32_enqcmds, "Ucv*vC*", "n", "enqcmd")
+// SERIALIZE
+TARGET_BUILTIN(__builtin_ia32_serialize, "v", "n", "serialize")
+
+// TSXLDTRK
+TARGET_BUILTIN(__builtin_ia32_xsusldtrk, "v", "n", "tsxldtrk")
+TARGET_BUILTIN(__builtin_ia32_xresldtrk, "v", "n", "tsxldtrk")
+
// MSVC
TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
@@ -1927,6 +1934,15 @@ TARGET_HEADER_BUILTIN(__readgsword, "UsUNi", "nh", "intrin.h", ALL_MS_LANGUAGES
TARGET_HEADER_BUILTIN(__readgsdword, "UNiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(__readgsqword, "ULLiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "WiWiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "WiWiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
#undef BUILTIN
#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
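
The new SERIALIZE entry above is a void builtin with no arguments, gated on the "serialize" target feature. A minimal sketch of direct use (assuming an x86 target built with -mserialize; the intrinsic wrapper headers would normally be used instead):

    void flush_speculative_state(void) {
      __builtin_ia32_serialize();  // serializes instruction fetch and execution
    }
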
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
index c535f43203e5..f66ae78f7e81 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
@@ -33,14 +33,6 @@ TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", "intrin.h", ALL_MS_LANGUAGES,
TARGET_HEADER_BUILTIN(__shiftleft128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(__shiftright128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128, "UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "cx16")
TARGET_BUILTIN(__builtin_ia32_readeflags_u64, "UOi", "n", "")
@@ -101,6 +93,22 @@ TARGET_BUILTIN(__builtin_ia32_cvtsi2ss64, "V4fV4fOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtusi2sd64, "V2dV2dUOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtusi2ss64, "V4fV4fUOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_directstore_u64, "vULi*ULi", "n", "movdiri")
+
+// AMX
+TARGET_BUILTIN(__builtin_ia32_tile_loadconfig, "vvC*", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tile_storeconfig, "vvC*", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tilerelease, "v", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tilezero, "vUc", "n", "amx-tile")
+
+TARGET_BUILTIN(__builtin_ia32_tileloadd64, "vIUcvC*z", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tileloaddt164, "vIUcvC*z", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tilestored64, "vIUcv*z", "n", "amx-tile")
+
+TARGET_BUILTIN(__builtin_ia32_tdpbssd, "vIUcIUcIUc", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbsud, "vIUcIUcIUc", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbusd, "vIUcIUcIUc", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbuud, "vIUcIUcIUc", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbf16ps, "vIUcIUcIUc", "n", "amx-bf16")
TARGET_BUILTIN(__builtin_ia32_ptwrite64, "vUOi", "n", "ptwrite")
#undef BUILTIN
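
The AMX entries above take tile register numbers (the "IUc" immediates) and, for the load/store forms, a pointer plus a size_t stride ("z"). A rough sketch, assuming an x86-64 target built with -mamx-tile; the 64-byte tile configuration blob is left opaque here:

    #include <stddef.h>

    // 'cfg' must point to a valid 64-byte tile configuration.
    void amx_smoke_test(const void *cfg, const void *src, size_t stride) {
      __builtin_ia32_tile_loadconfig(cfg);        // "vvC*": const void * in, void out
      __builtin_ia32_tileloadd64(0, src, stride); // tile number must be a constant
      __builtin_ia32_tilezero(1);
      __builtin_ia32_tilerelease();
    }
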
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
index 1ecae98b13b1..c7e01eb12851 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
@@ -39,15 +39,15 @@ CODEGENOPT(ObjCAutoRefCountExceptions , 1, 0) ///< Whether ARC should be EH-safe
CODEGENOPT(Backchain , 1, 0) ///< -mbackchain
CODEGENOPT(ControlFlowGuardNoChecks , 1, 0) ///< -cfguard-no-checks
CODEGENOPT(ControlFlowGuard , 1, 0) ///< -cfguard
-CODEGENOPT(CoverageExtraChecksum, 1, 0) ///< Whether we need a second checksum for functions in GCNO files.
-CODEGENOPT(CoverageNoFunctionNamesInData, 1, 0) ///< Do not include function names in GCDA files.
-CODEGENOPT(CoverageExitBlockBeforeBody, 1, 0) ///< Whether to emit the exit block before the body blocks in GCNO files.
CODEGENOPT(CXAAtExit , 1, 1) ///< Use __cxa_atexit for calling destructors.
CODEGENOPT(RegisterGlobalDtorsWithAtExit, 1, 1) ///< Use atexit or __cxa_atexit to register global destructors.
CODEGENOPT(CXXCtorDtorAliases, 1, 0) ///< Emit complete ctors/dtors as linker
///< aliases to base ctors when possible.
CODEGENOPT(DataSections , 1, 0) ///< Set when -fdata-sections is enabled.
CODEGENOPT(UniqueSectionNames, 1, 1) ///< Set for -funique-section-names.
+CODEGENOPT(UniqueBasicBlockSectionNames, 1, 1) ///< Set for -funique-basic-block-section-names;
+                                               ///< produce unique section names when
+                                               ///< basic block sections are used.
ENUM_CODEGENOPT(FramePointer, FramePointerKind, 2, FramePointerKind::None) /// frame-pointer: all,non-leaf,none
CODEGENOPT(DisableFree , 1, 0) ///< Don't free memory.
@@ -58,12 +58,15 @@ CODEGENOPT(DisableLLVMPasses , 1, 0) ///< Don't run any LLVM IR passes to get
///< frontend.
CODEGENOPT(DisableLifetimeMarkers, 1, 0) ///< Don't emit any lifetime markers
CODEGENOPT(DisableO0ImplyOptNone , 1, 0) ///< Don't annonate function with optnone at O0
+CODEGENOPT(ExperimentalStrictFloatingPoint, 1, 0) ///< Enables the new, experimental
+ ///< strict floating point.
CODEGENOPT(ExperimentalNewPassManager, 1, 0) ///< Enables the new, experimental
///< pass manager.
CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
///< pass manager.
CODEGENOPT(DisableRedZone , 1, 0) ///< Set when -mno-red-zone is enabled.
-CODEGENOPT(EnableDebugEntryValues, 1, 0) ///< Emit call site parameter dbg info
+CODEGENOPT(EmitCallSiteInfo, 1, 0) ///< Emit call site info only when both
+                                   ///< '-g' and an optimization level > 0 are used.
CODEGENOPT(IndirectTlsSegRefs, 1, 0) ///< Set when -mno-tls-direct-seg-refs
///< is specified.
CODEGENOPT(DisableTailCalls , 1, 0) ///< Do not emit tail calls.
@@ -106,6 +109,13 @@ CODEGENOPT(XRayAlwaysEmitCustomEvents , 1, 0)
///< Set when -fxray-always-emit-typedevents is enabled.
CODEGENOPT(XRayAlwaysEmitTypedEvents , 1, 0)
+///< Set when -fxray-ignore-loops is enabled.
+CODEGENOPT(XRayIgnoreLoops , 1, 0)
+
+///< Set with -fno-xray-function-index to omit the index section.
+CODEGENOPT(XRayOmitFunctionIndex , 1, 0)
+
+
///< Set the minimum number of instructions in a function to determine selective
///< XRay instrumentation.
VALUE_CODEGENOPT(XRayInstructionThreshold , 32, 200)
@@ -146,16 +156,11 @@ CODEGENOPT(NoWarn , 1, 0) ///< Set when -Wa,--no-warn is enabled.
CODEGENOPT(EnableSegmentedStacks , 1, 0) ///< Set when -fsplit-stack is enabled.
CODEGENOPT(NoInlineLineTables, 1, 0) ///< Whether debug info should contain
///< inline line tables.
+CODEGENOPT(StackClashProtector, 1, 0) ///< Set when -fstack-clash-protection is enabled.
CODEGENOPT(NoImplicitFloat , 1, 0) ///< Set when -mno-implicit-float is enabled.
-CODEGENOPT(NoInfsFPMath , 1, 0) ///< Assume FP arguments, results not +-Inf.
-CODEGENOPT(NoSignedZeros , 1, 0) ///< Allow ignoring the signedness of FP zero
CODEGENOPT(NullPointerIsValid , 1, 0) ///< Assume Null pointer deference is defined.
-CODEGENOPT(Reassociate , 1, 0) ///< Allow reassociation of FP math ops
-CODEGENOPT(ReciprocalMath , 1, 0) ///< Allow FP divisions to be reassociated.
-CODEGENOPT(NoTrappingMath , 1, 0) ///< Set when -fno-trapping-math is enabled.
-CODEGENOPT(NoNaNsFPMath , 1, 0) ///< Assume FP arguments, results not NaN.
-CODEGENOPT(FlushDenorm , 1, 0) ///< Allow FP denorm numbers to be flushed to zero
CODEGENOPT(CorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt
+CODEGENOPT(UniqueInternalLinkageNames, 1, 0) ///< Internal Linkage symbols get unique names.
/// When false, this attempts to generate code as if the result of an
/// overflowing conversion matches the overflowing behavior of a target's native
@@ -227,6 +232,7 @@ CODEGENOPT(SanitizeCoverageTracePC, 1, 0) ///< Enable PC tracing
CODEGENOPT(SanitizeCoverageTracePCGuard, 1, 0) ///< Enable PC tracing with guard
///< in sanitizer coverage.
CODEGENOPT(SanitizeCoverageInline8bitCounters, 1, 0) ///< Use inline 8bit counters.
+CODEGENOPT(SanitizeCoverageInlineBoolFlag, 1, 0) ///< Use inline bool flag.
CODEGENOPT(SanitizeCoveragePCTable, 1, 0) ///< Create a PC Table.
CODEGENOPT(SanitizeCoverageNoPrune, 1, 0) ///< Disable coverage pruning.
CODEGENOPT(SanitizeCoverageStackDepth, 1, 0) ///< Enable max stack depth tracing
@@ -244,7 +250,6 @@ VALUE_CODEGENOPT(TimeTraceGranularity, 32, 500) ///< Minimum time granularity (i
CODEGENOPT(UnrollLoops , 1, 0) ///< Control whether loops are unrolled.
CODEGENOPT(RerollLoops , 1, 0) ///< Control whether loops are rerolled.
CODEGENOPT(NoUseJumpTables , 1, 0) ///< Set when -fno-jump-tables is enabled.
-CODEGENOPT(UnsafeFPMath , 1, 0) ///< Allow unsafe floating point optzns.
CODEGENOPT(UnwindTables , 1, 0) ///< Emit unwind tables.
CODEGENOPT(VectorizeLoop , 1, 0) ///< Run loop vectorizer.
CODEGENOPT(VectorizeSLP , 1, 0) ///< Run SLP vectorizer.
@@ -301,6 +306,9 @@ CODEGENOPT(LTOVisibilityPublicStd, 1, 0)
/// or 0 if unspecified.
VALUE_CODEGENOPT(NumRegisterParameters, 32, 0)
+/// The threshold to put data into small data section.
+VALUE_CODEGENOPT(SmallDataLimit, 32, 0)
+
/// The lower bound for a buffer to be considered for stack protection.
VALUE_CODEGENOPT(SSPBufferSize, 32, 0)
@@ -378,14 +386,12 @@ CODEGENOPT(ForceEmitVTables, 1, 0)
/// Whether to emit an address-significance table into the object file.
CODEGENOPT(Addrsig, 1, 0)
-ENUM_CODEGENOPT(SignReturnAddress, SignReturnAddressScope, 2, SignReturnAddressScope::None)
-ENUM_CODEGENOPT(SignReturnAddressKey, SignReturnAddressKeyValue, 1, SignReturnAddressKeyValue::AKey)
-CODEGENOPT(BranchTargetEnforcement, 1, 0)
-
/// Whether to emit unused static constants.
CODEGENOPT(KeepStaticConsts, 1, 0)
+/// Whether not to follow the AAPCS rule that requires at least one read before storing to a volatile bitfield
+CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
+
#undef CODEGENOPT
#undef ENUM_CODEGENOPT
#undef VALUE_CODEGENOPT
-
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
index 21ac54e8ee12..83c4463c3639 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
@@ -110,13 +110,21 @@ public:
Embed_Marker // Embed a marker as a placeholder for bitcode.
};
- enum class SignReturnAddressScope {
- None, // No signing for any function
- NonLeaf, // Sign the return address of functions that spill LR
- All // Sign the return address of all functions
- };
-
- enum class SignReturnAddressKeyValue { AKey, BKey };
+ // This field stores one of the allowed values for the option
+ // -fbasic-block-sections=. The allowed values with this option are:
+ // {"labels", "all", "list=<file>", "none"}.
+ //
+ // "labels": Only generate basic block symbols (labels) for all basic
+ // blocks, do not generate unique sections for basic blocks.
+ // Use the machine basic block id in the symbol name to
+ // associate profile info from virtual address to machine
+ // basic block.
+ // "all" : Generate basic block sections for all basic blocks.
+ // "list=<file>": Generate basic block sections for a subset of basic blocks.
+ // The functions and the machine basic block ids are specified
+ // in the file.
+ // "none": Disable sections/labels for basic blocks.
+ std::string BBSections;
enum class FramePointerKind {
None, // Omit all frame pointers.
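
The BBSections comment block above enumerates the values accepted by -fbasic-block-sections=. A sketch of typical invocations (not an exhaustive reference; funcs.txt is a hypothetical file name whose expected contents are described by the comment):

    clang -O2 -fbasic-block-sections=labels -c foo.cc           # emit basic block symbols only
    clang -O2 -fbasic-block-sections=all -c foo.cc               # one section per basic block
    clang -O2 -fbasic-block-sections=list=funcs.txt -c foo.cc    # only the subset listed in the file
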
@@ -164,7 +172,10 @@ public:
std::string FloatABI;
/// The floating-point denormal mode to use.
- llvm::DenormalMode FPDenormalMode = llvm::DenormalMode::Invalid;
+ llvm::DenormalMode FPDenormalMode = llvm::DenormalMode::getIEEE();
+
+ /// The floating-point denormal mode to use, for float.
+ llvm::DenormalMode FP32DenormalMode = llvm::DenormalMode::getIEEE();
/// The float precision limit to use, if non-empty.
std::string LimitFloatPrecision;
@@ -311,6 +322,21 @@ public:
/// List of dynamic shared object files to be loaded as pass plugins.
std::vector<std::string> PassPlugins;
+ /// Path to allowlist file specifying which objects
+ /// (files, functions) should exclusively be instrumented
+ /// by sanitizer coverage pass.
+ std::vector<std::string> SanitizeCoverageAllowlistFiles;
+
+ /// Path to blocklist file specifying which objects
+ /// (files, functions) listed for instrumentation by sanitizer
+ /// coverage pass should actually not be instrumented.
+ std::vector<std::string> SanitizeCoverageBlocklistFiles;
+
+ /// Executable and command-line used to create a given CompilerInvocation.
+ /// Most of the time this will be the full -cc1 command.
+ const char *Argv0 = nullptr;
+ ArrayRef<const char *> CommandLineArgs;
+
public:
// Define accessors/mutators for code generation options of enumeration type.
#define CODEGENOPT(Name, Bits, Default)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
index da572957d10d..1716325a9931 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
@@ -27,7 +27,10 @@ enum class CudaVersion {
CUDA_92,
CUDA_100,
CUDA_101,
- LATEST = CUDA_101,
+ CUDA_102,
+ CUDA_110,
+ LATEST = CUDA_110,
+ LATEST_SUPPORTED = CUDA_101,
};
const char *CudaVersionToString(CudaVersion V);
// Input is "Major.Minor"
@@ -50,6 +53,7 @@ enum class CudaArch {
SM_70,
SM_72,
SM_75,
+ SM_80,
GFX600,
GFX601,
GFX700,
@@ -70,38 +74,23 @@ enum class CudaArch {
GFX1010,
GFX1011,
GFX1012,
+ GFX1030,
LAST,
};
-const char *CudaArchToString(CudaArch A);
-// The input should have the form "sm_20".
-CudaArch StringToCudaArch(llvm::StringRef S);
+static inline bool IsNVIDIAGpuArch(CudaArch A) {
+ return A >= CudaArch::SM_20 && A < CudaArch::GFX600;
+}
-enum class CudaVirtualArch {
- UNKNOWN,
- COMPUTE_20,
- COMPUTE_30,
- COMPUTE_32,
- COMPUTE_35,
- COMPUTE_37,
- COMPUTE_50,
- COMPUTE_52,
- COMPUTE_53,
- COMPUTE_60,
- COMPUTE_61,
- COMPUTE_62,
- COMPUTE_70,
- COMPUTE_72,
- COMPUTE_75,
- COMPUTE_AMDGCN,
-};
-const char *CudaVirtualArchToString(CudaVirtualArch A);
+static inline bool IsAMDGpuArch(CudaArch A) {
+ return A >= CudaArch::GFX600 && A < CudaArch::LAST;
+}
-// The input should have the form "compute_20".
-CudaVirtualArch StringToCudaVirtualArch(llvm::StringRef S);
+const char *CudaArchToString(CudaArch A);
+const char *CudaArchToVirtualArchString(CudaArch A);
-/// Get the compute_xx corresponding to an sm_yy.
-CudaVirtualArch VirtualArchForCudaArch(CudaArch A);
+// The input should have the form "sm_20".
+CudaArch StringToCudaArch(llvm::StringRef S);
/// Get the earliest CudaVersion that supports the given CudaArch.
CudaVersion MinVersionForCudaArch(CudaArch A);
@@ -117,6 +106,7 @@ enum class CudaFeature {
CUDA_USES_FATBIN_REGISTER_END,
};
+CudaVersion ToCudaVersion(llvm::VersionTuple);
bool CudaFeatureEnabled(llvm::VersionTuple, CudaFeature);
bool CudaFeatureEnabled(CudaVersion, CudaFeature);
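
A small sketch of how the helpers declared above fit together; it assumes the clang/LLVM include tree, and every function used here is declared in clang/Basic/Cuda.h as shown in the diff:

    #include "clang/Basic/Cuda.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    void describeOffloadArch(llvm::StringRef Name) {
      clang::CudaArch Arch = clang::StringToCudaArch(Name);  // e.g. "sm_80" or "gfx1030"
      if (clang::IsNVIDIAGpuArch(Arch))
        llvm::outs() << clang::CudaArchToString(Arch) << " -> "
                     << clang::CudaArchToVirtualArchString(Arch) << "\n";
      else if (clang::IsAMDGpuArch(Arch))
        llvm::outs() << clang::CudaArchToString(Arch) << " (AMDGPU offload arch)\n";
    }
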
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
index d5bbc604819f..866988ee3f01 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
@@ -40,6 +40,7 @@ def Named : DeclNode<Decl, "named declarations", 1>;
def Binding : DeclNode<Value>;
def OMPDeclareReduction : DeclNode<Value>, DeclContext;
def OMPDeclareMapper : DeclNode<Value>, DeclContext;
+ def MSGuid : DeclNode<Value>;
def Declarator : DeclNode<Value, "declarators", 1>;
def Field : DeclNode<Declarator, "non-static data members">;
def ObjCIvar : DeclNode<Field>;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
index ce996b615bba..304207779c0f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
@@ -21,11 +21,11 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <limits>
@@ -37,6 +37,10 @@
#include <utility>
#include <vector>
+namespace llvm {
+class Error;
+}
+
namespace clang {
class DeclContext;
@@ -95,7 +99,7 @@ public:
FixItHint Hint;
Hint.RemoveRange =
CharSourceRange::getCharRange(InsertionLoc, InsertionLoc);
- Hint.CodeToInsert = Code;
+ Hint.CodeToInsert = std::string(Code);
Hint.BeforePreviousInsertions = BeforePreviousInsertions;
return Hint;
}
@@ -130,7 +134,7 @@ public:
StringRef Code) {
FixItHint Hint;
Hint.RemoveRange = RemoveRange;
- Hint.CodeToInsert = Code;
+ Hint.CodeToInsert = std::string(Code);
return Hint;
}
@@ -1006,6 +1010,11 @@ protected:
/// RAII class that determines when any errors have occurred
/// between the time the instance was created and the time it was
/// queried.
+///
+/// Note that you almost certainly do not want to use this. It's usually
+/// meaningless to ask whether a particular scope triggered an error message,
+/// because error messages outside that scope can mark things invalid (or cause
+/// us to reach an error limit), which can suppress errors within that scope.
class DiagnosticErrorTrap {
DiagnosticsEngine &Diag;
unsigned NumErrors;
@@ -1155,7 +1164,7 @@ public:
assert(NumArgs < DiagnosticsEngine::MaxArguments &&
"Too many arguments to diagnostic!");
DiagObj->DiagArgumentsKind[NumArgs] = DiagnosticsEngine::ak_std_string;
- DiagObj->DiagArgumentsStr[NumArgs++] = S;
+ DiagObj->DiagArgumentsStr[NumArgs++] = std::string(S);
}
void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
@@ -1177,7 +1186,7 @@ public:
DiagObj->DiagFixItHints.push_back(Hint);
}
- void addFlagValue(StringRef V) const { DiagObj->FlagValue = V; }
+ void addFlagValue(StringRef V) const { DiagObj->FlagValue = std::string(V); }
};
struct AddFlagValue {
@@ -1217,9 +1226,7 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, int I) {
// We use enable_if here to prevent that this overload is selected for
// pointers or other arguments that are implicitly convertible to bool.
template <typename T>
-inline
-typename std::enable_if<std::is_same<T, bool>::value,
- const DiagnosticBuilder &>::type
+inline std::enable_if_t<std::is_same<T, bool>::value, const DiagnosticBuilder &>
operator<<(const DiagnosticBuilder &DB, T I) {
DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
return DB;
@@ -1249,9 +1256,9 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
// other arguments that derive from DeclContext (e.g., RecordDecls) will not
// match.
template <typename T>
-inline typename std::enable_if<
- std::is_same<typename std::remove_const<T>::type, DeclContext>::value,
- const DiagnosticBuilder &>::type
+inline std::enable_if_t<
+ std::is_same<std::remove_const_t<T>, DeclContext>::value,
+ const DiagnosticBuilder &>
operator<<(const DiagnosticBuilder &DB, T *DC) {
DB.AddTaggedVal(reinterpret_cast<intptr_t>(DC),
DiagnosticsEngine::ak_declcontext);
@@ -1290,6 +1297,29 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
return DB;
}
+inline const DiagnosticBuilder &
+operator<<(const DiagnosticBuilder &DB,
+ const llvm::Optional<SourceRange> &Opt) {
+ if (Opt)
+ DB << *Opt;
+ return DB;
+}
+
+inline const DiagnosticBuilder &
+operator<<(const DiagnosticBuilder &DB,
+ const llvm::Optional<CharSourceRange> &Opt) {
+ if (Opt)
+ DB << *Opt;
+ return DB;
+}
+
+inline const DiagnosticBuilder &
+operator<<(const DiagnosticBuilder &DB, const llvm::Optional<FixItHint> &Opt) {
+ if (Opt)
+ DB << *Opt;
+ return DB;
+}
+
/// A nullability kind paired with a bit indicating whether it used a
/// context-sensitive keyword.
using DiagNullabilityKind = std::pair<NullabilityKind, bool>;
@@ -1307,11 +1337,8 @@ inline DiagnosticBuilder DiagnosticsEngine::Report(SourceLocation Loc,
return DiagnosticBuilder(this);
}
-inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
- llvm::Error &&E) {
- DB.AddString(toString(std::move(E)));
- return DB;
-}
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ llvm::Error &&E);
inline DiagnosticBuilder DiagnosticsEngine::Report(unsigned DiagID) {
return Report(SourceLocation(), DiagID);
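
With the operator<< overloads added above, an optional source range can be streamed directly into a DiagnosticBuilder and is simply dropped when it has no value. A minimal sketch; the diagnostic ID and the DiagnosticsEngine are assumed to come from the surrounding code:

    #include "clang/Basic/Diagnostic.h"
    #include "llvm/ADT/Optional.h"

    void emitWithOptionalRange(clang::DiagnosticsEngine &Diags,
                               clang::SourceLocation Loc, unsigned DiagID,
                               llvm::Optional<clang::SourceRange> Range) {
      // The overload above silently drops the range when Range is None.
      Diags.Report(Loc, DiagID) << Range;
    }
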
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
index a0c15ba1cb05..10bedaaf7aba 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -57,6 +57,9 @@ def note_constexpr_non_global : Note<
def note_constexpr_dynamic_alloc : Note<
"%select{pointer|reference}0 to %select{|subobject of }1"
"heap-allocated object is not a constant expression">;
+def note_consteval_address_accessible : Note<
+ "%select{pointer|reference}0 to a consteval declaration "
+ "is not a constant expression">;
def note_constexpr_uninitialized : Note<
"%select{|sub}0object of type %1 is not initialized">;
def note_constexpr_subobject_declared_here : Note<
@@ -98,8 +101,16 @@ def note_constexpr_null_subobject : Note<
"access array element of|perform pointer arithmetic on|"
"access real component of|"
"access imaginary component of}0 null pointer">;
+def note_constexpr_function_param_value_unknown : Note<
+ "function parameter %0 with unknown value cannot be used in a constant "
+ "expression">;
+def note_constexpr_var_init_unknown : Note<
+ "initializer of %0 is unknown">;
def note_constexpr_var_init_non_constant : Note<
"initializer of %0 is not a constant expression">;
+def note_constexpr_var_init_weak : Note<
+ "initializer of weak variable %0 is not considered constant because "
+ "it may be different at runtime">;
def note_constexpr_typeid_polymorphic : Note<
"typeid applied to expression of polymorphic type %0 is "
"not allowed in a constant expression in C++ standards before C++20">;
@@ -156,6 +167,9 @@ def note_constexpr_access_mutable : Note<
"mutable member %1 is not allowed in a constant expression">;
def note_constexpr_ltor_non_const_int : Note<
"read of non-const variable %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_non_integral : Note<
+ "read of variable %0 of non-integral, non-enumeration type %1 "
+ "is not allowed in a constant expression">;
def note_constexpr_ltor_non_constexpr : Note<
"read of non-constexpr variable %0 is not allowed in a constant expression">;
def note_constexpr_ltor_incomplete_type : Note<
@@ -183,6 +197,9 @@ def note_constexpr_access_inactive_union_member : Note<
"construction of subobject of|destruction of}0 "
"member %1 of union with %select{active member %3|no active member}2 "
"is not allowed in a constant expression">;
+def note_constexpr_union_member_change_during_init : Note<
+ "assignment would change active union member during the initialization of "
+ "a different member of the same union">;
def note_constexpr_access_static_temporary : Note<
"%select{read of|read of|assignment to|increment of|decrement of|"
"member call on|dynamic_cast of|typeid applied to|reconstruction of|"
@@ -238,6 +255,12 @@ def note_constexpr_unsupported_unsized_array : Note<
def note_constexpr_unsized_array_indexed : Note<
"indexing of array without known bound is not allowed "
"in a constant expression">;
+def note_constexpr_memcmp_unsupported : Note<
+ "constant evaluation of %0 between arrays of types %1 and %2 "
+ "is not supported; only arrays of narrow character types can be compared">;
+def note_constexpr_memchr_unsupported : Note<
+ "constant evaluation of %0 on array of type %1 "
+ "is not supported; only arrays of narrow character types can be searched">;
def note_constexpr_memcpy_null : Note<
"%select{source|destination}2 of "
"'%select{%select{memcpy|wmemcpy}1|%select{memmove|wmemmove}1}0' "
@@ -326,12 +349,17 @@ def note_constexpr_delete_base_nonvirt_dtor : Note<
def note_constexpr_memory_leak : Note<
"allocation performed here was not deallocated"
"%plural{0:|: (along with %0 other memory leak%s0)}0">;
+def note_constexpr_unsupported_layout : Note<
+ "type %0 has unexpected layout">;
def err_experimental_clang_interp_failed : Error<
"the experimental clang interpreter failed to evaluate an expression">;
def warn_integer_constant_overflow : Warning<
"overflow in expression; result is %0 with type %1">,
InGroup<DiagGroup<"integer-overflow">>;
+def warn_fixedpoint_constant_overflow : Warning<
+ "overflow in expression; result is %0 with type %1">,
+ InGroup<DiagGroup<"fixed-point-overflow">>;
// This is a temporary diagnostic, and shall be removed once our
// implementation is complete, and like the preceding constexpr notes belongs
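
The new memcmp/memchr notes above describe a restriction in the constant evaluator: only arrays of narrow character types can be compared or searched. A small sketch (C++14 or later) of code that runs into it:

    constexpr bool same_ints() {
      int a[2] = {1, 2};
      int b[2] = {1, 2};
      return __builtin_memcmp(a, b, sizeof(a)) == 0;  // fine at run time
    }
    constexpr bool ok = same_ints();  // error: not a constant expression;
                                      // the evaluator emits the memcmp note above
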
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 5c7803d71e12..65e3755efd22 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -239,6 +239,18 @@ def note_invalid_subexpr_in_const_expr : Note<
let CategoryName = "Inline Assembly Issue" in {
def err_asm_invalid_type_in_input : Error<
"invalid type %0 in asm input for constraint '%1'">;
+
+ def err_asm_invalid_type : Error<
+ "invalid type %0 in asm %select{input|output}1">;
+
+ def warn_stack_clash_protection_inline_asm : Warning<
+ "Unable to protect inline asm that clobbers stack pointer against stack clash">,
+ InGroup<DiagGroup<"stack-protector">>;
+
+ def warn_slh_does_not_support_asm_goto
+ : Warning<"Speculative load hardening does not protect functions with "
+ "asm goto">,
+ InGroup<DiagGroup<"slh-asm-goto">>;
}
// Sema && Serialization
@@ -282,6 +294,9 @@ def err_file_modified : Error<
"file '%0' modified since it was first processed">, DefaultFatal;
def err_file_too_large : Error<
"sorry, unsupported: file '%0' is too large for Clang to process">;
+def err_include_too_large : Error<
+ "sorry, this include generates a translation unit too large for"
+ " Clang to process.">, DefaultFatal;
def err_unsupported_bom : Error<"%0 byte order mark detected in '%1', but "
"encoding is not supported">, DefaultFatal;
def err_unable_to_rename_temp : Error<
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 48ece91d3c45..558639ecad6a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -50,14 +50,22 @@ def warn_drv_avr_stdlib_not_linked: Warning<
InGroup<AVRRtlibLinkingQuirks>;
def err_drv_cuda_bad_gpu_arch : Error<"Unsupported CUDA gpu architecture: %0">;
def err_drv_no_cuda_installation : Error<
- "cannot find CUDA installation. Provide its path via --cuda-path, or pass "
+ "cannot find CUDA installation. Provide its path via --cuda-path, or pass "
"-nocudainc to build without CUDA includes.">;
def err_drv_no_cuda_libdevice : Error<
"cannot find libdevice for %0. Provide path to different CUDA installation "
"via --cuda-path, or pass -nocudalib to build without linking with libdevice.">;
+
+def err_drv_no_rocm_device_lib : Error<
+ "cannot find ROCm device library%select{| for %1}0. Provide its path via --rocm-path or "
+ "--rocm-device-lib-path, or pass -nogpulib to build without ROCm device library.">;
+def err_drv_no_hip_runtime : Error<
+ "cannot find HIP runtime. Provide its path via --rocm-path, or pass "
+ "-nogpuinc to build without HIP runtime.">;
+
def err_drv_cuda_version_unsupported : Error<
"GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
- "but installation at %3 is %4. Use --cuda-path to specify a different CUDA "
+ "but installation at %3 is %4. Use --cuda-path to specify a different CUDA "
"install, pass a different GPU arch with --cuda-gpu-arch, or pass "
"--no-cuda-version-check.">;
def warn_drv_unknown_cuda_version: Warning<
@@ -150,6 +158,10 @@ def err_drv_invalid_argument_to_option : Error<
"invalid argument '%0' to -%1">;
def err_drv_malformed_sanitizer_blacklist : Error<
"malformed sanitizer blacklist: '%0'">;
+def err_drv_malformed_sanitizer_coverage_whitelist : Error<
+ "malformed sanitizer coverage whitelist: '%0'">;
+def err_drv_malformed_sanitizer_coverage_blacklist : Error<
+ "malformed sanitizer coverage blacklist: '%0'">;
def err_drv_duplicate_config : Error<
"no more than one option '--config' is allowed">;
def err_drv_config_file_not_exist : Error<
@@ -322,6 +334,8 @@ def warn_drv_object_size_disabled_O0 : Warning<
InGroup<InvalidCommandLineArgument>, DefaultWarnNoWerror;
def err_invalid_branch_protection: Error <
"invalid branch protection option '%0' in '%1'">;
+def err_invalid_sls_hardening : Error<
+ "invalid sls hardening option '%0' in '%1'">;
def note_drv_command_failed_diag_msg : Note<
"diagnostic msg: %0">;
@@ -344,6 +358,8 @@ def err_analyzer_checker_option_unknown : Error<
"checker '%0' has no option called '%1'">;
def err_analyzer_checker_option_invalid_input : Error<
"invalid input for checker option '%0', that expects %1">;
+def err_analyzer_checker_incompatible_analyzer_option : Error<
+ "checker cannot be enabled with analyzer option '%0' == %1">;
def err_drv_invalid_hvx_length : Error<
"-mhvx-length is not supported without a -mhvx/-mhvx= flag">;
@@ -377,6 +393,9 @@ def err_drv_ropi_incompatible_with_cxx : Error<
def err_stack_tagging_requires_hardware_feature : Error<
"'-fsanitize=memtag' requires hardware support (+memtag)">;
+def err_cmse_pi_are_incompatible : Error<
+ "cmse is not compatible with %select{RWPI|ROPI}0">;
+
def warn_target_unsupported_nan2008 : Warning<
"ignoring '-mnan=2008' option because the '%0' architecture does not support it">,
InGroup<UnsupportedNan>;
@@ -399,6 +418,9 @@ def warn_drv_unsupported_gpopt : Warning<
"ignoring '-mgpopt' option as it cannot be used with %select{|the implicit"
" usage of }0-mabicalls">,
InGroup<UnsupportedGPOpt>;
+def warn_drv_unsupported_sdata : Warning<
+ "ignoring '-msmall-data-limit=' with -mcmodel=large for -fpic or RV64">,
+ InGroup<OptionIgnored>;
def warn_drv_unsupported_longcalls : Warning<
"ignoring '-mlong-calls' option as it is not currently supported with "
"%select{|the implicit usage of }0-mabicalls">,
@@ -444,13 +466,13 @@ def note_drv_verify_prefix_spelling : Note<
"-verify prefixes must start with a letter and contain only alphanumeric"
" characters, hyphens, and underscores">;
-def warn_drv_experimental_isel_incomplete : Warning<
- "-fexperimental-isel support for the '%0' architecture is incomplete">,
- InGroup<ExperimentalISel>;
+def warn_drv_global_isel_incomplete : Warning<
+ "-fglobal-isel support for the '%0' architecture is incomplete">,
+ InGroup<GlobalISel>;
-def warn_drv_experimental_isel_incomplete_opt : Warning<
- "-fexperimental-isel support is incomplete for this architecture at the current optimization level">,
- InGroup<ExperimentalISel>;
+def warn_drv_global_isel_incomplete_opt : Warning<
+ "-fglobal-isel support is incomplete for this architecture at the current optimization level">,
+ InGroup<GlobalISel>;
def warn_drv_moutline_unsupported_opt : Warning<
"The '%0' architecture does not support -moutline; flag ignored">,
@@ -464,6 +486,12 @@ def err_drv_trivial_auto_var_init_zero_disabled : Error<
"-ftrivial-auto-var-init=zero hasn't been enabled. Enable it at your own peril for benchmarking purpose only with "
"-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">;
+def err_drv_trivial_auto_var_init_stop_after_missing_dependency : Error<
+ "-ftrivial-auto-var-init-stop-after=* is used without -ftrivial-auto-var-init=zero or -ftrivial-auto-var-init=pattern.">;
+
+def err_drv_trivial_auto_var_init_stop_after_invalid_value : Error<
+ "-ftrivial-auto-var-init-stop-after=* only accepts positive integers.">;
+
def warn_drv_msp430_hwmult_unsupported : Warning<"the given MCU does not "
"support hardware multiply, but -mhwmult is set to %0.">,
InGroup<InvalidCommandLineArgument>;
@@ -481,4 +509,6 @@ def warn_drv_libstdcxx_not_found : Warning<
InGroup<DiagGroup<"stdlibcxx-not-found">>;
def err_drv_cannot_mix_options : Error<"cannot specify '%1' along with '%0'">;
+
+def err_drv_invalid_object_mode : Error<"OBJECT_MODE setting %0 is not recognized and is not a valid setting.">;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 87fdfc89c634..b202d2abffa0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -37,6 +37,12 @@ def note_fe_backend_plugin: Note<"%0">, BackendInfo;
def warn_fe_override_module : Warning<
"overriding the module target triple with %0">,
InGroup<DiagGroup<"override-module">>;
+def warn_fe_backend_unsupported_fp_rounding : Warning<
+ "overriding currently unsupported rounding mode on this target">,
+ InGroup<UnsupportedFPOpt>;
+def warn_fe_backend_unsupported_fp_exceptions : Warning<
+ "overriding currently unsupported use of floating point exceptions "
+ "on this target">, InGroup<UnsupportedFPOpt>;
def remark_fe_backend_optimization_remark : Remark<"%0">, BackendInfo,
InGroup<BackendOptimizationRemark>;
@@ -61,6 +67,7 @@ def note_fe_backend_invalid_loc : Note<"could "
"not determine the original source location for %0:%1:%2">, BackendInfo;
def err_fe_backend_unsupported : Error<"%0">, BackendInfo;
+def warn_fe_backend_unsupported : Warning<"%0">, BackendInfo;
def err_fe_invalid_code_complete_file : Error<
"cannot locate code-completion file %0">, DefaultFatal;
@@ -109,12 +116,18 @@ def warn_fe_concepts_ts_flag : Warning<
"-fconcepts-ts is deprecated - use '-std=c++20' for Concepts support">,
InGroup<Deprecated>;
+def err_fe_unable_to_load_basic_block_sections_file : Error<
+ "unable to load basic block sections function list: '%0'">;
+
def warn_fe_serialized_diag_merge_failure : Warning<
"unable to merge a subprocess's serialized diagnostics">,
InGroup<SerializedDiagnostics>;
def warn_fe_serialized_diag_failure : Warning<
"unable to open file %0 for serializing diagnostics (%1)">,
InGroup<SerializedDiagnostics>;
+def warn_fe_serialized_diag_failure_during_finalisation : Warning<
+ "Received warning after diagnostic serialization teardown was underway: %0">,
+ InGroup<SerializedDiagnostics>;
def err_verify_missing_line : Error<
"missing or invalid line number following '@' in expected %0">;
@@ -236,6 +249,12 @@ def err_function_needs_feature : Error<
"always_inline function %1 requires target feature '%2', but would "
"be inlined into function %0 that is compiled without support for '%2'">;
+def warn_avx_calling_convention
+ : Warning<"AVX vector %select{return|argument}0 of type %1 without '%2' "
+ "enabled changes the ABI">,
+ InGroup<DiagGroup<"psabi">>;
+def err_avx_calling_convention : Error<warn_avx_calling_convention.Text>;
+
def err_alias_to_undefined : Error<
"%select{alias|ifunc}0 must point to a defined "
"%select{variable or |}1function">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
index 8e43052f30e2..1e829be4028e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
@@ -60,6 +60,7 @@ def UndefinedBoolConversion : DiagGroup<"undefined-bool-conversion">;
def BoolConversion : DiagGroup<"bool-conversion", [PointerBoolConversion,
UndefinedBoolConversion]>;
def IntConversion : DiagGroup<"int-conversion">;
+def ClassConversion: DiagGroup<"class-conversion">;
def DeprecatedEnumCompareConditional :
DiagGroup<"deprecated-enum-compare-conditional">;
def EnumCompareConditional : DiagGroup<"enum-compare-conditional",
@@ -85,7 +86,9 @@ def ObjCSignedCharBoolImplicitIntConversion :
DiagGroup<"objc-signed-char-bool-implicit-int-conversion">;
def ImplicitIntConversion : DiagGroup<"implicit-int-conversion",
[ObjCSignedCharBoolImplicitIntConversion]>;
-def ImplicitIntFloatConversion : DiagGroup<"implicit-int-float-conversion">;
+def ImplicitConstIntFloatConversion : DiagGroup<"implicit-const-int-float-conversion">;
+def ImplicitIntFloatConversion : DiagGroup<"implicit-int-float-conversion",
+ [ImplicitConstIntFloatConversion]>;
def ObjCSignedCharBoolImplicitFloatConversion :
DiagGroup<"objc-signed-char-bool-implicit-float-conversion">;
def ImplicitFloatConversion : DiagGroup<"implicit-float-conversion",
@@ -99,10 +102,12 @@ def FloatConversion :
DiagGroup<"float-conversion", [FloatOverflowConversion,
FloatZeroConversion]>;
+def FrameAddress : DiagGroup<"frame-address">;
def DoublePromotion : DiagGroup<"double-promotion">;
def EnumTooLarge : DiagGroup<"enum-too-large">;
def UnsupportedNan : DiagGroup<"unsupported-nan">;
def UnsupportedAbs : DiagGroup<"unsupported-abs">;
+def UnsupportedFPOpt : DiagGroup<"unsupported-floating-point-opt">;
def UnsupportedCB : DiagGroup<"unsupported-cb">;
def UnsupportedGPOpt : DiagGroup<"unsupported-gpopt">;
def UnsupportedTargetOpt : DiagGroup<"unsupported-target-opt">;
@@ -192,6 +197,7 @@ def CXX20Designator : DiagGroup<"c++20-designator">;
// designators (including the warning controlled by -Wc++20-designator).
def C99Designator : DiagGroup<"c99-designator", [CXX20Designator]>;
def GNUDesignator : DiagGroup<"gnu-designator">;
+def DtorName : DiagGroup<"dtor-name">;
def DynamicExceptionSpec
: DiagGroup<"dynamic-exception-spec", [DeprecatedDynamicExceptionSpec]>;
@@ -274,9 +280,12 @@ def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
-def CXX11WarnOverrideDestructor :
+def CXX11WarnInconsistentOverrideDestructor :
DiagGroup<"inconsistent-missing-destructor-override">;
-def CXX11WarnOverrideMethod : DiagGroup<"inconsistent-missing-override">;
+def CXX11WarnInconsistentOverrideMethod :
+ DiagGroup<"inconsistent-missing-override">;
+def CXX11WarnSuggestOverrideDestructor : DiagGroup<"suggest-destructor-override">;
+def CXX11WarnSuggestOverride : DiagGroup<"suggest-override">;
// Original name of this warning in Clang
def : DiagGroup<"c++0x-narrowing", [CXX11Narrowing]>;
@@ -346,6 +355,7 @@ def Dangling : DiagGroup<"dangling", [DanglingField,
DanglingGsl,
ReturnStackAddress]>;
def DistributedObjectModifiers : DiagGroup<"distributed-object-modifiers">;
+def ExcessInitializers : DiagGroup<"excess-initializers">;
def ExpansionToDefined : DiagGroup<"expansion-to-defined">;
def FlagEnum : DiagGroup<"flag-enum">;
def IncrementBool : DiagGroup<"increment-bool", [DeprecatedIncrementBool]>;
@@ -376,6 +386,8 @@ def IncompleteModule : DiagGroup<"incomplete-module",
def PrivateModule : DiagGroup<"private-module">;
def CXX11InlineNamespace : DiagGroup<"c++11-inline-namespace">;
+def InlineNamespaceReopenedNoninline
+ : DiagGroup<"inline-namespace-reopened-noninline">;
def InvalidNoreturn : DiagGroup<"invalid-noreturn">;
def InvalidSourceEncoding : DiagGroup<"invalid-source-encoding">;
def KNRPromotedParameter : DiagGroup<"knr-promoted-parameter">;
@@ -619,8 +631,10 @@ def Unicode : DiagGroup<"unicode">;
def UninitializedMaybe : DiagGroup<"conditional-uninitialized">;
def UninitializedSometimes : DiagGroup<"sometimes-uninitialized">;
def UninitializedStaticSelfInit : DiagGroup<"static-self-init">;
+def UninitializedConstReference : DiagGroup<"uninitialized-const-reference">;
def Uninitialized : DiagGroup<"uninitialized", [UninitializedSometimes,
- UninitializedStaticSelfInit]>;
+ UninitializedStaticSelfInit,
+ UninitializedConstReference]>;
def IgnoredPragmaIntrinsic : DiagGroup<"ignored-pragma-intrinsic">;
// #pragma optimize is often used to avoid to work around MSVC codegen bugs or
// to disable inlining. It's not completely clear what alternative to suggest
@@ -837,6 +851,13 @@ def IncompatibleExceptionSpec : DiagGroup<"incompatible-exception-spec">;
def IntToVoidPointerCast : DiagGroup<"int-to-void-pointer-cast">;
def IntToPointerCast : DiagGroup<"int-to-pointer-cast",
[IntToVoidPointerCast]>;
+def VoidPointerToEnumCast : DiagGroup<"void-pointer-to-enum-cast">;
+def VoidPointerToIntCast : DiagGroup<"void-pointer-to-int-cast",
+ [VoidPointerToEnumCast]>;
+def PointerToEnumCast : DiagGroup<"pointer-to-enum-cast",
+ [VoidPointerToEnumCast]>;
+def PointerToIntCast : DiagGroup<"pointer-to-int-cast",
+ [PointerToEnumCast, VoidPointerToIntCast]>;
def Move : DiagGroup<"move", [
PessimizingMove,
@@ -864,6 +885,7 @@ def Most : DiagGroup<"most", [
DeleteNonVirtualDtor,
Format,
ForLoopAnalysis,
+ FrameAddress,
Implicit,
InfiniteRecursion,
IntInBoolContext,
@@ -975,6 +997,9 @@ def C11 : DiagGroup<"c11-extensions">;
// A warning group for warnings about using C99 features as extensions.
def C99 : DiagGroup<"c99-extensions", [C99Designator]>;
+// A warning group for warnings about using C2x features as extensions.
+def C2x : DiagGroup<"c2x-extensions">;
+
// A warning group for warnings about GCC extensions.
def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
GNUAutoType,
@@ -1078,11 +1103,15 @@ def ObjCSignedCharBool : DiagGroup<"objc-signed-char-bool",
ObjCBoolConstantConversion,
TautologicalObjCBoolCompare]>;
+def ObjCPotentiallyDirectSelector : DiagGroup<"potentially-direct-selector">;
+def ObjCStrictPotentiallyDirectSelector :
+ DiagGroup<"strict-potentially-direct-selector",
+ [ObjCPotentiallyDirectSelector]>;
+
// Inline ASM warnings.
def ASMOperandWidths : DiagGroup<"asm-operand-widths">;
-def ASMIgnoredQualifier : DiagGroup<"asm-ignored-qualifier">;
def ASM : DiagGroup<"asm", [
- ASMOperandWidths, ASMIgnoredQualifier
+ ASMOperandWidths
]>;
// OpenMP warnings.
@@ -1145,8 +1174,8 @@ def UnknownArgument : DiagGroup<"unknown-argument">;
// compiling OpenCL C/C++ but which is not compatible with the SPIR spec.
def SpirCompat : DiagGroup<"spir-compat">;
-// Warning for the experimental-isel options.
-def ExperimentalISel : DiagGroup<"experimental-isel">;
+// Warning for the GlobalISel options.
+def GlobalISel : DiagGroup<"global-isel">;
// A warning group specifically for warnings related to function
// multiversioning.
@@ -1160,3 +1189,37 @@ def CrossTU : DiagGroup<"ctu">;
def CTADMaybeUnsupported : DiagGroup<"ctad-maybe-unsupported">;
def FortifySource : DiagGroup<"fortify-source">;
+
+def MaxTokens : DiagGroup<"max-tokens"> {
+ code Documentation = [{
+The warning is issued if the number of pre-processor tokens exceeds
+the token limit, which can be set in three ways:
+
+1. As a limit at a specific point in a file, using the ``clang max_tokens_here``
+ pragma:
+
+  .. code-block:: c++
+ #pragma clang max_tokens_here 1234
+
+2. As a per-translation unit limit, using the ``-fmax-tokens=`` command-line
+ flag:
+
+  .. code-block:: console
+ clang -c a.cpp -fmax-tokens=1234
+
+3. As a per-translation unit limit using the ``clang max_tokens_total`` pragma,
+ which works like and overrides the ``-fmax-tokens=`` flag:
+
+  .. code-block:: c++
+ #pragma clang max_tokens_total 1234
+
+These limits can be helpful in limiting code growth through included files.
+
+Setting a token limit of zero means no limit.
+
+Note that the warning is disabled by default, so -Wmax-tokens must be used
+together with the pragmas or the -fmax-tokens flag to get any warnings.
+}];
+}
+
+def WebAssemblyExceptionSpec : DiagGroup<"wasm-exception-spec">;
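
The new cast-warning hierarchy above means -Wno-pointer-to-int-cast also covers the void-pointer and enum sub-groups, while the narrower flags can still be toggled individually. A small C sketch of the kind of code these groups classify, assuming an LP64 target where 'int' is narrower than a pointer (exact wording and grouping of the warnings may vary by version):

    #include <stdint.h>

    int truncating(void *p)  { return (int)p; }       // -Wvoid-pointer-to-int-cast
    int truncating2(long *p) { return (int)p; }       // -Wpointer-to-int-cast
    intptr_t fine(void *p)   { return (intptr_t)p; }  // no warning: wide enough
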
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
index 5b9391b5a452..00c939650e54 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
@@ -28,12 +28,12 @@ namespace clang {
// Size of each of the diagnostic categories.
enum {
DIAG_SIZE_COMMON = 300,
- DIAG_SIZE_DRIVER = 200,
+ DIAG_SIZE_DRIVER = 250,
DIAG_SIZE_FRONTEND = 150,
DIAG_SIZE_SERIALIZATION = 120,
DIAG_SIZE_LEX = 400,
DIAG_SIZE_PARSE = 600,
- DIAG_SIZE_AST = 200,
+ DIAG_SIZE_AST = 250,
DIAG_SIZE_COMMENT = 100,
DIAG_SIZE_CROSSTU = 100,
DIAG_SIZE_SEMA = 4000,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
index 9e0449d34104..9cb06cf5b5e1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -33,7 +33,7 @@ def warn_cxx98_compat_less_colon_colon : Warning<
def warn_cxx17_compat_spaceship : Warning<
"'<=>' operator is incompatible with C++ standards before C++20">,
InGroup<CXXPre20Compat>, DefaultIgnore;
-def warn_cxx2a_compat_spaceship : Warning<
+def warn_cxx20_compat_spaceship : Warning<
"'<=>' is a single token in C++20; "
"add a space to avoid a change in behavior">,
InGroup<CXX20Compat>;
@@ -78,7 +78,7 @@ def ext_token_used : Extension<"extension used">,
def warn_cxx11_keyword : Warning<"'%0' is a keyword in C++11">,
InGroup<CXX11Compat>, DefaultIgnore;
-def warn_cxx2a_keyword : Warning<"'%0' is a keyword in C++20">,
+def warn_cxx20_keyword : Warning<"'%0' is a keyword in C++20">,
InGroup<CXX20Compat>, DefaultIgnore;
def ext_unterminated_char_or_string : ExtWarn<
@@ -175,7 +175,7 @@ def ext_unknown_escape : ExtWarn<"unknown escape sequence '\\%0'">,
def err_invalid_digit : Error<
"invalid digit '%0' in %select{decimal|octal|binary}1 constant">;
def err_invalid_suffix_constant : Error<
- "invalid suffix '%0' on %select{integer|floating}1 constant">;
+ "invalid suffix '%0' on %select{integer|floating|fixed-point}1 constant">;
def warn_cxx11_compat_digit_separator : Warning<
"digit separators are incompatible with C++ standards before C++14">,
InGroup<CXXPre14Compat>, DefaultIgnore;
@@ -312,6 +312,9 @@ def pp_macro_not_used : Warning<"macro is not used">, DefaultIgnore,
def warn_pp_undef_identifier : Warning<
"%0 is not defined, evaluates to 0">,
InGroup<DiagGroup<"undef">>, DefaultIgnore;
+def warn_pp_undef_prefix : Warning<
+ "%0 is not defined, evaluates to 0">,
+ InGroup<DiagGroup<"undef-prefix">>, DefaultIgnore;
def warn_pp_ambiguous_macro : Warning<
"ambiguous expansion of macro %0">, InGroup<AmbiguousMacro>;
def note_pp_ambiguous_macro_chosen : Note<
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
index 6d1a1af92821..a946b5c6be8e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
@@ -65,6 +65,7 @@ VALUE_DIAGOPT(ShowCategories, 2, 0) /// Show categories: 0 -> none, 1 -> Number,
ENUM_DIAGOPT(Format, TextDiagnosticFormat, 2, Clang) /// Format for diagnostics:
DIAGOPT(ShowColors, 1, 0) /// Show diagnostics with ANSI color sequences.
+DIAGOPT(UseANSIEscapeCodes, 1, 0)
ENUM_DIAGOPT(ShowOverloads, OverloadsShown, 1,
Ovl_All) /// Overload candidates to show.
DIAGOPT(VerifyDiagnostics, 1, 0) /// Check that diagnostics match the expected
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
index 3e3c4e50a9e0..7fbe534c5994 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
@@ -98,6 +98,10 @@ public:
/// prefixes removed.
std::vector<std::string> Warnings;
+ /// The list of prefixes from -Wundef-prefix=... used to generate warnings
+ /// for undefined macros.
+ std::vector<std::string> UndefPrefixes;
+
/// The list of -R... options used to alter the diagnostic mappings, with the
/// prefixes removed.
std::vector<std::string> Remarks;
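
The new UndefPrefixes list carries the prefixes given via -Wundef-prefix=..., which back the warn_pp_undef_prefix diagnostic added above: only undefined macros whose names start with one of the given prefixes are flagged in #if/#elif. A sketch; CFG_ is an arbitrary prefix chosen for illustration, compiled with something like "clang -Wundef-prefix=CFG_ -c config.c":

    #if CFG_ENABLE_EXTRAS     /* warning: 'CFG_ENABLE_EXTRAS' is not defined, evaluates to 0 */
    int extras;
    #endif
    #if SOME_OTHER_MACRO      /* no warning: name does not start with "CFG_" */
    int other;
    #endif
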
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index 33adf093693f..1038a4119d4c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -12,11 +12,10 @@
let Component = "Parse" in {
-def warn_asm_qualifier_ignored : Warning<
- "ignored %0 qualifier on asm">, CatInlineAsm, InGroup<ASMIgnoredQualifier>;
-def warn_file_asm_volatile : Warning<
- "meaningless 'volatile' on asm outside function">, CatInlineAsm,
- InGroup<ASMIgnoredQualifier>;
+def err_asm_qualifier_ignored : Error<
+ "expected 'volatile', 'inline', 'goto', or '('">, CatInlineAsm;
+def err_global_asm_qualifier_ignored : Error<
+ "meaningless '%0' on asm outside function">, CatInlineAsm;
let CategoryName = "Inline Assembly Issue" in {
def err_asm_empty : Error<"__asm used with no assembly instructions">;
@@ -27,8 +26,7 @@ def err_msasm_unable_to_create_target : Error<
"MS-style inline assembly is not available: %0">;
def err_gnu_inline_asm_disabled : Error<
"GNU-style inline assembly is disabled">;
-def err_asm_goto_cannot_have_output : Error<
- "'asm goto' cannot have output constraints">;
+def err_asm_duplicate_qual : Error<"duplicate asm qualifier '%0'">;
}
let CategoryName = "Parse Issue" in {
@@ -107,6 +105,25 @@ def ext_clang_c_enum_fixed_underlying_type : Extension<
def warn_cxx98_compat_enum_fixed_underlying_type : Warning<
"enumeration types with a fixed underlying type are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def ext_enum_base_in_type_specifier : ExtWarn<
+ "non-defining declaration of enumeration with a fixed underlying type is "
+ "only permitted as a standalone declaration"
+ "%select{|; missing list of enumerators?}0">,
+ InGroup<DiagGroup<"elaborated-enum-base">>, DefaultError;
+def ext_elaborated_enum_class : ExtWarn<
+ "reference to enumeration must use 'enum' not 'enum %select{struct|class}0'">,
+ InGroup<DiagGroup<"elaborated-enum-class">>, DefaultError;
+def err_scoped_enum_missing_identifier : Error<
+ "scoped enumeration requires a name">;
+def ext_scoped_enum : ExtWarn<
+ "scoped enumerations are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_scoped_enum : Warning<
+ "scoped enumerations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_anonymous_enum_bitfield : Error<
+ "ISO C++ only allows ':' in member enumeration declaration to introduce "
+ "a fixed underlying type, not an anonymous bit-field">;
+
def warn_cxx98_compat_alignof : Warning<
"alignof expressions are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
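A short sketch of the code these new enum parse diagnostics target (illustrative only; 'Color' and 'E' are hypothetical):

enum class Color : unsigned { Red, Green };  // OK: definition with a fixed underlying type
enum E : int;                                // OK: standalone opaque-enum-declaration
enum E : int e;                              // ext_enum_base_in_type_specifier (default error)
enum class Color c;                          // ext_elaborated_enum_class (default error); write 'enum Color c;'
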
@@ -177,13 +194,12 @@ def err_function_declared_typedef : Error<
def err_at_defs_cxx : Error<"@defs is not supported in Objective-C++">;
def err_at_in_class : Error<"unexpected '@' in member specification">;
def err_unexpected_semi : Error<"unexpected ';' before %0">;
+def err_postfix_after_unary_requires_parens : Error<
+ "expression cannot be followed by a postfix %0 operator; add parentheses">;
def err_unparenthesized_non_primary_expr_in_requires_clause : Error<
"parentheses are required around this expression in a requires clause">;
def note_unparenthesized_non_primary_expr_in_requires_clause : Note<
"parentheses are required around this expression in a requires clause">;
-def err_potential_function_call_in_constraint_logical_or : Error<
- "function call must be parenthesized to be considered part of the requires "
- "clause">;
def err_expected_fn_body : Error<
"expected function body after function declarator">;
@@ -211,7 +227,6 @@ def err_invalid_token_after_declarator_suggest_equal : Error<
"invalid %0 at end of declaration; did you mean '='?">;
def err_expected_statement : Error<"expected statement">;
def err_expected_lparen_after : Error<"expected '(' after '%0'">;
-def err_expected_lbrace_after : Error<"expected '{' after '%0'">;
def err_expected_rparen_after : Error<"expected ')' after '%0'">;
def err_expected_punc : Error<"expected ')' or ',' after '%0'">;
def err_expected_less_after : Error<"expected '<' after '%0'">;
@@ -597,6 +612,8 @@ def warn_cxx17_compat_for_range_init_stmt : Warning<
def warn_empty_init_statement : Warning<
"empty initialization statement of '%select{if|switch|range-based for}0' "
"has no effect">, InGroup<EmptyInitStatement>, DefaultIgnore;
+def err_keyword_as_parameter : Error <
+ "invalid parameter name: '%0' is a keyword">;
// C++ derived classes
def err_dup_virtual : Error<"duplicate 'virtual' in base specifier">;
@@ -680,7 +697,7 @@ def err_ms_property_expected_comma_or_rparen : Error<
def err_ms_property_initializer : Error<
"property declaration cannot have an in-class initializer">;
-def warn_cxx2a_compat_explicit_bool : Warning<
+def warn_cxx20_compat_explicit_bool : Warning<
"this expression will be parsed as explicit(bool) in C++20">,
InGroup<CXX20Compat>, DefaultIgnore;
def warn_cxx17_compat_explicit_bool : Warning<
@@ -712,6 +729,8 @@ def err_id_after_template_in_nested_name_spec : Error<
"expected template name after 'template' keyword in nested name specifier">;
def err_unexpected_template_in_unqualified_id : Error<
"'template' keyword not permitted here">;
+def err_unexpected_template_in_destructor_name : Error<
+ "'template' keyword not permitted in destructor name">;
def err_unexpected_template_after_using : Error<
"'template' keyword not permitted after 'using' keyword">;
def err_two_right_angle_brackets_need_space : Error<
@@ -744,17 +763,10 @@ def err_friend_explicit_instantiation : Error<
def err_explicit_instantiation_enum : Error<
"enumerations cannot be explicitly instantiated">;
def err_expected_template_parameter : Error<"expected template parameter">;
-def note_ill_formed_requires_expression_outside_template : Note<
- "requires expression outside a template declaration may not contain invalid "
- "types or expressions">;
def err_empty_requires_expr : Error<
"a requires expression must contain at least one requirement">;
def err_requires_expr_parameter_list_ellipsis : Error<
"varargs not allowed in requires expression">;
-def err_requires_expr_type_req_illegal_identifier : Error<
- "expected identifier or template-id in type requirement">;
-def err_requires_expr_type_req_template_args_on_non_template : Error<
- "template arguments provided for non-template '%0'">;
def err_expected_semi_requirement : Error<
"expected ';' at end of requirement">;
def err_requires_expr_missing_arrow : Error<
@@ -764,9 +776,6 @@ def err_requires_expr_expected_type_constraint : Error<
def err_requires_expr_simple_requirement_noexcept : Error<
"'noexcept' can only be used in a compound requirement (with '{' '}' around "
"the expression)">;
-def err_requires_expr_simple_requirement_unexpected_tok : Error<
- "unexpected %0 after expression; did you intend to use a compound "
- "requirement (with '{' '}' around the expression)?">;
def warn_requires_expr_in_simple_requirement : Warning<
"this requires expression will only be checked for syntactic validity; did "
"you intend to place it in a nested requirement? (add another 'requires' "
@@ -826,7 +835,7 @@ def err_friend_decl_defines_type : Error<
"cannot define a type in a friend declaration">;
def err_missing_whitespace_digraph : Error<
"found '<::' after a "
- "%select{template name|const_cast|dynamic_cast|reinterpret_cast|static_cast}0"
+ "%select{template name|addrspace_cast|const_cast|dynamic_cast|reinterpret_cast|static_cast}0"
" which forms the digraph '<:' (aka '[') and a ':', did you mean '< ::'?">;
def ext_defaulted_deleted_function : ExtWarn<
@@ -897,14 +906,6 @@ def err_access_specifier_interface : Error<
def err_duplicate_virt_specifier : Error<
"class member already marked '%0'">;
-def err_scoped_enum_missing_identifier : Error<
- "scoped enumeration requires a name">;
-def ext_scoped_enum : ExtWarn<
- "scoped enumerations are a C++11 extension">, InGroup<CXX11>;
-def warn_cxx98_compat_scoped_enum : Warning<
- "scoped enumerations are incompatible with C++98">,
- InGroup<CXX98Compat>, DefaultIgnore;
-
def err_expected_parameter_pack : Error<
"expected the name of a parameter pack">;
def err_paren_sizeof_parameter_pack : Error<
@@ -1046,6 +1047,8 @@ def warn_pragma_expected_section_label_or_name : Warning<
def warn_pragma_expected_init_seg : Warning<
"expected 'compiler', 'lib', 'user', or a string literal for the section name in '#pragma %0' - ignored">,
InGroup<IgnoredPragmas>;
+
+def err_pragma_expected_integer : Error<"expected an integer argument in '#pragma %0'">;
def warn_pragma_expected_integer : Warning<
"expected integer between %0 and %1 inclusive in '#pragma %2' - ignored">,
InGroup<IgnoredPragmas>;
@@ -1107,9 +1110,9 @@ def warn_pragma_init_seg_unsupported_target : Warning<
"'#pragma init_seg' is only supported when targeting a "
"Microsoft environment">,
InGroup<IgnoredPragmas>;
-// - #pragma fp_contract
-def err_pragma_fp_contract_scope : Error<
- "'#pragma fp_contract' can only appear at file scope or at the start of a "
+// - #pragma restricted to file scope or start of compound statement
+def err_pragma_file_or_compound_scope : Error<
+ "'#pragma %0' can only appear at file scope or at the start of a "
"compound statement">;
// - #pragma stdc unknown
def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
@@ -1128,6 +1131,10 @@ def warn_pragma_comment_ignored : Warning<"'#pragma comment %0' ignored">,
def err_pragma_detect_mismatch_malformed : Error<
"pragma detect_mismatch is malformed; it requires two comma-separated "
"string literals">;
+// - #pragma float_control
+def err_pragma_float_control_malformed : Error<
+ "pragma float_control is malformed; use 'float_control({push|pop})' or "
+ "'float_control({precise|except}, {on|off} [,push])'">;
// - #pragma pointers_to_members
def err_pragma_pointers_to_members_unknown_kind : Error<
"unexpected %0, expected to see one of %select{|'best_case', 'full_generality', }1"
@@ -1232,6 +1239,10 @@ def err_omp_expected_identifier_for_critical : Error<
"expected identifier specifying the name of the 'omp critical' directive">;
def err_omp_expected_reduction_identifier : Error<
"expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'">;
+def err_omp_expected_equal_in_iterator : Error<
+ "expected '=' in iterator specifier">;
+def err_omp_expected_punc_after_iterator : Error<
+ "expected ',' or ')' after iterator specifier">;
def err_omp_decl_in_declare_simd_variant : Error<
"function declaration is expected after 'declare %select{simd|variant}0' directive">;
def err_omp_unknown_map_type : Error<
@@ -1244,8 +1255,11 @@ def err_omp_map_type_modifier_missing : Error<
"missing map type modifier">;
def err_omp_declare_simd_inbranch_notinbranch : Error<
"unexpected '%0' clause, '%1' is specified already">;
-def err_expected_end_declare_target : Error<
- "expected '#pragma omp end declare target'">;
+def err_expected_end_declare_target_or_variant : Error<
+ "expected '#pragma omp end declare %select{target|variant}0'">;
+def err_expected_begin_declare_variant
+ : Error<"'#pragma omp end declare variant' with no matching '#pragma omp "
+ "begin declare variant'">;
def err_omp_declare_target_unexpected_clause: Error<
"unexpected '%0' clause, only %select{'to' or 'link'|'to', 'link' or 'device_type'}1 clauses expected">;
def err_omp_expected_clause: Error<
@@ -1256,30 +1270,72 @@ def err_omp_mapper_expected_declarator : Error<
"expected declarator on 'omp declare mapper' directive">;
def err_omp_declare_variant_wrong_clause : Error<
"expected '%0' clause on 'omp declare variant' directive">;
-def err_omp_declare_variant_no_ctx_selector : Error<
- "expected context selector in '%0' clause on 'omp declare variant' directive">;
-def err_omp_declare_variant_equal_expected : Error<
- "expected '=' after '%0' context selector set name on 'omp declare variant' directive">;
-def warn_omp_declare_variant_cs_name_expected : Warning<
- "unknown context selector in '%0' context selector set of 'omp declare variant' directive, ignored">,
- InGroup<OpenMPClauses>;
-def err_omp_declare_variant_item_expected : Error<
- "expected %0 in '%1' context selector of '%2' selector set of 'omp declare variant' directive">;
-def err_omp_declare_variant_ctx_set_mutiple_use : Error<
- "context selector set '%0' is used already in the same 'omp declare variant' directive">;
-def note_omp_declare_variant_ctx_set_used_here : Note<
- "previously context selector set '%0' used here">;
-def err_omp_expected_comma_brace : Error<"expected '}' or ',' after '%0'">;
-def err_omp_declare_variant_ctx_mutiple_use : Error<
- "context trait selector '%0' is used already in the same '%1' context selector set of 'omp declare variant' directive">;
-def note_omp_declare_variant_ctx_used_here : Note<
- "previously context trait selector '%0' used here">;
-def warn_omp_more_one_device_type_clause : Warning<
- "more than one 'device_type' clause is specified">,
- InGroup<OpenMPClauses>;
-def err_omp_wrong_device_kind_trait : Error<
- "unknown '%0' device kind trait in the 'device' context selector set, expected"
- " one of 'host', 'nohost', 'cpu', 'gpu' or 'fpga'">;
+def warn_omp_declare_variant_string_literal_or_identifier
+ : Warning<"expected identifier or string literal describing a context "
+ "%select{set|selector|property}0; "
+ "%select{set|selector|property}0 skipped">,
+ InGroup<OpenMPClauses>;
+def note_omp_declare_variant_ctx_options
+ : Note<"context %select{set|selector|property}0 options are: %1">;
+def warn_omp_declare_variant_expected
+ : Warning<"expected '%0' after the %1; '%0' assumed">,
+ InGroup<OpenMPClauses>;
+def warn_omp_declare_variant_ctx_not_a_property
+ : Warning<"'%0' is not a valid context property for the context selector "
+ "'%1' and the context set '%2'; property ignored">,
+ InGroup<OpenMPClauses>;
+def note_omp_declare_variant_ctx_is_a
+ : Note<"'%0' is a context %select{set|selector|property}1 not a context "
+ "%select{set|selector|property}2">;
+def note_omp_declare_variant_ctx_try : Note<"try 'match(%0={%1%2})'">;
+def warn_omp_declare_variant_ctx_not_a_selector
+ : Warning<"'%0' is not a valid context selector for the context set '%1'; "
+ "selector ignored">,
+ InGroup<OpenMPClauses>;
+def warn_omp_declare_variant_ctx_not_a_set
+ : Warning<"'%0' is not a valid context set in a `declare variant`; set "
+ "ignored">,
+ InGroup<OpenMPClauses>;
+def warn_omp_declare_variant_ctx_mutiple_use
+ : Warning<"the context %select{set|selector|property}0 '%1' was used "
+ "already in the same 'omp declare variant' directive; "
+ "%select{set|selector|property}0 ignored">,
+ InGroup<OpenMPClauses>;
+def note_omp_declare_variant_ctx_used_here
+ : Note<"the previous context %select{set|selector|property}0 '%1' used "
+ "here">;
+def note_omp_declare_variant_ctx_continue_here
+ : Note<"the ignored %select{set|selector|property}0 spans until here">;
+def warn_omp_ctx_incompatible_selector_for_set
+ : Warning<"the context selector '%0' is not valid for the context set "
+ "'%1'; selector ignored">,
+ InGroup<OpenMPClauses>;
+def note_omp_ctx_compatible_set_for_selector
+ : Note<"the context selector '%0' can be nested in the context set '%1'; "
+ "try 'match(%1={%0%select{|(property)}2})'">;
+def warn_omp_ctx_selector_without_properties
+ : Warning<"the context selector '%0' in context set '%1' requires a "
+ "context property defined in parentheses; selector ignored">,
+ InGroup<OpenMPClauses>;
+def warn_omp_ctx_incompatible_property_for_selector
+ : Warning<"the context property '%0' is not valid for the context selector "
+ "'%1' and the context set '%2'; property ignored">,
+ InGroup<OpenMPClauses>;
+def note_omp_ctx_compatible_set_and_selector_for_property
+ : Note<"the context property '%0' can be nested in the context selector "
+ "'%1' which is nested in the context set '%2'; try "
+ "'match(%2={%1(%0)})'">;
+def warn_omp_ctx_incompatible_score_for_property
+ : Warning<"the context selector '%0' in the context set '%1' cannot have a "
+ "score ('%2'); score ignored">,
+ InGroup<OpenMPClauses>;
+def warn_omp_more_one_device_type_clause
+ : Warning<"more than one 'device_type' clause is specified">,
+ InGroup<OpenMPClauses>;
+def err_omp_variant_ctx_second_match_extension : Error<
+ "only a single match extension allowed per OpenMP context selector">;
+def err_omp_invalid_dsa: Error<
+ "data-sharing attribute '%0' in '%1' clause requires OpenMP version %2 or above">;
// Pragma loop support.
def err_pragma_loop_missing_argument : Error<
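A hedged OpenMP 5.0 sketch of the set/selector/property terminology used by the reworked 'declare variant' diagnostics (function names are hypothetical):

int base(void);
int base_gpu(void);
// 'device' is the context set, 'kind' the selector, 'gpu' the property.
#pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
int base(void);
// A misspelled set, selector, or property is now skipped with a warning
// (warn_omp_declare_variant_ctx_not_a_set / _selector / _property) instead of
// the whole directive being rejected.
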
@@ -1291,13 +1347,12 @@ def err_pragma_loop_invalid_option : Error<
"pipeline, pipeline_initiation_interval, vectorize_predicate, or distribute">;
def err_pragma_fp_invalid_option : Error<
- "%select{invalid|missing}0 option%select{ %1|}0; expected contract">;
+ "%select{invalid|missing}0 option%select{ %1|}0; expected 'contract' or 'reassociate'">;
def err_pragma_fp_invalid_argument : Error<
"unexpected argument '%0' to '#pragma clang fp %1'; "
- "expected 'on', 'fast' or 'off'">;
-def err_pragma_fp_scope : Error<
- "'#pragma clang fp' can only appear at file scope or at the start of a "
- "compound statement">;
+ "%select{"
+ "expected 'fast' or 'on' or 'off'|"
+ "expected 'on' or 'off'}2">;
def err_pragma_invalid_keyword : Error<
"invalid argument; expected 'enable'%select{|, 'full'}0%select{|, 'assume_safety'}1 or 'disable'">;
@@ -1375,4 +1430,14 @@ def err_placeholder_expected_auto_or_decltype_auto : Error<
"expected 'auto' or 'decltype(auto)' after concept name">;
}
+def warn_max_tokens : Warning<
+ "the number of preprocessor source tokens (%0) exceeds this token limit (%1)">,
+ InGroup<MaxTokens>, DefaultIgnore;
+
+def warn_max_tokens_total : Warning<
+ "the total number of preprocessor source tokens (%0) exceeds the token limit (%1)">,
+ InGroup<MaxTokens>, DefaultIgnore;
+
+def note_max_tokens_total_override : Note<"total token limit set here">;
+
} // end of Parser diagnostics
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 917377420505..aa4de2812312 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -171,8 +171,9 @@ def err_field_designator_unknown : Error<
def err_field_designator_nonfield : Error<
"field designator %0 does not refer to a non-static data member">;
def note_field_designator_found : Note<"field designator refers here">;
-def err_designator_for_scalar_init : Error<
- "designator in initializer for scalar type %0">;
+def err_designator_for_scalar_or_sizeless_init : Error<
+ "designator in initializer for %select{scalar|indivisible sizeless}0 "
+ "type %1">;
def warn_initializer_overrides : Warning<
"initializer %select{partially |}0overrides prior initialization of "
"this subobject">, InGroup<InitializerOverrides>;
@@ -258,6 +259,9 @@ def err_invalid_vector_float_decl_spec : Error<
def err_invalid_vector_double_decl_spec : Error <
"use of 'double' with '__vector' requires VSX support to be enabled "
"(available on POWER7 or later)">;
+def err_invalid_vector_bool_int128_decl_spec : Error <
+ "use of '__int128' with '__vector bool' requires VSX support enabled (on "
+ "POWER10 or later)">;
def err_invalid_vector_long_long_decl_spec : Error <
"use of 'long long' with '__vector bool' requires VSX support (available on "
"POWER7 or later) or extended Altivec support (available on POWER8 or later) "
@@ -275,7 +279,9 @@ def err_bad_parameter_name : Error<
"%0 cannot be the name of a parameter">;
def err_bad_parameter_name_template_id : Error<
"parameter name cannot have template arguments">;
-def err_parameter_name_omitted : Error<"parameter name omitted">;
+def ext_parameter_name_omitted_c2x : ExtWarn<
+ "omitting the parameter name in a function definition is a C2x extension">,
+ InGroup<C2x>;
def err_anyx86_interrupt_attribute : Error<
"%select{x86|x86-64}0 'interrupt' attribute only applies to functions that "
"have %select{a 'void' return type|"
@@ -639,6 +645,8 @@ def warn_redecl_library_builtin : Warning<
def err_builtin_definition : Error<"definition of builtin function %0">;
def err_builtin_redeclare : Error<"cannot redeclare builtin function %0">;
def err_arm_invalid_specialreg : Error<"invalid special register for builtin">;
+def err_arm_invalid_coproc : Error<"coprocessor %0 must be configured as "
+ "%select{GCP|CDE}1">;
def err_invalid_cpu_supports : Error<"invalid cpu feature string for builtin">;
def err_invalid_cpu_is : Error<"invalid cpu name for builtin">;
def err_invalid_cpu_specific_dispatch_value : Error<
@@ -741,6 +749,12 @@ def warn_fortify_source_size_mismatch : Warning<
"'%0' size argument is too large; destination buffer has size %1,"
" but size argument is %2">, InGroup<FortifySource>;
+def warn_fortify_source_format_overflow : Warning<
+ "'%0' will always overflow; destination buffer has size %1,"
+ " but format string expands to at least %2">,
+ InGroup<FortifySource>;
+
+
/// main()
// static main() is not an error in C, just in C++.
def warn_static_main : Warning<"'main' should not be declared static">,
@@ -782,10 +796,27 @@ def ext_no_declarators : ExtWarn<"declaration does not declare anything">,
def ext_typedef_without_a_name : ExtWarn<"typedef requires a name">,
InGroup<MissingDeclarations>;
def err_typedef_not_identifier : Error<"typedef name must be an identifier">;
-def err_typedef_changes_linkage : Error<"unsupported: typedef changes linkage"
- " of anonymous type, but linkage was already computed">;
-def note_typedef_changes_linkage : Note<"use a tag name here to establish "
- "linkage prior to definition">;
+
+def ext_non_c_like_anon_struct_in_typedef : ExtWarn<
+ "anonymous non-C-compatible type given name for linkage purposes "
+ "by %select{typedef|alias}0 declaration; "
+ "add a tag name here">, InGroup<DiagGroup<"non-c-typedef-for-linkage">>;
+def err_non_c_like_anon_struct_in_typedef : Error<
+ "anonymous non-C-compatible type given name for linkage purposes "
+ "by %select{typedef|alias}0 declaration after its linkage was computed; "
+ "add a tag name here to establish linkage prior to definition">;
+def err_typedef_changes_linkage : Error<
+ "unsupported: anonymous type given name for linkage purposes "
+ "by %select{typedef|alias}0 declaration after its linkage was computed; "
+ "add a tag name here to establish linkage prior to definition">;
+def note_non_c_like_anon_struct : Note<
+ "type is not C-compatible due to this "
+ "%select{base class|default member initializer|lambda expression|"
+ "friend declaration|member declaration}0">;
+def note_typedef_for_linkage_here : Note<
+ "type is given name %0 for linkage purposes by this "
+ "%select{typedef|alias}1 declaration">;
+
def err_statically_allocated_object : Error<
"interface type cannot be statically allocated">;
def err_object_cannot_be_passed_returned_by_value : Error<
@@ -799,8 +830,8 @@ def err_opencl_half_load_store : Error<
def err_opencl_cast_to_half : Error<"casting to type %0 is not allowed">;
def err_opencl_half_declaration : Error<
"declaring variable of type %0 is not allowed">;
-def err_opencl_half_param : Error<
- "declaring function parameter of type %0 is not allowed; did you forget * ?">;
+def err_opencl_invalid_param : Error<
+ "declaring function parameter of type %0 is not allowed%select{; did you forget * ?|}1">;
def err_opencl_invalid_return : Error<
"declaring function return value of type %0 is not allowed %select{; did you forget * ?|}1">;
def warn_enum_value_overflow : Warning<"overflow in enumeration value">;
@@ -831,6 +862,16 @@ def warn_pragma_pack_pop_identifier_and_alignment : Warning<
"specifying both a name and alignment to 'pop' is undefined">;
def warn_pragma_pop_failed : Warning<"#pragma %0(pop, ...) failed: %1">,
InGroup<IgnoredPragmas>;
+def err_pragma_fc_pp_scope : Error<
+ "'#pragma float_control push/pop' can only appear at file scope or namespace scope">;
+def err_pragma_fc_noprecise_requires_nofenv : Error<
+ "'#pragma float_control(precise, off)' is illegal when fenv_access is enabled">;
+def err_pragma_fc_except_requires_precise : Error<
+ "'#pragma float_control(except, on)' is illegal when precise is disabled">;
+def err_pragma_fc_noprecise_requires_noexcept : Error<
+ "'#pragma float_control(precise, off)' is illegal when except is enabled">;
+def err_pragma_fenv_requires_precise : Error<
+ "'#pragma STDC FENV_ACCESS ON' is illegal when precise is disabled">;
def warn_cxx_ms_struct :
Warning<"ms_struct may not produce Microsoft-compatible layouts for classes "
"with base classes or virtual functions">,
@@ -996,8 +1037,8 @@ def err_objc_direct_on_protocol : Error<
"'objc_direct' attribute cannot be applied to %select{methods|properties}0 "
"declared in an Objective-C protocol">;
def err_objc_direct_duplicate_decl : Error<
- "%select{|direct }0method declaration conflicts "
- "with previous %select{|direct }1declaration of method %2">;
+ "%select{|direct }0%select{method|property}1 declaration conflicts "
+ "with previous %select{|direct }2declaration of %select{method|property}1 %3">;
def err_objc_direct_impl_decl_mismatch : Error<
"direct method was declared in %select{the primary interface|an extension|a category}0 "
"but is implemented in %select{the primary interface|a category|a different category}1">;
@@ -1013,6 +1054,8 @@ def warn_objc_direct_ignored : Warning<
def warn_objc_direct_property_ignored : Warning<
"direct attribute on property %0 ignored (not implemented by this Objective-C runtime)">,
InGroup<IgnoredAttributes>;
+def err_objc_direct_dynamic_property : Error<
+ "direct property cannot be @dynamic">;
def warn_conflicting_overriding_ret_types : Warning<
"conflicting return type in "
@@ -1335,8 +1378,14 @@ def warn_multiple_selectors: Warning<
"several methods with selector %0 of mismatched types are found "
"for the @selector expression">,
InGroup<SelectorTypeMismatch>, DefaultIgnore;
-def err_direct_selector_expression: Error<
+def err_direct_selector_expression : Error<
"@selector expression formed with direct selector %0">;
+def warn_potentially_direct_selector_expression : Warning<
+ "@selector expression formed with potentially direct selector %0">,
+ InGroup<ObjCPotentiallyDirectSelector>;
+def warn_strict_potentially_direct_selector_expression : Warning<
+ warn_potentially_direct_selector_expression.Text>,
+ InGroup<ObjCStrictPotentiallyDirectSelector>, DefaultIgnore;
def err_objc_kindof_nonobject : Error<
"'__kindof' specifier cannot be applied to non-object type %0">;
@@ -1371,7 +1420,8 @@ def warn_cxx14_compat_inline_variable : Warning<
DefaultIgnore, InGroup<CXXPre17Compat>;
def warn_inline_namespace_reopened_noninline : Warning<
- "inline namespace reopened as a non-inline namespace">;
+ "inline namespace reopened as a non-inline namespace">,
+ InGroup<InlineNamespaceReopenedNoninline>;
def err_inline_namespace_mismatch : Error<
"non-inline namespace cannot be reopened as inline">;
@@ -1449,8 +1499,8 @@ def err_throw_abstract_type : Error<
def err_array_of_abstract_type : Error<"array of abstract class type %0">;
def err_capture_of_abstract_type : Error<
"by-copy capture of value of abstract type %0">;
-def err_capture_of_incomplete_type : Error<
- "by-copy capture of variable %0 with incomplete type %1">;
+def err_capture_of_incomplete_or_sizeless_type : Error<
+ "by-copy capture of variable %0 with %select{incomplete|sizeless}1 type %2">;
def err_capture_default_non_local : Error<
"non-local lambda expression cannot have a capture-default">;
@@ -1482,9 +1532,12 @@ def err_deleted_decl_not_first : Error<
def err_deleted_override : Error<
"deleted function %0 cannot override a non-deleted function">;
-
def err_non_deleted_override : Error<
"non-deleted function %0 cannot override a deleted function">;
+def err_consteval_override : Error<
+ "consteval function %0 cannot override a non-consteval function">;
+def err_non_consteval_override : Error<
+ "non-consteval function %0 cannot override a consteval function">;
def warn_weak_vtable : Warning<
"%0 has no out-of-line virtual method definitions; its vtable will be "
@@ -1507,6 +1560,9 @@ def err_distant_exception_spec : Error<
def err_incomplete_in_exception_spec : Error<
"%select{|pointer to |reference to }0incomplete type %1 is not allowed "
"in exception specification">;
+def err_sizeless_in_exception_spec : Error<
+ "%select{|reference to }0sizeless type %1 is not allowed "
+ "in exception specification">;
def ext_incomplete_in_exception_spec : ExtWarn<err_incomplete_in_exception_spec.Text>,
InGroup<MicrosoftExceptionSpec>;
def err_rref_in_exception_spec : Error<
@@ -1544,6 +1600,9 @@ def err_exception_spec_cycle : Error<
"exception specification of %0 uses itself">;
def err_exception_spec_incomplete_type : Error<
"exception specification needed for member of incomplete class %0">;
+def warn_wasm_dynamic_exception_spec_ignored : ExtWarn<
+ "dynamic exception specifications with types are currently ignored in wasm">,
+ InGroup<WebAssemblyExceptionSpec>;
// C++ access checking
def err_class_redeclared_with_different_access : Error<
@@ -1755,6 +1814,11 @@ def note_due_to_dllexported_class : Note<
def err_illegal_union_or_anon_struct_member : Error<
"%select{anonymous struct|union}0 member %1 has a non-trivial "
"%sub{select_special_member_kind}2">;
+
+def warn_frame_address : Warning<
+ "calling '%0' with a nonzero argument is unsafe">,
+ InGroup<FrameAddress>, DefaultIgnore;
+
def warn_cxx98_compat_nontrivial_union_or_anon_struct_member : Warning<
"%select{anonymous struct|union}0 member %1 with a non-trivial "
"%sub{select_special_member_kind}2 is incompatible with C++98">,
@@ -1790,8 +1854,13 @@ def note_nontrivial_objc_ownership : Note<
"because type %0 has a member with %select{no|no|__strong|__weak|"
"__autoreleasing}1 ownership">;
+/// Selector for a TagTypeKind value.
+def select_tag_type_kind : TextSubstitution<
+ "%select{struct|interface|union|class|enum}0">;
+
def err_static_data_member_not_allowed_in_anon_struct : Error<
- "static data member %0 not allowed in anonymous struct">;
+ "static data member %0 not allowed in anonymous "
+ "%sub{select_tag_type_kind}1">;
def ext_static_data_member_in_union : ExtWarn<
"static data member %0 in union is a C++11 extension">, InGroup<CXX11>;
def warn_cxx98_compat_static_data_member_in_union : Warning<
@@ -1881,19 +1950,36 @@ def err_destructor_return_type : Error<"destructor cannot have a return type">;
def err_destructor_redeclared : Error<"destructor cannot be redeclared">;
def err_destructor_with_params : Error<"destructor cannot have any parameters">;
def err_destructor_variadic : Error<"destructor cannot be variadic">;
-def err_destructor_typedef_name : Error<
- "destructor cannot be declared using a %select{typedef|type alias}1 %0 of the class name">;
+def ext_destructor_typedef_name : ExtWarn<
+ "destructor cannot be declared using a %select{typedef|type alias}1 %0 "
+ "of the class name">, DefaultError, InGroup<DiagGroup<"dtor-typedef">>;
+def err_undeclared_destructor_name : Error<
+ "undeclared identifier %0 in destructor name">;
def err_destructor_name : Error<
"expected the class name after '~' to name the enclosing class">;
-def err_destructor_class_name : Error<
- "expected the class name after '~' to name a destructor">;
-def err_ident_in_dtor_not_a_type : Error<
+def err_destructor_name_nontype : Error<
+ "identifier %0 after '~' in destructor name does not name a type">;
+def err_destructor_expr_mismatch : Error<
+ "identifier %0 in object destruction expression does not name the type "
+ "%1 of the object being destroyed">;
+def err_destructor_expr_nontype : Error<
"identifier %0 in object destruction expression does not name a type">;
def err_destructor_expr_type_mismatch : Error<
"destructor type %0 in object destruction expression does not match the "
"type %1 of the object being destroyed">;
def note_destructor_type_here : Note<
- "type %0 is declared here">;
+ "type %0 found by destructor name lookup">;
+def note_destructor_nontype_here : Note<
+ "non-type declaration found by destructor name lookup">;
+def ext_dtor_named_in_wrong_scope : Extension<
+ "ISO C++ requires the name after '::~' to be found in the same scope as "
+ "the name before '::~'">, InGroup<DtorName>;
+def ext_qualified_dtor_named_in_lexical_scope : ExtWarn<
+ "qualified destructor name only found in lexical scope; omit the qualifier "
+ "to find this type name by unqualified lookup">, InGroup<DtorName>;
+def ext_dtor_name_ambiguous : Extension<
+ "ISO C++ considers this destructor name lookup to be ambiguous">,
+ InGroup<DtorName>;
def err_destroy_attr_on_non_static_var : Error<
"%select{no_destroy|always_destroy}0 attribute can only be applied to a"
@@ -1947,7 +2033,7 @@ def err_reference_bind_init_list : Error<
def err_init_list_bad_dest_type : Error<
"%select{|non-aggregate }0type %1 cannot be initialized with an initializer "
"list">;
-def warn_cxx2a_compat_aggregate_init_with_ctors : Warning<
+def warn_cxx20_compat_aggregate_init_with_ctors : Warning<
"aggregate initialization of type %0 with user-declared constructors "
"is incompatible with C++20">, DefaultIgnore, InGroup<CXX20Compat>;
@@ -1956,6 +2042,8 @@ def err_reference_bind_to_bitfield : Error<
"bit-field%select{| %1}2">;
def err_reference_bind_to_vector_element : Error<
"%select{non-const|volatile}0 reference cannot bind to vector element">;
+def err_reference_bind_to_matrix_element : Error<
+ "%select{non-const|volatile}0 reference cannot bind to matrix element">;
def err_reference_var_requires_init : Error<
"declaration of reference variable %0 requires an initializer">;
def err_reference_without_init : Error<
@@ -2033,6 +2121,10 @@ def err_list_init_in_parens : Error<
"cannot initialize %select{non-class|reference}0 type %1 with a "
"parenthesized initializer list">;
+def warn_uninit_const_reference : Warning<
+ "variable %0 is uninitialized when passed as a const reference argument "
+ "here">, InGroup<UninitializedConstReference>, DefaultIgnore;
+
def warn_unsequenced_mod_mod : Warning<
"multiple unsequenced modifications to %0">, InGroup<Unsequenced>;
def warn_unsequenced_mod_use : Warning<
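A small sketch of the new uninitialized-const-reference warning (names hypothetical; the warning is off by default):

void consume(const int &value);
void f() {
  int n;        // never written
  consume(n);   // warn_uninit_const_reference: 'n' is uninitialized when passed as a const reference argument
}
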
@@ -2275,12 +2367,22 @@ def override_keyword_hides_virtual_member_function : Error<
"%select{function|functions}1">;
def err_function_marked_override_not_overriding : Error<
"%0 marked 'override' but does not override any member functions">;
-def warn_destructor_marked_not_override_overriding : Warning <
- "%0 overrides a destructor but is not marked 'override'">,
- InGroup<CXX11WarnOverrideDestructor>, DefaultIgnore;
-def warn_function_marked_not_override_overriding : Warning <
- "%0 overrides a member function but is not marked 'override'">,
- InGroup<CXX11WarnOverrideMethod>;
+def warn_destructor_marked_not_override_overriding : TextSubstitution <
+ "%0 overrides a destructor but is not marked 'override'">;
+def warn_function_marked_not_override_overriding : TextSubstitution <
+ "%0 overrides a member function but is not marked 'override'">;
+def warn_inconsistent_destructor_marked_not_override_overriding : Warning <
+ "%sub{warn_destructor_marked_not_override_overriding}0">,
+ InGroup<CXX11WarnInconsistentOverrideDestructor>, DefaultIgnore;
+def warn_inconsistent_function_marked_not_override_overriding : Warning <
+ "%sub{warn_function_marked_not_override_overriding}0">,
+ InGroup<CXX11WarnInconsistentOverrideMethod>;
+def warn_suggest_destructor_marked_not_override_overriding : Warning <
+ "%sub{warn_destructor_marked_not_override_overriding}0">,
+ InGroup<CXX11WarnSuggestOverrideDestructor>, DefaultIgnore;
+def warn_suggest_function_marked_not_override_overriding : Warning <
+ "%sub{warn_function_marked_not_override_overriding}0">,
+ InGroup<CXX11WarnSuggestOverride>, DefaultIgnore;
def err_class_marked_final_used_as_base : Error<
"base %0 is marked '%select{final|sealed}1'">;
def warn_abstract_final_class : Warning<
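A hedged sketch of the split between the 'inconsistent' and 'suggest' override warnings introduced here (class names hypothetical):

struct Base {
  virtual void f();
  virtual void g();
  virtual ~Base();
};
struct Derived : Base {
  void f() override;  // the explicit 'override' here is what makes the omission below "inconsistent"
  void g();           // warn_inconsistent_function_marked_not_override_overriding
  ~Derived();         // the destructor variants exist in both groups and are DefaultIgnore
};
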
@@ -2314,9 +2416,6 @@ def err_enum_redeclare_fixed_mismatch : Error<
"enumeration previously declared with %select{non|}0fixed underlying type">;
def err_enum_redeclare_scoped_mismatch : Error<
"enumeration previously declared as %select{un|}0scoped">;
-def err_enum_class_reference : Error<
- "reference to %select{|scoped }0enumeration must use 'enum' "
- "not 'enum class'">;
def err_only_enums_have_underlying_types : Error<
"only enumeration types have underlying types">;
def err_underlying_type_of_incomplete_enum : Error<
@@ -2374,20 +2473,19 @@ def note_for_range_invalid_iterator : Note <
"in implicit call to 'operator%select{!=|*|++}0' for iterator of type %1">;
def note_for_range_begin_end : Note<
"selected '%select{begin|end}0' %select{function|template }1%2 with iterator type %3">;
-def warn_for_range_const_reference_copy : Warning<
+def warn_for_range_const_ref_binds_temp_built_from_ref : Warning<
"loop variable %0 "
- "%diff{has type $ but is initialized with type $"
- "| is initialized with a value of a different type}1,2 resulting in a copy">,
+ "%diff{of type $ binds to a temporary constructed from type $"
+ "|binds to a temporary constructed from a different type}1,2">,
InGroup<RangeLoopConstruct>, DefaultIgnore;
def note_use_type_or_non_reference : Note<
- "use non-reference type %0 to keep the copy or type %1 to prevent copying">;
-def warn_for_range_variable_always_copy : Warning<
- "loop variable %0 is always a copy because the range of type %1 does not "
- "return a reference">,
+ "use non-reference type %0 to make construction explicit or type %1 to prevent copying">;
+def warn_for_range_ref_binds_ret_temp : Warning<
+ "loop variable %0 binds to a temporary value produced by a range of type %1">,
InGroup<RangeLoopBindReference>, DefaultIgnore;
def note_use_non_reference_type : Note<"use non-reference type %0">;
def warn_for_range_copy : Warning<
- "loop variable %0 of type %1 creates a copy from type %2">,
+ "loop variable %0 creates a copy from type %1">,
InGroup<RangeLoopConstruct>, DefaultIgnore;
def note_use_reference_type : Note<"use reference type %0 to prevent copying">;
def err_objc_for_range_init_stmt : Error<
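An illustrative trigger for the reworded RangeLoopConstruct diagnostic, which now describes binding a reference to a converted temporary rather than "a copy":

#include <map>
#include <string>
void f(const std::map<int, std::string> &m) {
  // value_type is pair<const int, string>, so the reference below binds to a
  // temporary pair<int, string> built from each element
  // (warn_for_range_const_ref_binds_temp_built_from_ref).
  for (const std::pair<int, std::string> &entry : m)
    (void)entry;
}
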
@@ -2403,6 +2501,13 @@ def warn_cxx14_compat_constexpr_not_const : Warning<
"'constexpr' non-static member function will not be implicitly 'const' "
"in C++14; add 'const' to avoid a change in behavior">,
InGroup<DiagGroup<"constexpr-not-const">>;
+def err_invalid_consteval_take_address : Error<
+ "cannot take address of consteval function %0 outside"
+ " of an immediate invocation">;
+def err_invalid_consteval_call : Error<
+ "call to consteval function %q0 is not a constant expression">;
+def err_invalid_consteval_decl_kind : Error<
+ "%0 cannot be declared consteval">;
def err_invalid_constexpr : Error<
"%select{function parameter|typedef}0 "
"cannot be %sub{select_constexpr_spec_kind}1">;
@@ -2462,7 +2567,7 @@ def warn_cxx11_compat_constexpr_body_invalid_stmt : Warning<
"use of this statement in a constexpr %select{function|constructor}0 "
"is incompatible with C++ standards before C++14">,
InGroup<CXXPre14Compat>, DefaultIgnore;
-def ext_constexpr_body_invalid_stmt_cxx2a : ExtWarn<
+def ext_constexpr_body_invalid_stmt_cxx20 : ExtWarn<
"use of this statement in a constexpr %select{function|constructor}0 "
"is a C++20 extension">, InGroup<CXX20>;
def warn_cxx17_compat_constexpr_body_invalid_stmt : Warning<
@@ -2500,7 +2605,7 @@ def warn_cxx17_compat_constexpr_local_var_no_init : Warning<
"is incompatible with C++ standards before C++20">,
InGroup<CXXPre20Compat>, DefaultIgnore;
def ext_constexpr_function_never_constant_expr : ExtWarn<
- "constexpr %select{function|constructor}0 never produces a "
+ "%select{constexpr|consteval}1 %select{function|constructor}0 never produces a "
"constant expression">, InGroup<DiagGroup<"invalid-constexpr">>, DefaultError;
def err_attr_cond_never_constant_expr : Error<
"%0 attribute expression never produces a constant expression">;
@@ -2525,7 +2630,7 @@ def note_constexpr_body_previous_return : Note<
"previous return statement is here">;
// C++20 function try blocks in constexpr
-def ext_constexpr_function_try_block_cxx2a : ExtWarn<
+def ext_constexpr_function_try_block_cxx20 : ExtWarn<
"function try block in constexpr %select{function|constructor}0 is "
"a C++20 extension">, InGroup<CXX20>;
def warn_cxx17_compat_constexpr_function_try_block : Warning<
@@ -2581,8 +2686,6 @@ def err_concept_extra_headers : Error<
"extraneous template parameter list in concept definition">;
def err_concept_no_associated_constraints : Error<
"concept cannot have associated constraints">;
-def err_concept_not_implemented : Error<
- "sorry, unimplemented concepts feature %0 used">;
def err_non_constant_constraint_expression : Error<
"substitution into constraint expression resulted in a non-constant "
"expression">;
@@ -2591,8 +2694,6 @@ def err_non_bool_atomic_constraint : Error<
def err_template_arg_list_constraints_not_satisfied : Error<
"constraints not satisfied for %select{class template|function template|variable template|alias template|"
"template template parameter|template}0 %1%2">;
-def note_constraints_not_satisfied : Note<
- "constraints not satisfied">;
def note_substituted_constraint_expr_is_ill_formed : Note<
"because substituted constraint expression is ill-formed%0">;
def note_atomic_constraint_evaluated_to_false : Note<
@@ -2609,11 +2710,6 @@ def err_trailing_requires_clause_on_deduction_guide : Error<
"deduction guide cannot have a requires clause">;
def err_reference_to_function_with_unsatisfied_constraints : Error<
"invalid reference to function %0: constraints not satisfied">;
-def note_requires_expr_ill_formed_expr : Note<
- "expression is invalid: %0">;
-def note_requires_expr_no_implicit_conversion : Note<
- "no implicit conversion exists between expression type %0 and expected type "
- "%1">;
def err_requires_expr_local_parameter_default_argument : Error<
"default arguments not allowed for parameters of a requires expression">;
def err_requires_expr_parameter_referenced_in_evaluated_context : Error<
@@ -2636,12 +2732,6 @@ def note_type_requirement_substitution_error : Note<
"%select{and|because}0 '%1' would be invalid: %2">;
def note_type_requirement_unknown_substitution_error : Note<
"%select{and|because}0 '%1' would be invalid">;
-def err_type_requirement_non_type_template : Error<
- "'%0' refers to a %select{class template|function template|"
- "variable template|alias template|template template parameter|template}1, "
- "not a type template">;
-def err_type_requirement_no_such_type : Error<
- "'%0' does not name a type">;
def note_nested_requirement_substitution_error : Note<
"%select{and|because}0 '%1' would be invalid: %2">;
def note_nested_requirement_unknown_substitution_error : Note<
@@ -2698,6 +2788,10 @@ def warn_auto_var_is_id : Warning<
InGroup<DiagGroup<"auto-var-id">>;
// Attributes
+def warn_nomerge_attribute_ignored_in_stmt: Warning<
+ "%0 attribute is ignored because there exists no call expression inside the "
+ "statement">,
+ InGroup<IgnoredAttributes>;
def err_nsobject_attribute : Error<
"'NSObject' attribute is for pointer types only">;
def err_attributes_are_not_compatible : Error<
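A minimal sketch for the new nomerge statement diagnostic, assuming the [[clang::nomerge]] spelling (names hypothetical):

void callee(int);
void f(int i) {
  [[clang::nomerge]] callee(i);   // OK: the statement contains a call expression
  [[clang::nomerge]] i = i + 1;   // warn_nomerge_attribute_ignored_in_stmt
}
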
@@ -2713,6 +2807,7 @@ def err_attribute_too_many_arguments : Error<
def err_attribute_too_few_arguments : Error<
"%0 attribute takes at least %1 argument%s1">;
def err_attribute_invalid_vector_type : Error<"invalid vector element type %0">;
+def err_attribute_invalid_matrix_type : Error<"invalid matrix element type %0">;
def err_attribute_bad_neon_vector_size : Error<
"Neon vector size must be 64 or 128 bits">;
def err_attribute_requires_positive_integer : Error<
@@ -2744,6 +2839,8 @@ def err_alignas_mismatch : Error<
"redeclaration has different alignment requirement (%1 vs %0)">;
def err_alignas_underaligned : Error<
"requested alignment is less than minimum alignment of %1 for type %0">;
+def err_attribute_sizeless_type : Error<
+ "%0 attribute cannot be applied to sizeless type %1">;
def err_attribute_argument_n_type : Error<
"%0 attribute requires parameter %1 to be %select{int or bool|an integer "
"constant|a string|an identifier}2">;
@@ -2814,8 +2911,8 @@ def err_init_method_bad_return_type : Error<
"init methods must return an object pointer type, not %0">;
def err_attribute_invalid_size : Error<
"vector size not an integral multiple of component size">;
-def err_attribute_zero_size : Error<"zero vector size">;
-def err_attribute_size_too_large : Error<"vector size too large">;
+def err_attribute_zero_size : Error<"zero %0 size">;
+def err_attribute_size_too_large : Error<"%0 size too large">;
def err_typecheck_vector_not_convertable_implict_truncation : Error<
"cannot convert between %select{scalar|vector}0 type %1 and vector type"
" %2 as implicit conversion would cause truncation">;
@@ -2841,6 +2938,10 @@ def err_attribute_address_multiple_qualifiers : Error<
def warn_attribute_address_multiple_identical_qualifiers : Warning<
"multiple identical address spaces specified for type">,
InGroup<DuplicateDeclSpecifier>;
+def err_attribute_not_clinkage : Error<
+ "function type with %0 attribute must have C linkage">;
+def err_function_decl_cmse_ns_call : Error<
+ "functions may not be declared with 'cmse_nonsecure_call' attribute">;
def err_attribute_address_function_type : Error<
"function type may not be qualified with an address space">;
def err_as_qualified_auto_decl : Error<
@@ -2886,7 +2987,7 @@ def warn_objc_literal_comparison : Warning<
"a numeric literal|a boxed expression|}0 has undefined behavior">,
InGroup<ObjCLiteralComparison>;
def err_missing_atsign_prefix : Error<
- "string literal must be prefixed by '@' ">;
+ "%select{string|numeric}0 literal must be prefixed by '@'">;
def warn_objc_string_literal_comparison : Warning<
"direct comparison of a string literal has undefined behavior">,
InGroup<ObjCStringComparison>;
@@ -2900,6 +3001,11 @@ def warn_objc_collection_literal_element : Warning<
"object of type %0 is not compatible with "
"%select{array element type|dictionary key type|dictionary value type}1 %2">,
InGroup<ObjCLiteralConversion>;
+def warn_nsdictionary_duplicate_key : Warning<
+ "duplicate key in dictionary literal">,
+ InGroup<DiagGroup<"objc-dictionary-duplicate-keys">>;
+def note_nsdictionary_duplicate_key_here : Note<
+ "previous equal key is here">;
def err_swift_param_attr_not_swiftcall : Error<
"'%0' parameter can only be used with swiftcall calling convention">;
def err_swift_indirect_result_not_first : Error<
@@ -2979,6 +3085,9 @@ def err_alignment_too_big : Error<
"requested alignment must be %0 or smaller">;
def err_alignment_not_power_of_two : Error<
"requested alignment is not a power of 2">;
+def warn_alignment_not_power_of_two : Warning<
+ err_alignment_not_power_of_two.Text>,
+ InGroup<DiagGroup<"non-power-of-two-alignment">>;
def err_alignment_dependent_typedef_name : Error<
"requested alignment is dependent but declaration is not dependent">;
@@ -3058,6 +3167,13 @@ def warn_attribute_weak_on_local : Warning<
InGroup<IgnoredAttributes>;
def warn_weak_identifier_undeclared : Warning<
"weak identifier %0 never declared">;
+def warn_attribute_cmse_entry_static : Warning<
+ "'cmse_nonsecure_entry' cannot be applied to functions with internal linkage">,
+ InGroup<IgnoredAttributes>;
+def warn_cmse_nonsecure_union : Warning<
+ "passing union across security boundary via %select{parameter %1|return value}0 "
+ "may leak information">,
+ InGroup<DiagGroup<"cmse-union-leak">>;
def err_attribute_weak_static : Error<
"weak declaration cannot have internal linkage">;
def err_attribute_selectany_non_extern_data : Error<
@@ -3205,6 +3321,12 @@ def err_attribute_output_parameter : Error<
def ext_cannot_use_trivial_abi : ExtWarn<
"'trivial_abi' cannot be applied to %0">, InGroup<IgnoredAttributes>;
+def note_cannot_use_trivial_abi_reason : Note<
+ "'trivial_abi' is disallowed on %0 because %select{"
+ "its copy constructors and move constructors are all deleted|"
+ "it is polymorphic|"
+ "it has a base of a non-trivial class type|it has a virtual base|"
+ "it has a __weak field|it has a field of a non-trivial class type}1">;
// Availability attribute
def warn_availability_unknown_platform : Warning<
@@ -3254,9 +3376,6 @@ def warn_at_available_unchecked_use : Warning<
InGroup<DiagGroup<"unsupported-availability-guard">>;
// Thread Safety Attributes
-def warn_invalid_capability_name : Warning<
- "invalid capability name '%0'; capability name must be 'mutex' or 'role'">,
- InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def warn_thread_attribute_ignored : Warning<
"ignoring %0 attribute because its argument is invalid">,
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
@@ -3274,7 +3393,7 @@ def warn_thread_attribute_argument_not_lockable : Warning<
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def warn_thread_attribute_decl_not_lockable : Warning<
"%0 attribute can only be applied in a context annotated "
- "with 'capability(\"mutex\")' attribute">,
+ "with 'capability' attribute">,
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def warn_thread_attribute_decl_not_pointer : Warning<
"%0 only applies to pointer types; type here is %1">,
@@ -3308,6 +3427,7 @@ def warn_expecting_lock_held_on_loop : Warning<
"expecting %0 '%1' to be held at start of each loop">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_locked_here : Note<"%0 acquired here">;
+def note_unlocked_here : Note<"%0 released here">;
def warn_lock_exclusive_and_shared : Warning<
"%0 '%1' is acquired exclusively and shared in the same scope">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
@@ -3393,7 +3513,7 @@ def warn_use_of_temp_in_invalid_state : Warning<
"invalid invocation of method '%0' on a temporary object while it is in the "
"'%1' state">, InGroup<Consumed>, DefaultIgnore;
def warn_attr_on_unconsumable_class : Warning<
- "consumed analysis attribute is attached to member of class '%0' which isn't "
+ "consumed analysis attribute is attached to member of class %0 which isn't "
"marked as consumable">, InGroup<Consumed>, DefaultIgnore;
def warn_return_typestate_for_unconsumable_type : Warning<
"return state set for an unconsumable type '%0'">, InGroup<Consumed>,
@@ -3487,7 +3607,7 @@ def warn_impcast_integer_float_precision : Warning<
InGroup<ImplicitIntFloatConversion>, DefaultIgnore;
def warn_impcast_integer_float_precision_constant : Warning<
"implicit conversion from %2 to %3 changes value from %0 to %1">,
- InGroup<ImplicitIntFloatConversion>;
+ InGroup<ImplicitConstIntFloatConversion>;
def warn_impcast_float_to_integer : Warning<
"implicit conversion from %0 to %1 changes value from %2 to %3">,
@@ -3609,6 +3729,18 @@ def warn_int_to_pointer_cast : Warning<
def warn_int_to_void_pointer_cast : Warning<
"cast to %1 from smaller integer type %0">,
InGroup<IntToVoidPointerCast>;
+def warn_pointer_to_int_cast : Warning<
+ "cast to smaller integer type %1 from %0">,
+ InGroup<PointerToIntCast>;
+def warn_pointer_to_enum_cast : Warning<
+ warn_pointer_to_int_cast.Text>,
+ InGroup<PointerToEnumCast>;
+def warn_void_pointer_to_int_cast : Warning<
+ "cast to smaller integer type %1 from %0">,
+ InGroup<VoidPointerToIntCast>;
+def warn_void_pointer_to_enum_cast : Warning<
+ warn_void_pointer_to_int_cast.Text>,
+ InGroup<VoidPointerToEnumCast>;
def warn_attribute_ignored_for_field_of_type : Warning<
"%0 attribute ignored for field of type %1">,
@@ -3880,6 +4012,8 @@ def err_use_of_default_argument_to_function_declared_later : Error<
def note_default_argument_declared_here : Note<
"default argument declared here">;
def err_recursive_default_argument : Error<"recursive evaluation of default argument">;
+def note_recursive_default_argument_used_here : Note<
+ "default argument used here">;
def ext_param_promoted_not_compatible_with_prototype : ExtWarn<
"%diff{promoted type $ of K&R function parameter is not compatible with the "
@@ -4127,13 +4261,13 @@ def err_ovl_no_conversion_in_cast : Error<
"cannot convert %1 to %2 without a conversion operator">;
def err_ovl_no_viable_conversion_in_cast : Error<
"no matching conversion for %select{|static_cast|reinterpret_cast|"
- "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+ "dynamic_cast|C-style cast|functional-style cast|}0 from %1 to %2">;
def err_ovl_ambiguous_conversion_in_cast : Error<
"ambiguous conversion for %select{|static_cast|reinterpret_cast|"
- "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+ "dynamic_cast|C-style cast|functional-style cast|}0 from %1 to %2">;
def err_ovl_deleted_conversion_in_cast : Error<
"%select{|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2 uses deleted function">;
+ "functional-style cast|}0 from %1 to %2 uses deleted function">;
def err_ovl_ambiguous_init : Error<"call to constructor of %0 is ambiguous">;
def err_ref_init_ambiguous : Error<
"reference initialization of type %0 with initializer of type %1 is ambiguous">;
@@ -4149,11 +4283,16 @@ def err_ovl_ambiguous_oper_binary : Error<
"use of overloaded operator '%0' is ambiguous (with operand types %1 and %2)">;
def ext_ovl_ambiguous_oper_binary_reversed : ExtWarn<
"ISO C++20 considers use of overloaded operator '%0' (with operand types %1 "
- "and %2) to be ambiguous despite there being a unique best viable function">,
+ "and %2) to be ambiguous despite there being a unique best viable function"
+ "%select{ with non-reversed arguments|}3">,
InGroup<DiagGroup<"ambiguous-reversed-operator">>, SFINAEFailure;
-def note_ovl_ambiguous_oper_binary_reversed_candidate : Note<
+def note_ovl_ambiguous_oper_binary_reversed_self : Note<
"ambiguity is between a regular call to this operator and a call with the "
"argument order reversed">;
+def note_ovl_ambiguous_oper_binary_selected_candidate : Note<
+ "candidate function with non-reversed arguments">;
+def note_ovl_ambiguous_oper_binary_reversed_candidate : Note<
+ "ambiguous candidate function with reversed arguments">;
def err_ovl_no_viable_oper : Error<"no viable overloaded '%0'">;
def note_assign_lhs_incomplete : Note<"type %0 is incomplete">;
def err_ovl_deleted_oper : Error<
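A sketch of the C++20 reversed-candidate case the renamed notes describe (type names hypothetical):

struct S {
  bool operator==(const S &);   // note: not const-qualified
};
bool eq(S a, S b) { return a == b; }
// The reversed candidate b.operator==(a) also participates in C++20; neither
// candidate is better, so Clang keeps the non-reversed one and emits
// ext_ovl_ambiguous_oper_binary_reversed together with the new
// "non-reversed"/"reversed" candidate notes.
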
@@ -4167,6 +4306,10 @@ def err_ovl_deleted_comparison : Error<
def err_ovl_rewrite_equalequal_not_bool : Error<
"return type %0 of selected 'operator==' function for rewritten "
"'%1' comparison is not 'bool'">;
+def ext_ovl_rewrite_equalequal_not_bool : ExtWarn<
+ "ISO C++20 requires return type of selected 'operator==' function for "
+ "rewritten '%1' comparison to be 'bool', not %0">,
+ InGroup<DiagGroup<"rewrite-not-bool">>, SFINAEFailure;
def err_ovl_no_viable_subscript :
Error<"no viable overloaded operator[] for type %0">;
def err_ovl_no_oper :
@@ -4835,9 +4978,13 @@ def note_using_value_decl_missing_typename : Note<
"add 'typename' to treat this using declaration as a type">;
def err_template_kw_refers_to_non_template : Error<
- "%0 following the 'template' keyword does not refer to a template">;
+ "%0%select{| following the 'template' keyword}1 "
+ "does not refer to a template">;
def note_template_kw_refers_to_non_template : Note<
"declared as a non-template here">;
+def err_template_kw_refers_to_dependent_non_template : Error<
+ "%0%select{| following the 'template' keyword}1 "
+ "cannot refer to a dependent template">;
def err_template_kw_refers_to_class_template : Error<
"'%0%1' instantiated to a class template, not a function template">;
def note_referenced_class_template : Note<
@@ -5258,9 +5405,6 @@ def ext_typecheck_zero_array_size : Extension<
"zero size arrays are an extension">, InGroup<ZeroLengthArray>;
def err_typecheck_zero_array_size : Error<
"zero-length arrays are not permitted in C++">;
-def warn_typecheck_zero_static_array_size : Warning<
- "'static' has no effect on zero-length arrays">,
- InGroup<ArrayBounds>;
def err_array_size_non_int : Error<"size of array has non-integer type %0">;
def err_init_element_not_constant : Error<
"initializer element is not a compile-time constant">;
@@ -5268,6 +5412,17 @@ def ext_aggregate_init_not_constant : Extension<
"initializer for aggregate is not a compile-time constant">, InGroup<C99>;
def err_local_cant_init : Error<
"'__local' variable cannot have an initializer">;
+def err_loader_uninitialized_cant_init
+ : Error<"variable with 'loader_uninitialized' attribute cannot have an "
+ "initializer">;
+def err_loader_uninitialized_trivial_ctor
+ : Error<"variable with 'loader_uninitialized' attribute must have a "
+ "trivial default constructor">;
+def err_loader_uninitialized_redeclaration
+ : Error<"redeclaration cannot add 'loader_uninitialized' attribute">;
+def err_loader_uninitialized_extern_decl
+ : Error<"variable %0 cannot be declared both 'extern' and with the "
+ "'loader_uninitialized' attribute">;
def err_block_extern_cant_init : Error<
"'extern' variable cannot have an initializer">;
def warn_extern_init : Warning<"'extern' variable has an initializer">,
@@ -5277,30 +5432,44 @@ def err_variable_object_no_init : Error<
def err_excess_initializers : Error<
"excess elements in %select{array|vector|scalar|union|struct}0 initializer">;
def ext_excess_initializers : ExtWarn<
- "excess elements in %select{array|vector|scalar|union|struct}0 initializer">;
+ "excess elements in %select{array|vector|scalar|union|struct}0 initializer">,
+ InGroup<ExcessInitializers>;
+def err_excess_initializers_for_sizeless_type : Error<
+ "excess elements in initializer for indivisible sizeless type %0">;
+def ext_excess_initializers_for_sizeless_type : ExtWarn<
+ "excess elements in initializer for indivisible sizeless type %0">,
+ InGroup<ExcessInitializers>;
def err_excess_initializers_in_char_array_initializer : Error<
"excess elements in char array initializer">;
def ext_excess_initializers_in_char_array_initializer : ExtWarn<
- "excess elements in char array initializer">;
+ "excess elements in char array initializer">,
+ InGroup<ExcessInitializers>;
def err_initializer_string_for_char_array_too_long : Error<
"initializer-string for char array is too long">;
def ext_initializer_string_for_char_array_too_long : ExtWarn<
- "initializer-string for char array is too long">;
+ "initializer-string for char array is too long">,
+ InGroup<ExcessInitializers>;
def warn_missing_field_initializers : Warning<
"missing field %0 initializer">,
InGroup<MissingFieldInitializers>, DefaultIgnore;
-def warn_braces_around_scalar_init : Warning<
- "braces around scalar initializer">, InGroup<DiagGroup<"braced-scalar-init">>;
-def ext_many_braces_around_scalar_init : ExtWarn<
- "too many braces around scalar initializer">,
+def warn_braces_around_init : Warning<
+ "braces around %select{scalar |}0initializer">,
+ InGroup<DiagGroup<"braced-scalar-init">>;
+def ext_many_braces_around_init : ExtWarn<
+ "too many braces around %select{scalar |}0initializer">,
InGroup<DiagGroup<"many-braces-around-scalar-init">>, SFINAEFailure;
def ext_complex_component_init : Extension<
"complex initialization specifying real and imaginary components "
"is an extension">, InGroup<DiagGroup<"complex-component-init">>;
def err_empty_scalar_initializer : Error<"scalar initializer cannot be empty">;
+def err_empty_sizeless_initializer : Error<
+ "initializer for sizeless type %0 cannot be empty">;
def warn_cxx98_compat_empty_scalar_initializer : Warning<
"scalar initialized from empty initializer list is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_empty_sizeless_initializer : Warning<
+ "initializing %0 from an empty initializer list is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_reference_list_init : Warning<
"reference initialized from initializer list is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
@@ -5350,6 +5519,8 @@ def err_bitfield_width_exceeds_type_width : Error<
def err_anon_bitfield_width_exceeds_type_width : Error<
"width of anonymous bit-field (%0 bits) exceeds %select{width|size}1 "
"of its type (%2 bit%s2)">;
+def err_anon_bitfield_init : Error<
+ "anonymous bit-field cannot have a default member initializer">;
def err_incorrect_number_of_vector_initializers : Error<
"number of elements must be either one or match the size of the vector">;
@@ -5468,6 +5639,8 @@ def note_enters_block_captures_weak : Note<
def note_enters_block_captures_non_trivial_c_struct : Note<
"jump enters lifetime of block which captures a C struct that is non-trivial "
"to destroy">;
+def note_enters_compound_literal_scope : Note<
+ "jump enters lifetime of a compound literal that is non-trivial to destruct">;
def note_exits_cleanup : Note<
"jump exits scope of variable with __attribute__((cleanup))">;
@@ -5511,6 +5684,8 @@ def note_exits_block_captures_weak : Note<
def note_exits_block_captures_non_trivial_c_struct : Note<
"jump exits lifetime of block which captures a C struct that is non-trivial "
"to destroy">;
+def note_exits_compound_literal_scope : Note<
+ "jump exits lifetime of a compound literal that is non-trivial to destruct">;
def err_func_returning_qualified_void : ExtWarn<
"function cannot return qualified void type %0">,
@@ -5518,7 +5693,8 @@ def err_func_returning_qualified_void : ExtWarn<
def err_func_returning_array_function : Error<
"function cannot return %select{array|function}0 type %1">;
def err_field_declared_as_function : Error<"field %0 declared as a function">;
-def err_field_incomplete : Error<"field has incomplete type %0">;
+def err_field_incomplete_or_sizeless : Error<
+ "field has %select{incomplete|sizeless}0 type %1">;
def ext_variable_sized_type_in_struct : ExtWarn<
"field %0 with variable sized type %1 not at the end of a struct or class is"
" a GNU extension">, InGroup<GNUVariableSizedTypeNotAtEnd>;
@@ -5810,8 +5986,8 @@ def err_flexible_array_init_needs_braces : Error<
"flexible array requires brace-enclosed initializer">;
def err_illegal_decl_array_of_functions : Error<
"'%0' declared as array of functions of type %1">;
-def err_illegal_decl_array_incomplete_type : Error<
- "array has incomplete element type %0">;
+def err_array_incomplete_or_sizeless_type : Error<
+ "array has %select{incomplete|sizeless}0 element type %1">;
def err_illegal_message_expr_incomplete_type : Error<
"Objective-C message has incomplete result type %0">;
def err_illegal_decl_array_of_references : Error<
@@ -5842,30 +6018,24 @@ def err_block_return_missing_expr : Error<
"non-void block should return a value">;
def err_func_def_incomplete_result : Error<
"incomplete result type %0 in function definition">;
-def err_atomic_specifier_bad_type : Error<
- "_Atomic cannot be applied to "
- "%select{incomplete |array |function |reference |atomic |qualified |}0type "
- "%1 %select{||||||which is not trivially copyable}0">;
+def err_atomic_specifier_bad_type
+ : Error<"_Atomic cannot be applied to "
+ "%select{incomplete |array |function |reference |atomic |qualified "
+ "|sizeless ||integer |integer }0type "
+ "%1 %select{|||||||which is not trivially copyable|with less than "
+ "1 byte of precision|with a non power of 2 precision}0">;
// Expressions.
-def select_unary_expr_or_type_trait_kind : TextSubstitution<
- "%select{sizeof|alignof|vec_step|__builtin_omp_required_simd_align|"
- "__alignof}0">;
def ext_sizeof_alignof_function_type : Extension<
- "invalid application of '%sub{select_unary_expr_or_type_trait_kind}0' "
- "to a function type">, InGroup<PointerArith>;
+ "invalid application of '%0' to a function type">, InGroup<PointerArith>;
def ext_sizeof_alignof_void_type : Extension<
- "invalid application of '%sub{select_unary_expr_or_type_trait_kind}0' "
- "to a void type">, InGroup<PointerArith>;
+ "invalid application of '%0' to a void type">, InGroup<PointerArith>;
def err_opencl_sizeof_alignof_type : Error<
- "invalid application of '%sub{select_unary_expr_or_type_trait_kind}0' "
- "to a void type">;
-def err_sizeof_alignof_incomplete_type : Error<
- "invalid application of '%sub{select_unary_expr_or_type_trait_kind}0' "
- "to an incomplete type %1">;
+ "invalid application of '%0' to a void type">;
+def err_sizeof_alignof_incomplete_or_sizeless_type : Error<
+ "invalid application of '%0' to %select{an incomplete|sizeless}1 type %2">;
def err_sizeof_alignof_function_type : Error<
- "invalid application of '%sub{select_unary_expr_or_type_trait_kind}0' "
- "to a function type">;
+ "invalid application of '%0' to a function type">;
def err_openmp_default_simd_align_expr : Error<
"invalid application of '__builtin_omp_required_simd_align' to an expression, only type is allowed">;
def err_sizeof_alignof_typeof_bitfield : Error<
@@ -6041,8 +6211,8 @@ def err_typecheck_subscript_not_integer : Error<
"array subscript is not an integer">;
def err_subscript_function_type : Error<
"subscript of pointer to function type %0">;
-def err_subscript_incomplete_type : Error<
- "subscript of pointer to incomplete type %0">;
+def err_subscript_incomplete_or_sizeless_type : Error<
+ "subscript of pointer to %select{incomplete|sizeless}0 type %1">;
def err_dereference_incomplete_type : Error<
"dereference of pointer to incomplete type %0">;
def ext_gnu_subscript_void_type : Extension<
@@ -6152,8 +6322,8 @@ def err_typecheck_illegal_increment_decrement : Error<
"cannot %select{decrement|increment}1 value of type %0">;
def err_typecheck_expect_int : Error<
"used type %0 where integer is required">;
-def err_typecheck_arithmetic_incomplete_type : Error<
- "arithmetic on a pointer to an incomplete type %0">;
+def err_typecheck_arithmetic_incomplete_or_sizeless_type : Error<
+ "arithmetic on a pointer to %select{an incomplete|sizeless}0 type %1">;
def err_typecheck_pointer_arith_function_type : Error<
"arithmetic on%select{ a|}0 pointer%select{|s}0 to%select{ the|}2 "
"function type%select{|s}2 %1%select{| and %3}2">;
@@ -6187,10 +6357,10 @@ def note_array_init_plain_string_into_char8_t : Note<
def err_array_init_utf8_string_into_char : Error<
"%select{|ISO C++20 does not permit }0initialization of char array with "
"UTF-8 string literal%select{ is not permitted by '-fchar8_t'|}0">;
-def warn_cxx2a_compat_utf8_string : Warning<
+def warn_cxx20_compat_utf8_string : Warning<
"type of UTF-8 string literal will change from array of const char to "
"array of const char8_t in C++20">, InGroup<CXX20Compat>, DefaultIgnore;
-def note_cxx2a_compat_utf8_string_remove_u8 : Note<
+def note_cxx20_compat_utf8_string_remove_u8 : Note<
"remove 'u8' prefix to avoid a change of behavior; "
"Clang encodes unprefixed narrow string literals as UTF-8">;
def err_array_init_different_type : Error<
@@ -6227,7 +6397,7 @@ def err_typecheck_sclass_func : Error<"illegal storage class on function">;
def err_static_block_func : Error<
"function declared in block scope cannot have 'static' storage class">;
def err_typecheck_address_of : Error<"address of %select{bit-field"
- "|vector element|property expression|register variable}0 requested">;
+ "|vector element|property expression|register variable|matrix element}0 requested">;
def ext_typecheck_addrof_void : Extension<
"ISO C forbids taking the address of an expression of type 'void'">;
def err_unqualified_pointer_member_function : Error<
@@ -6293,6 +6463,12 @@ def err_typecheck_ordered_comparison_of_pointer_and_zero : Error<
"ordered comparison between pointer and zero (%0 and %1)">;
def err_typecheck_three_way_comparison_of_pointer_and_zero : Error<
"three-way comparison between pointer and zero">;
+def ext_typecheck_compare_complete_incomplete_pointers : Extension<
+ "pointer comparisons before C11 "
+ "need to be between two complete or two incomplete types; "
+ "%0 is %select{|in}2complete and "
+ "%1 is %select{|in}3complete">,
+ InGroup<C11>;
def ext_typecheck_ordered_comparison_of_function_pointers : ExtWarn<
"ordered comparison of function pointers (%0 and %1)">,
InGroup<DiagGroup<"ordered-compare-function-pointers">>;
@@ -6331,7 +6507,7 @@ def warn_arith_conv_enum_float : Warning<
"%plural{2:with|4:from|:and}0 "
"%select{enumeration|floating-point}1 type %3">,
InGroup<EnumFloatConversion>, DefaultIgnore;
-def warn_arith_conv_enum_float_cxx2a : Warning<
+def warn_arith_conv_enum_float_cxx20 : Warning<
"%sub{select_arith_conv_kind}0 "
"%select{floating-point|enumeration}1 type %2 "
"%plural{2:with|4:from|:and}0 "
@@ -6341,27 +6517,27 @@ def warn_arith_conv_mixed_enum_types : Warning<
"%sub{select_arith_conv_kind}0 "
"different enumeration types%diff{ ($ and $)|}1,2">,
InGroup<EnumEnumConversion>, DefaultIgnore;
-def warn_arith_conv_mixed_enum_types_cxx2a : Warning<
+def warn_arith_conv_mixed_enum_types_cxx20 : Warning<
"%sub{select_arith_conv_kind}0 "
"different enumeration types%diff{ ($ and $)|}1,2 is deprecated">,
InGroup<DeprecatedEnumEnumConversion>;
def warn_arith_conv_mixed_anon_enum_types : Warning<
warn_arith_conv_mixed_enum_types.Text>,
InGroup<AnonEnumEnumConversion>, DefaultIgnore;
-def warn_arith_conv_mixed_anon_enum_types_cxx2a : Warning<
- warn_arith_conv_mixed_enum_types_cxx2a.Text>,
+def warn_arith_conv_mixed_anon_enum_types_cxx20 : Warning<
+ warn_arith_conv_mixed_enum_types_cxx20.Text>,
InGroup<DeprecatedAnonEnumEnumConversion>;
def warn_conditional_mixed_enum_types : Warning<
warn_arith_conv_mixed_enum_types.Text>,
InGroup<EnumCompareConditional>, DefaultIgnore;
-def warn_conditional_mixed_enum_types_cxx2a : Warning<
- warn_arith_conv_mixed_enum_types_cxx2a.Text>,
+def warn_conditional_mixed_enum_types_cxx20 : Warning<
+ warn_arith_conv_mixed_enum_types_cxx20.Text>,
InGroup<DeprecatedEnumCompareConditional>;
def warn_comparison_mixed_enum_types : Warning<
warn_arith_conv_mixed_enum_types.Text>,
InGroup<EnumCompare>;
-def warn_comparison_mixed_enum_types_cxx2a : Warning<
- warn_arith_conv_mixed_enum_types_cxx2a.Text>,
+def warn_comparison_mixed_enum_types_cxx20 : Warning<
+ warn_arith_conv_mixed_enum_types_cxx20.Text>,
InGroup<DeprecatedEnumCompare>;
def warn_comparison_of_mixed_enum_types_switch : Warning<
"comparison of different enumeration types in switch statement"
@@ -6649,8 +6825,10 @@ def warn_objc_unsafe_perform_selector : Warning<
InGroup<DiagGroup<"objc-unsafe-perform-selector">>;
def note_objc_unsafe_perform_selector_method_declared_here : Note<
"method %0 that returns %1 declared here">;
-def err_attribute_arm_mve_alias : Error<
- "'__clang_arm_mve_alias' attribute can only be applied to an ARM MVE builtin">;
+def err_attribute_arm_builtin_alias : Error<
+ "'__clang_arm_builtin_alias' attribute can only be applied to an ARM builtin">;
+def err_attribute_arm_mve_polymorphism : Error<
+ "'__clang_arm_mve_strict_polymorphism' attribute can only be applied to an MVE/NEON vector type">;
def warn_setter_getter_impl_required : Warning<
"property %0 requires method %1 to be defined - "
@@ -6700,34 +6878,34 @@ def err_bad_cstyle_cast_overload : Error<
def err_bad_cxx_cast_generic : Error<
- "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2 is not allowed">;
+ "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|"
+ "C-style cast|functional-style cast|addrspace_cast}0 from %1 to %2 is not allowed">;
def err_bad_cxx_cast_unrelated_class : Error<
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2, which are not related by "
+ "functional-style cast|}0 from %1 to %2, which are not related by "
"inheritance, is not allowed">;
def note_type_incomplete : Note<"%0 is incomplete">;
def err_bad_cxx_cast_rvalue : Error<
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from rvalue to reference type %2">;
+ "functional-style cast|addrspace_cast}0 from rvalue to reference type %2">;
def err_bad_cxx_cast_bitfield : Error<
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from bit-field lvalue to reference type %2">;
+ "functional-style cast|}0 from bit-field lvalue to reference type %2">;
def err_bad_cxx_cast_qualifiers_away : Error<
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2 casts away qualifiers">;
+ "functional-style cast|}0 from %1 to %2 casts away qualifiers">;
def err_bad_cxx_cast_addr_space_mismatch : Error<
- "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2 converts between mismatching address"
+ "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|"
+ "C-style cast|functional-style cast|addrspace_cast}0 from %1 to %2 converts between mismatching address"
" spaces">;
def ext_bad_cxx_cast_qualifiers_away_incoherent : ExtWarn<
"ISO C++ does not allow "
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
- "functional-style cast}0 from %1 to %2 because it casts away qualifiers, "
+ "functional-style cast|}0 from %1 to %2 because it casts away qualifiers, "
"even though the source and destination types are unrelated">,
SFINAEFailure, InGroup<DiagGroup<"cast-qual-unrelated">>;
def err_bad_const_cast_dest : Error<
- "%select{const_cast||||C-style cast|functional-style cast}0 to %2, "
+ "%select{const_cast||||C-style cast|functional-style cast|}0 to %2, "
"which is not a reference, pointer-to-object, or pointer-to-data-member">;
def ext_cast_fn_obj : Extension<
"cast between pointer-to-function and pointer-to-object is an extension">;
@@ -6740,13 +6918,13 @@ def warn_cxx98_compat_cast_fn_obj : Warning<
def err_bad_reinterpret_cast_small_int : Error<
"cast from pointer to smaller type %2 loses information">;
def err_bad_cxx_cast_vector_to_scalar_different_size : Error<
- "%select{||reinterpret_cast||C-style cast|}0 from vector %1 "
+ "%select{||reinterpret_cast||C-style cast||}0 from vector %1 "
"to scalar %2 of different size">;
def err_bad_cxx_cast_scalar_to_vector_different_size : Error<
- "%select{||reinterpret_cast||C-style cast|}0 from scalar %1 "
+ "%select{||reinterpret_cast||C-style cast||}0 from scalar %1 "
"to vector %2 of different size">;
def err_bad_cxx_cast_vector_to_vector_different_size : Error<
- "%select{||reinterpret_cast||C-style cast|}0 from vector %1 "
+ "%select{||reinterpret_cast||C-style cast||}0 from vector %1 "
"to vector %2 of different size">;
def warn_bad_cxx_cast_nested_pointer_addr_space : Warning<
"%select{reinterpret_cast|C-style cast}0 from %1 to %2 "
@@ -6763,7 +6941,7 @@ def err_bad_static_cast_pointer_nonpointer : Error<
def err_bad_static_cast_member_pointer_nonmp : Error<
"cannot cast from type %1 to member pointer type %2">;
def err_bad_cxx_cast_member_pointer_size : Error<
- "cannot %select{||reinterpret_cast||C-style cast|}0 from member pointer "
+ "cannot %select{||reinterpret_cast||C-style cast||}0 from member pointer "
"type %1 to member pointer type %2 of different size">;
def err_bad_reinterpret_cast_reference : Error<
"reinterpret_cast of a %0 to %1 needs its address, which is not allowed">;
@@ -6807,8 +6985,8 @@ def err_array_new_needs_size : Error<
"array size must be specified in new expression with no initializer">;
def err_bad_new_type : Error<
"cannot allocate %select{function|reference}1 type %0 with new">;
-def err_new_incomplete_type : Error<
- "allocation of incomplete type %0">;
+def err_new_incomplete_or_sizeless_type : Error<
+ "allocation of %select{incomplete|sizeless}0 type %1">;
def err_new_array_nonconst : Error<
"only the first dimension of an allocated array may have dynamic size">;
def err_new_array_size_unknown_from_init : Error<
@@ -6925,6 +7103,8 @@ def err_catch_incomplete_ptr : Error<
def err_catch_incomplete_ref : Error<
"cannot catch reference to incomplete type %0">;
def err_catch_incomplete : Error<"cannot catch incomplete type %0">;
+def err_catch_sizeless : Error<
+ "cannot catch %select{|reference to }0sizeless type %1">;
def err_catch_rvalue_ref : Error<"cannot catch exceptions by rvalue reference">;
def err_catch_variably_modified : Error<
"cannot catch variably modified type %0">;
@@ -7030,6 +7210,8 @@ def err_throw_incomplete : Error<
"cannot throw object of incomplete type %0">;
def err_throw_incomplete_ptr : Error<
"cannot throw pointer to object of incomplete type %0">;
+def err_throw_sizeless : Error<
+ "cannot throw object of sizeless type %0">;
def warn_throw_underaligned_obj : Warning<
"underaligned exception object thrown">,
InGroup<UnderalignedExceptionObject>;
@@ -7140,7 +7322,7 @@ let CategoryName = "Lambda Issue" in {
def warn_cxx17_compat_equals_this_lambda_capture : Warning<
"explicit capture of 'this' with a capture default of '=' is incompatible "
"with C++ standards before C++20">, InGroup<CXXPre20Compat>, DefaultIgnore;
- def ext_equals_this_lambda_capture_cxx2a : ExtWarn<
+ def ext_equals_this_lambda_capture_cxx20 : ExtWarn<
"explicit capture of 'this' with a capture default of '=' "
"is a C++20 extension">, InGroup<CXX20>;
def warn_deprecated_this_capture : Warning<
@@ -7302,6 +7484,21 @@ def warn_incompatible_qualified_id : Warning<
"sending type to parameter of incompatible type}0,1"
"|%diff{casting $ to incompatible type $|"
"casting type to incompatible type}0,1}2">;
+def err_incompatible_qualified_id : Error<
+ "%select{%diff{assigning to $ from incompatible type $|"
+ "assigning to type from incompatible type}0,1"
+ "|%diff{passing $ to parameter of incompatible type $|"
+ "passing type to parameter of incompatible type}0,1"
+ "|%diff{returning $ from a function with incompatible result type $|"
+ "returning type from a function with incompatible result type}0,1"
+ "|%diff{converting $ to incompatible type $|"
+ "converting type to incompatible type}0,1"
+ "|%diff{initializing $ with an expression of incompatible type $|"
+ "initializing type with an expression of incompatible type}0,1"
+ "|%diff{sending $ to parameter of incompatible type $|"
+ "sending type to parameter of incompatible type}0,1"
+ "|%diff{casting $ to incompatible type $|"
+ "casting type to incompatible type}0,1}2">;
def ext_typecheck_convert_pointer_int : ExtWarn<
"incompatible pointer to integer conversion "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7320,6 +7517,23 @@ def ext_typecheck_convert_pointer_int : ExtWarn<
"; remove *|"
"; remove &}3">,
InGroup<IntConversion>;
+def err_typecheck_convert_pointer_int : Error<
+ "incompatible pointer to integer conversion "
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">;
def ext_typecheck_convert_int_pointer : ExtWarn<
"incompatible integer to pointer conversion "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7338,6 +7552,23 @@ def ext_typecheck_convert_int_pointer : ExtWarn<
"; remove *|"
"; remove &}3">,
InGroup<IntConversion>, SFINAEFailure;
+def err_typecheck_convert_int_pointer : Error<
+ "incompatible integer to pointer conversion "
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">;
def ext_typecheck_convert_pointer_void_func : Extension<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -7351,6 +7582,19 @@ def ext_typecheck_convert_pointer_void_func : Extension<
"sending to parameter of different type}0,1"
"|%diff{casting $ to type $|casting between types}0,1}2"
" converts between void pointer and function pointer">;
+def err_typecheck_convert_pointer_void_func : Error<
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " converts between void pointer and function pointer">;
def ext_typecheck_convert_incompatible_pointer_sign : ExtWarn<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -7365,6 +7609,19 @@ def ext_typecheck_convert_incompatible_pointer_sign : ExtWarn<
"|%diff{casting $ to type $|casting between types}0,1}2"
" converts between pointers to integer types with different sign">,
InGroup<DiagGroup<"pointer-sign">>;
+def err_typecheck_convert_incompatible_pointer_sign : Error<
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " converts between pointers to integer types with different sign">;
def ext_typecheck_convert_incompatible_pointer : ExtWarn<
"incompatible pointer types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7383,6 +7640,23 @@ def ext_typecheck_convert_incompatible_pointer : ExtWarn<
"; remove *|"
"; remove &}3">,
InGroup<IncompatiblePointerTypes>;
+def err_typecheck_convert_incompatible_pointer : Error<
+ "incompatible pointer types "
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">;
def ext_typecheck_convert_incompatible_function_pointer : ExtWarn<
"incompatible function pointer types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7401,6 +7675,23 @@ def ext_typecheck_convert_incompatible_function_pointer : ExtWarn<
"; remove *|"
"; remove &}3">,
InGroup<IncompatibleFunctionPointerTypes>;
+def err_typecheck_convert_incompatible_function_pointer : Error<
+ "incompatible function pointer types "
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">;
def ext_typecheck_convert_discards_qualifiers : ExtWarn<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -7415,6 +7706,19 @@ def ext_typecheck_convert_discards_qualifiers : ExtWarn<
"|%diff{casting $ to type $|casting between types}0,1}2"
" discards qualifiers">,
InGroup<IncompatiblePointerTypesDiscardsQualifiers>;
+def err_typecheck_convert_discards_qualifiers : Error<
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " discards qualifiers">;
def ext_nested_pointer_qualifier_mismatch : ExtWarn<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -7429,6 +7733,19 @@ def ext_nested_pointer_qualifier_mismatch : ExtWarn<
"|%diff{casting $ to type $|casting between types}0,1}2"
" discards qualifiers in nested pointer types">,
InGroup<IncompatiblePointerTypesDiscardsQualifiers>;
+def err_nested_pointer_qualifier_mismatch : Error<
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " discards qualifiers in nested pointer types">;
def warn_incompatible_vectors : Warning<
"incompatible vector types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7443,6 +7760,19 @@ def warn_incompatible_vectors : Warning<
"sending to parameter of different type}0,1"
"|%diff{casting $ to type $|casting between types}0,1}2">,
InGroup<VectorConversion>, DefaultIgnore;
+def err_incompatible_vectors : Error<
+ "incompatible vector types "
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2">;
def err_int_to_block_pointer : Error<
"invalid block pointer conversion "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7609,6 +7939,8 @@ def err_atomic_builtin_pointer_size : Error<
def err_atomic_exclusive_builtin_pointer_size : Error<
"address argument to load or store exclusive builtin must be a pointer to"
" 1,2,4 or 8 byte type (%0 invalid)">;
+def err_atomic_builtin_ext_int_size : Error<
+ "Atomic memory operand must have a power-of-two size">;
def err_atomic_op_needs_atomic : Error<
"address argument to atomic operation must be a pointer to _Atomic "
"type (%0 invalid)">;
@@ -7641,6 +7973,9 @@ def err_overflow_builtin_must_be_int : Error<
def err_overflow_builtin_must_be_ptr_int : Error<
"result argument to overflow builtin must be a pointer "
"to a non-const integer (%0 invalid)">;
+def err_overflow_builtin_ext_int_max_size : Error<
+ "__builtin_mul_overflow does not support signed _ExtInt operands of more "
+ "than %0 bits">;
def err_atomic_load_store_uses_lib : Error<
"atomic %select{load|store}0 requires runtime support that is not "
@@ -7673,6 +8008,10 @@ def err_ref_bad_target : Error<
def err_ref_bad_target_global_initializer : Error<
"reference to %select{__device__|__global__|__host__|__host__ __device__}0 "
"function %1 in global initializer">;
+def err_capture_bad_target : Error<
+ "capture host variable %0 by reference in device or host device lambda function">;
+def err_capture_bad_target_this_ptr : Error<
+ "capture host side class data member by this pointer in device or host device lambda function">;
def warn_kern_is_method : Extension<
"kernel function %0 is a member function; this may not be accepted by nvcc">,
InGroup<CudaCompat>;
@@ -7716,6 +8055,22 @@ def err_cuda_ovl_target : Error<
def note_cuda_ovl_candidate_target_mismatch : Note<
"candidate template ignored: target attributes do not match">;
+def err_cuda_device_builtin_surftex_cls_template : Error<
+ "illegal device builtin %select{surface|texture}0 reference "
+ "class template %1 declared here">;
+def note_cuda_device_builtin_surftex_cls_should_have_n_args : Note<
+ "%0 needs to have exactly %1 template parameters">;
+def note_cuda_device_builtin_surftex_cls_should_have_match_arg : Note<
+ "the %select{1st|2nd|3rd}1 template parameter of %0 needs to be "
+ "%select{a type|an integer or enum value}2">;
+
+def err_cuda_device_builtin_surftex_ref_decl : Error<
+ "illegal device builtin %select{surface|texture}0 reference "
+ "type %1 declared here">;
+def note_cuda_device_builtin_surftex_should_be_template_class : Note<
+ "%0 needs to be instantiated from a class template with proper "
+ "template arguments">;
+
def warn_non_pod_vararg_with_format_string : Warning<
"cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
"%select{function|block|method|constructor}2; expected type from format "
@@ -7797,6 +8152,8 @@ def warn_bad_function_cast : Warning<
InGroup<BadFunctionCast>, DefaultIgnore;
def err_cast_pointer_to_non_pointer_int : Error<
"pointer cannot be cast to type %0">;
+def err_cast_to_bfloat16 : Error<"cannot type-cast to __bf16">;
+def err_cast_from_bfloat16 : Error<"cannot type-cast from __bf16">;
def err_typecheck_expect_scalar_operand : Error<
"operand of type %0 where arithmetic or pointer type is required">;
def err_typecheck_cond_incompatible_operands : Error<
@@ -7852,7 +8209,7 @@ def ext_cxx14_attr : Extension<
"use of the %0 attribute is a C++14 extension">, InGroup<CXX14>;
def ext_cxx17_attr : Extension<
"use of the %0 attribute is a C++17 extension">, InGroup<CXX17>;
-def ext_cxx2a_attr : Extension<
+def ext_cxx20_attr : Extension<
"use of the %0 attribute is a C++20 extension">, InGroup<CXX20>;
def warn_unused_comparison : Warning<
@@ -8106,7 +8463,7 @@ def err_reference_to_local_in_enclosing_context : Error<
"%select{%3|block literal|lambda expression|context}2">;
def err_static_data_member_not_allowed_in_local_class : Error<
- "static data member %0 not allowed in local class %1">;
+ "static data member %0 not allowed in local %sub{select_tag_type_kind}2 %1">;
// C++ derived classes
def err_base_clause_on_union : Error<"unions cannot have base classes">;
@@ -8262,11 +8619,14 @@ def err_conv_function_with_complex_decl : Error<
def err_conv_function_redeclared : Error<
"conversion function cannot be redeclared">;
def warn_conv_to_self_not_used : Warning<
- "conversion function converting %0 to itself will never be used">;
+ "conversion function converting %0 to itself will never be used">,
+ InGroup<ClassConversion>;
def warn_conv_to_base_not_used : Warning<
- "conversion function converting %0 to its base class %1 will never be used">;
+ "conversion function converting %0 to its base class %1 will never be used">,
+ InGroup<ClassConversion>;
def warn_conv_to_void_not_used : Warning<
- "conversion function converting %0 to %1 will never be used">;
+ "conversion function converting %0 to %1 will never be used">,
+ InGroup<ClassConversion>;
def warn_not_compound_assign : Warning<
"use of unary operator that may be intended as compound assignment (%0=)">;
@@ -8914,6 +9274,8 @@ def err_block_on_nonlocal : Error<
"__block attribute not allowed, only allowed on local variables">;
def err_block_on_vm : Error<
"__block attribute not allowed on declaration with a variably modified type">;
+def err_sizeless_nonlocal : Error<
+ "non-local variable with sizeless type %0">;
def err_vec_builtin_non_vector : Error<
"first two arguments to %0 must be vectors">;
@@ -8965,6 +9327,10 @@ def err_argument_not_shifted_byte : Error<
"argument should be an 8-bit value shifted by a multiple of 8 bits">;
def err_argument_not_shifted_byte_or_xxff : Error<
"argument should be an 8-bit value shifted by a multiple of 8 bits, or in the form 0x??FF">;
+def err_rotation_argument_to_cadd
+ : Error<"argument should be the value 90 or 270">;
+def err_rotation_argument_to_cmla
+ : Error<"argument should be the value 0, 90, 180 or 270">;
def warn_neon_vector_initializer_non_portable : Warning<
"vector initializers are not compatible with NEON intrinsics in big endian "
"mode">, InGroup<DiagGroup<"nonportable-vector-initialization">>;
@@ -8995,12 +9361,8 @@ def err_x86_builtin_invalid_rounding : Error<
"invalid rounding argument">;
def err_x86_builtin_invalid_scale : Error<
"scale argument must be 1, 2, 4, or 8">;
-def err_hexagon_builtin_unsupported_cpu : Error<
- "builtin is not supported on this CPU">;
-def err_hexagon_builtin_requires_hvx : Error<
- "builtin requires HVX">;
-def err_hexagon_builtin_unsupported_hvx : Error<
- "builtin is not supported on this version of HVX">;
+def err_x86_builtin_tile_arg_duplicate : Error<
+ "tile arguments must refer to different tiles">;
def err_builtin_target_unsupported : Error<
"builtin is not supported on this target">;
@@ -9418,7 +9780,7 @@ def err_omp_expected_var_name_member_expr : Error<
def err_omp_expected_var_name_member_expr_or_array_item : Error<
"expected variable name%select{|, data member of current class}0, array element or array section">;
def err_omp_expected_addressable_lvalue_or_array_item : Error<
- "expected addressable lvalue expression, array element or array section">;
+ "expected addressable lvalue expression, array element%select{ or array section|, array section or array shaping expression}0%select{| of non 'omp_depend_t' type}1">;
def err_omp_expected_named_var_member_or_array_expression: Error<
"expected expression containing only member accesses and/or array sections based on named variables">;
def err_omp_bit_fields_forbidden_in_clause : Error<
@@ -9493,6 +9855,12 @@ def note_omp_conversion_here : Note<
def err_omp_ambiguous_conversion : Error<
"ambiguous conversion from type %0 to an integral or unscoped "
"enumeration type">;
+def err_omp_iterator_not_integral_or_pointer : Error<
+ "expected integral or pointer type as the iterator-type, not %0">;
+def err_omp_iterator_step_not_integral : Error<
+ "iterator step expression %0 is not the integral expression">;
+def err_omp_iterator_step_constant_zero : Error<
+ "iterator step expression %0 evaluates to 0">;
def err_omp_required_access : Error<
"%0 variable must be %1">;
def err_omp_const_variable : Error<
@@ -9581,8 +9949,6 @@ def err_omp_reduction_in_task : Error<
"reduction variables may not be accessed in an explicit task">;
def err_omp_reduction_id_not_compatible : Error<
"list item of type %0 is not valid for specified reduction operation: unable to provide default initialization value">;
-def err_omp_in_reduction_not_task_reduction : Error<
- "in_reduction variable must appear in a task_reduction clause">;
def err_omp_reduction_identifier_mismatch : Error<
"in_reduction variable must have the same reduction operation as in a task_reduction clause">;
def note_omp_previous_reduction_identifier : Note<
@@ -9592,15 +9958,20 @@ def err_omp_prohibited_region : Error<
"%select{|; perhaps you forget to enclose 'omp %3' directive into a parallel region?|"
"; perhaps you forget to enclose 'omp %3' directive into a for or a parallel for region with 'ordered' clause?|"
"; perhaps you forget to enclose 'omp %3' directive into a target region?|"
- "; perhaps you forget to enclose 'omp %3' directive into a teams region?}2">;
+ "; perhaps you forget to enclose 'omp %3' directive into a teams region?|"
+ "; perhaps you forget to enclose 'omp %3' directive into a for, simd, for simd, parallel for, or parallel for simd region?}2">;
def err_omp_prohibited_region_simd : Error<
- "OpenMP constructs may not be nested inside a simd region%select{| except for ordered simd, simd or atomic directive}0">;
+ "OpenMP constructs may not be nested inside a simd region%select{| except for ordered simd, simd, scan, or atomic directive}0">;
def err_omp_prohibited_region_atomic : Error<
"OpenMP constructs may not be nested inside an atomic region">;
def err_omp_prohibited_region_critical_same_name : Error<
"cannot nest 'critical' regions having the same name %0">;
def note_omp_previous_critical_region : Note<
"previous 'critical' region starts here">;
+def err_omp_several_directives_in_region : Error<
+ "exactly one '%0' directive must appear in the loop body of an enclosing directive">;
+def note_omp_previous_directive : Note<
+ "previous '%0' directive used here">;
def err_omp_sections_not_compound_stmt : Error<
"the statement for '#pragma omp sections' must be a compound statement">;
def err_omp_parallel_sections_not_compound_stmt : Error<
@@ -9644,7 +10015,11 @@ def note_omp_atomic_capture: Note<
"%select{expected assignment expression|expected compound statement|expected exactly two expression statements|expected in right hand side of the first expression}0">;
def err_omp_atomic_several_clauses : Error<
"directive '#pragma omp atomic' cannot contain more than one 'read', 'write', 'update' or 'capture' clause">;
-def note_omp_atomic_previous_clause : Note<
+def err_omp_several_mem_order_clauses : Error<
+ "directive '#pragma omp %0' cannot contain more than one %select{'seq_cst', 'relaxed', |}1'acq_rel', 'acquire' or 'release' clause">;
+def err_omp_atomic_incompatible_mem_order_clause : Error<
+ "directive '#pragma omp atomic%select{ %0|}1' cannot be used with '%2' clause">;
+def note_omp_previous_mem_order_clause : Note<
"'%0' clause used here">;
def err_omp_target_contains_not_only_teams : Error<
"target construct with nested teams region contains statements outside of the teams construct">;
@@ -9678,10 +10053,16 @@ def err_omp_declare_mapper_redefinition : Error<
def err_omp_invalid_mapper: Error<
"cannot find a valid user-defined mapper for type %0 with name %1">;
def err_omp_array_section_use : Error<"OpenMP array section is not allowed here">;
+def err_omp_array_shaping_use : Error<"OpenMP array shaping operation is not allowed here">;
+def err_omp_iterator_use : Error<"OpenMP iterator is not allowed here">;
def err_omp_typecheck_section_value : Error<
"subscripted value is not an array or pointer">;
def err_omp_typecheck_section_not_integer : Error<
"array section %select{lower bound|length}0 is not an integer">;
+def err_omp_typecheck_shaping_not_integer : Error<
+ "array shaping operation dimension is not an integer">;
+def err_omp_shaping_dimension_not_positive : Error<
+ "array shaping dimension is evaluated to a non-positive value %0">;
def err_omp_section_function_type : Error<
"section of pointer to function type %0">;
def warn_omp_section_is_char : Warning<"array section %select{lower bound|length}0 is of type 'char'">,
@@ -9692,6 +10073,8 @@ def err_omp_section_not_subset_of_array : Error<
"array section must be a subset of the original array">;
def err_omp_section_length_negative : Error<
"section length is evaluated to a negative value %0">;
+def err_omp_section_stride_non_positive : Error<
+ "section stride is evaluated to a non-positive value %0">;
def err_omp_section_length_undefined : Error<
"section length is unspecified and cannot be inferred because subscripted value is %select{not an array|an array of unknown bound}0">;
def err_omp_wrong_linear_modifier : Error<
@@ -9713,7 +10096,7 @@ def err_omp_ordered_directive_with_param : Error<
def err_omp_ordered_directive_without_param : Error<
"'ordered' directive with 'depend' clause cannot be closely nested inside ordered region without specified parameter">;
def note_omp_ordered_param : Note<
- "'ordered' clause with specified parameter">;
+ "'ordered' clause%select{| with specified parameter}0">;
def err_omp_expected_base_var_name : Error<
"expected variable name as a base of the array %select{subscript|section}0">;
def err_omp_map_shared_storage : Error<
@@ -9728,9 +10111,9 @@ def err_omp_wrong_ordered_loop_count : Error<
"the parameter of the 'ordered' clause must be greater than or equal to the parameter of the 'collapse' clause">;
def note_collapse_loop_count : Note<
"parameter of the 'collapse' clause">;
-def err_omp_grainsize_num_tasks_mutually_exclusive : Error<
+def err_omp_clauses_mutually_exclusive : Error<
"'%0' and '%1' clause are mutually exclusive and may not appear on the same directive">;
-def note_omp_previous_grainsize_num_tasks : Note<
+def note_omp_previous_clause : Note<
"'%0' clause is specified here">;
def err_omp_hint_clause_no_name : Error<
"the name of the construct must be specified in presence of 'hint' clause">;
@@ -9752,14 +10135,18 @@ def err_omp_depend_sink_source_not_allowed : Error<
"'depend(%select{source|sink:vec}0)' clause%select{|s}0 cannot be mixed with 'depend(%select{sink:vec|source}0)' clause%select{s|}0">;
def err_omp_depend_zero_length_array_section_not_allowed : Error<
"zero-length array section is not allowed in 'depend' clause">;
+def err_omp_depend_sink_source_with_modifier : Error<
+ "depend modifier cannot be used with 'sink' or 'source' depend type">;
+def err_omp_depend_modifier_not_iterator : Error<
+ "expected iterator specification as depend modifier">;
def err_omp_linear_ordered : Error<
"'linear' clause cannot be specified along with 'ordered' clause with a parameter">;
def err_omp_unexpected_schedule_modifier : Error<
"modifier '%0' cannot be used along with modifier '%1'">;
def err_omp_schedule_nonmonotonic_static : Error<
"'nonmonotonic' modifier can only be specified with 'dynamic' or 'guided' schedule kind">;
-def err_omp_schedule_nonmonotonic_ordered : Error<
- "'schedule' clause with 'nonmonotonic' modifier cannot be specified if an 'ordered' clause is specified">;
+def err_omp_simple_clause_incompatible_with_ordered : Error<
+ "'%0' clause with '%1' modifier cannot be specified if an 'ordered' clause is specified">;
def err_omp_ordered_simd : Error<
"'ordered' clause with a parameter can not be specified in '#pragma omp %0' directive">;
def err_omp_variable_in_given_clause_and_dsa : Error<
@@ -9781,7 +10168,8 @@ def warn_omp_nesting_simd : Warning<
InGroup<SourceUsesOpenMP>;
def err_omp_orphaned_device_directive : Error<
"orphaned 'omp %0' directives are prohibited"
- "; perhaps you forget to enclose the directive into a %select{|||target |teams }1region?">;
+ "; perhaps you forget to enclose the directive into a "
+ "%select{|||target |teams|for, simd, for simd, parallel for, or parallel for simd }1region?">;
def err_omp_reduction_non_addressable_expression : Error<
"expected addressable reduction item for the task-based directives">;
def err_omp_reduction_with_nogroup : Error<
@@ -9797,10 +10185,10 @@ def err_omp_requires_clause_redeclaration : Error <
"Only one %0 clause can appear on a requires directive in a single translation unit">;
def note_omp_requires_previous_clause : Note <
"%0 clause previously used here">;
-def err_omp_target_before_requires : Error <
- "target region encountered before requires directive with '%0' clause">;
-def note_omp_requires_encountered_target : Note <
- "target previously encountered here">;
+def err_omp_directive_before_requires : Error <
+ "'%0' region encountered before requires directive with '%1' clause">;
+def note_omp_requires_encountered_directive : Note <
+ "'%0' previously encountered here">;
def err_omp_invalid_scope : Error <
"'#pragma omp %0' directive must appear only in file scope">;
def note_omp_invalid_length_on_this_ptr_mapping : Note <
@@ -9811,8 +10199,29 @@ def note_omp_invalid_subscript_on_this_ptr_map : Note <
"expected 'this' subscript expression on map clause to be 'this[0]'">;
def err_omp_invalid_map_this_expr : Error <
"invalid 'this' expression on 'map' clause">;
-def err_implied_omp_allocator_handle_t_not_found : Error<
- "omp_allocator_handle_t type not found; include <omp.h>">;
+def err_omp_implied_type_not_found : Error<
+ "'%0' type not found; include <omp.h>">;
+def err_omp_expected_omp_depend_t_lvalue : Error<
+ "expected lvalue expression%select{ of 'omp_depend_t' type, not %1|}0">;
+def err_omp_depobj_expected : Error<
+ "expected depobj expression">;
+def err_omp_depobj_single_clause_expected : Error<
+ "exactly one of 'depend', 'destroy', or 'update' clauses is expected">;
+def err_omp_scan_single_clause_expected : Error<
+ "exactly one of 'inclusive' or 'exclusive' clauses is expected">;
+def err_omp_inclusive_exclusive_not_reduction : Error<
+ "the list item must appear in 'reduction' clause with the 'inscan' modifier "
+ "of the parent directive">;
+def err_omp_reduction_not_inclusive_exclusive : Error<
+ "the inscan reduction list item must appear as a list item in an 'inclusive' or"
+ " 'exclusive' clause on an inner 'omp scan' directive">;
+def err_omp_wrong_inscan_reduction : Error<
+ "'inscan' modifier can be used only in 'omp for', 'omp simd', 'omp for simd',"
+ " 'omp parallel for', or 'omp parallel for simd' directive">;
+def err_omp_inscan_reduction_expected : Error<
+ "expected 'reduction' clause with the 'inscan' modifier">;
+def note_omp_previous_inscan_reduction : Note<
+ "'reduction' clause with 'inscan' modifier is used here">;
def err_omp_expected_predefined_allocator : Error<
"expected one of the predefined allocators for the variables with the static "
"storage: 'omp_default_mem_alloc', 'omp_large_cap_mem_alloc', "
@@ -9841,8 +10250,8 @@ def err_omp_invariant_or_linear_dependency : Error<
"expected loop invariant expression or '<invariant1> * %0 + <invariant2>' kind of expression">;
def err_omp_wrong_dependency_iterator_type : Error<
"expected an integer or a pointer type of the outer loop counter '%0' for non-rectangular nests">;
-def err_omp_unsupported_type : Error <
- "host requires %0 bit size %1 type support, but device '%2' does not support it">;
+def err_device_unsupported_type : Error <
+ "%0 requires %1 bit size %2 type support, but device '%3' does not support it">;
def err_omp_lambda_capture_in_declare_target_not_to : Error<
"variable captured in declare target region must appear in a to clause">;
def err_omp_device_type_mismatch : Error<
@@ -9855,6 +10264,13 @@ def warn_omp_declare_target_after_first_use : Warning<
InGroup<OpenMPTarget>;
def err_omp_declare_variant_incompat_attributes : Error<
"'#pragma omp declare variant' is not compatible with any target-specific attributes">;
+def warn_omp_declare_variant_score_not_constant
+ : Warning<"score expressions in the OpenMP context selector need to be "
+ "constant; %0 is not and will be ignored">,
+ InGroup<SourceUsesOpenMP>;
+def err_omp_declare_variant_user_condition_not_constant
+ : Error<"the user condition in the OpenMP context selector needs to be "
+ "constant; %0 is not">;
def warn_omp_declare_variant_after_used : Warning<
"'#pragma omp declare variant' cannot be applied for function after first "
"usage; the original function might be used">, InGroup<SourceUsesOpenMP>;
@@ -9882,6 +10298,37 @@ def err_omp_one_defaultmap_each_category: Error<
def err_omp_lastprivate_conditional_non_scalar : Error<
"expected list item of scalar type in 'lastprivate' clause with 'conditional' modifier"
>;
+def err_omp_flush_order_clause_and_list : Error<
+ "'flush' directive with memory order clause '%0' cannot have the list">;
+def note_omp_flush_order_clause_here : Note<
+ "memory order clause '%0' is specified here">;
+def err_omp_non_lvalue_in_map_or_motion_clauses: Error<
+ "expected addressable lvalue in '%0' clause">;
+def err_omp_var_expected : Error<
+ "expected variable of the '%0' type%select{|, not %2}1">;
+def warn_nested_declare_variant
+ : Warning<"nesting `omp begin/end declare variant` is not supported yet; "
+ "nested context ignored">,
+ InGroup<SourceUsesOpenMP>;
+def err_omp_non_pointer_type_array_shaping_base : Error<
+ "expected expression with a pointer to a complete type as a base of an array "
+ "shaping operation">;
+def err_omp_reduction_task_not_parallel_or_worksharing : Error<
+ "'reduction' clause with 'task' modifier allowed only on non-simd parallel or"
+ " worksharing constructs">;
+def err_omp_expected_array_alloctraits : Error<
+ "expected constant sized array of 'omp_alloctrait_t' elements, not %0">;
+def err_omp_predefined_allocator_with_traits : Error<
+ "predefined allocator cannot have traits specified">;
+def note_omp_predefined_allocator : Note<
+ "predefined trait '%0' used here">;
+def err_omp_nonpredefined_allocator_without_traits : Error<
+ "non-predefined allocator must have traits specified">;
+def err_omp_allocator_used_in_clauses : Error<
+ "allocators used in 'uses_allocators' clause cannot appear in other "
+ "data-sharing or data-mapping attribute clauses">;
+def err_omp_allocator_not_in_uses_allocators : Error<
+ "allocator must be specified in the 'uses_allocators' clause">;
} // end of OpenMP category
let CategoryName = "Related Result Type Issue" in {
@@ -9950,11 +10397,6 @@ def err_module_unimported_use : Error<
"explicit specialization|partial specialization}0 of %1 must be imported "
"from module '%2' before it is required">;
def err_module_unimported_use_header : Error<
- "missing '#include %3'; "
- "%select{declaration|definition|default argument|"
- "explicit specialization|partial specialization}0 of %1 must be imported "
- "from module '%2' before it is required">;
-def err_module_unimported_use_global_module_fragment : Error<
"%select{missing '#include'|missing '#include %3'}2; "
"%select{||default argument of |explicit specialization of |"
"partial specialization of }0%1 must be "
@@ -9964,6 +10406,10 @@ def err_module_unimported_use_multiple : Error<
"%select{declaration|definition|default argument|"
"explicit specialization|partial specialization}0 of %1 must be imported "
"from one of the following modules before it is required:%2">;
+def note_unreachable_entity : Note<
+ "%select{declaration|definition|default argument declared|"
+ "explicit specialization declared|partial specialization declared}0 here "
+ "is not %select{visible|reachable|reachable|reachable|reachable|reachable}0">;
def ext_module_import_in_extern_c : ExtWarn<
"import of C++ module '%0' appears within extern \"C\" language linkage "
"specification">, DefaultError,
@@ -10104,7 +10550,16 @@ def err_await_suspend_invalid_return_type : Error<
def note_await_ready_no_bool_conversion : Note<
"return type of 'await_ready' is required to be contextually convertible to 'bool'"
>;
-}
+def warn_coroutine_handle_address_invalid_return_type : Warning <
+ "return type of 'coroutine_handle<>::address should be 'void*' (have %0) in order to get capability with existing async C API.">,
+ InGroup<Coroutine>;
+def err_coroutine_promise_final_suspend_requires_nothrow : Error<
+ "the expression 'co_await __promise.final_suspend()' is required to be non-throwing"
+>;
+def note_coroutine_function_declare_noexcept : Note<
+ "must be declared with 'noexcept'"
+>;
+} // end of coroutines issue category
let CategoryName = "Documentation Issue" in {
def warn_not_a_doxygen_trailing_member_comment : Warning<
@@ -10349,10 +10804,46 @@ def err_builtin_launder_invalid_arg : Error<
"%select{non-pointer|function pointer|void pointer}0 argument to "
"'__builtin_launder' is not allowed">;
+def err_builtin_matrix_disabled: Error<
+ "matrix types extension is disabled. Pass -fenable-matrix to enable it">;
+def err_matrix_index_not_integer: Error<
+ "matrix %select{row|column}0 index is not an integer">;
+def err_matrix_index_outside_range: Error<
+ "matrix %select{row|column}0 index is outside the allowed range [0, %1)">;
+def err_matrix_incomplete_index: Error<
+ "single subscript expressions are not allowed for matrix values">;
+def err_matrix_separate_incomplete_index: Error<
+ "matrix row and column subscripts cannot be separated by any expression">;
+def err_matrix_subscript_comma: Error<
+ "comma expressions are not allowed as indices in matrix subscript expressions">;
+def err_builtin_matrix_arg: Error<"1st argument must be a matrix">;
+def err_builtin_matrix_scalar_unsigned_arg: Error<
+ "%0 argument must be a constant unsigned integer expression">;
+def err_builtin_matrix_pointer_arg: Error<
+ "%ordinal0 argument must be a pointer to a valid matrix element type">;
+def err_builtin_matrix_pointer_arg_mismatch: Error<
+ "the pointee of the 2nd argument must match the element type of the 1st argument (%0 != %1)">;
+def err_builtin_matrix_store_to_const: Error<
+ "cannot store matrix to read-only pointer">;
+def err_builtin_matrix_stride_too_small: Error<
+ "stride must be greater or equal to the number of rows">;
+def err_builtin_matrix_invalid_dimension: Error<
+ "%0 dimension is outside the allowed range [1, %1]">;
+
+def warn_mismatched_import : Warning<
+ "import %select{module|name}0 (%1) does not match the import %select{module|name}0 (%2) of the "
+ "previous declaration">,
+ InGroup<IgnoredAttributes>;
+def warn_import_on_definition : Warning<
+ "import %select{module|name}0 cannot be applied to a function with a definition">,
+ InGroup<IgnoredAttributes>;
+
def err_preserve_field_info_not_field : Error<
"__builtin_preserve_field_info argument %0 not a field access">;
def err_preserve_field_info_not_const: Error<
"__builtin_preserve_field_info argument %0 not a constant">;
+def err_btf_type_id_not_const: Error<
+ "__builtin_btf_type_id argument %0 not a constant">;
def err_bit_cast_non_trivially_copyable : Error<
"__builtin_bit_cast %select{source|destination}0 type must be trivially copyable">;
@@ -10373,4 +10864,16 @@ def warn_sycl_kernel_return_type : Warning<
"function template with 'sycl_kernel' attribute must have a 'void' return type">,
InGroup<IgnoredAttributes>;
+def err_ext_int_bad_size : Error<"%select{signed|unsigned}0 _ExtInt must "
+ "have a bit size of at least %select{2|1}0">;
+def err_ext_int_max_size : Error<"%select{signed|unsigned}0 _ExtInt of bit "
+ "sizes greater than %1 not supported">;
+
+// errors of expect.with.probability
+def err_probability_not_constant_float : Error<
+ "probability argument to __builtin_expect_with_probability must be constant "
+ "floating-point expression">;
+def err_probability_out_of_range : Error<
+ "probability argument to __builtin_expect_with_probability is outside the "
+ "range [0.0, 1.0]">;
} // end of sema component.
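
A minimal sketch of how one of the new matrix subscript diagnostics above might be emitted from Sema. checkMatrixIndexInBounds is a hypothetical helper used only for illustration; the only assumed APIs are Sema::Diag and the diagnostic ID added in this hunk.

#include "clang/AST/Expr.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Sema/Sema.h"

// The first %select picks "row" (0) or "column" (1); %1 is the exclusive bound.
static bool checkMatrixIndexInBounds(clang::Sema &S, const clang::Expr *IndexExpr,
                                     bool IsColumnIndex, unsigned NumElements,
                                     unsigned IndexValue) {
  if (IndexValue >= NumElements) {
    S.Diag(IndexExpr->getBeginLoc(), clang::diag::err_matrix_index_outside_range)
        << IsColumnIndex << NumElements;
    return false;
  }
  return true;
}
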
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ExpressionTraits.h b/contrib/llvm-project/clang/include/clang/Basic/ExpressionTraits.h
index 85005330a0af..b38ebd9ac60b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ExpressionTraits.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ExpressionTraits.h
@@ -14,12 +14,24 @@
#ifndef LLVM_CLANG_BASIC_EXPRESSIONTRAITS_H
#define LLVM_CLANG_BASIC_EXPRESSIONTRAITS_H
+#include "llvm/Support/Compiler.h"
+
namespace clang {
- enum ExpressionTrait {
- ET_IsLValueExpr,
- ET_IsRValueExpr
- };
-}
+enum ExpressionTrait {
+#define EXPRESSION_TRAIT(Spelling, Name, Key) ET_##Name,
+#include "clang/Basic/TokenKinds.def"
+ ET_Last = -1 // ET_Last == last ET_XX in the enum.
+#define EXPRESSION_TRAIT(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+};
+
+/// Return the internal name of expression trait \p T. Never null.
+const char *getTraitName(ExpressionTrait T) LLVM_READONLY;
+
+/// Return the spelling of the expression trait \p T. Never null.
+const char *getTraitSpelling(ExpressionTrait T) LLVM_READONLY;
+
+} // namespace clang
#endif
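
The ET_Last definition above relies on a counting idiom: the sentinel starts at -1 and the .def list is included a second time so that every entry expands to "+1", leaving ET_Last equal to the last real enumerator. A self-contained illustration with a local macro list (instead of TokenKinds.def):

#define MY_EXPRESSION_TRAITS(X) X(IsLValueExpr) X(IsRValueExpr)

enum MyExpressionTrait {
#define TRAIT(Name) ET_##Name,
  MY_EXPRESSION_TRAITS(TRAIT)
#undef TRAIT
  // -1, plus one "+1" per entry emitted by the second expansion below.
  ET_Last = -1
#define TRAIT(Name) +1
  MY_EXPRESSION_TRAITS(TRAIT)
#undef TRAIT
};

static_assert(ET_Last == ET_IsRValueExpr, "sentinel tracks the last trait");
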
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
new file mode 100644
index 000000000000..6b6789b8ecc8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
@@ -0,0 +1,26 @@
+//===--- FPOptions.def - Floating Point Options database --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// This file defines the Floating Point language options. Users of this file
+// must define the OPTION macro to make use of this information.
+#ifndef OPTION
+# error Define the OPTION macro to handle floating point language options
+#endif
+
+// OPTION(name, type, width, previousName)
+OPTION(FPContractMode, LangOptions::FPModeKind, 2, First)
+OPTION(RoundingMode, RoundingMode, 3, FPContractMode)
+OPTION(FPExceptionMode, LangOptions::FPExceptionModeKind, 2, RoundingMode)
+OPTION(AllowFEnvAccess, bool, 1, FPExceptionMode)
+OPTION(AllowFPReassociate, bool, 1, AllowFEnvAccess)
+OPTION(NoHonorNaNs, bool, 1, AllowFPReassociate)
+OPTION(NoHonorInfs, bool, 1, NoHonorNaNs)
+OPTION(NoSignedZero, bool, 1, NoHonorInfs)
+OPTION(AllowReciprocal, bool, 1, NoSignedZero)
+OPTION(AllowApproxFunc, bool, 1, AllowReciprocal)
+#undef OPTION
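
The chained fourth argument lets each option compute its bit offset from the previous one. A hedged way to visualize the resulting layout is to expand only the shift/width part, mirroring what FPOptions does with this file in LangOptions.h; the asserted values follow from the widths listed above (2 + 3 + 2 + 7 x 1 = 14 bits, which fits the 16-bit storage type).

#include <cstdint>

using storage_type = uint16_t;
static constexpr storage_type FirstShift = 0, FirstWidth = 0;
#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
  static constexpr storage_type NAME##Shift =                                  \
      PREVIOUS##Shift + PREVIOUS##Width;                                       \
  static constexpr storage_type NAME##Width = WIDTH;
#include "clang/Basic/FPOptions.def"

static_assert(RoundingModeShift == 2 && FPExceptionModeShift == 5 &&
                  AllowApproxFuncShift + AllowApproxFuncWidth == 14,
              "FP options as defined above occupy 14 of 16 bits");
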
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Features.def b/contrib/llvm-project/clang/include/clang/Basic/Features.def
index 28eb694ba9a8..999bcb7e2e29 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Features.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Features.def
@@ -36,6 +36,7 @@
#define EXTENSION(Name, Predicate)
#endif
+FEATURE(speculative_load_hardening, LangOpts.SpeculativeLoadHardening)
FEATURE(address_sanitizer,
LangOpts.Sanitize.hasOneOf(SanitizerKind::Address |
SanitizerKind::KernelAddress))
@@ -252,6 +253,8 @@ EXTENSION(overloadable_unmarked, true)
EXTENSION(pragma_clang_attribute_namespaces, true)
EXTENSION(pragma_clang_attribute_external_declaration, true)
EXTENSION(gnu_asm, LangOpts.GNUAsm)
+EXTENSION(gnu_asm_goto_with_outputs, LangOpts.GNUAsm)
+EXTENSION(matrix_types, LangOpts.MatrixTypes)
#undef EXTENSION
#undef FEATURE
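
A hedged example of how the new matrix_types entry is meant to surface in user code through __has_extension; the typedef spelling assumes the -fenable-matrix extension referenced by the diagnostics earlier in this patch.

#ifndef __has_extension
#  define __has_extension(x) 0 // for compilers without the builtin
#endif

#if __has_extension(matrix_types)
typedef float m4x4_t __attribute__((matrix_type(4, 4))); // 4x4 matrix value type
#else
typedef float m4x4_t[4][4]; // plain-array fallback
#endif
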
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
index fed43786d410..089304e1d1e6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
@@ -18,6 +18,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -378,15 +379,19 @@ public:
/// Open the specified file as a MemoryBuffer, returning a new
/// MemoryBuffer if successful, otherwise returning null.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBufferForFile(const FileEntry *Entry, bool isVolatile = false);
+ getBufferForFile(const FileEntry *Entry, bool isVolatile = false,
+ bool RequiresNullTerminator = true);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBufferForFile(StringRef Filename, bool isVolatile = false) {
- return getBufferForFileImpl(Filename, /*FileSize=*/-1, isVolatile);
+ getBufferForFile(StringRef Filename, bool isVolatile = false,
+ bool RequiresNullTerminator = true) {
+ return getBufferForFileImpl(Filename, /*FileSize=*/-1, isVolatile,
+ RequiresNullTerminator);
}
private:
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBufferForFileImpl(StringRef Filename, int64_t FileSize, bool isVolatile);
+ getBufferForFileImpl(StringRef Filename, int64_t FileSize, bool isVolatile,
+ bool RequiresNullTerminator);
public:
/// Get the 'stat' information for the given \p Path.
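
A sketch of the new RequiresNullTerminator parameter: a caller that only inspects a prefix of the file (and never relies on a trailing '\0') can now ask for a buffer without the null-termination requirement. The BOM check is purely illustrative.

#include "clang/Basic/FileManager.h"
#include "llvm/Support/MemoryBuffer.h"

static bool startsWithUTF8BOM(clang::FileManager &FM, llvm::StringRef Path) {
  auto BufOrErr = FM.getBufferForFile(Path, /*isVolatile=*/false,
                                      /*RequiresNullTerminator=*/false);
  if (!BufOrErr)
    return false; // stat/open failure
  return (*BufOrErr)->getBuffer().startswith("\xEF\xBB\xBF");
}
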
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FixedPoint.h b/contrib/llvm-project/clang/include/clang/Basic/FixedPoint.h
index a931e21e18f1..0d181f30907f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FixedPoint.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FixedPoint.h
@@ -28,8 +28,8 @@ class QualType;
/// The fixed point semantics work similarly to llvm::fltSemantics. The width
/// specifies the whole bit width of the underlying scaled integer (with padding
/// if any). The scale represents the number of fractional bits in this type.
-/// When HasUnsignedPadding is true and this type is signed, the first bit
-/// in the value this represents is treaded as padding.
+/// When HasUnsignedPadding is true and this type is unsigned, the first bit
+/// in the value this represents is treated as padding.
class FixedPointSemantics {
public:
FixedPointSemantics(unsigned Width, unsigned Scale, bool IsSigned,
@@ -75,11 +75,11 @@ public:
}
private:
- unsigned Width;
- unsigned Scale;
- bool IsSigned;
- bool IsSaturated;
- bool HasUnsignedPadding;
+ unsigned Width : 16;
+ unsigned Scale : 13;
+ unsigned IsSigned : 1;
+ unsigned IsSaturated : 1;
+ unsigned HasUnsignedPadding : 1;
};
/// The APFixedPoint class works similarly to APInt/APSInt in that it is a
@@ -93,49 +93,52 @@ private:
/// point types and should eventually be moved to LLVM if fixed point types gain
/// native IR support.
class APFixedPoint {
- public:
- APFixedPoint(const llvm::APInt &Val, const FixedPointSemantics &Sema)
- : Val(Val, !Sema.isSigned()), Sema(Sema) {
- assert(Val.getBitWidth() == Sema.getWidth() &&
- "The value should have a bit width that matches the Sema width");
- }
-
- APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
- : APFixedPoint(llvm::APInt(Sema.getWidth(), Val, Sema.isSigned()),
- Sema) {}
-
- // Zero initialization.
- APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
-
- llvm::APSInt getValue() const { return llvm::APSInt(Val, !Sema.isSigned()); }
- inline unsigned getWidth() const { return Sema.getWidth(); }
- inline unsigned getScale() const { return Sema.getScale(); }
- inline bool isSaturated() const { return Sema.isSaturated(); }
- inline bool isSigned() const { return Sema.isSigned(); }
- inline bool hasPadding() const { return Sema.hasUnsignedPadding(); }
- FixedPointSemantics getSemantics() const { return Sema; }
-
- bool getBoolValue() const { return Val.getBoolValue(); }
-
- // Convert this number to match the semantics provided. If the overflow
- // parameter is provided, set this value to true or false to indicate if this
- // operation results in an overflow.
- APFixedPoint convert(const FixedPointSemantics &DstSema,
- bool *Overflow = nullptr) const;
-
- // Perform binary operations on a fixed point type. The resulting fixed point
- // value will be in the common, full precision semantics that can represent
- // the precision and ranges os both input values. See convert() for an
- // explanation of the Overflow parameter.
- APFixedPoint add(const APFixedPoint &Other, bool *Overflow = nullptr) const;
-
- /// Perform a unary negation (-X) on this fixed point type, taking into
- /// account saturation if applicable.
- APFixedPoint negate(bool *Overflow = nullptr) const;
-
- APFixedPoint shr(unsigned Amt) const {
- return APFixedPoint(Val >> Amt, Sema);
- }
+public:
+ APFixedPoint(const llvm::APInt &Val, const FixedPointSemantics &Sema)
+ : Val(Val, !Sema.isSigned()), Sema(Sema) {
+ assert(Val.getBitWidth() == Sema.getWidth() &&
+ "The value should have a bit width that matches the Sema width");
+ }
+
+ APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
+ : APFixedPoint(llvm::APInt(Sema.getWidth(), Val, Sema.isSigned()),
+ Sema) {}
+
+ // Zero initialization.
+ APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
+
+ llvm::APSInt getValue() const { return llvm::APSInt(Val, !Sema.isSigned()); }
+ inline unsigned getWidth() const { return Sema.getWidth(); }
+ inline unsigned getScale() const { return Sema.getScale(); }
+ inline bool isSaturated() const { return Sema.isSaturated(); }
+ inline bool isSigned() const { return Sema.isSigned(); }
+ inline bool hasPadding() const { return Sema.hasUnsignedPadding(); }
+ FixedPointSemantics getSemantics() const { return Sema; }
+
+ bool getBoolValue() const { return Val.getBoolValue(); }
+
+ // Convert this number to match the semantics provided. If the overflow
+ // parameter is provided, set this value to true or false to indicate if this
+ // operation results in an overflow.
+ APFixedPoint convert(const FixedPointSemantics &DstSema,
+ bool *Overflow = nullptr) const;
+
+ // Perform binary operations on a fixed point type. The resulting fixed point
+ // value will be in the common, full precision semantics that can represent
+ // the precision and ranges of both input values. See convert() for an
+ // explanation of the Overflow parameter.
+ APFixedPoint add(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint sub(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint mul(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint div(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+
+ /// Perform a unary negation (-X) on this fixed point type, taking into
+ /// account saturation if applicable.
+ APFixedPoint negate(bool *Overflow = nullptr) const;
+
+ APFixedPoint shr(unsigned Amt) const {
+ return APFixedPoint(Val >> Amt, Sema);
+ }
APFixedPoint shl(unsigned Amt) const {
return APFixedPoint(Val << Amt, Sema);
@@ -165,7 +168,7 @@ class APFixedPoint {
std::string toString() const {
llvm::SmallString<40> S;
toString(S);
- return S.str();
+ return std::string(S.str());
}
// If LHS > RHS, return 1. If LHS == RHS, return 0. If LHS < RHS, return -1.
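
Illustrative use of the newly declared arithmetic entry points (sub/mul/div), assuming a signed 32-bit fixed-point format with 16 fractional bits; the constants are chosen so the product is exact.

#include "clang/Basic/FixedPoint.h"
#include "llvm/ADT/APInt.h"

static bool fixedPointMulExample() {
  using namespace clang;
  FixedPointSemantics Sema(/*Width=*/32, /*Scale=*/16, /*IsSigned=*/true,
                           /*IsSaturated=*/false, /*HasUnsignedPadding=*/false);
  APFixedPoint Three(llvm::APInt(32, 3u << 16), Sema); // 3.0 in Q15.16
  APFixedPoint Two(llvm::APInt(32, 2u << 16), Sema);   // 2.0 in Q15.16
  bool Overflow = false;
  APFixedPoint Six = Three.mul(Two, &Overflow);        // 6.0, no overflow
  return !Overflow && Six.getValue() == (6 << 16);
}
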
diff --git a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
index ea5d7adeb2da..fc554a35e721 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
@@ -48,6 +48,8 @@ using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
/// of a pointer to one of these classes.
enum { IdentifierInfoAlignment = 8 };
+static constexpr int ObjCOrBuiltinIDBits = 15;
+
/// One of these records is kept for each identifier that
/// is lexed. This contains information about whether the token was \#define'd,
/// is a language keyword, or if it is a front-end token of some sort (e.g. a
@@ -63,7 +65,7 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
// ObjC keyword ('protocol' in '@protocol') or builtin (__builtin_inf).
// First NUM_OBJC_KEYWORDS values are for Objective-C,
// the remaining values are for builtins.
- unsigned ObjCOrBuiltinID : 13;
+ unsigned ObjCOrBuiltinID : ObjCOrBuiltinIDBits;
// True if there is a #define for this.
unsigned HasMacro : 1;
@@ -108,7 +110,10 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
// True if this is the 'import' contextual keyword.
unsigned IsModulesImport : 1;
- // 29 bits left in a 64-bit word.
+ // True if this is a mangled OpenMP variant name.
+ unsigned IsMangledOpenMPVariantName : 1;
+
+ // 28 bits left in a 64-bit word.
// Managed by the language front-end.
void *FETokenInfo = nullptr;
@@ -121,7 +126,7 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
IsPoisoned(false), IsCPPOperatorKeyword(false),
NeedsHandleIdentifier(false), IsFromAST(false), ChangedAfterLoad(false),
FEChangedAfterLoad(false), RevertedTokenID(false), OutOfDate(false),
- IsModulesImport(false) {}
+ IsModulesImport(false), IsMangledOpenMPVariantName(false) {}
public:
IdentifierInfo(const IdentifierInfo &) = delete;
@@ -371,6 +376,12 @@ public:
RecomputeNeedsHandleIdentifier();
}
+ /// Determine whether this is the mangled name of an OpenMP variant.
+ bool isMangledOpenMPVariantName() const { return IsMangledOpenMPVariantName; }
+
+ /// Set whether this is the mangled name of an OpenMP variant.
+ void setMangledOpenMPVariantName(bool I) { IsMangledOpenMPVariantName = I; }
+
/// Return true if this identifier is an editor placeholder.
///
/// Editor placeholders are produced by the code-completion engine and are
@@ -967,7 +978,7 @@ struct PointerLikeTypeTraits<clang::Selector> {
return clang::Selector(reinterpret_cast<uintptr_t>(P));
}
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
};
// Provide PointerLikeTypeTraits for IdentifierInfo pointers, which
@@ -982,7 +993,7 @@ struct PointerLikeTypeTraits<clang::IdentifierInfo*> {
return static_cast<clang::IdentifierInfo*>(P);
}
- enum { NumLowBitsAvailable = 1 };
+ static constexpr int NumLowBitsAvailable = 1;
};
template<>
@@ -995,7 +1006,7 @@ struct PointerLikeTypeTraits<const clang::IdentifierInfo*> {
return static_cast<const clang::IdentifierInfo*>(P);
}
- enum { NumLowBitsAvailable = 1 };
+ static constexpr int NumLowBitsAvailable = 1;
};
} // namespace llvm
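
What the NumLowBitsAvailable constants above enable, sketched with llvm::PointerIntPair: with one spare low bit declared for IdentifierInfo pointers, a boolean flag can be packed alongside the pointer at no extra cost. The "seen" flag is an illustrative use, not an existing API.

#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/PointerIntPair.h"

// One tag bit is available per the PointerLikeTypeTraits specialization above.
using IdentifierWithFlag = llvm::PointerIntPair<clang::IdentifierInfo *, 1, bool>;

static bool markSeen(IdentifierWithFlag &P) {
  bool WasSeen = P.getInt();
  P.setInt(true);
  return WasSeen;
}
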
diff --git a/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h b/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
index bbcc747e6847..8b02e440df44 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
@@ -13,7 +13,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
-
+#include <iterator>
namespace clang {
@@ -97,9 +97,22 @@ inline void printSourceLocationAsJson(raw_ostream &Out, SourceLocation Loc,
// The macro expansion and spelling pos is identical for file locs.
if (AddBraces)
Out << "{ ";
+ std::string filename(PLoc.getFilename());
+#ifdef _WIN32
+ // Remove forbidden Windows path characters
+ auto RemoveIt =
+ std::remove_if(filename.begin(), filename.end(), [](auto Char) {
+ static const char ForbiddenChars[] = "<>*?\"|";
+ return std::find(std::begin(ForbiddenChars), std::end(ForbiddenChars),
+ Char) != std::end(ForbiddenChars);
+ });
+ filename.erase(RemoveIt, filename.end());
+ // Handle windows-specific path delimiters.
+ std::replace(filename.begin(), filename.end(), '\\', '/');
+#endif
Out << "\"line\": " << PLoc.getLine()
<< ", \"column\": " << PLoc.getColumn()
- << ", \"file\": \"" << PLoc.getFilename() << "\"";
+ << ", \"file\": \"" << filename << "\"";
if (AddBraces)
Out << " }";
return;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
index 3319a3123976..70f68d664bb7 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
@@ -91,7 +91,7 @@ LANGOPT(CPlusPlus , 1, 0, "C++")
LANGOPT(CPlusPlus11 , 1, 0, "C++11")
LANGOPT(CPlusPlus14 , 1, 0, "C++14")
LANGOPT(CPlusPlus17 , 1, 0, "C++17")
-LANGOPT(CPlusPlus2a , 1, 0, "C++2a")
+LANGOPT(CPlusPlus20 , 1, 0, "C++20")
LANGOPT(ObjC , 1, 0, "Objective-C")
BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
@@ -132,6 +132,7 @@ LANGOPT(DWARFExceptions , 1, 0, "dwarf exception handling")
LANGOPT(SjLjExceptions , 1, 0, "setjmp-longjump exception handling")
LANGOPT(SEHExceptions , 1, 0, "SEH .xdata exception handling")
LANGOPT(WasmExceptions , 1, 0, "WebAssembly exception handling")
+LANGOPT(IgnoreExceptions , 1, 0, "ignore exceptions")
LANGOPT(ExternCNoUnwind , 1, 0, "Assume extern C functions don't unwind")
LANGOPT(TraditionalCPP , 1, 0, "traditional CPP emulation")
LANGOPT(RTTI , 1, 1, "run-time type information")
@@ -147,6 +148,9 @@ LANGOPT(RelaxedTemplateTemplateArgs, 1, 0, "C++17 relaxed matching of template t
LANGOPT(DoubleSquareBracketAttributes, 1, 0, "'[[]]' attributes extension for all language standard modes")
+COMPATIBLE_LANGOPT(RecoveryAST, 1, 0, "Preserve expressions in AST when encountering errors")
+COMPATIBLE_LANGOPT(RecoveryASTType, 1, 0, "Preserve the type in recovery expressions")
+
BENIGN_LANGOPT(ThreadsafeStatics , 1, 1, "thread-safe static initializers")
LANGOPT(POSIXThreads , 1, 0, "POSIX thread support")
LANGOPT(Blocks , 1, 0, "blocks extension to C")
@@ -161,6 +165,7 @@ BENIGN_ENUM_LANGOPT(CompilingModule, CompilingModuleKind, 2, CMK_None,
BENIGN_LANGOPT(CompilingPCH, 1, 0, "building a pch")
BENIGN_LANGOPT(BuildingPCHWithObjectFile, 1, 0, "building a pch which has a corresponding object file")
BENIGN_LANGOPT(CacheGeneratedPCH, 1, 0, "cache generated PCH files in memory")
+BENIGN_LANGOPT(PCHInstantiateTemplates, 1, 0, "instantiate templates while building a PCH")
COMPATIBLE_LANGOPT(ModulesDeclUse , 1, 0, "require declaration of module uses")
BENIGN_LANGOPT(ModulesSearchAll , 1, 1, "searching even non-imported modules to find unresolved references")
COMPATIBLE_LANGOPT(ModulesStrictDeclUse, 1, 0, "requiring declaration of module uses and all headers to be in modules")
@@ -175,6 +180,7 @@ VALUE_LANGOPT(PackStruct , 32, 0,
VALUE_LANGOPT(MaxTypeAlign , 32, 0,
"default maximum alignment for types")
VALUE_LANGOPT(AlignDouble , 1, 0, "Controls if doubles should be aligned to 8 bytes (x86 only)")
+VALUE_LANGOPT(DoubleSize , 32, 0, "width of double")
VALUE_LANGOPT(LongDoubleSize , 32, 0, "width of long double")
LANGOPT(PPCIEEELongDouble , 1, 0, "use IEEE 754 quadruple-precision for long double")
COMPATIBLE_VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
@@ -187,6 +193,12 @@ COMPATIBLE_LANGOPT(Deprecated , 1, 0, "__DEPRECATED predefined macro")
COMPATIBLE_LANGOPT(FastMath , 1, 0, "fast FP math optimizations, and __FAST_MATH__ predefined macro")
COMPATIBLE_LANGOPT(FiniteMathOnly , 1, 0, "__FINITE_MATH_ONLY__ predefined macro")
COMPATIBLE_LANGOPT(UnsafeFPMath , 1, 0, "Unsafe Floating Point Math")
+BENIGN_LANGOPT(AllowFPReassoc , 1, 0, "Permit Floating Point reassociation")
+BENIGN_LANGOPT(NoHonorNaNs , 1, 0, "Permit Floating Point optimization without regard to NaN")
+BENIGN_LANGOPT(NoHonorInfs , 1, 0, "Permit Floating Point optimization without regard to infinities")
+BENIGN_LANGOPT(NoSignedZero , 1, 0, "Permit Floating Point optimization without regard to signed zeros")
+BENIGN_LANGOPT(AllowRecip , 1, 0, "Permit Floating Point reciprocal")
+BENIGN_LANGOPT(ApproxFunc , 1, 0, "Permit Floating Point approximation")
BENIGN_LANGOPT(ObjCGCBitmapPrint , 1, 0, "printing of GC's bitmap layout for __weak/__strong ivars")
@@ -219,6 +231,7 @@ LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
LANGOPT(OpenMPCUDABlocksPerSM , 32, 0, "Number of blocks per SM for CUDA devices.")
LANGOPT(OpenMPCUDAReductionBufNum , 32, 1024, "Number of the reduction records in the intermediate reduction buffer used for the teams reductions.")
LANGOPT(OpenMPOptimisticCollapse , 1, 0, "Use at most 32 bits to represent the collapsed loop nest counter.")
+LANGOPT(OpenMPCUDATargetParallel, 1, 0, "Support parallel execution of target region on Cuda-based devices.")
LANGOPT(RenderScript , 1, 0, "RenderScript")
LANGOPT(CUDAIsDevice , 1, 0, "compiling for CUDA device")
@@ -227,9 +240,11 @@ LANGOPT(CUDAHostDeviceConstexpr, 1, 1, "treating unattributed constexpr function
LANGOPT(CUDADeviceApproxTranscendentals, 1, 0, "using approximate transcendental functions")
LANGOPT(GPURelocatableDeviceCode, 1, 0, "generate relocatable device code")
LANGOPT(GPUAllowDeviceInit, 1, 0, "allowing device side global init functions for HIP")
-LANGOPT(GPUMaxThreadsPerBlock, 32, 256, "default max threads per block for kernel launch bounds for HIP")
+LANGOPT(GPUMaxThreadsPerBlock, 32, 1024, "default max threads per block for kernel launch bounds for HIP")
+LANGOPT(SYCL , 1, 0, "SYCL")
LANGOPT(SYCLIsDevice , 1, 0, "Generate code for SYCL device")
+LANGOPT(SYCLVersion , 32, 0, "Version of the SYCL standard used")
LANGOPT(HIPUseNewLaunchAPI, 1, 0, "Use new kernel launching API for HIP")
@@ -237,7 +252,7 @@ LANGOPT(SizedDeallocation , 1, 0, "sized deallocation")
LANGOPT(AlignedAllocation , 1, 0, "aligned allocation")
LANGOPT(AlignedAllocationUnavailable, 1, 0, "aligned allocation functions are unavailable")
LANGOPT(NewAlignOverride , 32, 0, "maximum alignment guaranteed by '::operator new(size_t)'")
-LANGOPT(ConceptSatisfactionCaching , 1, 1, "enable satisfaction caching for C++2a Concepts")
+LANGOPT(ConceptSatisfactionCaching , 1, 1, "enable satisfaction caching for C++20 Concepts")
BENIGN_LANGOPT(ModulesCodegen , 1, 0, "Modules code generation")
BENIGN_LANGOPT(ModulesDebugInfo , 1, 0, "Modules debug info")
BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
@@ -256,15 +271,19 @@ BENIGN_LANGOPT(SpellChecking , 1, 1, "spell-checking")
LANGOPT(SinglePrecisionConstants , 1, 0, "treating double-precision floating point constants as single precision constants")
LANGOPT(FastRelaxedMath , 1, 0, "OpenCL fast relaxed math")
/// FP_CONTRACT mode (on/off/fast).
-ENUM_LANGOPT(DefaultFPContractMode, FPContractModeKind, 2, FPC_Off, "FP contraction type")
-ENUM_LANGOPT(FPRoundingMode, FPRoundingModeKind, 3, FPR_ToNearest, "FP Rounding Mode type")
-ENUM_LANGOPT(FPExceptionMode, FPExceptionModeKind, 2, FPE_Ignore, "FP Exception Behavior Mode type")
+BENIGN_ENUM_LANGOPT(DefaultFPContractMode, FPModeKind, 2, FPM_Off, "FP contraction type")
+COMPATIBLE_LANGOPT(ExpStrictFP, 1, false, "Enable experimental strict floating point")
+BENIGN_ENUM_LANGOPT(FPRoundingMode, RoundingMode, 3, RoundingMode::NearestTiesToEven, "FP Rounding Mode type")
+BENIGN_ENUM_LANGOPT(FPExceptionMode, FPExceptionModeKind, 2, FPE_Ignore, "FP Exception Behavior Mode type")
LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
LANGOPT(ObjCWeakRuntime , 1, 0, "__weak support in the ARC runtime")
LANGOPT(ObjCWeak , 1, 0, "Objective-C __weak in ARC and MRC files")
LANGOPT(ObjCSubscriptingLegacyRuntime , 1, 0, "Subscripting support in legacy ObjectiveC runtime")
+BENIGN_LANGOPT(CompatibilityQualifiedIdBlockParamTypeChecking, 1, 0,
+ "compatibility mode for type checking block parameters "
+ "involving qualified id types")
LANGOPT(CFProtectionBranch , 1, 0, "Control-Flow Branch Protection enabled")
LANGOPT(FakeAddressSpaceMap , 1, 0, "OpenCL fake address space map")
ENUM_LANGOPT(AddressSpaceMapMangling , AddrSpaceMapMangling, 2, ASMM_Target, "OpenCL address space map mangling mode")
@@ -284,10 +303,14 @@ ENUM_LANGOPT(TypeVisibilityMode, Visibility, 3, DefaultVisibility,
"default visibility for types [-ftype-visibility]")
LANGOPT(SetVisibilityForExternDecls, 1, 0,
"apply global symbol visibility to external declarations without an explicit visibility")
+BENIGN_LANGOPT(SemanticInterposition , 1, 0, "semantic interposition")
+BENIGN_LANGOPT(ExplicitNoSemanticInterposition, 1, 0, "explicitly no semantic interposition")
ENUM_LANGOPT(StackProtector, StackProtectorMode, 2, SSPOff,
"stack protector mode")
ENUM_LANGOPT(TrivialAutoVarInit, TrivialAutoVarInitKind, 2, TrivialAutoVarInitKind::Uninitialized,
"trivial automatic variable initialization")
+VALUE_LANGOPT(TrivialAutoVarInitStopAfter, 32, 0,
+ "stop trivial automatic variable initialization after the specified number of instances. Must be greater than 0.")
ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 2, SOB_Undefined,
"signed integer overflow handling")
@@ -344,6 +367,21 @@ LANGOPT(PaddingOnUnsignedFixedPoint, 1, 0,
LANGOPT(RegisterStaticDestructors, 1, 1, "Register C++ static destructors")
+LANGOPT(MatrixTypes, 1, 0, "Enable or disable the builtin matrix type")
+
+COMPATIBLE_VALUE_LANGOPT(MaxTokens, 32, 0, "Max number of tokens per TU or 0")
+
+ENUM_LANGOPT(SignReturnAddressScope, SignReturnAddressScopeKind, 2, SignReturnAddressScopeKind::None,
+ "Scope of return address signing")
+ENUM_LANGOPT(SignReturnAddressKey, SignReturnAddressKeyKind, 1, SignReturnAddressKeyKind::AKey,
+ "Key used for return address signing")
+LANGOPT(BranchTargetEnforcement, 1, 0, "Branch-target enforcement enabled")
+
+LANGOPT(SpeculativeLoadHardening, 1, 0, "Speculative load hardening enabled")
+
+LANGOPT(RelativeCXXABIVTables, 1, 0,
+ "Use an ABI-incompatible v-table layout that uses relative references")
+
#undef LANGOPT
#undef COMPATIBLE_LANGOPT
#undef BENIGN_LANGOPT
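
A simplified sketch of how these entries are consumed: LangOptionsBase defines LANGOPT before including this file, and the .def's own #ifndef fallbacks map the COMPATIBLE_/BENIGN_/ENUM_/VALUE_ variants back onto it, so every line above becomes a bit-field of the stated width (the real class additionally generates typed getters for the ENUM_ entries).

class MyLangOptionsBase {
public:
  // e.g. MatrixTypes and SpeculativeLoadHardening from the hunk above each
  // expand to a 1-bit flag; DoubleSize expands to a 32-bit value.
#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
#include "clang/Basic/LangOptions.def"
};
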
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
index ae4a4b2b9e87..a9213b7d8668 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
@@ -19,6 +19,7 @@
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/Visibility.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include <string>
@@ -53,6 +54,7 @@ enum class MSVtorDispMode { Never, ForVBaseOverride, ForVFTable };
class LangOptions : public LangOptionsBase {
public:
using Visibility = clang::Visibility;
+ using RoundingMode = llvm::RoundingMode;
enum GCMode { NonGC, GCOnly, HybridGC };
enum StackProtectorMode { SSPOff, SSPOn, SSPStrong, SSPReq };
@@ -172,41 +174,20 @@ public:
Swift4_1,
};
- enum FPContractModeKind {
- // Form fused FP ops only where result will not be affected.
- FPC_Off,
+ enum FPModeKind {
+ // Disable the floating point pragma
+ FPM_Off,
- // Form fused FP ops according to FP_CONTRACT rules.
- FPC_On,
+ // Enable the floating point pragma
+ FPM_On,
// Aggressively fuse FP ops (E.g. FMA).
- FPC_Fast
+ FPM_Fast
};
- // TODO: merge FEnvAccessModeKind and FPContractModeKind
- enum FEnvAccessModeKind {
- FEA_Off,
-
- FEA_On
- };
-
- // Values of the following enumerations correspond to metadata arguments
- // specified for constrained floating-point intrinsics:
- // http://llvm.org/docs/LangRef.html#constrained-floating-point-intrinsics.
-
- /// Possible rounding modes.
- enum FPRoundingModeKind {
- /// Rounding to nearest, corresponds to "round.tonearest".
- FPR_ToNearest,
- /// Rounding toward -Inf, corresponds to "round.downward".
- FPR_Downward,
- /// Rounding toward +Inf, corresponds to "round.upward".
- FPR_Upward,
- /// Rounding toward zero, corresponds to "round.towardzero".
- FPR_TowardZero,
- /// Is determined by runtime environment, corresponds to "round.dynamic".
- FPR_Dynamic
- };
+ /// Alias for RoundingMode::NearestTiesToEven.
+ static constexpr unsigned FPR_ToNearest =
+ static_cast<unsigned>(llvm::RoundingMode::NearestTiesToEven);
/// Possible floating point exception behavior.
enum FPExceptionModeKind {
@@ -229,6 +210,22 @@ public:
All,
};
+ enum class SignReturnAddressScopeKind {
+ /// No signing for any function.
+ None,
+ /// Sign the return address of functions that spill LR.
+ NonLeaf,
+ /// Sign the return address of all functions,
+ All
+ };
+
+ enum class SignReturnAddressKeyKind {
+ /// Return address signing uses APIA key.
+ AKey,
+ /// Return address signing uses APIB key.
+ BKey
+ };
+
public:
/// Set of enabled sanitizers.
SanitizerSet Sanitize;
@@ -351,61 +348,193 @@ public:
/// Return the OpenCL C or C++ version as a VersionTuple.
VersionTuple getOpenCLVersionTuple() const;
+
+ /// Check if return address signing is enabled.
+ bool hasSignReturnAddress() const {
+ return getSignReturnAddressScope() != SignReturnAddressScopeKind::None;
+ }
+
+ /// Check if return address signing uses AKey.
+ bool isSignReturnAddressWithAKey() const {
+ return getSignReturnAddressKey() == SignReturnAddressKeyKind::AKey;
+ }
+
+ /// Check if leaf functions are also signed.
+ bool isSignReturnAddressScopeAll() const {
+ return getSignReturnAddressScope() == SignReturnAddressScopeKind::All;
+ }
};
/// Floating point control options
+class FPOptionsOverride;
class FPOptions {
public:
- FPOptions() : fp_contract(LangOptions::FPC_Off),
- fenv_access(LangOptions::FEA_Off) {}
+ // We start by defining the layout.
+ using storage_type = uint16_t;
+
+ using RoundingMode = llvm::RoundingMode;
+
+ // Define a fake option named "First" so that we have a PREVIOUS even for the
+ // real first option.
+ static constexpr storage_type FirstShift = 0, FirstWidth = 0;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ static constexpr storage_type NAME##Shift = \
+ PREVIOUS##Shift + PREVIOUS##Width; \
+ static constexpr storage_type NAME##Width = WIDTH; \
+ static constexpr storage_type NAME##Mask = ((1 << NAME##Width) - 1) \
+ << NAME##Shift;
+#include "clang/Basic/FPOptions.def"
- // Used for serializing.
- explicit FPOptions(unsigned I)
- : fp_contract(static_cast<LangOptions::FPContractModeKind>(I & 3)),
- fenv_access(static_cast<LangOptions::FEnvAccessModeKind>((I >> 2) & 1))
- {}
+private:
+ storage_type Value;
- explicit FPOptions(const LangOptions &LangOpts)
- : fp_contract(LangOpts.getDefaultFPContractMode()),
- fenv_access(LangOptions::FEA_Off) {}
- // FIXME: Use getDefaultFEnvAccessMode() when available.
+public:
+ FPOptions() : Value(0) {
+ setFPContractMode(LangOptions::FPM_Off);
+ setRoundingMode(static_cast<RoundingMode>(LangOptions::FPR_ToNearest));
+ setFPExceptionMode(LangOptions::FPE_Ignore);
+ }
+ // Used for serializing.
+ explicit FPOptions(unsigned I) { getFromOpaqueInt(I); }
+
+ explicit FPOptions(const LangOptions &LO) {
+ Value = 0;
+ setFPContractMode(LO.getDefaultFPContractMode());
+ setRoundingMode(LO.getFPRoundingMode());
+ setFPExceptionMode(LO.getFPExceptionMode());
+    setAllowFEnvAccess(LangOptions::FPM_Off);
+ setAllowFPReassociate(LO.AllowFPReassoc);
+ setNoHonorNaNs(LO.NoHonorNaNs);
+ setNoHonorInfs(LO.NoHonorInfs);
+ setNoSignedZero(LO.NoSignedZero);
+ setAllowReciprocal(LO.AllowRecip);
+ setAllowApproxFunc(LO.ApproxFunc);
+ }
bool allowFPContractWithinStatement() const {
- return fp_contract == LangOptions::FPC_On;
+ return getFPContractMode() == LangOptions::FPM_On;
+ }
+ void setAllowFPContractWithinStatement() {
+ setFPContractMode(LangOptions::FPM_On);
}
bool allowFPContractAcrossStatement() const {
- return fp_contract == LangOptions::FPC_Fast;
+ return getFPContractMode() == LangOptions::FPM_Fast;
}
+ void setAllowFPContractAcrossStatement() {
+ setFPContractMode(LangOptions::FPM_Fast);
+ }
+
+ bool isFPConstrained() const {
+ return getRoundingMode() !=
+ static_cast<unsigned>(RoundingMode::NearestTiesToEven) ||
+ getFPExceptionMode() != LangOptions::FPE_Ignore ||
+ getAllowFEnvAccess();
+ }
+
+ bool operator==(FPOptions other) const { return Value == other.Value; }
+
+ /// Return the default value of FPOptions that's used when trailing
+ /// storage isn't required.
+ static FPOptions defaultWithoutTrailingStorage(const LangOptions &LO);
+
+ storage_type getAsOpaqueInt() const { return Value; }
+ void getFromOpaqueInt(storage_type value) { Value = value; }
+
+ // We can define most of the accessors automatically:
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ unsigned get##NAME() const { \
+ return static_cast<unsigned>(TYPE((Value & NAME##Mask) >> NAME##Shift)); \
+ } \
+ void set##NAME(TYPE value) { \
+ Value = (Value & ~NAME##Mask) | (storage_type(value) << NAME##Shift); \
+ }
+#include "clang/Basic/FPOptions.def"
+ LLVM_DUMP_METHOD void dump();
+};
+
+/// The FPOptions override type is value of the new FPOptions
+/// plus a mask showing which fields are actually set in it:
+class FPOptionsOverride {
+ FPOptions Options;
+ FPOptions::storage_type OverrideMask = 0;
+
+public:
+ using RoundingMode = llvm::RoundingMode;
+ FPOptionsOverride() {}
+
+ // Used for serializing.
+ explicit FPOptionsOverride(unsigned I) { getFromOpaqueInt(I); }
+
+ bool requiresTrailingStorage() const { return OverrideMask != 0; }
void setAllowFPContractWithinStatement() {
- fp_contract = LangOptions::FPC_On;
+ setFPContractModeOverride(LangOptions::FPM_On);
}
void setAllowFPContractAcrossStatement() {
- fp_contract = LangOptions::FPC_Fast;
+ setFPContractModeOverride(LangOptions::FPM_Fast);
}
- void setDisallowFPContract() { fp_contract = LangOptions::FPC_Off; }
-
- bool allowFEnvAccess() const {
- return fenv_access == LangOptions::FEA_On;
+ void setDisallowFPContract() {
+ setFPContractModeOverride(LangOptions::FPM_Off);
}
- void setAllowFEnvAccess() {
- fenv_access = LangOptions::FEA_On;
+ void setFPPreciseEnabled(bool Value) {
+ setAllowFPReassociateOverride(!Value);
+ setNoHonorNaNsOverride(!Value);
+ setNoHonorInfsOverride(!Value);
+ setNoSignedZeroOverride(!Value);
+ setAllowReciprocalOverride(!Value);
+ setAllowApproxFuncOverride(!Value);
+ if (Value)
+ /* Precise mode implies fp_contract=on and disables ffast-math */
+ setAllowFPContractWithinStatement();
+ else
+ /* Precise mode disabled sets fp_contract=fast and enables ffast-math */
+ setAllowFPContractAcrossStatement();
}
- void setDisallowFEnvAccess() { fenv_access = LangOptions::FEA_Off; }
+ unsigned getAsOpaqueInt() const {
+ return Options.getAsOpaqueInt() << 16 | OverrideMask;
+ }
+ void getFromOpaqueInt(unsigned I) {
+ OverrideMask = I & 0xffff;
+ Options.getFromOpaqueInt(I >> 16);
+ }
- /// Used to serialize this.
- unsigned getInt() const { return fp_contract | (fenv_access << 2); }
+ FPOptions applyOverrides(const LangOptions &LO) {
+ FPOptions Base(LO);
+ FPOptions result((Base.getAsOpaqueInt() & ~OverrideMask) |
+ (Options.getAsOpaqueInt() & OverrideMask));
+ return result;
+ }
-private:
- /// Adjust BinaryOperator::FPFeatures to match the total bit-field size
- /// of these two.
- unsigned fp_contract : 2;
- unsigned fenv_access : 1;
+ bool operator==(FPOptionsOverride other) const {
+ return Options == other.Options && OverrideMask == other.OverrideMask;
+ }
+ bool operator!=(FPOptionsOverride other) const { return !(*this == other); }
+
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ bool has##NAME##Override() const { \
+ return OverrideMask & FPOptions::NAME##Mask; \
+ } \
+ unsigned get##NAME##Override() const { \
+ assert(has##NAME##Override()); \
+ return Options.get##NAME(); \
+ } \
+ void clear##NAME##Override() { \
+ /* Clear the actual value so that we don't have spurious differences when \
+ * testing equality. */ \
+ Options.set##NAME(TYPE(0)); \
+ OverrideMask &= ~FPOptions::NAME##Mask; \
+ } \
+ void set##NAME##Override(TYPE value) { \
+ Options.set##NAME(value); \
+ OverrideMask |= FPOptions::NAME##Mask; \
+ }
+#include "clang/Basic/FPOptions.def"
+ LLVM_DUMP_METHOD void dump();
};
/// Describes the kind of translation unit being processed.
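
A short sketch of how the override type is intended to compose with the translation-unit defaults: FPOptionsOverride records a candidate FPOptions value plus a mask of the fields that were explicitly set, and applyOverrides() splices only those fields over the LangOptions-derived baseline. The pragma in the comment is the motivating use, not part of this function.

#include "clang/Basic/LangOptions.h"

static clang::FPOptions fpOptionsForFastContract(const clang::LangOptions &LO) {
  clang::FPOptionsOverride Override;
  Override.setAllowFPContractAcrossStatement(); // as '#pragma clang fp contract(fast)' would
  return Override.applyOverrides(LO);           // only FPContractMode is overridden
}
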
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
index e7deb7d64638..ad7f7510b234 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
@@ -48,7 +48,7 @@ enum LangFeatures {
CPlusPlus11 = (1 << 6),
CPlusPlus14 = (1 << 7),
CPlusPlus17 = (1 << 8),
- CPlusPlus2a = (1 << 9),
+ CPlusPlus20 = (1 << 9),
Digraphs = (1 << 10),
GNUMode = (1 << 11),
HexFloat = (1 << 12),
@@ -108,8 +108,8 @@ public:
/// isCPlusPlus17 - Language is a C++17 variant (or later).
bool isCPlusPlus17() const { return Flags & CPlusPlus17; }
- /// isCPlusPlus2a - Language is a post-C++17 variant (or later).
- bool isCPlusPlus2a() const { return Flags & CPlusPlus2a; }
+ /// isCPlusPlus20 - Language is a C++20 variant (or later).
+ bool isCPlusPlus20() const { return Flags & CPlusPlus20; }
/// hasDigraphs - Language supports digraphs.
bool hasDigraphs() const { return Flags & Digraphs; }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
index 7f1a24db7e9b..b09568e8b3e8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
@@ -143,13 +143,13 @@ LANGSTANDARD_ALIAS_DEPR(gnucxx17, "gnu++1z")
LANGSTANDARD(cxx20, "c++20",
CXX, "ISO C++ 2020 DIS",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus2a | Digraphs | HexFloat)
+ CPlusPlus20 | Digraphs | HexFloat)
LANGSTANDARD_ALIAS_DEPR(cxx20, "c++2a")
LANGSTANDARD(gnucxx20, "gnu++20",
CXX, "ISO C++ 2020 DIS with GNU extensions",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus2a | Digraphs | HexFloat | GNUMode)
+ CPlusPlus20 | Digraphs | HexFloat | GNUMode)
LANGSTANDARD_ALIAS_DEPR(gnucxx20, "gnu++2a")
// OpenCL
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Module.h b/contrib/llvm-project/clang/include/clang/Basic/Module.h
index 0f2549f09943..6b932a9a84d0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Module.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Module.h
@@ -15,16 +15,14 @@
#ifndef LLVM_CLANG_BASIC_MODULE_H
#define LLVM_CLANG_BASIC_MODULE_H
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
@@ -32,6 +30,7 @@
#include <cassert>
#include <cstdint>
#include <ctime>
+#include <iterator>
#include <string>
#include <utility>
#include <vector>
@@ -44,6 +43,9 @@ class raw_ostream;
namespace clang {
+class DirectoryEntry;
+class FileEntry;
+class FileManager;
class LangOptions;
class TargetInfo;
@@ -51,12 +53,33 @@ class TargetInfo;
using ModuleId = SmallVector<std::pair<std::string, SourceLocation>, 2>;
/// The signature of a module, which is a hash of the AST content.
-struct ASTFileSignature : std::array<uint32_t, 5> {
- ASTFileSignature(std::array<uint32_t, 5> S = {{0}})
- : std::array<uint32_t, 5>(std::move(S)) {}
+struct ASTFileSignature : std::array<uint8_t, 20> {
+ using BaseT = std::array<uint8_t, 20>;
+
+ static constexpr size_t size = std::tuple_size<BaseT>::value;
+
+ ASTFileSignature(BaseT S = {{0}}) : BaseT(std::move(S)) {}
- explicit operator bool() const {
- return *this != std::array<uint32_t, 5>({{0}});
+ explicit operator bool() const { return *this != BaseT({{0}}); }
+
+ static ASTFileSignature create(StringRef Bytes) {
+ return create(Bytes.bytes_begin(), Bytes.bytes_end());
+ }
+
+ static ASTFileSignature createDISentinel() {
+ ASTFileSignature Sentinel;
+ Sentinel.fill(0xFF);
+ return Sentinel;
+ }
+
+ template <typename InputIt>
+ static ASTFileSignature create(InputIt First, InputIt Last) {
+ assert(std::distance(First, Last) == size &&
+ "Wrong amount of bytes to create an ASTFileSignature");
+
+ ASTFileSignature Signature;
+ std::copy(First, Last, Signature.begin());
+ return Signature;
}
};
@@ -101,7 +124,7 @@ public:
std::string PresumedModuleMapFile;
/// The umbrella header or directory.
- llvm::PointerUnion<const DirectoryEntry *, const FileEntry *> Umbrella;
+ const void *Umbrella = nullptr;
/// The module signature.
ASTFileSignature Signature;
@@ -206,8 +229,10 @@ public:
/// A module with the same name that shadows this module.
Module *ShadowingModule = nullptr;
- /// Whether this module is missing a feature from \c Requirements.
- unsigned IsMissingRequirement : 1;
+ /// Whether this module has declared itself unimportable, either because
+ /// it's missing a requirement from \p Requirements or because it's been
+ /// shadowed by another module.
+ unsigned IsUnimportable : 1;
/// Whether we tried and failed to load a module file for this module.
unsigned HasIncompatibleModuleFile : 1;
@@ -268,6 +293,9 @@ public:
/// to a regular (public) module map.
unsigned ModuleMapIsPrivate : 1;
+ /// Whether Umbrella is a directory or header.
+ unsigned HasUmbrellaDir : 1;
+
/// Describes the visibility of the various names within a
/// particular module.
enum NameVisibilityKind {
@@ -380,6 +408,25 @@ public:
~Module();
+ /// Determine whether this module has been declared unimportable.
+ bool isUnimportable() const { return IsUnimportable; }
+
+ /// Determine whether this module has been declared unimportable.
+ ///
+ /// \param LangOpts The language options used for the current
+ /// translation unit.
+ ///
+ /// \param Target The target options used for the current translation unit.
+ ///
+ /// \param Req If this module is unimportable because of a missing
+ /// requirement, this parameter will be set to one of the requirements that
+ /// is not met for use of this module.
+ ///
+ /// \param ShadowingModule If this module is unimportable because it is
+ /// shadowed, this parameter will be set to the shadowing module.
+ bool isUnimportable(const LangOptions &LangOpts, const TargetInfo &Target,
+ Requirement &Req, Module *&ShadowingModule) const;
+
/// Determine whether this module is available for use within the
/// current translation unit.
bool isAvailable() const { return IsAvailable; }
@@ -487,26 +534,22 @@ public:
/// Retrieve the header that serves as the umbrella header for this
/// module.
Header getUmbrellaHeader() const {
- if (auto *E = Umbrella.dyn_cast<const FileEntry *>())
- return Header{UmbrellaAsWritten, E};
+ if (!HasUmbrellaDir)
+ return Header{UmbrellaAsWritten,
+ static_cast<const FileEntry *>(Umbrella)};
return Header{};
}
/// Determine whether this module has an umbrella directory that is
/// not based on an umbrella header.
- bool hasUmbrellaDir() const {
- return Umbrella && Umbrella.is<const DirectoryEntry *>();
- }
+ bool hasUmbrellaDir() const { return Umbrella && HasUmbrellaDir; }
/// Add a top-level header associated with this module.
- void addTopHeader(const FileEntry *File) {
- assert(File);
- TopHeaders.insert(File);
- }
+ void addTopHeader(const FileEntry *File);
/// Add a top-level header filename associated with this module.
void addTopHeaderFilename(StringRef Filename) {
- TopHeaderNames.push_back(Filename);
+ TopHeaderNames.push_back(std::string(Filename));
}
/// The top-level headers associated with this module.
@@ -535,7 +578,7 @@ public:
const TargetInfo &Target);
/// Mark this module and all of its submodules as unavailable.
- void markUnavailable(bool MissingRequirement = false);
+ void markUnavailable(bool Unimportable);
/// Find the submodule with the given name.
///
@@ -654,6 +697,32 @@ private:
unsigned Generation = 0;
};
+/// Abstracts clang modules and precompiled header files and holds
+/// everything needed to generate debug info for an imported module
+/// or PCH.
+class ASTSourceDescriptor {
+ StringRef PCHModuleName;
+ StringRef Path;
+ StringRef ASTFile;
+ ASTFileSignature Signature;
+ Module *ClangModule = nullptr;
+
+public:
+ ASTSourceDescriptor() = default;
+ ASTSourceDescriptor(StringRef Name, StringRef Path, StringRef ASTFile,
+ ASTFileSignature Signature)
+ : PCHModuleName(std::move(Name)), Path(std::move(Path)),
+ ASTFile(std::move(ASTFile)), Signature(Signature) {}
+ ASTSourceDescriptor(Module &M);
+
+ std::string getModuleName() const;
+ StringRef getPath() const { return Path; }
+ StringRef getASTFile() const { return ASTFile; }
+ ASTFileSignature getSignature() const { return Signature; }
+ Module *getModuleOrNull() const { return ClangModule; }
+};
+
+
} // namespace clang
#endif // LLVM_CLANG_BASIC_MODULE_H
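
A small sketch of the widened ASTFileSignature: it now stores the raw 20-byte (SHA-1-sized) hash instead of five 32-bit words, the create() helpers check the length, and operator bool distinguishes a real signature from the all-zero default. The byte values below are placeholders, not a real hash.

#include "clang/Basic/Module.h"
#include "llvm/ADT/StringRef.h"

static clang::ASTFileSignature exampleSignature() {
  const char Bytes[20] = {0x12, 0x34, 0x56}; // remaining bytes are zero-initialized
  return clang::ASTFileSignature::create(llvm::StringRef(Bytes, sizeof(Bytes)));
}

static bool haveSignature() { return static_cast<bool>(exampleSignature()); }
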
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
index 1c4a69269dee..26403bfa98c9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
@@ -476,6 +476,10 @@ public:
friend bool operator!=(const ObjCRuntime &left, const ObjCRuntime &right) {
return !(left == right);
}
+
+ friend llvm::hash_code hash_value(const ObjCRuntime &OCR) {
+ return llvm::hash_combine(OCR.getKind(), OCR.getVersion());
+ }
};
raw_ostream &operator<<(raw_ostream &out, const ObjCRuntime &value);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
index 5536a6e8e4df..1ae36b32fb0a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
@@ -70,9 +70,17 @@ OPENCLEXT_INTERNAL(cl_khr_spir, 120, ~0U)
OPENCLEXT_INTERNAL(cl_khr_egl_event, 200, ~0U)
OPENCLEXT_INTERNAL(cl_khr_egl_image, 200, ~0U)
OPENCLEXT_INTERNAL(cl_khr_mipmap_image, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_mipmap_image_writes, 200, ~0U)
OPENCLEXT_INTERNAL(cl_khr_srgb_image_writes, 200, ~0U)
OPENCLEXT_INTERNAL(cl_khr_subgroups, 200, ~0U)
OPENCLEXT_INTERNAL(cl_khr_terminate_context, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_extended_types, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_non_uniform_vote, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_ballot, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_non_uniform_arithmetic, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_shuffle, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_shuffle_relative, 200, ~0U)
+OPENCLEXT_INTERNAL(cl_khr_subgroup_clustered_reduce, 200, ~0U)
// Clang Extensions.
OPENCLEXT_INTERNAL(cl_clang_storage_class_specifiers, 100, ~0U)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
index 38e8c3204475..9f2bf1abc287 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
@@ -11,105 +11,6 @@
///
//===----------------------------------------------------------------------===//
-#ifndef OPENMP_CLAUSE
-# define OPENMP_CLAUSE(Name, Class)
-#endif
-#ifndef OPENMP_PARALLEL_CLAUSE
-# define OPENMP_PARALLEL_CLAUSE(Name)
-#endif
-#ifndef OPENMP_SIMD_CLAUSE
-# define OPENMP_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_FOR_CLAUSE
-# define OPENMP_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_FOR_SIMD_CLAUSE
-# define OPENMP_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_SECTIONS_CLAUSE
-# define OPENMP_SECTIONS_CLAUSE(Name)
-#endif
-#ifndef OPENMP_SINGLE_CLAUSE
-# define OPENMP_SINGLE_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_FOR_CLAUSE
-# define OPENMP_PARALLEL_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_FOR_SIMD_CLAUSE
-# define OPENMP_PARALLEL_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_MASTER_CLAUSE
-# define OPENMP_PARALLEL_MASTER_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_SECTIONS_CLAUSE
-# define OPENMP_PARALLEL_SECTIONS_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TASK_CLAUSE
-# define OPENMP_TASK_CLAUSE(Name)
-#endif
-#ifndef OPENMP_ATOMIC_CLAUSE
-# define OPENMP_ATOMIC_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_CLAUSE
-# define OPENMP_TARGET_CLAUSE(Name)
-#endif
-#ifndef OPENMP_REQUIRES_CLAUSE
-# define OPENMP_REQUIRES_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_DATA_CLAUSE
-# define OPENMP_TARGET_DATA_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_ENTER_DATA_CLAUSE
-#define OPENMP_TARGET_ENTER_DATA_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_EXIT_DATA_CLAUSE
-#define OPENMP_TARGET_EXIT_DATA_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_PARALLEL_CLAUSE
-# define OPENMP_TARGET_PARALLEL_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_PARALLEL_FOR_CLAUSE
-# define OPENMP_TARGET_PARALLEL_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_UPDATE_CLAUSE
-# define OPENMP_TARGET_UPDATE_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TEAMS_CLAUSE
-# define OPENMP_TEAMS_CLAUSE(Name)
-#endif
-#ifndef OPENMP_CANCEL_CLAUSE
-# define OPENMP_CANCEL_CLAUSE(Name)
-#endif
-#ifndef OPENMP_ORDERED_CLAUSE
-# define OPENMP_ORDERED_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TASKLOOP_CLAUSE
-# define OPENMP_TASKLOOP_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TASKLOOP_SIMD_CLAUSE
-# define OPENMP_TASKLOOP_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_MASTER_TASKLOOP_CLAUSE
-# define OPENMP_MASTER_TASKLOOP_CLAUSE(Name)
-#endif
-#ifndef OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE
-# define OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE
-# define OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(Name)
-#endif
-#ifndef OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE
-# define OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_CRITICAL_CLAUSE
-# define OPENMP_CRITICAL_CLAUSE(Name)
-#endif
-#ifndef OPENMP_DISTRIBUTE_CLAUSE
-#define OPENMP_DISTRIBUTE_CLAUSE(Name)
-#endif
-#ifndef OPENMP_DEFAULT_KIND
-# define OPENMP_DEFAULT_KIND(Name)
-#endif
#ifndef OPENMP_SCHEDULE_KIND
#define OPENMP_SCHEDULE_KIND(Name)
#endif
@@ -146,215 +47,22 @@
#ifndef OPENMP_DEFAULTMAP_MODIFIER
#define OPENMP_DEFAULTMAP_MODIFIER(Name)
#endif
-#ifndef OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_DISTRIBUTE_SIMD_CLAUSE
-#define OPENMP_DISTRIBUTE_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE
-#define OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_SIMD_CLAUSE
-#define OPENMP_TARGET_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TEAMS_DISTRIBUTE_CLAUSE
-#define OPENMP_TEAMS_DISTRIBUTE_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE
-#define OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_TEAMS_CLAUSE
-#define OPENMP_TARGET_TEAMS_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name)
-#endif
-#ifndef OPENMP_TASKGROUP_CLAUSE
-#define OPENMP_TASKGROUP_CLAUSE(Name)
-#endif
-#ifndef OPENMP_DECLARE_MAPPER_CLAUSE
-#define OPENMP_DECLARE_MAPPER_CLAUSE(Name)
-#endif
-#ifndef OPENMP_ALLOCATE_CLAUSE
-# define OPENMP_ALLOCATE_CLAUSE(Name)
-#endif
#ifndef OPENMP_DEVICE_TYPE_KIND
#define OPENMP_DEVICE_TYPE_KIND(Name)
#endif
-#ifndef OPENMP_DECLARE_VARIANT_CLAUSE
-#define OPENMP_DECLARE_VARIANT_CLAUSE(Name)
+#ifndef OPENMP_LASTPRIVATE_KIND
+#define OPENMP_LASTPRIVATE_KIND(Name)
#endif
-#ifndef OPENMP_CONTEXT_SELECTOR_SET
-#define OPENMP_CONTEXT_SELECTOR_SET(Name)
+#ifndef OPENMP_ORDER_KIND
+#define OPENMP_ORDER_KIND(Name)
#endif
-#ifndef OPENMP_CONTEXT_SELECTOR
-#define OPENMP_CONTEXT_SELECTOR(Name)
+#ifndef OPENMP_DEVICE_MODIFIER
+#define OPENMP_DEVICE_MODIFIER(Name)
#endif
-#ifndef OPENMP_LASTPRIVATE_KIND
-#define OPENMP_LASTPRIVATE_KIND(Name)
+#ifndef OPENMP_REDUCTION_MODIFIER
+#define OPENMP_REDUCTION_MODIFIER(Name)
#endif
-// OpenMP context selector sets.
-OPENMP_CONTEXT_SELECTOR_SET(implementation)
-OPENMP_CONTEXT_SELECTOR_SET(device)
-
-// OpenMP context selectors.
-OPENMP_CONTEXT_SELECTOR(vendor)
-OPENMP_CONTEXT_SELECTOR(kind)
-
-// OpenMP clauses.
-OPENMP_CLAUSE(allocator, OMPAllocatorClause)
-OPENMP_CLAUSE(if, OMPIfClause)
-OPENMP_CLAUSE(final, OMPFinalClause)
-OPENMP_CLAUSE(num_threads, OMPNumThreadsClause)
-OPENMP_CLAUSE(safelen, OMPSafelenClause)
-OPENMP_CLAUSE(simdlen, OMPSimdlenClause)
-OPENMP_CLAUSE(collapse, OMPCollapseClause)
-OPENMP_CLAUSE(default, OMPDefaultClause)
-OPENMP_CLAUSE(private, OMPPrivateClause)
-OPENMP_CLAUSE(firstprivate, OMPFirstprivateClause)
-OPENMP_CLAUSE(lastprivate, OMPLastprivateClause)
-OPENMP_CLAUSE(shared, OMPSharedClause)
-OPENMP_CLAUSE(reduction, OMPReductionClause)
-OPENMP_CLAUSE(linear, OMPLinearClause)
-OPENMP_CLAUSE(aligned, OMPAlignedClause)
-OPENMP_CLAUSE(copyin, OMPCopyinClause)
-OPENMP_CLAUSE(copyprivate, OMPCopyprivateClause)
-OPENMP_CLAUSE(proc_bind, OMPProcBindClause)
-OPENMP_CLAUSE(schedule, OMPScheduleClause)
-OPENMP_CLAUSE(ordered, OMPOrderedClause)
-OPENMP_CLAUSE(nowait, OMPNowaitClause)
-OPENMP_CLAUSE(untied, OMPUntiedClause)
-OPENMP_CLAUSE(mergeable, OMPMergeableClause)
-OPENMP_CLAUSE(flush, OMPFlushClause)
-OPENMP_CLAUSE(read, OMPReadClause)
-OPENMP_CLAUSE(write, OMPWriteClause)
-OPENMP_CLAUSE(update, OMPUpdateClause)
-OPENMP_CLAUSE(capture, OMPCaptureClause)
-OPENMP_CLAUSE(seq_cst, OMPSeqCstClause)
-OPENMP_CLAUSE(depend, OMPDependClause)
-OPENMP_CLAUSE(device, OMPDeviceClause)
-OPENMP_CLAUSE(threads, OMPThreadsClause)
-OPENMP_CLAUSE(simd, OMPSIMDClause)
-OPENMP_CLAUSE(map, OMPMapClause)
-OPENMP_CLAUSE(num_teams, OMPNumTeamsClause)
-OPENMP_CLAUSE(thread_limit, OMPThreadLimitClause)
-OPENMP_CLAUSE(priority, OMPPriorityClause)
-OPENMP_CLAUSE(grainsize, OMPGrainsizeClause)
-OPENMP_CLAUSE(nogroup, OMPNogroupClause)
-OPENMP_CLAUSE(num_tasks, OMPNumTasksClause)
-OPENMP_CLAUSE(hint, OMPHintClause)
-OPENMP_CLAUSE(dist_schedule, OMPDistScheduleClause)
-OPENMP_CLAUSE(defaultmap, OMPDefaultmapClause)
-OPENMP_CLAUSE(to, OMPToClause)
-OPENMP_CLAUSE(from, OMPFromClause)
-OPENMP_CLAUSE(use_device_ptr, OMPUseDevicePtrClause)
-OPENMP_CLAUSE(is_device_ptr, OMPIsDevicePtrClause)
-OPENMP_CLAUSE(task_reduction, OMPTaskReductionClause)
-OPENMP_CLAUSE(in_reduction, OMPInReductionClause)
-OPENMP_CLAUSE(unified_address, OMPUnifiedAddressClause)
-OPENMP_CLAUSE(unified_shared_memory, OMPUnifiedSharedMemoryClause)
-OPENMP_CLAUSE(reverse_offload, OMPReverseOffloadClause)
-OPENMP_CLAUSE(dynamic_allocators, OMPDynamicAllocatorsClause)
-OPENMP_CLAUSE(atomic_default_mem_order, OMPAtomicDefaultMemOrderClause)
-OPENMP_CLAUSE(allocate, OMPAllocateClause)
-OPENMP_CLAUSE(nontemporal, OMPNontemporalClause)
-
-// Clauses allowed for OpenMP directive 'parallel'.
-OPENMP_PARALLEL_CLAUSE(if)
-OPENMP_PARALLEL_CLAUSE(num_threads)
-OPENMP_PARALLEL_CLAUSE(default)
-OPENMP_PARALLEL_CLAUSE(proc_bind)
-OPENMP_PARALLEL_CLAUSE(private)
-OPENMP_PARALLEL_CLAUSE(firstprivate)
-OPENMP_PARALLEL_CLAUSE(shared)
-OPENMP_PARALLEL_CLAUSE(reduction)
-OPENMP_PARALLEL_CLAUSE(copyin)
-OPENMP_PARALLEL_CLAUSE(allocate)
-
-// Clauses allowed for directive 'omp simd'.
-OPENMP_SIMD_CLAUSE(private)
-OPENMP_SIMD_CLAUSE(lastprivate)
-OPENMP_SIMD_CLAUSE(linear)
-OPENMP_SIMD_CLAUSE(aligned)
-OPENMP_SIMD_CLAUSE(safelen)
-OPENMP_SIMD_CLAUSE(simdlen)
-OPENMP_SIMD_CLAUSE(collapse)
-OPENMP_SIMD_CLAUSE(reduction)
-OPENMP_SIMD_CLAUSE(allocate)
-OPENMP_SIMD_CLAUSE(if)
-OPENMP_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for directive 'omp for'.
-OPENMP_FOR_CLAUSE(private)
-OPENMP_FOR_CLAUSE(lastprivate)
-OPENMP_FOR_CLAUSE(firstprivate)
-OPENMP_FOR_CLAUSE(reduction)
-OPENMP_FOR_CLAUSE(collapse)
-OPENMP_FOR_CLAUSE(schedule)
-OPENMP_FOR_CLAUSE(ordered)
-OPENMP_FOR_CLAUSE(nowait)
-OPENMP_FOR_CLAUSE(linear)
-OPENMP_FOR_CLAUSE(allocate)
-
-// Clauses allowed for directive 'omp for simd'.
-OPENMP_FOR_SIMD_CLAUSE(private)
-OPENMP_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_FOR_SIMD_CLAUSE(reduction)
-OPENMP_FOR_SIMD_CLAUSE(schedule)
-OPENMP_FOR_SIMD_CLAUSE(collapse)
-OPENMP_FOR_SIMD_CLAUSE(nowait)
-OPENMP_FOR_SIMD_CLAUSE(safelen)
-OPENMP_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_FOR_SIMD_CLAUSE(linear)
-OPENMP_FOR_SIMD_CLAUSE(aligned)
-OPENMP_FOR_SIMD_CLAUSE(ordered)
-OPENMP_FOR_SIMD_CLAUSE(allocate)
-OPENMP_FOR_SIMD_CLAUSE(if)
-OPENMP_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'omp sections'.
-OPENMP_SECTIONS_CLAUSE(private)
-OPENMP_SECTIONS_CLAUSE(lastprivate)
-OPENMP_SECTIONS_CLAUSE(firstprivate)
-OPENMP_SECTIONS_CLAUSE(reduction)
-OPENMP_SECTIONS_CLAUSE(nowait)
-OPENMP_SECTIONS_CLAUSE(allocate)
-
-// Clauses allowed for directive 'omp single'.
-OPENMP_SINGLE_CLAUSE(private)
-OPENMP_SINGLE_CLAUSE(firstprivate)
-OPENMP_SINGLE_CLAUSE(copyprivate)
-OPENMP_SINGLE_CLAUSE(nowait)
-OPENMP_SINGLE_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'cancel'.
-OPENMP_CANCEL_CLAUSE(if)
-
-// Static attributes for 'default' clause.
-OPENMP_DEFAULT_KIND(none)
-OPENMP_DEFAULT_KIND(shared)
-
// Static attributes for 'schedule' clause.
OPENMP_SCHEDULE_KIND(static)
OPENMP_SCHEDULE_KIND(dynamic)
@@ -367,6 +75,10 @@ OPENMP_SCHEDULE_MODIFIER(monotonic)
OPENMP_SCHEDULE_MODIFIER(nonmonotonic)
OPENMP_SCHEDULE_MODIFIER(simd)
+// Modifiers for 'device' clause.
+OPENMP_DEVICE_MODIFIER(ancestor)
+OPENMP_DEVICE_MODIFIER(device_num)
+
// Static attributes for 'defaultmap' clause.
OPENMP_DEFAULTMAP_KIND(scalar)
OPENMP_DEFAULTMAP_KIND(aggregate)
@@ -386,6 +98,7 @@ OPENMP_DEPEND_KIND(in)
OPENMP_DEPEND_KIND(out)
OPENMP_DEPEND_KIND(inout)
OPENMP_DEPEND_KIND(mutexinoutset)
+OPENMP_DEPEND_KIND(depobj)
OPENMP_DEPEND_KIND(source)
OPENMP_DEPEND_KIND(sink)
@@ -394,200 +107,11 @@ OPENMP_LINEAR_KIND(val)
OPENMP_LINEAR_KIND(ref)
OPENMP_LINEAR_KIND(uval)
-// Clauses allowed for OpenMP directive 'parallel for'.
-OPENMP_PARALLEL_FOR_CLAUSE(if)
-OPENMP_PARALLEL_FOR_CLAUSE(num_threads)
-OPENMP_PARALLEL_FOR_CLAUSE(default)
-OPENMP_PARALLEL_FOR_CLAUSE(proc_bind)
-OPENMP_PARALLEL_FOR_CLAUSE(private)
-OPENMP_PARALLEL_FOR_CLAUSE(firstprivate)
-OPENMP_PARALLEL_FOR_CLAUSE(shared)
-OPENMP_PARALLEL_FOR_CLAUSE(reduction)
-OPENMP_PARALLEL_FOR_CLAUSE(copyin)
-OPENMP_PARALLEL_FOR_CLAUSE(lastprivate)
-OPENMP_PARALLEL_FOR_CLAUSE(collapse)
-OPENMP_PARALLEL_FOR_CLAUSE(schedule)
-OPENMP_PARALLEL_FOR_CLAUSE(ordered)
-OPENMP_PARALLEL_FOR_CLAUSE(linear)
-OPENMP_PARALLEL_FOR_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'parallel for simd'.
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(if)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(num_threads)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(default)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(proc_bind)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(private)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(shared)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(reduction)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(copyin)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(collapse)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(schedule)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(safelen)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(linear)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(aligned)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(ordered)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(allocate)
-OPENMP_PARALLEL_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'parallel master'.
-OPENMP_PARALLEL_MASTER_CLAUSE(if)
-OPENMP_PARALLEL_MASTER_CLAUSE(num_threads)
-OPENMP_PARALLEL_MASTER_CLAUSE(default)
-OPENMP_PARALLEL_MASTER_CLAUSE(private)
-OPENMP_PARALLEL_MASTER_CLAUSE(firstprivate)
-OPENMP_PARALLEL_MASTER_CLAUSE(shared)
-OPENMP_PARALLEL_MASTER_CLAUSE(copyin)
-OPENMP_PARALLEL_MASTER_CLAUSE(reduction)
-OPENMP_PARALLEL_MASTER_CLAUSE(proc_bind)
-OPENMP_PARALLEL_MASTER_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'parallel sections'.
-OPENMP_PARALLEL_SECTIONS_CLAUSE(if)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(num_threads)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(default)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(proc_bind)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(private)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(firstprivate)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(shared)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(reduction)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(copyin)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(lastprivate)
-OPENMP_PARALLEL_SECTIONS_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'task'.
-OPENMP_TASK_CLAUSE(if)
-OPENMP_TASK_CLAUSE(final)
-OPENMP_TASK_CLAUSE(default)
-OPENMP_TASK_CLAUSE(private)
-OPENMP_TASK_CLAUSE(firstprivate)
-OPENMP_TASK_CLAUSE(shared)
-OPENMP_TASK_CLAUSE(untied)
-OPENMP_TASK_CLAUSE(mergeable)
-OPENMP_TASK_CLAUSE(depend)
-OPENMP_TASK_CLAUSE(priority)
-OPENMP_TASK_CLAUSE(in_reduction)
-OPENMP_TASK_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'atomic'.
-OPENMP_ATOMIC_CLAUSE(read)
-OPENMP_ATOMIC_CLAUSE(write)
-OPENMP_ATOMIC_CLAUSE(update)
-OPENMP_ATOMIC_CLAUSE(capture)
-OPENMP_ATOMIC_CLAUSE(seq_cst)
-
-// Clauses allowed for OpenMP directive 'target'.
-OPENMP_TARGET_CLAUSE(if)
-OPENMP_TARGET_CLAUSE(device)
-OPENMP_TARGET_CLAUSE(map)
-OPENMP_TARGET_CLAUSE(private)
-OPENMP_TARGET_CLAUSE(nowait)
-OPENMP_TARGET_CLAUSE(depend)
-OPENMP_TARGET_CLAUSE(defaultmap)
-OPENMP_TARGET_CLAUSE(firstprivate)
-OPENMP_TARGET_CLAUSE(is_device_ptr)
-OPENMP_TARGET_CLAUSE(reduction)
-OPENMP_TARGET_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'requires'.
-OPENMP_REQUIRES_CLAUSE(unified_address)
-OPENMP_REQUIRES_CLAUSE(unified_shared_memory)
-OPENMP_REQUIRES_CLAUSE(reverse_offload)
-OPENMP_REQUIRES_CLAUSE(dynamic_allocators)
-OPENMP_REQUIRES_CLAUSE(atomic_default_mem_order)
-
-// Clauses allowed for OpenMP directive 'allocate'.
-OPENMP_ALLOCATE_CLAUSE(allocator)
-
// Modifiers for 'atomic_default_mem_order' clause.
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(seq_cst)
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(acq_rel)
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(relaxed)
-// Clauses allowed for OpenMP directive 'target data'.
-OPENMP_TARGET_DATA_CLAUSE(if)
-OPENMP_TARGET_DATA_CLAUSE(device)
-OPENMP_TARGET_DATA_CLAUSE(map)
-OPENMP_TARGET_DATA_CLAUSE(use_device_ptr)
-
-// Clauses allowed for OpenMP directive 'target enter data'.
-OPENMP_TARGET_ENTER_DATA_CLAUSE(if)
-OPENMP_TARGET_ENTER_DATA_CLAUSE(device)
-OPENMP_TARGET_ENTER_DATA_CLAUSE(map)
-OPENMP_TARGET_ENTER_DATA_CLAUSE(nowait)
-OPENMP_TARGET_ENTER_DATA_CLAUSE(depend)
-
-// Clauses allowed for OpenMP directive 'target exit data'.
-OPENMP_TARGET_EXIT_DATA_CLAUSE(if)
-OPENMP_TARGET_EXIT_DATA_CLAUSE(device)
-OPENMP_TARGET_EXIT_DATA_CLAUSE(map)
-OPENMP_TARGET_EXIT_DATA_CLAUSE(nowait)
-OPENMP_TARGET_EXIT_DATA_CLAUSE(depend)
-
-// Clauses allowed for OpenMP directive 'target parallel'.
-OPENMP_TARGET_PARALLEL_CLAUSE(if)
-OPENMP_TARGET_PARALLEL_CLAUSE(device)
-OPENMP_TARGET_PARALLEL_CLAUSE(map)
-OPENMP_TARGET_PARALLEL_CLAUSE(private)
-OPENMP_TARGET_PARALLEL_CLAUSE(firstprivate)
-OPENMP_TARGET_PARALLEL_CLAUSE(nowait)
-OPENMP_TARGET_PARALLEL_CLAUSE(depend)
-OPENMP_TARGET_PARALLEL_CLAUSE(defaultmap)
-OPENMP_TARGET_PARALLEL_CLAUSE(num_threads)
-OPENMP_TARGET_PARALLEL_CLAUSE(default)
-OPENMP_TARGET_PARALLEL_CLAUSE(proc_bind)
-OPENMP_TARGET_PARALLEL_CLAUSE(shared)
-OPENMP_TARGET_PARALLEL_CLAUSE(reduction)
-OPENMP_TARGET_PARALLEL_CLAUSE(is_device_ptr)
-OPENMP_TARGET_PARALLEL_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'target parallel for'.
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(if)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(device)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(map)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(private)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(firstprivate)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(lastprivate)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(nowait)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(depend)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(defaultmap)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(num_threads)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(default)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(proc_bind)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(shared)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(reduction)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(collapse)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(schedule)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(ordered)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(linear)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(is_device_ptr)
-OPENMP_TARGET_PARALLEL_FOR_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'target update'.
-OPENMP_TARGET_UPDATE_CLAUSE(if)
-OPENMP_TARGET_UPDATE_CLAUSE(device)
-OPENMP_TARGET_UPDATE_CLAUSE(to)
-OPENMP_TARGET_UPDATE_CLAUSE(from)
-OPENMP_TARGET_UPDATE_CLAUSE(nowait)
-OPENMP_TARGET_UPDATE_CLAUSE(depend)
-
-// Clauses allowed for OpenMP directive 'teams'.
-OPENMP_TEAMS_CLAUSE(default)
-OPENMP_TEAMS_CLAUSE(private)
-OPENMP_TEAMS_CLAUSE(firstprivate)
-OPENMP_TEAMS_CLAUSE(shared)
-OPENMP_TEAMS_CLAUSE(reduction)
-OPENMP_TEAMS_CLAUSE(num_teams)
-OPENMP_TEAMS_CLAUSE(thread_limit)
-OPENMP_TEAMS_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'ordered'.
-OPENMP_ORDERED_CLAUSE(threads)
-OPENMP_ORDERED_CLAUSE(simd)
-OPENMP_ORDERED_CLAUSE(depend)
-
// Map types for 'map' clause.
OPENMP_MAP_KIND(alloc)
OPENMP_MAP_KIND(to)
@@ -607,518 +131,40 @@ OPENMP_TO_MODIFIER_KIND(mapper)
// Modifiers for 'from' clause.
OPENMP_FROM_MODIFIER_KIND(mapper)
-// Clauses allowed for OpenMP directive 'taskloop'.
-OPENMP_TASKLOOP_CLAUSE(if)
-OPENMP_TASKLOOP_CLAUSE(shared)
-OPENMP_TASKLOOP_CLAUSE(private)
-OPENMP_TASKLOOP_CLAUSE(firstprivate)
-OPENMP_TASKLOOP_CLAUSE(lastprivate)
-OPENMP_TASKLOOP_CLAUSE(default)
-OPENMP_TASKLOOP_CLAUSE(collapse)
-OPENMP_TASKLOOP_CLAUSE(final)
-OPENMP_TASKLOOP_CLAUSE(untied)
-OPENMP_TASKLOOP_CLAUSE(mergeable)
-OPENMP_TASKLOOP_CLAUSE(priority)
-OPENMP_TASKLOOP_CLAUSE(grainsize)
-OPENMP_TASKLOOP_CLAUSE(nogroup)
-OPENMP_TASKLOOP_CLAUSE(num_tasks)
-OPENMP_TASKLOOP_CLAUSE(reduction)
-OPENMP_TASKLOOP_CLAUSE(in_reduction)
-OPENMP_TASKLOOP_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'taskloop simd'.
-OPENMP_TASKLOOP_SIMD_CLAUSE(if)
-OPENMP_TASKLOOP_SIMD_CLAUSE(shared)
-OPENMP_TASKLOOP_SIMD_CLAUSE(private)
-OPENMP_TASKLOOP_SIMD_CLAUSE(firstprivate)
-OPENMP_TASKLOOP_SIMD_CLAUSE(lastprivate)
-OPENMP_TASKLOOP_SIMD_CLAUSE(default)
-OPENMP_TASKLOOP_SIMD_CLAUSE(collapse)
-OPENMP_TASKLOOP_SIMD_CLAUSE(final)
-OPENMP_TASKLOOP_SIMD_CLAUSE(untied)
-OPENMP_TASKLOOP_SIMD_CLAUSE(mergeable)
-OPENMP_TASKLOOP_SIMD_CLAUSE(priority)
-OPENMP_TASKLOOP_SIMD_CLAUSE(linear)
-OPENMP_TASKLOOP_SIMD_CLAUSE(aligned)
-OPENMP_TASKLOOP_SIMD_CLAUSE(safelen)
-OPENMP_TASKLOOP_SIMD_CLAUSE(simdlen)
-OPENMP_TASKLOOP_SIMD_CLAUSE(grainsize)
-OPENMP_TASKLOOP_SIMD_CLAUSE(nogroup)
-OPENMP_TASKLOOP_SIMD_CLAUSE(num_tasks)
-OPENMP_TASKLOOP_SIMD_CLAUSE(reduction)
-OPENMP_TASKLOOP_SIMD_CLAUSE(in_reduction)
-OPENMP_TASKLOOP_SIMD_CLAUSE(allocate)
-OPENMP_TASKLOOP_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'master taskloop'.
-OPENMP_MASTER_TASKLOOP_CLAUSE(if)
-OPENMP_MASTER_TASKLOOP_CLAUSE(shared)
-OPENMP_MASTER_TASKLOOP_CLAUSE(private)
-OPENMP_MASTER_TASKLOOP_CLAUSE(firstprivate)
-OPENMP_MASTER_TASKLOOP_CLAUSE(lastprivate)
-OPENMP_MASTER_TASKLOOP_CLAUSE(default)
-OPENMP_MASTER_TASKLOOP_CLAUSE(collapse)
-OPENMP_MASTER_TASKLOOP_CLAUSE(final)
-OPENMP_MASTER_TASKLOOP_CLAUSE(untied)
-OPENMP_MASTER_TASKLOOP_CLAUSE(mergeable)
-OPENMP_MASTER_TASKLOOP_CLAUSE(priority)
-OPENMP_MASTER_TASKLOOP_CLAUSE(grainsize)
-OPENMP_MASTER_TASKLOOP_CLAUSE(nogroup)
-OPENMP_MASTER_TASKLOOP_CLAUSE(num_tasks)
-OPENMP_MASTER_TASKLOOP_CLAUSE(reduction)
-OPENMP_MASTER_TASKLOOP_CLAUSE(in_reduction)
-OPENMP_MASTER_TASKLOOP_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'master taskloop simd'.
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(if)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(shared)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(private)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(firstprivate)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(lastprivate)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(default)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(collapse)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(final)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(untied)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(mergeable)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(priority)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(linear)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(aligned)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(safelen)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(simdlen)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(grainsize)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(nogroup)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(num_tasks)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(reduction)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(in_reduction)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(allocate)
-OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'parallel master taskloop'.
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(if)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(shared)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(private)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(firstprivate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(lastprivate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(default)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(collapse)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(final)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(untied)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(mergeable)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(priority)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(grainsize)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(nogroup)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(num_tasks)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(reduction)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(allocate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(num_threads)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(proc_bind)
-OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(copyin)
-
-// Clauses allowed for OpenMP directive 'parallel master taskloop simd'.
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(if)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(shared)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(private)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(firstprivate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(lastprivate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(default)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(collapse)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(final)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(untied)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(mergeable)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(priority)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(grainsize)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(nogroup)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(num_tasks)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(reduction)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(allocate)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(num_threads)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(proc_bind)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(copyin)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(linear)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(aligned)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(safelen)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(simdlen)
-OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'critical'.
-OPENMP_CRITICAL_CLAUSE(hint)
-
-// Clauses allowed for OpenMP directive 'distribute'
-OPENMP_DISTRIBUTE_CLAUSE(private)
-OPENMP_DISTRIBUTE_CLAUSE(firstprivate)
-OPENMP_DISTRIBUTE_CLAUSE(lastprivate)
-OPENMP_DISTRIBUTE_CLAUSE(collapse)
-OPENMP_DISTRIBUTE_CLAUSE(dist_schedule)
-OPENMP_DISTRIBUTE_CLAUSE(allocate)
-
// Static attributes for 'dist_schedule' clause.
OPENMP_DIST_SCHEDULE_KIND(static)
-// Clauses allowed for OpenMP directive 'distribute parallel for'
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(firstprivate)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(lastprivate)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(collapse)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(dist_schedule)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(if)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(num_threads)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(default)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(proc_bind)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(private)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(shared)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(reduction)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(copyin)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(schedule)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'distribute parallel for simd'
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(collapse)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(dist_schedule)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(if)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(num_threads)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(default)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(proc_bind)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(private)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(shared)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(reduction)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(copyin)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(schedule)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(linear)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(aligned)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(safelen)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(allocate)
-OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'distribute simd'
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(private)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(firstprivate)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(lastprivate)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(collapse)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(dist_schedule)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(linear)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(aligned)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(safelen)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(simdlen)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(reduction)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(allocate)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(if)
-OPENMP_DISTRIBUTE_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'target parallel for simd'.
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(if)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(device)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(map)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(private)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(nowait)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(depend)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(defaultmap)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(num_threads)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(default)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(proc_bind)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(shared)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(reduction)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(collapse)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(schedule)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(ordered)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(linear)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(safelen)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(aligned)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(is_device_ptr)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(allocate)
-OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'target simd'.
-OPENMP_TARGET_SIMD_CLAUSE(if)
-OPENMP_TARGET_SIMD_CLAUSE(device)
-OPENMP_TARGET_SIMD_CLAUSE(map)
-OPENMP_TARGET_SIMD_CLAUSE(private)
-OPENMP_TARGET_SIMD_CLAUSE(nowait)
-OPENMP_TARGET_SIMD_CLAUSE(depend)
-OPENMP_TARGET_SIMD_CLAUSE(defaultmap)
-OPENMP_TARGET_SIMD_CLAUSE(firstprivate)
-OPENMP_TARGET_SIMD_CLAUSE(is_device_ptr)
-OPENMP_TARGET_SIMD_CLAUSE(lastprivate)
-OPENMP_TARGET_SIMD_CLAUSE(linear)
-OPENMP_TARGET_SIMD_CLAUSE(aligned)
-OPENMP_TARGET_SIMD_CLAUSE(safelen)
-OPENMP_TARGET_SIMD_CLAUSE(simdlen)
-OPENMP_TARGET_SIMD_CLAUSE(collapse)
-OPENMP_TARGET_SIMD_CLAUSE(reduction)
-OPENMP_TARGET_SIMD_CLAUSE(allocate)
-OPENMP_TARGET_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'teams distribute'.
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(default)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(private)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(firstprivate)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(shared)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(reduction)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(num_teams)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(thread_limit)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(lastprivate)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(collapse)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(dist_schedule)
-OPENMP_TEAMS_DISTRIBUTE_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'teams distribute simd'
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(default)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(private)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(firstprivate)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(shared)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(reduction)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(num_teams)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(thread_limit)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(lastprivate)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(collapse)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(dist_schedule)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(linear)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(aligned)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(safelen)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(simdlen)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(allocate)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(if)
-OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'teams distribute parallel for simd'
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(collapse)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(dist_schedule)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(if)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(num_threads)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(default)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(proc_bind)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(private)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(shared)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(reduction)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(schedule)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(linear)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(aligned)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(safelen)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(num_teams)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(thread_limit)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(allocate)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'teams distribute parallel for'
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(firstprivate)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(lastprivate)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(collapse)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(dist_schedule)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(if)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(num_threads)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(default)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(proc_bind)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(private)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(shared)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(reduction)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(schedule)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(num_teams)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(thread_limit)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(copyin)
-OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'target teams'.
-OPENMP_TARGET_TEAMS_CLAUSE(if)
-OPENMP_TARGET_TEAMS_CLAUSE(device)
-OPENMP_TARGET_TEAMS_CLAUSE(map)
-OPENMP_TARGET_TEAMS_CLAUSE(private)
-OPENMP_TARGET_TEAMS_CLAUSE(nowait)
-OPENMP_TARGET_TEAMS_CLAUSE(depend)
-OPENMP_TARGET_TEAMS_CLAUSE(defaultmap)
-OPENMP_TARGET_TEAMS_CLAUSE(firstprivate)
-OPENMP_TARGET_TEAMS_CLAUSE(is_device_ptr)
-OPENMP_TARGET_TEAMS_CLAUSE(default)
-OPENMP_TARGET_TEAMS_CLAUSE(shared)
-OPENMP_TARGET_TEAMS_CLAUSE(reduction)
-OPENMP_TARGET_TEAMS_CLAUSE(num_teams)
-OPENMP_TARGET_TEAMS_CLAUSE(thread_limit)
-OPENMP_TARGET_TEAMS_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'target teams distribute'.
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(if)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(device)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(map)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(private)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(nowait)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(depend)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(defaultmap)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(firstprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(is_device_ptr)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(default)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(shared)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(reduction)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(num_teams)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(thread_limit)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(lastprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(collapse)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(dist_schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'target teams distribute parallel for'.
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(if)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(device)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(map)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(private)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(nowait)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(depend)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(defaultmap)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(firstprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(is_device_ptr)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(default)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(shared)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(reduction)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(num_teams)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(thread_limit)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(lastprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(collapse)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(dist_schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(num_threads)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(proc_bind)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive
-// 'target teams distribute parallel for simd'.
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(if)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(device)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(map)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(private)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(nowait)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(depend)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(defaultmap)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(firstprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(is_device_ptr)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(default)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(shared)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(reduction)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(num_teams)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(thread_limit)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(lastprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(collapse)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(dist_schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(num_threads)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(proc_bind)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(linear)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(aligned)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(safelen)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(allocate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'target teams distribute simd'.
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(if)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(device)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(map)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(private)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(nowait)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(depend)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(defaultmap)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(firstprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(lastprivate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(is_device_ptr)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(shared)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(reduction)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(num_teams)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(thread_limit)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(collapse)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(dist_schedule)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(linear)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(aligned)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(safelen)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(simdlen)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(allocate)
-OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(nontemporal)
-
-// Clauses allowed for OpenMP directive 'taskgroup'.
-OPENMP_TASKGROUP_CLAUSE(task_reduction)
-OPENMP_TASKGROUP_CLAUSE(allocate)
-
-// Clauses allowed for OpenMP directive 'declare mapper'.
-OPENMP_DECLARE_MAPPER_CLAUSE(map)
-
// Device types for 'device_type' clause.
OPENMP_DEVICE_TYPE_KIND(host)
OPENMP_DEVICE_TYPE_KIND(nohost)
OPENMP_DEVICE_TYPE_KIND(any)
-// Clauses allowed for OpenMP directive 'declare variant'.
-OPENMP_DECLARE_VARIANT_CLAUSE(match)
-
// Type of the 'lastprivate' clause.
OPENMP_LASTPRIVATE_KIND(conditional)
+// Type of the 'order' clause.
+OPENMP_ORDER_KIND(concurrent)
+
+// Modifiers for 'reduction' clause.
+OPENMP_REDUCTION_MODIFIER(default)
+OPENMP_REDUCTION_MODIFIER(inscan)
+OPENMP_REDUCTION_MODIFIER(task)
+
+#undef OPENMP_REDUCTION_MODIFIER
+#undef OPENMP_DEVICE_MODIFIER
+#undef OPENMP_ORDER_KIND
#undef OPENMP_LASTPRIVATE_KIND
-#undef OPENMP_CONTEXT_SELECTOR
-#undef OPENMP_CONTEXT_SELECTOR_SET
-#undef OPENMP_DECLARE_VARIANT_CLAUSE
#undef OPENMP_DEVICE_TYPE_KIND
-#undef OPENMP_ALLOCATE_CLAUSE
-#undef OPENMP_DECLARE_MAPPER_CLAUSE
-#undef OPENMP_TASKGROUP_CLAUSE
-#undef OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE
-#undef OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE
-#undef OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE
-#undef OPENMP_MASTER_TASKLOOP_CLAUSE
-#undef OPENMP_TASKLOOP_SIMD_CLAUSE
-#undef OPENMP_TASKLOOP_CLAUSE
#undef OPENMP_LINEAR_KIND
#undef OPENMP_DEPEND_KIND
#undef OPENMP_SCHEDULE_MODIFIER
#undef OPENMP_SCHEDULE_KIND
-#undef OPENMP_DEFAULT_KIND
-#undef OPENMP_CLAUSE
-#undef OPENMP_CRITICAL_CLAUSE
-#undef OPENMP_ORDERED_CLAUSE
-#undef OPENMP_CANCEL_CLAUSE
-#undef OPENMP_SINGLE_CLAUSE
-#undef OPENMP_SECTIONS_CLAUSE
-#undef OPENMP_PARALLEL_CLAUSE
-#undef OPENMP_PARALLEL_FOR_CLAUSE
-#undef OPENMP_PARALLEL_FOR_SIMD_CLAUSE
-#undef OPENMP_PARALLEL_MASTER_CLAUSE
-#undef OPENMP_PARALLEL_SECTIONS_CLAUSE
-#undef OPENMP_TASK_CLAUSE
-#undef OPENMP_ATOMIC_CLAUSE
-#undef OPENMP_TARGET_CLAUSE
-#undef OPENMP_REQUIRES_CLAUSE
#undef OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND
-#undef OPENMP_TARGET_DATA_CLAUSE
-#undef OPENMP_TARGET_ENTER_DATA_CLAUSE
-#undef OPENMP_TARGET_EXIT_DATA_CLAUSE
-#undef OPENMP_TARGET_PARALLEL_CLAUSE
-#undef OPENMP_TARGET_PARALLEL_FOR_CLAUSE
-#undef OPENMP_TEAMS_CLAUSE
-#undef OPENMP_SIMD_CLAUSE
-#undef OPENMP_FOR_CLAUSE
-#undef OPENMP_FOR_SIMD_CLAUSE
#undef OPENMP_MAP_KIND
#undef OPENMP_MAP_MODIFIER_KIND
#undef OPENMP_TO_MODIFIER_KIND
#undef OPENMP_FROM_MODIFIER_KIND
-#undef OPENMP_DISTRIBUTE_CLAUSE
#undef OPENMP_DIST_SCHEDULE_KIND
#undef OPENMP_DEFAULTMAP_KIND
#undef OPENMP_DEFAULTMAP_MODIFIER
-#undef OPENMP_TARGET_UPDATE_CLAUSE
-#undef OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#undef OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#undef OPENMP_DISTRIBUTE_SIMD_CLAUSE
-#undef OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE
-#undef OPENMP_TARGET_SIMD_CLAUSE
-#undef OPENMP_TEAMS_DISTRIBUTE_CLAUSE
-#undef OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE
-#undef OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#undef OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#undef OPENMP_TARGET_TEAMS_CLAUSE
-#undef OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE
-#undef OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE
-#undef OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE
-#undef OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE
+
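The OpenMPKinds.def hunks above follow Clang's X-macro convention: each OPENMP_*_KIND / OPENMP_*_MODIFIER macro defaults to a no-op if the includer did not define it, is expanded once per entry, and is #undef'd at the bottom of the file, so every consumer chooses its own expansion before including the header. A minimal sketch of how a consumer might stamp out both an enum and a parallel name table from the new OPENMP_DEVICE_MODIFIER entries (the consumer-side names below are illustrative, not Clang's):

// Hypothetical consumer of OpenMPKinds.def; the DevMod_* names are made up.
enum DeviceModifier {
#define OPENMP_DEVICE_MODIFIER(Name) DevMod_##Name,
#include "clang/Basic/OpenMPKinds.def"
  DevMod_unknown
};

// Including the .def a second time works because it #undefs its macros.
static const char *DeviceModifierNames[] = {
#define OPENMP_DEVICE_MODIFIER(Name) #Name,
#include "clang/Basic/OpenMPKinds.def"
  "unknown"
};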
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
index 302312325308..08aaf2d43bfd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
@@ -19,67 +19,11 @@
namespace clang {
-/// OpenMP context selector sets.
-enum OpenMPContextSelectorSetKind {
-#define OPENMP_CONTEXT_SELECTOR_SET(Name) OMP_CTX_SET_##Name,
-#include "clang/Basic/OpenMPKinds.def"
- OMP_CTX_SET_unknown,
-};
-
-/// OpenMP context selectors.
-enum OpenMPContextSelectorKind {
-#define OPENMP_CONTEXT_SELECTOR(Name) OMP_CTX_##Name,
-#include "clang/Basic/OpenMPKinds.def"
- OMP_CTX_unknown,
-};
-
-OpenMPContextSelectorSetKind getOpenMPContextSelectorSet(llvm::StringRef Str);
-llvm::StringRef
-getOpenMPContextSelectorSetName(OpenMPContextSelectorSetKind Kind);
-OpenMPContextSelectorKind getOpenMPContextSelector(llvm::StringRef Str);
-llvm::StringRef getOpenMPContextSelectorName(OpenMPContextSelectorKind Kind);
-
-/// Struct to store the context selectors info.
-template <typename VectorType, typename ScoreT> struct OpenMPCtxSelectorData {
- OpenMPContextSelectorSetKind CtxSet = OMP_CTX_SET_unknown;
- OpenMPContextSelectorKind Ctx = OMP_CTX_unknown;
- ScoreT Score;
- VectorType Names;
- explicit OpenMPCtxSelectorData() = default;
- explicit OpenMPCtxSelectorData(OpenMPContextSelectorSetKind CtxSet,
- OpenMPContextSelectorKind Ctx,
- const ScoreT &Score, VectorType &&Names)
- : CtxSet(CtxSet), Ctx(Ctx), Score(Score), Names(Names) {}
- template <typename U>
- explicit OpenMPCtxSelectorData(OpenMPContextSelectorSetKind CtxSet,
- OpenMPContextSelectorKind Ctx,
- const ScoreT &Score, const U &Names)
- : CtxSet(CtxSet), Ctx(Ctx), Score(Score),
- Names(Names.begin(), Names.end()) {}
-};
-
/// OpenMP directives.
using OpenMPDirectiveKind = llvm::omp::Directive;
/// OpenMP clauses.
-enum OpenMPClauseKind {
-#define OPENMP_CLAUSE(Name, Class) \
- OMPC_##Name,
-#include "clang/Basic/OpenMPKinds.def"
- OMPC_threadprivate,
- OMPC_uniform,
- OMPC_device_type,
- OMPC_match,
- OMPC_unknown
-};
-
-/// OpenMP attributes for 'default' clause.
-enum OpenMPDefaultClauseKind {
-#define OPENMP_DEFAULT_KIND(Name) \
- OMPC_DEFAULT_##Name,
-#include "clang/Basic/OpenMPKinds.def"
- OMPC_DEFAULT_unknown
-};
+using OpenMPClauseKind = llvm::omp::Clause;
/// OpenMP attributes for 'schedule' clause.
enum OpenMPScheduleClauseKind {
@@ -98,6 +42,13 @@ enum OpenMPScheduleClauseModifier {
OMPC_SCHEDULE_MODIFIER_last
};
+/// OpenMP modifiers for 'device' clause.
+enum OpenMPDeviceClauseModifier {
+#define OPENMP_DEVICE_MODIFIER(Name) OMPC_DEVICE_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_DEVICE_unknown,
+};
+
/// OpenMP attributes for 'depend' clause.
enum OpenMPDependClauseKind {
#define OPENMP_DEPEND_KIND(Name) \
@@ -131,6 +82,10 @@ enum OpenMPMapModifierKind {
OMPC_MAP_MODIFIER_last
};
+ /// Number of allowed map-type-modifiers.
+static constexpr unsigned NumberOfOMPMapClauseModifiers =
+ OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
+
/// OpenMP modifier kind for 'to' clause.
enum OpenMPToModifierKind {
#define OPENMP_TO_MODIFIER_KIND(Name) \
@@ -194,6 +149,13 @@ enum OpenMPLastprivateModifier {
OMPC_LASTPRIVATE_unknown,
};
+/// OpenMP attributes for 'order' clause.
+enum OpenMPOrderClauseKind {
+#define OPENMP_ORDER_KIND(Name) OMPC_ORDER_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_ORDER_unknown,
+};
+
/// Scheduling data for loop-based OpenMP directives.
struct OpenMPScheduleTy final {
OpenMPScheduleClauseKind Schedule = OMPC_SCHEDULE_unknown;
@@ -201,16 +163,16 @@ struct OpenMPScheduleTy final {
OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
};
-OpenMPClauseKind getOpenMPClauseKind(llvm::StringRef Str);
-const char *getOpenMPClauseName(OpenMPClauseKind Kind);
+/// OpenMP modifiers for 'reduction' clause.
+enum OpenMPReductionClauseModifier {
+#define OPENMP_REDUCTION_MODIFIER(Name) OMPC_REDUCTION_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_REDUCTION_unknown,
+};
unsigned getOpenMPSimpleClauseType(OpenMPClauseKind Kind, llvm::StringRef Str);
const char *getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind, unsigned Type);
-bool isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
- OpenMPClauseKind CKind,
- unsigned OpenMPVersion);
-
/// Checks if the specified directive is a directive with an associated
/// loop construct.
/// \param DKind Specified directive.
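The new NumberOfOMPMapClauseModifiers constant relies only on the sentinel layout visible above: the concrete modifier values sit strictly between OMPC_MAP_MODIFIER_unknown and OMPC_MAP_MODIFIER_last. A hedged sketch of how such a count lets a caller walk the real modifiers (the callback is illustrative):

// Assumes #include "clang/Basic/OpenMPKinds.h"; visits each concrete
// map-type modifier, skipping the unknown/last sentinels.
void forEachMapModifier(void (*Fn)(clang::OpenMPMapModifierKind)) {
  using namespace clang;
  for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I)
    Fn(static_cast<OpenMPMapModifierKind>(OMPC_MAP_MODIFIER_unknown + 1 + I));
}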
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
index 799951b82a6c..107d621f0dec 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
@@ -285,7 +285,7 @@ public:
"Too many arguments to diagnostic!");
DiagStorage->DiagArgumentsKind[DiagStorage->NumDiagArgs]
= DiagnosticsEngine::ak_std_string;
- DiagStorage->DiagArgumentsStr[DiagStorage->NumDiagArgs++] = V;
+ DiagStorage->DiagArgumentsStr[DiagStorage->NumDiagArgs++] = std::string(V);
}
void Emit(const DiagnosticBuilder &DB) const {
@@ -378,10 +378,9 @@ public:
// so that we only match those arguments that are (statically) DeclContexts;
// other arguments that derive from DeclContext (e.g., RecordDecls) will not
// match.
- template<typename T>
- friend inline
- typename std::enable_if<std::is_same<T, DeclContext>::value,
- const PartialDiagnostic &>::type
+ template <typename T>
+ friend inline std::enable_if_t<std::is_same<T, DeclContext>::value,
+ const PartialDiagnostic &>
operator<<(const PartialDiagnostic &PD, T *DC) {
PD.AddTaggedVal(reinterpret_cast<intptr_t>(DC),
DiagnosticsEngine::ak_declcontext);
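Both PartialDiagnostic.h hunks are modernizations rather than behavior changes: std::enable_if_t<C, T> is the C++14 alias for typename std::enable_if<C, T>::type, and wrapping the argument in std::string(V) spells out a conversion that was previously implicit (LLVM was making StringRef-to-std::string conversions explicit around this time). A self-contained illustration of the same SFINAE-constrained friend operator, with stand-in types rather than Clang's:

#include <type_traits>

struct Diag {};
struct Ctx {};

// Participates in overload resolution only when T is exactly Ctx, so
// pointers to types merely derived from Ctx deduce T differently and
// do not match this overload.
template <typename T>
std::enable_if_t<std::is_same<T, Ctx>::value, const Diag &>
operator<<(const Diag &D, T *) {
  return D;
}

// Diag D; Ctx C;                  D << &C;   // matches
// struct Sub : Ctx {}; Sub S;     D << &S;   // no match: T deduces to Sub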
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
index 103b97db718b..82c0d5f0a551 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
@@ -25,6 +25,15 @@ enum PragmaMSStructKind {
PMSST_ON // #pragms ms_struct on
};
+enum PragmaFloatControlKind {
+ PFC_Unknown,
+ PFC_Precise, // #pragma float_control(precise, [,on])
+ PFC_NoPrecise, // #pragma float_control(precise, off)
+ PFC_Except, // #pragma float_control(except [,on])
+ PFC_NoExcept, // #pragma float_control(except, off)
+ PFC_Push, // #pragma float_control(push)
+ PFC_Pop // #pragma float_control(pop)
+};
}
#endif
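The new PragmaFloatControlKind enumerators map one-to-one onto the source-level forms of the MSVC-compatible #pragma float_control that Clang was gaining support for at the time. A sketch of how those forms look in user code (illustrative only; the enum just names the parsed variants):

#pragma float_control(push)          // PFC_Push
#pragma float_control(precise, on)   // PFC_Precise
#pragma float_control(except, on)    // PFC_Except
float fma_like(float a, float b, float c) { return a * b + c; }
#pragma float_control(pop)           // PFC_Pop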
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SanitizerBlacklist.h b/contrib/llvm-project/clang/include/clang/Basic/SanitizerBlacklist.h
index 29af28b84365..c874ff28aacc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SanitizerBlacklist.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SanitizerBlacklist.h
@@ -14,15 +14,17 @@
#define LLVM_CLANG_BASIC_SANITIZERBLACKLIST_H
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/SanitizerSpecialCaseList.h"
-#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"
#include <memory>
+#include <vector>
namespace clang {
+class SanitizerMask;
+class SourceManager;
+class SanitizerSpecialCaseList;
+
class SanitizerBlacklist {
std::unique_ptr<SanitizerSpecialCaseList> SSCL;
SourceManager &SM;
@@ -30,6 +32,7 @@ class SanitizerBlacklist {
public:
SanitizerBlacklist(const std::vector<std::string> &BlacklistPaths,
SourceManager &SM);
+ ~SanitizerBlacklist();
bool isBlacklistedGlobal(SanitizerMask Mask, StringRef GlobalName,
StringRef Category = StringRef()) const;
bool isBlacklistedType(SanitizerMask Mask, StringRef MangledTypeName,
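The SanitizerBlacklist.h hunks trade heavy includes for forward declarations, and the newly declared destructor is what makes that legal: std::unique_ptr<SanitizerSpecialCaseList> can only be destroyed where the pointee type is complete, so ~SanitizerBlacklist() must now be defined in the .cpp file instead of being generated implicitly in the header. A generic sketch of that header/source split (names are placeholders):

// widget.h -- the header only needs a forward declaration.
#include <memory>
class Impl;                      // incomplete here
class Widget {
  std::unique_ptr<Impl> P;
public:
  Widget();
  ~Widget();                     // declared here, defined where Impl is complete
};

// widget.cpp
// #include "impl.h"
// Widget::~Widget() = default;  // unique_ptr's deleter is instantiated here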
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SanitizerSpecialCaseList.h b/contrib/llvm-project/clang/include/clang/Basic/SanitizerSpecialCaseList.h
index 88d31a6cbcce..c84894dae298 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SanitizerSpecialCaseList.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SanitizerSpecialCaseList.h
@@ -10,6 +10,7 @@
// SanitizerMask.
//
//===----------------------------------------------------------------------===//
+
#ifndef LLVM_CLANG_BASIC_SANITIZERSPECIALCASELIST_H
#define LLVM_CLANG_BASIC_SANITIZERSPECIALCASELIST_H
@@ -17,8 +18,14 @@
#include "clang/Basic/Sanitizers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SpecialCaseList.h"
-#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace vfs {
+class FileSystem;
+}
+} // namespace llvm
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
index 0037cc2146f2..2912bdd44b2d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
@@ -156,6 +156,8 @@ SANITIZER_GROUP("implicit-integer-arithmetic-value-change",
ImplicitIntegerArithmeticValueChange,
ImplicitIntegerSignChange | ImplicitSignedIntegerTruncation)
+SANITIZER("objc-cast", ObjCCast)
+
// FIXME:
//SANITIZER_GROUP("implicit-integer-conversion", ImplicitIntegerConversion,
// ImplicitIntegerArithmeticValueChange |
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
index d3d18537dcc1..3735b904ef47 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
@@ -482,7 +482,7 @@ namespace llvm {
// Teach SmallPtrSet how to handle SourceLocation.
template<>
struct PointerLikeTypeTraits<clang::SourceLocation> {
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
static void *getAsVoidPointer(clang::SourceLocation L) {
return L.getPtrEncoding();
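Replacing the anonymous enum { NumLowBitsAvailable = 0 } with static constexpr int appears to be part of an LLVM-wide cleanup of PointerLikeTypeTraits: the constexpr form gives the constant a concrete integer type instead of an unnamed enumeration type, which behaves more predictably in arithmetic and template deduction. A tiny illustration (the trait names are made up):

#include <type_traits>

struct TraitsEnum      { enum { NumLowBits = 0 }; };              // unnamed enum type
struct TraitsConstexpr { static constexpr int NumLowBits = 0; };  // plain (const) int

static_assert(std::is_same<decltype(TraitsConstexpr::NumLowBits), const int>::value,
              "the constexpr member has a concrete integer type");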
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
index d87e9ac810fa..5c666c1760b4 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
@@ -35,7 +35,6 @@
#define LLVM_CLANG_BASIC_SOURCEMANAGER_H
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
@@ -60,6 +59,9 @@ namespace clang {
class ASTReader;
class ASTWriter;
+class FileManager;
+class FileEntry;
+class FileEntryRef;
class LineTableInfo;
class SourceManager;
@@ -811,6 +813,11 @@ public:
MainFileID = FID;
}
+ /// Returns true when the given FileEntry corresponds to the main file.
+ ///
+ /// The main file should be set prior to calling this function.
+ bool isMainFile(FileEntryRef SourceFile);
+
/// Set the file ID for the precompiled preamble.
void setPreambleFileID(FileID Preamble) {
assert(PreambleFileID.isInvalid() && "PreambleFileID already set!");
@@ -830,24 +837,11 @@ public:
/// This translates NULL into standard input.
FileID createFileID(const FileEntry *SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID = 0, unsigned LoadedOffset = 0) {
- assert(SourceFile && "Null source file!");
- const SrcMgr::ContentCache *IR =
- getOrCreateContentCache(SourceFile, isSystem(FileCharacter));
- assert(IR && "getOrCreateContentCache() cannot return NULL");
- return createFileID(IR, SourceFile->getName(), IncludePos, FileCharacter,
- LoadedID, LoadedOffset);
- }
+ int LoadedID = 0, unsigned LoadedOffset = 0);
FileID createFileID(FileEntryRef SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID = 0, unsigned LoadedOffset = 0) {
- const SrcMgr::ContentCache *IR = getOrCreateContentCache(
- &SourceFile.getFileEntry(), isSystem(FileCharacter));
- assert(IR && "getOrCreateContentCache() cannot return NULL");
- return createFileID(IR, SourceFile.getName(), IncludePos, FileCharacter,
- LoadedID, LoadedOffset);
- }
+ int LoadedID = 0, unsigned LoadedOffset = 0);
/// Create a new FileID that represents the specified memory buffer.
///
@@ -856,12 +850,7 @@ public:
FileID createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
SrcMgr::CharacteristicKind FileCharacter = SrcMgr::C_User,
int LoadedID = 0, unsigned LoadedOffset = 0,
- SourceLocation IncludeLoc = SourceLocation()) {
- StringRef Name = Buffer->getBufferIdentifier();
- return createFileID(
- createMemBufferContentCache(Buffer.release(), /*DoNotFree*/ false),
- Name, IncludeLoc, FileCharacter, LoadedID, LoadedOffset);
- }
+ SourceLocation IncludeLoc = SourceLocation());
enum UnownedTag { Unowned };
@@ -872,20 +861,12 @@ public:
FileID createFileID(UnownedTag, const llvm::MemoryBuffer *Buffer,
SrcMgr::CharacteristicKind FileCharacter = SrcMgr::C_User,
int LoadedID = 0, unsigned LoadedOffset = 0,
- SourceLocation IncludeLoc = SourceLocation()) {
- return createFileID(createMemBufferContentCache(Buffer, /*DoNotFree*/ true),
- Buffer->getBufferIdentifier(), IncludeLoc,
- FileCharacter, LoadedID, LoadedOffset);
- }
+ SourceLocation IncludeLoc = SourceLocation());
/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
/// new FileID for the \p SourceFile.
FileID getOrCreateFileID(const FileEntry *SourceFile,
- SrcMgr::CharacteristicKind FileCharacter) {
- FileID ID = translateFile(SourceFile);
- return ID.isValid() ? ID : createFileID(SourceFile, SourceLocation(),
- FileCharacter);
- }
+ SrcMgr::CharacteristicKind FileCharacter);
/// Return a new SourceLocation that encodes the
/// fact that a token from SpellingLoc should actually be referenced from
@@ -1025,17 +1006,7 @@ public:
}
/// Returns the FileEntryRef for the provided FileID.
- Optional<FileEntryRef> getFileEntryRefForID(FileID FID) const {
- bool Invalid = false;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
- if (Invalid || !Entry.isFile())
- return None;
-
- const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
- if (!Content || !Content->OrigEntry)
- return None;
- return FileEntryRef(Entry.getFile().getName(), *Content->OrigEntry);
- }
+ Optional<FileEntryRef> getFileEntryRefForID(FileID FID) const;
/// Returns the FileEntry record for the provided SLocEntry.
const FileEntry *getFileEntryForSLocEntry(const SrcMgr::SLocEntry &sloc) const
@@ -1098,11 +1069,7 @@ public:
}
/// Return the filename of the file containing a SourceLocation.
- StringRef getFilename(SourceLocation SpellingLoc) const {
- if (const FileEntry *F = getFileEntryForID(getFileID(SpellingLoc)))
- return F->getName();
- return StringRef();
- }
+ StringRef getFilename(SourceLocation SpellingLoc) const;
/// Return the source location corresponding to the first byte of
/// the specified file.
@@ -1678,8 +1645,7 @@ public:
unsigned local_sloc_entry_size() const { return LocalSLocEntryTable.size(); }
/// Get a local SLocEntry. This is exposed for indexing.
- const SrcMgr::SLocEntry &getLocalSLocEntry(unsigned Index,
- bool *Invalid = nullptr) const {
+ const SrcMgr::SLocEntry &getLocalSLocEntry(unsigned Index) const {
assert(Index < LocalSLocEntryTable.size() && "Invalid index");
return LocalSLocEntryTable[Index];
}
@@ -1772,12 +1738,13 @@ private:
const SrcMgr::SLocEntry &loadSLocEntry(unsigned Index, bool *Invalid) const;
/// Get the entry with the given unwrapped FileID.
+ /// Invalid will not be modified for Local IDs.
const SrcMgr::SLocEntry &getSLocEntryByID(int ID,
bool *Invalid = nullptr) const {
assert(ID != -1 && "Using FileID sentinel value");
if (ID < 0)
return getLoadedSLocEntryByID(ID, Invalid);
- return getLocalSLocEntry(static_cast<unsigned>(ID), Invalid);
+ return getLocalSLocEntry(static_cast<unsigned>(ID));
}
const SrcMgr::SLocEntry &
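Most of the SourceManager.h hunks share one theme: member function bodies that needed ContentCache or FileManager details move out of the header, so the header can drop FileManager.h and forward-declare FileManager, FileEntry, and FileEntryRef instead, shrinking the rebuild fan-out of a very widely included header. A generic sketch of that refactor (names are placeholders):

// Before: the inline body forced every includer to see Dep's definition.
//   #include "Dep.h"
//   class Mgr { public: Dep *cached(int ID) { return lookup(ID); } /* ... */ };

// After: the header only declares; Mgr.cpp includes Dep.h and defines cached().
class Dep;                        // forward declaration suffices for pointers
class Mgr {
  Dep *lookup(int ID);
public:
  Dep *cached(int ID);            // body moved to Mgr.cpp
};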
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
index 73823dc01ec7..2834dea20d00 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
@@ -67,10 +67,12 @@ namespace clang {
TST_char32, // C++11 char32_t
TST_int,
TST_int128,
+ TST_extint, // Extended Int types.
TST_half, // OpenCL half, ARM NEON __fp16
TST_Float16, // C11 extension ISO/IEC TS 18661-3
TST_Accum, // ISO/IEC JTC1 SC22 WG14 N1169 Extension
TST_Fract,
+ TST_BFloat16,
TST_float,
TST_double,
TST_float128,
@@ -153,7 +155,10 @@ namespace clang {
/// An Objective-C array/dictionary subscripting which reads an
/// object or writes at the subscripted array/dictionary element via
/// Objective-C method calls.
- OK_ObjCSubscript
+ OK_ObjCSubscript,
+
+ /// A matrix component is a single element of a matrix.
+ OK_MatrixComponent
};
/// The reason why a DeclRefExpr does not constitute an odr-use.
@@ -364,6 +369,20 @@ namespace clang {
};
llvm::StringRef getParameterABISpelling(ParameterABI kind);
+
+ inline llvm::StringRef getAccessSpelling(AccessSpecifier AS) {
+ switch (AS) {
+ case AccessSpecifier::AS_public:
+ return "public";
+ case AccessSpecifier::AS_protected:
+ return "protected";
+ case AccessSpecifier::AS_private:
+ return "private";
+ case AccessSpecifier::AS_none:
+ return {};
+ }
+ llvm_unreachable("Unknown AccessSpecifier");
+ }
} // end namespace clang
#endif // LLVM_CLANG_BASIC_SPECIFIERS_H
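The new getAccessSpelling helper is an exhaustive switch with no default case, so -Wswitch will flag any future AccessSpecifier enumerator that is not handled, and llvm_unreachable documents that control cannot fall off the end. A sketch of the intended kind of call site (illustrative):

#include "clang/Basic/Specifiers.h"
#include "llvm/Support/raw_ostream.h"

void printAccess() {
  // AS_none yields an empty spelling, so callers can skip it cleanly.
  llvm::StringRef Spelling = clang::getAccessSpelling(clang::AS_public);
  if (!Spelling.empty())
    llvm::errs() << Spelling << ":\n";   // prints "public:"
}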
diff --git a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
index 233320296819..5965e8b9902a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
@@ -69,7 +69,9 @@ def UnaryOperator : StmtNode<Expr>;
def OffsetOfExpr : StmtNode<Expr>;
def UnaryExprOrTypeTraitExpr : StmtNode<Expr>;
def ArraySubscriptExpr : StmtNode<Expr>;
+def MatrixSubscriptExpr : StmtNode<Expr>;
def OMPArraySectionExpr : StmtNode<Expr>;
+def OMPIteratorExpr : StmtNode<Expr>;
def CallExpr : StmtNode<Expr>;
def MemberExpr : StmtNode<Expr>;
def CastExpr : StmtNode<Expr, 1>;
@@ -81,6 +83,7 @@ def BinaryConditionalOperator : StmtNode<AbstractConditionalOperator>;
def ImplicitCastExpr : StmtNode<CastExpr>;
def ExplicitCastExpr : StmtNode<CastExpr, 1>;
def CStyleCastExpr : StmtNode<ExplicitCastExpr>;
+def OMPArrayShapingExpr : StmtNode<Expr>;
def CompoundLiteralExpr : StmtNode<Expr>;
def ExtVectorElementExpr : StmtNode<Expr>;
def InitListExpr : StmtNode<Expr>;
@@ -118,6 +121,7 @@ def CXXStaticCastExpr : StmtNode<CXXNamedCastExpr>;
def CXXDynamicCastExpr : StmtNode<CXXNamedCastExpr>;
def CXXReinterpretCastExpr : StmtNode<CXXNamedCastExpr>;
def CXXConstCastExpr : StmtNode<CXXNamedCastExpr>;
+def CXXAddrspaceCastExpr : StmtNode<CXXNamedCastExpr>;
def CXXFunctionalCastExpr : StmtNode<ExplicitCastExpr>;
def CXXTypeidExpr : StmtNode<Expr>;
def UserDefinedLiteral : StmtNode<CallExpr>;
@@ -195,6 +199,7 @@ def ConvertVectorExpr : StmtNode<Expr>;
def BlockExpr : StmtNode<Expr>;
def OpaqueValueExpr : StmtNode<Expr>;
def TypoExpr : StmtNode<Expr>;
+def RecoveryExpr : StmtNode<Expr>;
def BuiltinBitCastExpr : StmtNode<ExplicitCastExpr>;
// Microsoft Extensions.
@@ -232,6 +237,8 @@ def OMPBarrierDirective : StmtNode<OMPExecutableDirective>;
def OMPTaskwaitDirective : StmtNode<OMPExecutableDirective>;
def OMPTaskgroupDirective : StmtNode<OMPExecutableDirective>;
def OMPFlushDirective : StmtNode<OMPExecutableDirective>;
+def OMPDepobjDirective : StmtNode<OMPExecutableDirective>;
+def OMPScanDirective : StmtNode<OMPExecutableDirective>;
def OMPOrderedDirective : StmtNode<OMPExecutableDirective>;
def OMPAtomicDirective : StmtNode<OMPExecutableDirective>;
def OMPTargetDirective : StmtNode<OMPExecutableDirective>;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
index 0e2f0753b0c5..b472547012f0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
@@ -15,8 +15,10 @@
#ifndef LLVM_CLANG_BASIC_TARGETBUILTINS_H
#define LLVM_CLANG_BASIC_TARGETBUILTINS_H
+#include <algorithm>
#include <stdint.h>
#include "clang/Basic/Builtins.h"
+#include "llvm/Support/MathExtras.h"
#undef PPC
namespace clang {
@@ -41,11 +43,22 @@ namespace clang {
};
}
+ namespace SVE {
+ enum {
+ LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsSVE.def"
+ FirstTSBuiltin,
+ };
+ }
+
/// AArch64 builtins
namespace AArch64 {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
+ FirstSVEBuiltin = NEON::FirstTSBuiltin,
+ LastSVEBuiltin = SVE::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/BuiltinsAArch64.def"
LastTSBuiltin
@@ -106,6 +119,11 @@ namespace clang {
};
}
+ /// VE builtins
+ namespace VE {
+ enum { LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, LastTSBuiltin };
+ }
+
/// Flags to identify the types for overloaded Neon builtins.
///
/// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.
@@ -129,7 +147,8 @@ namespace clang {
Poly128,
Float16,
Float32,
- Float64
+ Float64,
+ BFloat16
};
NeonTypeFlags(unsigned F) : Flags(F) {}
@@ -143,12 +162,104 @@ namespace clang {
EltType getEltType() const { return (EltType)(Flags & EltTypeMask); }
bool isPoly() const {
EltType ET = getEltType();
- return ET == Poly8 || ET == Poly16;
+ return ET == Poly8 || ET == Poly16 || ET == Poly64;
}
bool isUnsigned() const { return (Flags & UnsignedFlag) != 0; }
bool isQuad() const { return (Flags & QuadFlag) != 0; }
};
+ /// Flags to identify the types for overloaded SVE builtins.
+ class SVETypeFlags {
+ uint64_t Flags;
+ unsigned EltTypeShift;
+ unsigned MemEltTypeShift;
+ unsigned MergeTypeShift;
+ unsigned SplatOperandMaskShift;
+
+ public:
+#define LLVM_GET_SVE_TYPEFLAGS
+#include "clang/Basic/arm_sve_typeflags.inc"
+#undef LLVM_GET_SVE_TYPEFLAGS
+
+ enum EltType {
+#define LLVM_GET_SVE_ELTTYPES
+#include "clang/Basic/arm_sve_typeflags.inc"
+#undef LLVM_GET_SVE_ELTTYPES
+ };
+
+ enum MemEltType {
+#define LLVM_GET_SVE_MEMELTTYPES
+#include "clang/Basic/arm_sve_typeflags.inc"
+#undef LLVM_GET_SVE_MEMELTTYPES
+ };
+
+ enum MergeType {
+#define LLVM_GET_SVE_MERGETYPES
+#include "clang/Basic/arm_sve_typeflags.inc"
+#undef LLVM_GET_SVE_MERGETYPES
+ };
+
+ enum ImmCheckType {
+#define LLVM_GET_SVE_IMMCHECKTYPES
+#include "clang/Basic/arm_sve_typeflags.inc"
+#undef LLVM_GET_SVE_IMMCHECKTYPES
+ };
+
+ SVETypeFlags(uint64_t F) : Flags(F) {
+ EltTypeShift = llvm::countTrailingZeros(EltTypeMask);
+ MemEltTypeShift = llvm::countTrailingZeros(MemEltTypeMask);
+ MergeTypeShift = llvm::countTrailingZeros(MergeTypeMask);
+ SplatOperandMaskShift = llvm::countTrailingZeros(SplatOperandMask);
+ }
+
+ EltType getEltType() const {
+ return (EltType)((Flags & EltTypeMask) >> EltTypeShift);
+ }
+
+ MemEltType getMemEltType() const {
+ return (MemEltType)((Flags & MemEltTypeMask) >> MemEltTypeShift);
+ }
+
+ MergeType getMergeType() const {
+ return (MergeType)((Flags & MergeTypeMask) >> MergeTypeShift);
+ }
+
+ unsigned getSplatOperand() const {
+ return ((Flags & SplatOperandMask) >> SplatOperandMaskShift) - 1;
+ }
+
+ bool hasSplatOperand() const {
+ return Flags & SplatOperandMask;
+ }
+
+ bool isLoad() const { return Flags & IsLoad; }
+ bool isStore() const { return Flags & IsStore; }
+ bool isGatherLoad() const { return Flags & IsGatherLoad; }
+ bool isScatterStore() const { return Flags & IsScatterStore; }
+ bool isStructLoad() const { return Flags & IsStructLoad; }
+ bool isStructStore() const { return Flags & IsStructStore; }
+ bool isZExtReturn() const { return Flags & IsZExtReturn; }
+ bool isByteIndexed() const { return Flags & IsByteIndexed; }
+ bool isOverloadNone() const { return Flags & IsOverloadNone; }
+ bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
+ bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
+ bool isOverloadWhileRW() const { return Flags & IsOverloadWhileRW; }
+ bool isOverloadCvt() const { return Flags & IsOverloadCvt; }
+ bool isPrefetch() const { return Flags & IsPrefetch; }
+ bool isReverseCompare() const { return Flags & ReverseCompare; }
+ bool isAppendSVALL() const { return Flags & IsAppendSVALL; }
+ bool isInsertOp1SVALL() const { return Flags & IsInsertOp1SVALL; }
+ bool isGatherPrefetch() const { return Flags & IsGatherPrefetch; }
+ bool isReverseUSDOT() const { return Flags & ReverseUSDOT; }
+ bool isUndef() const { return Flags & IsUndef; }
+ bool isTupleCreate() const { return Flags & IsTupleCreate; }
+ bool isTupleGet() const { return Flags & IsTupleGet; }
+ bool isTupleSet() const { return Flags & IsTupleSet; }
+
+ uint64_t getBits() const { return Flags; }
+ bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
+ };
+
/// Hexagon builtins
namespace Hexagon {
enum {
@@ -209,6 +320,14 @@ namespace clang {
};
}
+ static constexpr uint64_t LargestBuiltinID = std::max<uint64_t>(
+ {NEON::FirstTSBuiltin, ARM::LastTSBuiltin, SVE::FirstTSBuiltin,
+ AArch64::LastTSBuiltin, BPF::LastTSBuiltin, PPC::LastTSBuiltin,
+ NVPTX::LastTSBuiltin, AMDGPU::LastTSBuiltin, X86::LastTSBuiltin,
+ Hexagon::LastTSBuiltin, Mips::LastTSBuiltin, XCore::LastTSBuiltin,
+ Le64::LastTSBuiltin, SystemZ::LastTSBuiltin,
+ WebAssembly::LastTSBuiltin});
+
} // end namespace clang.
#endif
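A minimal, self-contained sketch of the bitfield-decoding pattern the SVETypeFlags accessors above rely on: shift the masked bits down by the mask's trailing-zero count. The mask value here is hypothetical; the real masks come from the generated arm_sve_typeflags.inc.

#include <cstdint>
#include "llvm/Support/MathExtras.h"

// Decode a contiguous bitfield given only its mask (e.g. 0x0F00 -> shift 8).
static unsigned decodeField(uint64_t Flags, uint64_t Mask) {
  unsigned Shift = llvm::countTrailingZeros(Mask);
  return static_cast<unsigned>((Flags & Mask) >> Shift);
}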
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
index 1ab45d2ce9a1..93f70fc70dd8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
@@ -109,6 +109,13 @@ public:
/// - constructors and destructors return 'this', as in ARM.
Fuchsia,
+ /// The XL ABI is the ABI used by IBM xlclang compiler and is a modified
+ /// version of the Itanium ABI.
+ ///
+ /// The relevant changes from the Itanium ABI are:
+ /// - static initialization is adjusted to use sinit and sterm functions;
+ XL,
+
/// The Microsoft ABI is the ABI used by Microsoft Visual Studio (and
/// compatible compilers).
///
@@ -148,6 +155,7 @@ public:
case WatchOS:
case GenericMIPS:
case WebAssembly:
+ case XL:
return true;
case Microsoft:
@@ -168,6 +176,7 @@ public:
case WatchOS:
case GenericMIPS:
case WebAssembly:
+ case XL:
return false;
case Microsoft:
@@ -202,6 +211,7 @@ public:
case iOS64:
case WatchOS:
case Microsoft:
+ case XL:
return true;
}
llvm_unreachable("bad ABI kind");
@@ -278,6 +288,7 @@ public:
case iOS: // old iOS compilers did not follow this rule
case Microsoft:
case GenericMIPS:
+ case XL:
return true;
}
llvm_unreachable("bad ABI kind");
@@ -315,6 +326,7 @@ public:
case GenericARM:
case iOS:
case GenericMIPS:
+ case XL:
return UseTailPaddingUnlessPOD03;
// iOS on ARM64 and WebAssembly use the C++11 POD rules. They do not honor
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
index 3a8e35524695..2ee3b1659630 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
@@ -15,8 +15,9 @@
#define LLVM_CLANG_BASIC_TARGETINFO_H
#include "clang/Basic/AddressSpaces.h"
-#include "clang/Basic/LLVM.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetOptions.h"
@@ -29,6 +30,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
@@ -59,6 +61,7 @@ struct TransferrableTargetInfo {
unsigned char BoolWidth, BoolAlign;
unsigned char IntWidth, IntAlign;
unsigned char HalfWidth, HalfAlign;
+ unsigned char BFloat16Width, BFloat16Align;
unsigned char FloatWidth, FloatAlign;
unsigned char DoubleWidth, DoubleAlign;
unsigned char LongDoubleWidth, LongDoubleAlign, Float128Align;
@@ -100,8 +103,8 @@ struct TransferrableTargetInfo {
unsigned short MaxVectorAlign;
unsigned short MaxTLSAlign;
- const llvm::fltSemantics *HalfFormat, *FloatFormat, *DoubleFormat,
- *LongDoubleFormat, *Float128Format;
+ const llvm::fltSemantics *HalfFormat, *BFloat16Format, *FloatFormat,
+ *DoubleFormat, *LongDoubleFormat, *Float128Format;
///===---- Target Data Type Query Methods -------------------------------===//
enum IntType {
@@ -159,6 +162,18 @@ protected:
unsigned ZeroLengthBitfieldBoundary;
};
+/// OpenCL type kinds.
+enum OpenCLTypeKind : uint8_t {
+ OCLTK_Default,
+ OCLTK_ClkEvent,
+ OCLTK_Event,
+ OCLTK_Image,
+ OCLTK_Pipe,
+ OCLTK_Queue,
+ OCLTK_ReserveID,
+ OCLTK_Sampler,
+};
+
/// Exposes information about the current target.
///
class TargetInfo : public virtual TransferrableTargetInfo,
@@ -176,6 +191,8 @@ protected:
// LLVM IR type.
bool HasFloat128;
bool HasFloat16;
+ bool HasBFloat16;
+ bool HasStrictFP;
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
unsigned short SimdDefaultAlign;
@@ -184,6 +201,9 @@ protected:
unsigned char RegParmMax, SSERegParmMax;
TargetCXXABI TheCXXABI;
const LangASMap *AddrSpaceMap;
+ const unsigned *GridValues =
+ nullptr; // Array of target-specific GPU grid values that must be
+ // consistent between host RTL (plugin), device RTL, and clang.
mutable StringRef PlatformName;
mutable VersionTuple PlatformMinVersion;
@@ -198,6 +218,10 @@ protected:
unsigned HasAArch64SVETypes : 1;
+ unsigned ARMCDECoprocMask : 8;
+
+ unsigned MaxOpenCLWorkGroupSize;
+
// TargetInfo Constructor. Default initializes all fields.
TargetInfo(const llvm::Triple &T);
@@ -259,7 +283,14 @@ public:
// void *__overflow_arg_area;
// void *__reg_save_area;
// } va_list[1];
- SystemZBuiltinVaList
+ SystemZBuiltinVaList,
+
+ // typedef struct __va_list_tag {
+ // void *__current_saved_reg_area_pointer;
+ // void *__saved_reg_area_end_pointer;
+ // void *__overflow_area_pointer;
+ //} va_list;
+ HexagonBuiltinVaList
};
protected:
@@ -345,8 +376,13 @@ public:
virtual IntType getLeastIntTypeByWidth(unsigned BitWidth,
bool IsSigned) const;
- /// Return floating point type with specified width.
- RealType getRealTypeByWidth(unsigned BitWidth) const;
+ /// Return floating point type with specified width. On PPC, there are
+ /// three possible types for 128-bit floating point: "PPC double-double",
+ /// IEEE 754R quad precision, and "long double" (which under the covers
+ /// is represented as one of those two). At this time, there is no support
+ /// for an explicit "PPC double-double" type (i.e. __ibm128) so we only
+ /// need to differentiate between "long double" and IEEE quad precision.
+ RealType getRealTypeByWidth(unsigned BitWidth, bool ExplicitIEEE) const;
/// Return the alignment (in bits) of the specified integer type enum.
///
@@ -524,6 +560,12 @@ public:
return (getPointerWidth(0) >= 64) || getTargetOpts().ForceEnableInt128;
} // FIXME
+ /// Determine whether the _ExtInt type is supported on this target. This
+ /// limitation is put into place for ABI reasons.
+ virtual bool hasExtIntType() const {
+ return false;
+ }
+
/// Determine whether _Float16 is supported on this target.
virtual bool hasLegalHalfType() const { return HasLegalHalfType; }
@@ -533,6 +575,12 @@ public:
/// Determine whether the _Float16 type is supported on this target.
virtual bool hasFloat16Type() const { return HasFloat16; }
+ /// Determine whether the _BFloat16 type is supported on this target.
+ virtual bool hasBFloat16Type() const { return HasBFloat16; }
+
+ /// Determine whether constrained floating point is supported on this target.
+ virtual bool hasStrictFP() const { return HasStrictFP; }
+
/// Return the alignment that is suitable for storing any
/// object with a fundamental alignment requirement.
unsigned getSuitableAlign() const { return SuitableAlign; }
@@ -581,6 +629,11 @@ public:
unsigned getFloatAlign() const { return FloatAlign; }
const llvm::fltSemantics &getFloatFormat() const { return *FloatFormat; }
+ /// getBFloat16Width/Align/Format - Return the size/align/format of '__bf16'.
+ unsigned getBFloat16Width() const { return BFloat16Width; }
+ unsigned getBFloat16Align() const { return BFloat16Align; }
+ const llvm::fltSemantics &getBFloat16Format() const { return *BFloat16Format; }
+
/// getDoubleWidth/Align/Format - Return the size/align/format of 'double'.
unsigned getDoubleWidth() const { return DoubleWidth; }
unsigned getDoubleAlign() const { return DoubleAlign; }
@@ -608,6 +661,11 @@ public:
/// Return the mangled code of __float128.
virtual const char *getFloat128Mangling() const { return "g"; }
+ /// Return the mangled code of bfloat.
+ virtual const char *getBFloat16Mangling() const {
+ llvm_unreachable("bfloat not implemented on this target");
+ }
+
/// Return the value for the C99 FLT_EVAL_METHOD macro.
virtual unsigned getFloatEvalMethod() const { return 0; }
@@ -642,6 +700,8 @@ public:
/// types for the given target.
unsigned getSimdDefaultAlign() const { return SimdDefaultAlign; }
+ unsigned getMaxOpenCLWorkGroupSize() const { return MaxOpenCLWorkGroupSize; }
+
/// Return the alignment (in bits) of the thrown exception object. This is
/// only meaningful for targets that allocate C++ exceptions in a system
/// runtime, such as those using the Itanium C++ ABI.
@@ -796,6 +856,10 @@ public:
/// available on this target.
bool hasAArch64SVETypes() const { return HasAArch64SVETypes; }
+ /// For ARM targets returns a mask defining which coprocessors are configured
+ /// as Custom Datapath.
+ uint32_t getARMCDECoprocMask() const { return ARMCDECoprocMask; }
+
/// Returns whether the passed in string is a valid clobber in an
/// inline asm statement.
///
@@ -816,6 +880,8 @@ public:
StringRef getNormalizedGCCRegisterName(StringRef Name,
bool ReturnCanonical = false) const;
+ virtual bool isSPRegName(StringRef) const { return false; }
+
/// Extracts a register from the passed constraint (if it is a
/// single-register constraint) and the asm label expression related to a
/// variable in the input or output list of an inline asm statement.
@@ -1107,10 +1173,10 @@ public:
}
struct BranchProtectionInfo {
- CodeGenOptions::SignReturnAddressScope SignReturnAddr =
- CodeGenOptions::SignReturnAddressScope::None;
- CodeGenOptions::SignReturnAddressKeyValue SignKey =
- CodeGenOptions::SignReturnAddressKeyValue::AKey;
+ LangOptions::SignReturnAddressScopeKind SignReturnAddr =
+ LangOptions::SignReturnAddressScopeKind::None;
+ LangOptions::SignReturnAddressKeyKind SignKey =
+ LangOptions::SignReturnAddressKeyKind::AKey;
bool BranchTargetEnforcement = false;
};
@@ -1186,6 +1252,10 @@ public:
"cpu_specific Multiversioning not implemented on this target");
}
+ // Get the cache line size of a given CPU. This method switches over
+ // the given CPU and returns "None" if the CPU is not found.
+ virtual Optional<unsigned> getCPUCacheLineSize() const { return None; }
+
// Returns maximal number of args passed in registers.
unsigned getRegParmMax() const {
assert(RegParmMax < 7 && "RegParmMax value is larger than AST can handle");
@@ -1260,6 +1330,12 @@ public:
return LangAS::Default;
}
+ /// Return a target-specific GPU grid value based on the GVIDX enum \p gv
+ unsigned getGridValue(llvm::omp::GVIDX gv) const {
+ assert(GridValues != nullptr && "GridValues not initialized");
+ return GridValues[gv];
+ }
+
/// Retrieve the name of the platform as it is used in the
/// availability attribute.
StringRef getPlatformName() const { return PlatformName; }
@@ -1345,17 +1421,6 @@ public:
return getTargetOpts().SupportedOpenCLOptions;
}
- enum OpenCLTypeKind {
- OCLTK_Default,
- OCLTK_ClkEvent,
- OCLTK_Event,
- OCLTK_Image,
- OCLTK_Pipe,
- OCLTK_Queue,
- OCLTK_ReserveID,
- OCLTK_Sampler,
- };
-
/// Get address space for OpenCL type.
virtual LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const;
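A brief usage sketch for the Optional-returning getCPUCacheLineSize() hook added above; the helper name and the 64-byte fallback are assumptions, not part of the interface.

#include "clang/Basic/TargetInfo.h"

// Fold the "CPU not recognized" case into a conservative default.
static unsigned cacheLineOrDefault(const clang::TargetInfo &TI) {
  return TI.getCPUCacheLineSize().getValueOr(64u);
}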
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
index fec029ae995e..2b353269ed52 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
@@ -26,14 +26,14 @@
#ifndef CXX11_KEYWORD
#define CXX11_KEYWORD(X,Y) KEYWORD(X,KEYCXX11|(Y))
#endif
-#ifndef CXX2A_KEYWORD
-#define CXX2A_KEYWORD(X,Y) KEYWORD(X,KEYCXX2A|(Y))
+#ifndef CXX20_KEYWORD
+#define CXX20_KEYWORD(X,Y) KEYWORD(X,KEYCXX20|(Y))
#endif
#ifndef CONCEPTS_KEYWORD
-#define CONCEPTS_KEYWORD(X) CXX2A_KEYWORD(X,KEYCONCEPTS)
+#define CONCEPTS_KEYWORD(X) CXX20_KEYWORD(X,KEYCONCEPTS)
#endif
#ifndef COROUTINES_KEYWORD
-#define COROUTINES_KEYWORD(X) CXX2A_KEYWORD(X,KEYCOROUTINES)
+#define COROUTINES_KEYWORD(X) CXX20_KEYWORD(X,KEYCOROUTINES)
#endif
#ifndef MODULES_KEYWORD
#define MODULES_KEYWORD(X) KEYWORD(X,KEYMODULES)
@@ -50,6 +50,18 @@
#ifndef TYPE_TRAIT_N
#define TYPE_TRAIT_N(I,E,K) TYPE_TRAIT(0,I,K)
#endif
+#ifndef ARRAY_TYPE_TRAIT
+#define ARRAY_TYPE_TRAIT(I,E,K) KEYWORD(I,K)
+#endif
+#ifndef UNARY_EXPR_OR_TYPE_TRAIT
+#define UNARY_EXPR_OR_TYPE_TRAIT(I,E,K) KEYWORD(I,K)
+#endif
+#ifndef CXX11_UNARY_EXPR_OR_TYPE_TRAIT
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(I,E,K) CXX11_KEYWORD(I,K)
+#endif
+#ifndef EXPRESSION_TRAIT
+#define EXPRESSION_TRAIT(I,E,K) KEYWORD(I,K)
+#endif
#ifndef ALIAS
#define ALIAS(X,Y,Z)
#endif
@@ -244,7 +256,7 @@ PUNCTUATOR(caretcaret, "^^")
// implementation namespace
// KEYNOCXX - This is a keyword in every non-C++ dialect.
// KEYCXX11 - This is a C++ keyword introduced to C++ in C++11
-// KEYCXX2A - This is a C++ keyword introduced to C++ in C++2a
+// KEYCXX20 - This is a C++ keyword introduced to C++ in C++20
// KEYCONCEPTS - This is a keyword if the C++ extensions for concepts
// are enabled.
// KEYMODULES - This is a keyword if the C++ extensions for modules
@@ -285,13 +297,14 @@ KEYWORD(goto , KEYALL)
KEYWORD(if , KEYALL)
KEYWORD(inline , KEYC99|KEYCXX|KEYGNU)
KEYWORD(int , KEYALL)
+KEYWORD(_ExtInt , KEYALL)
KEYWORD(long , KEYALL)
KEYWORD(register , KEYALL)
KEYWORD(restrict , KEYC99)
KEYWORD(return , KEYALL)
KEYWORD(short , KEYALL)
KEYWORD(signed , KEYALL)
-KEYWORD(sizeof , KEYALL)
+UNARY_EXPR_OR_TYPE_TRAIT(sizeof, SizeOf, KEYALL)
KEYWORD(static , KEYALL)
KEYWORD(struct , KEYALL)
KEYWORD(switch , KEYALL)
@@ -363,7 +376,8 @@ CXX_KEYWORD_OPERATOR(xor_eq , caretequal)
// C++11 keywords
CXX11_KEYWORD(alignas , 0)
-CXX11_KEYWORD(alignof , 0)
+// alignof and _Alignof return the required ABI alignment.
+CXX11_UNARY_EXPR_OR_TYPE_TRAIT(alignof, AlignOf, 0)
CXX11_KEYWORD(char16_t , KEYNOMS18)
CXX11_KEYWORD(char32_t , KEYNOMS18)
CXX11_KEYWORD(constexpr , 0)
@@ -373,11 +387,11 @@ CXX11_KEYWORD(nullptr , 0)
CXX11_KEYWORD(static_assert , KEYMSCOMPAT)
CXX11_KEYWORD(thread_local , 0)
-// C++2a keywords
+// C++20 keywords
CONCEPTS_KEYWORD(concept)
CONCEPTS_KEYWORD(requires)
-// C++2a / coroutines TS keywords
+// C++20 / coroutines TS keywords
COROUTINES_KEYWORD(co_await)
COROUTINES_KEYWORD(co_return)
COROUTINES_KEYWORD(co_yield)
@@ -387,9 +401,10 @@ MODULES_KEYWORD(module)
MODULES_KEYWORD(import)
// C++20 keywords.
-CXX2A_KEYWORD(char8_t , CHAR8SUPPORT)
-CXX2A_KEYWORD(consteval , 0)
-CXX2A_KEYWORD(constinit , 0)
+CXX20_KEYWORD(consteval , 0)
+CXX20_KEYWORD(constinit , 0)
+// Not a CXX20_KEYWORD because it is disabled by -fno-char8_t.
+KEYWORD(char8_t , CHAR8SUPPORT)
// C11 Extension
KEYWORD(_Float16 , KEYALL)
@@ -404,7 +419,9 @@ KEYWORD(_Decimal32 , KEYALL)
KEYWORD(_Decimal64 , KEYALL)
KEYWORD(_Decimal128 , KEYALL)
KEYWORD(__null , KEYCXX)
-KEYWORD(__alignof , KEYALL)
+// __alignof returns the preferred alignment of a type, the alignment
+// clang will attempt to give an object of the type if allowed by ABI.
+UNARY_EXPR_OR_TYPE_TRAIT(__alignof, PreferredAlignOf, KEYALL)
KEYWORD(__attribute , KEYALL)
KEYWORD(__builtin_choose_expr , KEYALL)
KEYWORD(__builtin_offsetof , KEYALL)
@@ -492,8 +509,8 @@ KEYWORD(__underlying_type , KEYCXX)
TYPE_TRAIT_2(__reference_binds_to_temporary, ReferenceBindsToTemporary, KEYCXX)
// Embarcadero Expression Traits
-KEYWORD(__is_lvalue_expr , KEYCXX)
-KEYWORD(__is_rvalue_expr , KEYCXX)
+EXPRESSION_TRAIT(__is_lvalue_expr, IsLValueExpr, KEYCXX)
+EXPRESSION_TRAIT(__is_rvalue_expr, IsRValueExpr, KEYCXX)
// Embarcadero Unary Type Traits
TYPE_TRAIT_1(__is_arithmetic, IsArithmetic, KEYCXX)
@@ -522,8 +539,8 @@ TYPE_TRAIT_1(__is_unsigned, IsUnsigned, KEYCXX)
// Embarcadero Binary Type Traits
TYPE_TRAIT_2(__is_same, IsSame, KEYCXX)
TYPE_TRAIT_2(__is_convertible, IsConvertible, KEYCXX)
-KEYWORD(__array_rank , KEYCXX)
-KEYWORD(__array_extent , KEYCXX)
+ARRAY_TYPE_TRAIT(__array_rank, ArrayRank, KEYCXX)
+ARRAY_TYPE_TRAIT(__array_extent, ArrayExtent, KEYCXX)
// Name for GCC 6 compatibility.
ALIAS("__is_same_as", __is_same, KEYCXX)
@@ -569,14 +586,15 @@ ALIAS("write_only", __write_only , KEYOPENCLC | KEYOPENCLCXX)
ALIAS("read_write", __read_write , KEYOPENCLC | KEYOPENCLCXX)
// OpenCL builtins
KEYWORD(__builtin_astype , KEYOPENCLC | KEYOPENCLCXX)
-KEYWORD(vec_step , KEYOPENCLC | KEYALTIVEC | KEYZVECTOR)
+UNARY_EXPR_OR_TYPE_TRAIT(vec_step, VecStep, KEYOPENCLC | KEYALTIVEC | KEYZVECTOR)
#define GENERIC_IMAGE_TYPE(ImgType, Id) KEYWORD(ImgType##_t, KEYOPENCLC | KEYOPENCLCXX)
#include "clang/Basic/OpenCLImageTypes.def"
+KEYWORD(pipe , KEYOPENCLC | KEYOPENCLCXX)
+// C++ for OpenCL s2.3.1: addrspace_cast operator
+KEYWORD(addrspace_cast , KEYOPENCLCXX)
// OpenMP Type Traits
-KEYWORD(__builtin_omp_required_simd_align, KEYALL)
-
-KEYWORD(pipe , KEYOPENCLC | KEYOPENCLCXX)
+UNARY_EXPR_OR_TYPE_TRAIT(__builtin_omp_required_simd_align, OpenMPRequiredSimdAlign, KEYALL)
// Borland Extensions.
KEYWORD(__pascal , KEYALL)
@@ -588,6 +606,7 @@ KEYWORD(__bool , KEYALTIVEC|KEYZVECTOR)
// ARM NEON extensions.
ALIAS("__fp16", half , KEYALL)
+KEYWORD(__bf16 , KEYALL)
// OpenCL Extension.
KEYWORD(half , HALFSUPPORT)
@@ -674,11 +693,12 @@ ALIAS("_declspec" , __declspec , KEYMS)
ALIAS("_pascal" , __pascal , KEYBORLAND)
// Clang Extensions.
-KEYWORD(__builtin_convertvector , KEYALL)
-ALIAS("__char16_t" , char16_t , KEYCXX)
-ALIAS("__char32_t" , char32_t , KEYCXX)
-KEYWORD(__builtin_bit_cast , KEYALL)
-KEYWORD(__builtin_available , KEYALL)
+KEYWORD(__builtin_convertvector , KEYALL)
+ALIAS("__char16_t" , char16_t , KEYCXX)
+ALIAS("__char32_t" , char32_t , KEYCXX)
+KEYWORD(__builtin_bit_cast , KEYALL)
+KEYWORD(__builtin_available , KEYALL)
+KEYWORD(__builtin_unique_stable_name, KEYALL)
// Clang-specific keywords enabled only in testing.
TESTING_KEYWORD(__unknown_anytype , KEYALL)
@@ -738,6 +758,9 @@ ANNOTATION(non_type_undeclared) // annotation for an undeclared identifier that
ANNOTATION(non_type_dependent) // annotation for an assumed non-type member of
// a dependent base class
ANNOTATION(primary_expr) // annotation for a primary expression
+ANNOTATION(
+ uneval_primary_expr) // annotation for a primary expression which should be
+ // transformed to potentially evaluated
ANNOTATION(decltype) // annotation for a decltype expression,
// e.g., "decltype(foo.bar())"
@@ -806,6 +829,11 @@ PRAGMA_ANNOTATION(pragma_fp_contract)
// handles them.
PRAGMA_ANNOTATION(pragma_fenv_access)
+// Annotation for #pragma float_control
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+PRAGMA_ANNOTATION(pragma_float_control)
+
// Annotation for #pragma pointers_to_members...
// The lexer produces these so that they only take effect when the parser
// handles them.
@@ -858,12 +886,16 @@ ANNOTATION(header_unit)
#undef CXX_KEYWORD_OPERATOR
#undef PPKEYWORD
#undef ALIAS
+#undef EXPRESSION_TRAIT
+#undef CXX11_UNARY_EXPR_OR_TYPE_TRAIT
+#undef UNARY_EXPR_OR_TYPE_TRAIT
+#undef ARRAY_TYPE_TRAIT
#undef TYPE_TRAIT_N
#undef TYPE_TRAIT_2
#undef TYPE_TRAIT_1
#undef TYPE_TRAIT
#undef CONCEPTS_KEYWORD
-#undef CXX2A_KEYWORD
+#undef CXX20_KEYWORD
#undef CXX11_KEYWORD
#undef KEYWORD
#undef PUNCTUATOR
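The new ARRAY_TYPE_TRAIT / UNARY_EXPR_OR_TYPE_TRAIT / EXPRESSION_TRAIT hooks follow the same X-macro convention as the existing KEYWORD and TYPE_TRAIT macros, so a client can enumerate them by defining the macro before including the .def file. A sketch under that assumption (the helper function name is made up):

#include "llvm/ADT/StringRef.h"
#include <vector>

// Collect the spellings of every unary "expression or type" trait keyword.
static std::vector<llvm::StringRef> unaryExprOrTypeTraitSpellings() {
  return {
#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
#include "clang/Basic/TokenKinds.def"
  };
}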
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
index c25181e6827c..4e66aa1c8c2d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_BASIC_TOKENKINDS_H
#define LLVM_CLANG_BASIC_TOKENKINDS_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Compiler.h"
namespace clang {
@@ -95,7 +96,25 @@ bool isAnnotation(TokenKind K);
/// Return true if this is an annotation token representing a pragma.
bool isPragmaAnnotation(TokenKind K);
-} // end namespace tok
-} // end namespace clang
+} // end namespace tok
+} // end namespace clang
+
+namespace llvm {
+template <> struct DenseMapInfo<clang::tok::PPKeywordKind> {
+ static inline clang::tok::PPKeywordKind getEmptyKey() {
+ return clang::tok::PPKeywordKind::pp_not_keyword;
+ }
+ static inline clang::tok::PPKeywordKind getTombstoneKey() {
+ return clang::tok::PPKeywordKind::NUM_PP_KEYWORDS;
+ }
+ static unsigned getHashValue(const clang::tok::PPKeywordKind &Val) {
+ return static_cast<unsigned>(Val);
+ }
+ static bool isEqual(const clang::tok::PPKeywordKind &LHS,
+ const clang::tok::PPKeywordKind &RHS) {
+ return LHS == RHS;
+ }
+};
+} // namespace llvm
#endif
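The DenseMapInfo specialization added above is what lets tok::PPKeywordKind serve directly as a DenseMap key. A minimal sketch; the map contents are illustrative only.

#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/DenseMap.h"

// Valid because DenseMapInfo<PPKeywordKind> now supplies empty/tombstone keys.
static bool isTrackedPPKeyword(clang::tok::PPKeywordKind K) {
  static const llvm::DenseMap<clang::tok::PPKeywordKind, bool> Tracked = {
      {clang::tok::pp_include, true}, {clang::tok::pp_define, true}};
  return Tracked.count(K) != 0;
}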
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
index 96d9472a488a..a4e3002b9075 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
@@ -69,6 +69,9 @@ def DependentAddressSpaceType : TypeNode<Type>, AlwaysDependent;
def VectorType : TypeNode<Type>;
def DependentVectorType : TypeNode<Type>, AlwaysDependent;
def ExtVectorType : TypeNode<VectorType>;
+def MatrixType : TypeNode<Type, 1>;
+def ConstantMatrixType : TypeNode<MatrixType>;
+def DependentSizedMatrixType : TypeNode<MatrixType>, AlwaysDependent;
def FunctionType : TypeNode<Type, 1>;
def FunctionProtoType : TypeNode<FunctionType>;
def FunctionNoProtoType : TypeNode<FunctionType>;
@@ -104,3 +107,5 @@ def ObjCInterfaceType : TypeNode<ObjCObjectType>, LeafType;
def ObjCObjectPointerType : TypeNode<Type>;
def PipeType : TypeNode<Type>;
def AtomicType : TypeNode<Type>;
+def ExtIntType : TypeNode<Type>;
+def DependentExtIntType : TypeNode<Type>, AlwaysDependent;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
index 7c1b571f640c..a0f06bec6697 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
@@ -14,97 +14,59 @@
#ifndef LLVM_CLANG_BASIC_TYPETRAITS_H
#define LLVM_CLANG_BASIC_TYPETRAITS_H
+#include "llvm/Support/Compiler.h"
+
namespace clang {
+/// Names for traits that operate specifically on types.
+enum TypeTrait {
+#define TYPE_TRAIT_1(Spelling, Name, Key) UTT_##Name,
+#include "clang/Basic/TokenKinds.def"
+ UTT_Last = -1 // UTT_Last == last UTT_XX in the enum.
+#define TYPE_TRAIT_1(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+ ,
+#define TYPE_TRAIT_2(Spelling, Name, Key) BTT_##Name,
+#include "clang/Basic/TokenKinds.def"
+ BTT_Last = UTT_Last // BTT_Last == last BTT_XX in the enum.
+#define TYPE_TRAIT_2(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+ ,
+#define TYPE_TRAIT_N(Spelling, Name, Key) TT_##Name,
+#include "clang/Basic/TokenKinds.def"
+ TT_Last = BTT_Last // TT_Last == last TT_XX in the enum.
+#define TYPE_TRAIT_N(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+};
+
+/// Names for the array type traits.
+enum ArrayTypeTrait {
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) ATT_##Name,
+#include "clang/Basic/TokenKinds.def"
+ ATT_Last = -1 // ATT_Last == last ATT_XX in the enum.
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+};
- /// Names for traits that operate specifically on types.
- enum TypeTrait {
- UTT_HasNothrowAssign,
- UTT_HasNothrowMoveAssign,
- UTT_HasNothrowCopy,
- UTT_HasNothrowConstructor,
- UTT_HasTrivialAssign,
- UTT_HasTrivialMoveAssign,
- UTT_HasTrivialCopy,
- UTT_HasTrivialDefaultConstructor,
- UTT_HasTrivialMoveConstructor,
- UTT_HasTrivialDestructor,
- UTT_HasVirtualDestructor,
- UTT_IsAbstract,
- UTT_IsAggregate,
- UTT_IsArithmetic,
- UTT_IsArray,
- UTT_IsClass,
- UTT_IsCompleteType,
- UTT_IsCompound,
- UTT_IsConst,
- UTT_IsDestructible,
- UTT_IsEmpty,
- UTT_IsEnum,
- UTT_IsFinal,
- UTT_IsFloatingPoint,
- UTT_IsFunction,
- UTT_IsFundamental,
- UTT_IsIntegral,
- UTT_IsInterfaceClass,
- UTT_IsLiteral,
- UTT_IsLvalueReference,
- UTT_IsMemberFunctionPointer,
- UTT_IsMemberObjectPointer,
- UTT_IsMemberPointer,
- UTT_IsNothrowDestructible,
- UTT_IsObject,
- UTT_IsPOD,
- UTT_IsPointer,
- UTT_IsPolymorphic,
- UTT_IsReference,
- UTT_IsRvalueReference,
- UTT_IsScalar,
- UTT_IsSealed,
- UTT_IsSigned,
- UTT_IsStandardLayout,
- UTT_IsTrivial,
- UTT_IsTriviallyCopyable,
- UTT_IsTriviallyDestructible,
- UTT_IsUnion,
- UTT_IsUnsigned,
- UTT_IsVoid,
- UTT_IsVolatile,
- UTT_HasUniqueObjectRepresentations,
- UTT_Last = UTT_HasUniqueObjectRepresentations,
- BTT_IsBaseOf,
- BTT_IsConvertible,
- BTT_IsConvertibleTo,
- BTT_IsSame,
- BTT_TypeCompatible,
- BTT_IsAssignable,
- BTT_IsNothrowAssignable,
- BTT_IsTriviallyAssignable,
- BTT_ReferenceBindsToTemporary,
- BTT_Last = BTT_ReferenceBindsToTemporary,
- TT_IsConstructible,
- TT_IsNothrowConstructible,
- TT_IsTriviallyConstructible
- };
+/// Names for the "expression or type" traits.
+enum UnaryExprOrTypeTrait {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) UETT_##Name,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) UETT_##Name,
+#include "clang/Basic/TokenKinds.def"
+ UETT_Last = -1 // UETT_Last == last UETT_XX in the enum.
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) +1
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) +1
+#include "clang/Basic/TokenKinds.def"
+};
- /// Names for the array type traits.
- enum ArrayTypeTrait {
- ATT_ArrayRank,
- ATT_ArrayExtent
- };
+/// Return the internal name of type trait \p T. Never null.
+const char *getTraitName(TypeTrait T) LLVM_READONLY;
+const char *getTraitName(ArrayTypeTrait T) LLVM_READONLY;
+const char *getTraitName(UnaryExprOrTypeTrait T) LLVM_READONLY;
- /// Names for the "expression or type" traits.
- enum UnaryExprOrTypeTrait {
- UETT_SizeOf,
- /// Used for C's _Alignof and C++'s alignof.
- /// _Alignof and alignof return the required ABI alignment.
- UETT_AlignOf,
- UETT_VecStep,
- UETT_OpenMPRequiredSimdAlign,
- /// Used for GCC's __alignof.
- /// __alignof returns the preferred alignment of a type, the alignment
- /// clang will attempt to give an object of the type if allowed by ABI.
- UETT_PreferredAlignOf,
- };
-}
+/// Return the spelling of the type trait \p TT. Never null.
+const char *getTraitSpelling(TypeTrait T) LLVM_READONLY;
+const char *getTraitSpelling(ArrayTypeTrait T) LLVM_READONLY;
+const char *getTraitSpelling(UnaryExprOrTypeTrait T) LLVM_READONLY;
+} // namespace clang
#endif
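The regenerated enums above use a counting idiom: each *_Last member starts at -1, and a second pass over the .def file appends +1 per entry, so it ends up equal to the final enumerator without naming it. A self-contained sketch of the same trick with a hypothetical trait list:

#define EXAMPLE_TRAITS(X) X(Foo) X(Bar) X(Baz)

enum ExampleTrait {
#define ET_ENUM(Name) ET_##Name,
  EXAMPLE_TRAITS(ET_ENUM)
#undef ET_ENUM
  ET_Last = -1 // becomes 2 (== ET_Baz) once the "+1" terms below are appended
#define ET_COUNT(Name) +1
  EXAMPLE_TRAITS(ET_COUNT)
#undef ET_COUNT
};

static_assert(ET_Last == ET_Baz, "last-enumerator counting trick");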
diff --git a/contrib/llvm-project/clang/include/clang/Basic/X86Target.def b/contrib/llvm-project/clang/include/clang/Basic/X86Target.def
index ba4e5981e7dc..70f3879f33a1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/X86Target.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/X86Target.def
@@ -11,19 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#ifndef PROC_WITH_FEAT
-#define PROC_WITH_FEAT(ENUM, STRING, IS64BIT, KEYFEATURE) \
- PROC(ENUM, STRING, IS64BIT)
-#endif
-
-#ifndef PROC
-#define PROC(ENUM, STRING, IS64BIT)
-#endif
-
-#ifndef PROC_ALIAS
-#define PROC_ALIAS(ENUM, ALIAS)
-#endif
-
#ifndef FEATURE
#define FEATURE(ENUM)
#endif
@@ -36,230 +23,6 @@
#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME)
#endif
-#define PROC_64_BIT true
-#define PROC_32_BIT false
-
-/// \name i386
-/// i386-generation processors.
-//@{
-PROC(i386, "i386", PROC_32_BIT)
-//@}
-
-/// \name i486
-/// i486-generation processors.
-//@{
-PROC(i486, "i486", PROC_32_BIT)
-PROC(WinChipC6, "winchip-c6", PROC_32_BIT)
-PROC(WinChip2, "winchip2", PROC_32_BIT)
-PROC(C3, "c3", PROC_32_BIT)
-//@}
-
-/// \name i586
-/// i586-generation processors, P5 microarchitecture based.
-//@{
-PROC(i586, "i586", PROC_32_BIT)
-PROC(Pentium, "pentium", PROC_32_BIT)
-PROC(PentiumMMX, "pentium-mmx", PROC_32_BIT)
-//@}
-
-/// \name i686
-/// i686-generation processors, P6 / Pentium M microarchitecture based.
-//@{
-PROC(PentiumPro, "pentiumpro", PROC_32_BIT)
-PROC(i686, "i686", PROC_32_BIT)
-PROC(Pentium2, "pentium2", PROC_32_BIT)
-PROC(Pentium3, "pentium3", PROC_32_BIT)
-PROC_ALIAS(Pentium3, "pentium3m")
-PROC(PentiumM, "pentium-m", PROC_32_BIT)
-PROC(C3_2, "c3-2", PROC_32_BIT)
-
-/// This enumerator is a bit odd, as GCC no longer accepts -march=yonah.
-/// Clang however has some logic to support this.
-// FIXME: Warn, deprecate, and potentially remove this.
-PROC(Yonah, "yonah", PROC_32_BIT)
-//@}
-
-/// \name Netburst
-/// Netburst microarchitecture based processors.
-//@{
-PROC(Pentium4, "pentium4", PROC_32_BIT)
-PROC_ALIAS(Pentium4, "pentium4m")
-
-PROC(Prescott, "prescott", PROC_32_BIT)
-PROC(Nocona, "nocona", PROC_64_BIT)
-//@}
-
-/// \name Core
-/// Core microarchitecture based processors.
-//@{
-PROC_WITH_FEAT(Core2, "core2", PROC_64_BIT, FEATURE_SSSE3)
-
-/// This enumerator, like Yonah, is a bit odd. It is another
-/// codename which GCC no longer accepts as an option to -march, but Clang
-/// has some logic for recognizing it.
-// FIXME: Warn, deprecate, and potentially remove this.
-PROC(Penryn, "penryn", PROC_64_BIT)
-//@}
-
-/// \name Atom
-/// Atom processors
-//@{
-PROC_WITH_FEAT(Bonnell, "bonnell", PROC_64_BIT, FEATURE_SSSE3)
-PROC_ALIAS(Bonnell, "atom")
-
-PROC_WITH_FEAT(Silvermont, "silvermont", PROC_64_BIT, FEATURE_SSE4_2)
-PROC_ALIAS(Silvermont, "slm")
-
-PROC(Goldmont, "goldmont", PROC_64_BIT)
-PROC(GoldmontPlus, "goldmont-plus", PROC_64_BIT)
-
-PROC(Tremont, "tremont", PROC_64_BIT)
-//@}
-
-/// \name Nehalem
-/// Nehalem microarchitecture based processors.
-PROC_WITH_FEAT(Nehalem, "nehalem", PROC_64_BIT, FEATURE_SSE4_2)
-PROC_ALIAS(Nehalem, "corei7")
-
-/// \name Westmere
-/// Westmere microarchitecture based processors.
-PROC_WITH_FEAT(Westmere, "westmere", PROC_64_BIT, FEATURE_PCLMUL)
-
-/// \name Sandy Bridge
-/// Sandy Bridge microarchitecture based processors.
-PROC_WITH_FEAT(SandyBridge, "sandybridge", PROC_64_BIT, FEATURE_AVX)
-PROC_ALIAS(SandyBridge, "corei7-avx")
-
-/// \name Ivy Bridge
-/// Ivy Bridge microarchitecture based processors.
-PROC_WITH_FEAT(IvyBridge, "ivybridge", PROC_64_BIT, FEATURE_AVX)
-PROC_ALIAS(IvyBridge, "core-avx-i")
-
-/// \name Haswell
-/// Haswell microarchitecture based processors.
-PROC_WITH_FEAT(Haswell, "haswell", PROC_64_BIT, FEATURE_AVX2)
-PROC_ALIAS(Haswell, "core-avx2")
-
-/// \name Broadwell
-/// Broadwell microarchitecture based processors.
-PROC_WITH_FEAT(Broadwell, "broadwell", PROC_64_BIT, FEATURE_AVX2)
-
-/// \name Skylake Client
-/// Skylake client microarchitecture based processors.
-PROC_WITH_FEAT(SkylakeClient, "skylake", PROC_64_BIT, FEATURE_AVX2)
-
-/// \name Skylake Server
-/// Skylake server microarchitecture based processors.
-PROC_WITH_FEAT(SkylakeServer, "skylake-avx512", PROC_64_BIT, FEATURE_AVX512F)
-PROC_ALIAS(SkylakeServer, "skx")
-
-/// \name Cascadelake Server
-/// Cascadelake Server microarchitecture based processors.
-PROC_WITH_FEAT(Cascadelake, "cascadelake", PROC_64_BIT, FEATURE_AVX512VNNI)
-
-/// \name Cooperlake Server
-/// Cooperlake Server microarchitecture based processors.
-PROC_WITH_FEAT(Cooperlake, "cooperlake", PROC_64_BIT, FEATURE_AVX512BF16)
-
-/// \name Cannonlake Client
-/// Cannonlake client microarchitecture based processors.
-PROC_WITH_FEAT(Cannonlake, "cannonlake", PROC_64_BIT, FEATURE_AVX512VBMI)
-
-/// \name Icelake Client
-/// Icelake client microarchitecture based processors.
-PROC(IcelakeClient, "icelake-client", PROC_64_BIT)
-
-/// \name Icelake Server
-/// Icelake server microarchitecture based processors.
-PROC(IcelakeServer, "icelake-server", PROC_64_BIT)
-
-/// \name Tigerlake
-/// Tigerlake microarchitecture based processors.
-PROC(Tigerlake, "tigerlake", PROC_64_BIT)
-
-/// \name Knights Landing
-/// Knights Landing processor.
-PROC_WITH_FEAT(KNL, "knl", PROC_64_BIT, FEATURE_AVX512F)
-
-/// \name Knights Mill
-/// Knights Mill processor.
-PROC_WITH_FEAT(KNM, "knm", PROC_64_BIT, FEATURE_AVX5124FMAPS)
-
-/// \name Lakemont
-/// Lakemont microarchitecture based processors.
-PROC(Lakemont, "lakemont", PROC_32_BIT)
-
-/// \name K6
-/// K6 architecture processors.
-//@{
-PROC(K6, "k6", PROC_32_BIT)
-PROC(K6_2, "k6-2", PROC_32_BIT)
-PROC(K6_3, "k6-3", PROC_32_BIT)
-//@}
-
-/// \name K7
-/// K7 architecture processors.
-//@{
-PROC(Athlon, "athlon", PROC_32_BIT)
-PROC_ALIAS(Athlon, "athlon-tbird")
-
-PROC(AthlonXP, "athlon-xp", PROC_32_BIT)
-PROC_ALIAS(AthlonXP, "athlon-mp")
-PROC_ALIAS(AthlonXP, "athlon-4")
-//@}
-
-/// \name K8
-/// K8 architecture processors.
-//@{
-PROC(K8, "k8", PROC_64_BIT)
-PROC_ALIAS(K8, "athlon64")
-PROC_ALIAS(K8, "athlon-fx")
-PROC_ALIAS(K8, "opteron")
-
-PROC(K8SSE3, "k8-sse3", PROC_64_BIT)
-PROC_ALIAS(K8SSE3, "athlon64-sse3")
-PROC_ALIAS(K8SSE3, "opteron-sse3")
-
-PROC_WITH_FEAT(AMDFAM10, "amdfam10", PROC_64_BIT, FEATURE_SSE4_A)
-PROC_ALIAS(AMDFAM10, "barcelona")
-//@}
-
-/// \name Bobcat
-/// Bobcat architecture processors.
-//@{
-PROC_WITH_FEAT(BTVER1, "btver1", PROC_64_BIT, FEATURE_SSE4_A)
-PROC_WITH_FEAT(BTVER2, "btver2", PROC_64_BIT, FEATURE_BMI)
-//@}
-
-/// \name Bulldozer
-/// Bulldozer architecture processors.
-//@{
-PROC_WITH_FEAT(BDVER1, "bdver1", PROC_64_BIT, FEATURE_XOP)
-PROC_WITH_FEAT(BDVER2, "bdver2", PROC_64_BIT, FEATURE_FMA)
-PROC_WITH_FEAT(BDVER3, "bdver3", PROC_64_BIT, FEATURE_FMA)
-PROC_WITH_FEAT(BDVER4, "bdver4", PROC_64_BIT, FEATURE_AVX2)
-//@}
-
-/// \name zen
-/// Zen architecture processors.
-//@{
-PROC_WITH_FEAT(ZNVER1, "znver1", PROC_64_BIT, FEATURE_AVX2)
-PROC_WITH_FEAT(ZNVER2, "znver2", PROC_64_BIT, FEATURE_AVX2)
-//@}
-
-/// This specification is deprecated and will be removed in the future.
-/// Users should prefer K8.
-// FIXME: Warn on this when the CPU is set to it.
-//@{
-PROC(x86_64, "x86-64", PROC_64_BIT)
-//@}
-
-/// \name Geode
-/// Geode processors.
-//@{
-PROC(Geode, "geode", PROC_32_BIT)
-//@}
-
// List of CPU Supports features in order. These need to remain in the order
// required by attribute 'target' checking. Note that not all are supported/
// prioritized by GCC, so synchronization with GCC's implementation may require
@@ -345,6 +108,3 @@ CPU_SPECIFIC("knm", 'j', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+mo
#undef PROC_64_BIT
#undef PROC_32_BIT
#undef FEATURE
-#undef PROC
-#undef PROC_ALIAS
-#undef PROC_WITH_FEAT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/XRayInstr.h b/contrib/llvm-project/clang/include/clang/Basic/XRayInstr.h
index 48e88848f580..42ca7773fcce 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/XRayInstr.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/XRayInstr.h
@@ -28,17 +28,19 @@ namespace XRayInstrKind {
// TODO: Auto-generate these as we add more instrumentation kinds.
enum XRayInstrOrdinal : XRayInstrMask {
- XRIO_Function,
+ XRIO_FunctionEntry,
+ XRIO_FunctionExit,
XRIO_Custom,
XRIO_Typed,
XRIO_Count
};
constexpr XRayInstrMask None = 0;
-constexpr XRayInstrMask Function = 1U << XRIO_Function;
+constexpr XRayInstrMask FunctionEntry = 1U << XRIO_FunctionEntry;
+constexpr XRayInstrMask FunctionExit = 1U << XRIO_FunctionExit;
constexpr XRayInstrMask Custom = 1U << XRIO_Custom;
constexpr XRayInstrMask Typed = 1U << XRIO_Typed;
-constexpr XRayInstrMask All = Function | Custom | Typed;
+constexpr XRayInstrMask All = FunctionEntry | FunctionExit | Custom | Typed;
} // namespace XRayInstrKind
@@ -51,7 +53,6 @@ struct XRayInstrSet {
bool hasOneOf(XRayInstrMask K) const { return Mask & K; }
void set(XRayInstrMask K, bool Value) {
- assert(llvm::isPowerOf2_32(K));
Mask = Value ? (Mask | K) : (Mask & ~K);
}
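With Function split into FunctionEntry and FunctionExit, a single request to instrument functions now sets two bits at once, which is why the isPowerOf2_32 assertion is dropped from set() above. A short usage sketch (the helper name is hypothetical):

#include "clang/Basic/XRayInstr.h"

// Enable both entry and exit sleds in one call; the mask is no longer one bit.
static void enableFunctionInstrumentation(clang::XRayInstrSet &S) {
  S.set(clang::XRayInstrKind::FunctionEntry |
            clang::XRayInstrKind::FunctionExit,
        /*Value=*/true);
}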
diff --git a/contrib/llvm-project/clang/include/clang/Basic/XRayLists.h b/contrib/llvm-project/clang/include/clang/Basic/XRayLists.h
index cf464f9e5478..7ea9d9789aab 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/XRayLists.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/XRayLists.h
@@ -14,14 +14,18 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/SpecialCaseList.h"
#include <memory>
+namespace llvm {
+class SpecialCaseList;
+}
+
namespace clang {
+class SourceManager;
+
class XRayFunctionFilter {
std::unique_ptr<llvm::SpecialCaseList> AlwaysInstrument;
std::unique_ptr<llvm::SpecialCaseList> NeverInstrument;
@@ -32,6 +36,7 @@ public:
XRayFunctionFilter(ArrayRef<std::string> AlwaysInstrumentPaths,
ArrayRef<std::string> NeverInstrumentPaths,
ArrayRef<std::string> AttrListPaths, SourceManager &SM);
+ ~XRayFunctionFilter();
enum class ImbueAttribute {
NONE,
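Forward-declaring llvm::SpecialCaseList means the std::unique_ptr members above hold an incomplete type at the point of the class definition, so XRayFunctionFilter gains an out-of-line destructor. A self-contained sketch of the same idiom with hypothetical names:

#include <memory>

class Impl; // forward declaration only; the full type lives in the .cpp file
class Wrapper {
  std::unique_ptr<Impl> P;

public:
  Wrapper();
  ~Wrapper(); // defined out of line, where Impl is a complete type
};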
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
new file mode 100644
index 000000000000..d837a7666d40
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
@@ -0,0 +1,14 @@
+//===--- arm_bf16.td - ARM BF16 compiler interface ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM BF16 header
+// file will be generated.
+//
+//===----------------------------------------------------------------------===//
+
+include "arm_neon_incl.td"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_cde.td b/contrib/llvm-project/clang/include/clang/Basic/arm_cde.td
new file mode 100644
index 000000000000..6a00e669864c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_cde.td
@@ -0,0 +1,232 @@
+//===--- arm_cde.td - ACLE intrinsic functions for CDE --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of ACLE-specified source-level intrinsic
+// functions wrapping the CDE instructions.
+//
+//===----------------------------------------------------------------------===//
+
+include "arm_mve_defs.td"
+
+// f64 is not defined in arm_mve_defs.td because MVE instructions only work with
+// f16 and f32
+def f64: PrimitiveType<"f", 64>;
+
+// Float<t> expects t to be a scalar type, and expands to the floating-point
+// type of the same width.
+class Float<Type t>: ComplexType<(CTO_CopyKind t, f32)>;
+def FScalar: Float<Scalar>;
+
+// ACLE CDE intrinsic
+class CDEIntrinsic<Type ret, dag args, dag codegen>
+ : Intrinsic<ret, args, codegen> {
+ let builtinExtension = "cde";
+}
+
+// Immediate (in range [0, 2^numBits - 1])
+class IB_ConstBits<int numBits> : IB_ConstRange<0, !add(!shl(1, numBits), -1)>;
+// numBits-wide immediate of type u32
+class CDEImmediateBits<int numBits> : Immediate<u32, IB_ConstBits<numBits>>;
+
+// LLVM IR CDE intrinsic
+class CDEIRInt<string name, list<Type> params = [], bit appendKind = 0>
+ : IRIntBase<"arm_cde_" # name, params, appendKind>;
+
+// Class for generating function macros in arm_cde.h:
+// "#define <name>(<params>) <definition>"
+class FunctionMacro<list<string> params_, string definition_> {
+ list<string> params = params_;
+ string definition = definition_;
+}
+
+// Coprocessor immediate
+def imm_coproc : Immediate<sint, IB_ConstRange<0, 7>>;
+
+// Immediate integer parameters
+def imm_3b : CDEImmediateBits<3>;
+def imm_4b : CDEImmediateBits<4>;
+def imm_6b : CDEImmediateBits<6>;
+def imm_7b : CDEImmediateBits<7>;
+def imm_9b : CDEImmediateBits<9>;
+def imm_11b : CDEImmediateBits<11>;
+def imm_12b : CDEImmediateBits<12>;
+def imm_13b : CDEImmediateBits<13>;
+
+// CX* instructions operating on GPRs
+multiclass CDE_CX_m<dag argsImm, dag argsReg, dag cgArgs> {
+ defvar cp = (args imm_coproc:$cp);
+ let pnt = PNT_None, params = T.None in {
+ def "" : CDEIntrinsic<u32, !con(cp, argsReg, argsImm),
+ !con((CDEIRInt<NAME> $cp), cgArgs, (? $imm))>;
+ def a : CDEIntrinsic<u32, !con(cp, (args u32:$acc), argsReg, argsImm),
+ !con((CDEIRInt<NAME # "a"> $cp, $acc),
+ cgArgs, (? $imm))>;
+
+ def d :
+ CDEIntrinsic<u64, !con(cp, argsReg, argsImm),
+ (seq !con((CDEIRInt<NAME # "d"> $cp), cgArgs, (? $imm)):$pair,
+ (or (shl (u64 (xval $pair, 1)), (u64 32)),
+ (u64 (xval $pair, 0))))>;
+ def da :
+ CDEIntrinsic<u64, !con(cp, (args u64:$acc), argsReg, argsImm),
+ (seq (u32 (lshr $acc, (u64 32))):$acc_hi,
+ (u32 $acc):$acc_lo,
+ !con((CDEIRInt<NAME # "da"> $cp, $acc_lo, $acc_hi), cgArgs,
+ (? $imm)):$pair,
+ (or (shl (u64 (xval $pair, 1)), (u64 32)),
+ (u64 (xval $pair, 0))))>;
+ }
+}
+
+defm cx1 : CDE_CX_m<(args imm_13b:$imm), (args), (?)>;
+defm cx2 : CDE_CX_m<(args imm_9b:$imm), (args u32:$n), (? $n)>;
+defm cx3 : CDE_CX_m<(args imm_6b:$imm), (args u32:$n, u32:$m), (? $n, $m)>;
+
+// VCX* instructions operating on VFP registers
+multiclass CDE_VCXFP_m<dag argsImm, dag argsReg32, dag argsReg64, dag cgArgs> {
+ defvar cp = (args imm_coproc:$cp);
+ let pnt = PNT_None, params = [u32] in {
+ def "" : CDEIntrinsic<u32, !con(cp, argsReg32, argsImm),
+ (bitcast !con((CDEIRInt<NAME, [f32]> $cp), cgArgs, (? $imm)),
+ Scalar)>;
+ def a : CDEIntrinsic<u32, !con(cp, (args u32:$acc), argsReg32, argsImm),
+ (bitcast !con((CDEIRInt<NAME # "a", [f32]> $cp,
+ (bitcast $acc, FScalar)), cgArgs, (? $imm)), Scalar)>;
+ }
+ let pnt = PNT_None, params = [u64] in {
+ def d : CDEIntrinsic<u64, !con(cp, argsReg64, argsImm),
+ (bitcast !con((CDEIRInt<NAME, [f64]> $cp), cgArgs, (? $imm)),
+ Scalar)>;
+ def da : CDEIntrinsic<u64, !con(cp, (args u64:$acc), argsReg64, argsImm),
+ (bitcast !con((CDEIRInt<NAME # "a", [f64]> $cp,
+ (bitcast $acc, FScalar)), cgArgs, (? $imm)), Scalar)>;
+ }
+}
+
+defm vcx1: CDE_VCXFP_m<(args imm_11b:$imm), (args), (args), (?)>;
+defm vcx2: CDE_VCXFP_m<(args imm_6b:$imm), (args u32:$n), (args u64:$n),
+ (? (bitcast $n, FScalar))>;
+defm vcx3: CDE_VCXFP_m<(args imm_3b:$imm),
+ (args u32:$n, u32:$m), (args u64:$n, u64:$m),
+ (? (bitcast $n, FScalar), (bitcast $m, FScalar))>;
+
+// VCX* instructions operating on Q vector registers
+
+def v16u8 : VecOf<u8>;
+
+let pnt = PNT_None, params = [u8] in
+def vcx1q : CDEIntrinsic<Vector, (args imm_coproc:$cp, imm_12b:$imm),
+ (CDEIRInt<"vcx1q"> $cp, $imm)>;
+
+let pnt = PNT_Type, params = T.All, polymorphicOnly = 1 in {
+ def vcx1qa :
+ CDEIntrinsic<Vector, (args imm_coproc:$cp, Vector:$acc, imm_12b:$imm),
+ (bitcast (CDEIRInt<"vcx1qa"> $cp, (bitcast $acc, v16u8), $imm),
+ Vector)>;
+
+ def vcx2q :
+ CDEIntrinsic<Vector, (args imm_coproc:$cp, Vector:$n, imm_7b:$imm),
+ (bitcast (CDEIRInt<"vcx2q"> $cp, (bitcast $n, VecOf<u8>), $imm),
+ Vector)>;
+ def vcx2q_u8 :
+ CDEIntrinsic<v16u8, (args imm_coproc:$cp, Vector:$n, imm_7b:$imm),
+ (CDEIRInt<"vcx2q"> $cp, (bitcast $n, VecOf<u8>), $imm)>;
+
+ def vcx2qa_impl :
+ CDEIntrinsic<Vector,
+ (args imm_coproc:$cp, Vector:$acc, v16u8:$n, imm_7b:$imm),
+ (bitcast (CDEIRInt<"vcx2qa"> $cp, (bitcast $acc, v16u8), $n, $imm),
+ Vector)>;
+
+ def vcx3q_impl :
+ CDEIntrinsic<Vector,
+ (args imm_coproc:$cp, Vector:$n, v16u8:$m, imm_4b:$imm),
+ (bitcast (CDEIRInt<"vcx3q"> $cp, (bitcast $n, v16u8), $m, $imm),
+ Vector)>;
+ def vcx3q_u8_impl :
+ CDEIntrinsic<v16u8,
+ (args imm_coproc:$cp, Vector:$n, v16u8:$m, imm_4b:$imm),
+ (CDEIRInt<"vcx3q"> $cp, (bitcast $n, v16u8), $m, $imm)>;
+ def vcx3qa_impl :
+ CDEIntrinsic<Vector,
+ (args imm_coproc:$cp, Vector:$acc, v16u8:$n, v16u8:$m, imm_4b:$imm),
+ (bitcast (CDEIRInt<"vcx3qa"> $cp, (bitcast $acc, v16u8), $n, $m,
+ $imm),
+ Vector)>;
+}
+
+// Reinterpret intrinsics required to implement __arm_vcx*q with 2 or 3
+// polymorphic parameters.
+let params = [/* no u8 */ s8, u16, s16, u32, s32, u64, s64, f16, f32],
+ headerOnly = 1, polymorphicOnly = 1 in
+def vreinterpretq_u8 :
+ Intrinsic<v16u8, (args Vector:$x), (vreinterpret $x, v16u8)>;
+
+// We need vreinterpretq_u8_u8 to avoid doing smart tricks in the macros
+let params = [u8], polymorphicOnly = 1 in
+def vreinterpretq_u8_cde :
+ CDEIntrinsic<v16u8, (args Vector:$x), (id $x)>,
+ NameOverride<"vreinterpretq_u8">;
+
+
+def vcx2qa : FunctionMacro<
+ ["cp", "acc", "n", "imm"],
+ "__arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))">;
+
+def vcx3q : FunctionMacro<
+ ["cp", "n", "m", "imm"],
+ "__arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))">;
+def vcx3q_u8 : FunctionMacro<
+ ["cp", "n", "m", "imm"],
+ "__arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))">;
+def vcx3qa : FunctionMacro<
+ ["cp", "acc", "n", "m", "imm"],
+ "__arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), "
+ "__arm_vreinterpretq_u8(m), (imm))">;
+
+class CDEIntrinsicMasked<string irname, dag argsReg, dag imm, dag cgArgs>
+ : CDEIntrinsic<Vector,
+ !con((args imm_coproc:$cp, Vector:$inactive_or_acc),
+ argsReg, imm, (args Predicate:$pred)),
+ !con((CDEIRInt<irname # "_predicated", [Vector,Predicate]>
+ $cp, $inactive_or_acc), cgArgs, (? $imm, $pred))> {
+ let params = T.All;
+ let polymorphicOnly = 1;
+}
+
+def vcx1q_m : CDEIntrinsicMasked<"vcx1q", (args), (args imm_12b:$imm), (?)>;
+def vcx1qa_m : CDEIntrinsicMasked<"vcx1qa", (args), (args imm_12b:$imm), (?)>;
+
+multiclass VCXPredicated<dag argsReg, dag imm, dag cgArgs,
+ list<string> macroArgs, string macro> {
+ def _m_impl : CDEIntrinsicMasked<NAME, argsReg, imm, cgArgs>;
+ def a_m_impl : CDEIntrinsicMasked<NAME#"a", argsReg, imm, cgArgs>;
+
+ def _m: FunctionMacro<
+ !listconcat(["cp", "inactive"], macroArgs, ["imm", "pred"]),
+ "__arm_"#NAME#"_m_impl((cp), (inactive), "#macro#" (imm), (pred))">;
+ def a_m: FunctionMacro<
+ !listconcat(["cp", "acc"], macroArgs, ["imm", "pred"]),
+ "__arm_"#NAME#"a_m_impl((cp), (acc), "#macro#" (imm), (pred))">;
+}
+
+defm vcx2q :
+ VCXPredicated<(args v16u8:$n), (args imm_7b:$imm), (? $n), ["n"],
+ "__arm_vreinterpretq_u8(n),">;
+defm vcx3q :
+ VCXPredicated<(args v16u8:$n, v16u8:$m), (args imm_4b:$imm), (? $n, $m),
+ ["n", "m"], "__arm_vreinterpretq_u8(n), "
+ "__arm_vreinterpretq_u8(m),">;
+
+// vreinterpretq intrinsics required by the ACLE CDE specification
+
+foreach desttype = [/* no u8 */ s8, u16, s16, u32, s32, u64, s64, f16, f32] in {
+ let params = [u8], headerOnly = 1, pnt = PNT_None in
+ def "vreinterpretq_" # desttype : Intrinsic<
+ VecOf<desttype>, (args Vector:$x), (vreinterpret $x, VecOf<desttype>)>;
+}
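Each FunctionMacro record above carries the parameter list and body of a function-like macro; the generated arm_cde.h is expected to contain something roughly like the following for vcx2qa (the emitted __arm_ prefix on the macro name is an assumption, not taken from this file):

#define __arm_vcx2qa(cp, acc, n, imm) \
  __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))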
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_mve.td b/contrib/llvm-project/clang/include/clang/Basic/arm_mve.td
index 0e023b85459c..25daae2a0a25 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_mve.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_mve.td
@@ -43,6 +43,12 @@ def vqaddq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(IRIntBase<"sadd_sat", [Vector]> $a, $b)>;
def vqsubq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(IRIntBase<"ssub_sat", [Vector]> $a, $b)>;
+let pnt = PNT_NType in {
+ def vqaddq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRIntBase<"sadd_sat", [Vector]> $a, (splat $b))>;
+ def vqsubq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRIntBase<"ssub_sat", [Vector]> $a, (splat $b))>;
+}
}
let params = T.Unsigned in {
def vqaddq_u: Intrinsic<Vector, (args Vector:$a, Vector:$b),
@@ -51,6 +57,14 @@ def vqaddq_u: Intrinsic<Vector, (args Vector:$a, Vector:$b),
def vqsubq_u: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(IRIntBase<"usub_sat", [Vector]> $a, $b)>,
NameOverride<"vqsubq">;
+let pnt = PNT_NType in {
+ def vqaddq_u_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRIntBase<"uadd_sat", [Vector]> $a, (splat $b))>,
+ NameOverride<"vqaddq_n">;
+ def vqsubq_u_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRIntBase<"usub_sat", [Vector]> $a, (splat $b))>,
+ NameOverride<"vqsubq_n">;
+}
}
// Some intrinsics below are implemented not as IR fragments, but as
@@ -85,12 +99,32 @@ def vmullbq_int: Intrinsic<DblVector, (args Vector:$a, Vector:$b),
def vmulltq_int: Intrinsic<DblVector, (args Vector:$a, Vector:$b),
(IRInt<"vmull", [DblVector, Vector]>
$a, $b, (unsignedflag Scalar), 1)>;
+let pnt = PNT_NType in {
+ def vaddq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (add $a, (splat $b))>;
+ def vsubq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (sub $a, (splat $b))>;
+ def vmulq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (mul $a, (splat $b))>;
+ def vhaddq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vhadd", [Vector]> $a, (splat $b),
+ (unsignedflag Scalar))>;
+ def vhsubq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vhsub", [Vector]> $a, (splat $b),
+ (unsignedflag Scalar))>;
+}
}
let params = T.Signed in {
def vqdmulhq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(IRInt<"vqdmulh", [Vector]> $a, $b)>;
def vqrdmulhq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(IRInt<"vqrdmulh", [Vector]> $a, $b)>;
+let pnt = PNT_NType in {
+ def vqdmulhq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vqdmulh", [Vector]> $a, (splat $b))>;
+ def vqrdmulhq_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vqrdmulh", [Vector]> $a, (splat $b))>;
+}
}
let params = T.Poly, overrideKindLetter = "p" in {
@@ -114,6 +148,240 @@ def vsubqf: Intrinsic<Vector, (args Vector:$a, Vector:$b), (fsub $a, $b)>,
NameOverride<"vsubq">;
def vmulqf: Intrinsic<Vector, (args Vector:$a, Vector:$b), (fmul $a, $b)>,
NameOverride<"vmulq">;
+
+let pnt = PNT_NType in {
+ def vaddqf_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (fadd $a, (splat $b))>,
+ NameOverride<"vaddq_n">;
+ def vsubqf_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (fsub $a, (splat $b))>,
+ NameOverride<"vsubq_n">;
+ def vmulqf_n: Intrinsic<Vector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (fmul $a, (splat $b))>,
+ NameOverride<"vmulq_n">;
+}
+}
+
+multiclass FMA<bit add> {
+ // FMS instructions are defined in the ArmARM as if they negate the
+ // second multiply input.
+ defvar m2_cg = !if(add, (id $m2), (fneg $m2));
+
+ defvar unpred_cg = (IRIntBase<"fma", [Vector]> $m1, m2_cg, $addend);
+ defvar pred_cg = (IRInt<"fma_predicated", [Vector, Predicate]>
+ $m1, m2_cg, $addend, $pred);
+
+ def q: Intrinsic<Vector, (args Vector:$addend, Vector:$m1, Vector:$m2),
+ unpred_cg>;
+
+ def q_m: Intrinsic<Vector, (args Vector:$addend, Vector:$m1, Vector:$m2,
+ Predicate:$pred), pred_cg>;
+
+ // Only FMA has the vector/scalar variants, not FMS
+ if add then let pnt = PNT_NType in {
+
+ def q_n: Intrinsic<Vector, (args Vector:$addend, Vector:$m1,
+ unpromoted<Scalar>:$m2_s),
+ (seq (splat $m2_s):$m2, unpred_cg)>;
+ def sq_n: Intrinsic<Vector, (args Vector:$m1, Vector:$m2,
+ unpromoted<Scalar>:$addend_s),
+ (seq (splat $addend_s):$addend, unpred_cg)>;
+ def q_m_n: Intrinsic<Vector, (args Vector:$addend, Vector:$m1,
+ unpromoted<Scalar>:$m2_s,
+ Predicate:$pred),
+ (seq (splat $m2_s):$m2, pred_cg)>;
+ def sq_m_n: Intrinsic<Vector, (args Vector:$m1, Vector:$m2,
+ unpromoted<Scalar>:$addend_s,
+ Predicate:$pred),
+ (seq (splat $addend_s):$addend, pred_cg)>;
+ }
+}
+
+let params = T.Float in {
+ defm vfma: FMA<1>;
+ defm vfms: FMA<0>;
+}
+
+let params = T.Int, pnt = PNT_NType in {
+ def vmlaq_n: Intrinsic<
+ Vector, (args Vector:$addend, Vector:$m1, unpromoted<Scalar>:$m2_s),
+ (add (mul $m1, (splat $m2_s)), $addend)>;
+ def vmlasq_n: Intrinsic<
+ Vector, (args Vector:$m1, Vector:$m2, unpromoted<Scalar>:$addend_s),
+ (add (mul $m1, $m2), (splat $addend_s))>;
+
+ def vmlaq_m_n: Intrinsic<
+ Vector, (args Vector:$addend, Vector:$m1, Scalar:$m2_s, Predicate:$pred),
+ (IRInt<"vmla_n_predicated", [Vector, Predicate]>
+ $addend, $m1, $m2_s, $pred)>;
+ def vmlasq_m_n: Intrinsic<
+ Vector, (args Vector:$m1, Vector:$m2, Scalar:$addend_s, Predicate:$pred),
+ (IRInt<"vmlas_n_predicated", [Vector, Predicate]>
+ $m1, $m2, $addend_s, $pred)>;
+}
+
+multiclass VQDMLA {
+ def hq_n: Intrinsic<
+ Vector, (args Vector:$addend, Vector:$m1, Scalar:$m2_s),
+ (IRInt<NAME # "h", [Vector]> $addend, $m1, $m2_s)>;
+ def shq_n: Intrinsic<
+ Vector, (args Vector:$m1, Vector:$m2, Scalar:$addend_s),
+ (IRInt<NAME # "sh", [Vector]> $m1, $m2, $addend_s)>;
+
+ def hq_m_n: Intrinsic<
+ Vector, (args Vector:$addend, Vector:$m1, Scalar:$m2_s, Predicate:$pred),
+ (IRInt<NAME # "h_predicated", [Vector, Predicate]>
+ $addend, $m1, $m2_s, $pred)>;
+ def shq_m_n: Intrinsic<
+ Vector, (args Vector:$m1, Vector:$m2, Scalar:$addend_s, Predicate:$pred),
+ (IRInt<NAME # "sh_predicated", [Vector, Predicate]>
+ $m1, $m2, $addend_s, $pred)>;
+}
+
+let params = T.Signed, pnt = PNT_NType in {
+ defm vqdmla: VQDMLA;
+ defm vqrdmla: VQDMLA;
+}
+
+multiclass VQDMLAD<int exchange, int round, int subtract> {
+ def "": Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c),
+ (IRInt<"vqdmlad", [Vector]> $a, $b, $c,
+ (u32 exchange), (u32 round), (u32 subtract))>;
+ def _m: Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c,
+ Predicate:$pred),
+ (IRInt<"vqdmlad_predicated", [Vector, Predicate]> $a, $b, $c,
+ (u32 exchange), (u32 round), (u32 subtract), $pred)>;
+}
+let params = T.Signed in {
+ defm vqdmladhq: VQDMLAD<0, 0, 0>;
+ defm vqdmladhxq: VQDMLAD<1, 0, 0>;
+ defm vqdmlsdhq: VQDMLAD<0, 0, 1>;
+ defm vqdmlsdhxq: VQDMLAD<1, 0, 1>;
+ defm vqrdmladhq: VQDMLAD<0, 1, 0>;
+ defm vqrdmladhxq: VQDMLAD<1, 1, 0>;
+ defm vqrdmlsdhq: VQDMLAD<0, 1, 1>;
+ defm vqrdmlsdhxq: VQDMLAD<1, 1, 1>;
+}
+
+let params = !listconcat(T.Int16, T.Int32) in {
+ let pnt = PNT_None in {
+ def vmvnq_n: Intrinsic<Vector, (args imm_simd_vmvn:$imm),
+ (not (splat (Scalar $imm)))>;
+ }
+ defm vmvnq: IntrinsicMX<Vector, (args imm_simd_vmvn:$imm, Predicate:$pred),
+ (select $pred, (not (splat (Scalar $imm))), $inactive),
+ 1, "_n", PNT_NType, PNT_None>;
+ let pnt = PNT_NType in {
+ def vbicq_n: Intrinsic<Vector, (args Vector:$v, imm_simd_restrictive:$imm),
+ (and $v, (not (splat (Scalar $imm))))>;
+ def vorrq_n: Intrinsic<Vector, (args Vector:$v, imm_simd_restrictive:$imm),
+ (or $v, (splat (Scalar $imm)))>;
+ }
+ def vbicq_m_n: Intrinsic<
+ Vector, (args Vector:$v, imm_simd_restrictive:$imm, Predicate:$pred),
+ (select $pred, (and $v, (not (splat (Scalar $imm)))), $v)>;
+ def vorrq_m_n: Intrinsic<
+ Vector, (args Vector:$v, imm_simd_restrictive:$imm, Predicate:$pred),
+ (select $pred, (or $v, (splat (Scalar $imm))), $v)>;
+}
+
+let params = T.Usual in {
+ let pnt = PNT_None in
+ def vdupq_n: Intrinsic<Vector, (args unpromoted<Scalar>:$s), (splat $s)>;
+
+ defm vdupq: IntrinsicMX<
+ Vector, (args unpromoted<Scalar>:$s, Predicate:$pred),
+ (select $pred, (splat $s), $inactive), 1, "_n", PNT_NType, PNT_None>;
+}
+
+multiclass vxdup_mc<dag paramsIn, dag paramsOut> {
+ defvar UnpredInt = IRInt<NAME, [Vector]>;
+ defvar PredInt = IRInt<NAME # "_predicated", [Vector, Predicate]>;
+ defvar UnpredIntCall = !con((UnpredInt $base), paramsOut);
+ defvar PredIntCall = !con((PredInt $inactive, $base), paramsOut, (? $pred));
+
+ // Straightforward case with neither writeback nor predication
+ let pnt = PNT_N in
+ def q_n: Intrinsic<Vector, !con((args u32:$base), paramsIn),
+ (xval UnpredIntCall, 0)>;
+
+ // Predicated form without writeback
+ defm q: IntrinsicMX<
+ Vector, !con((args u32:$base), paramsIn, (? Predicate:$pred)),
+ (xval PredIntCall, 0), 1, "_n", PNT_NType, PNT_N>;
+
+ // Writeback without predication
+ let pnt = PNT_WB in
+ def q_wb: Intrinsic<
+ Vector, !con((args Ptr<u32>:$baseaddr), paramsIn),
+ (seq (load $baseaddr):$base,
+ UnpredIntCall:$pair,
+ (store (xval $pair, 1), $baseaddr),
+ (xval $pair, 0))>;
+
+ // Both writeback and predicated
+ defm q: IntrinsicMX<
+ Vector, !con((args Ptr<u32>:$baseaddr), paramsIn, (? Predicate:$pred)),
+ (seq (load $baseaddr):$base,
+ PredIntCall:$pair,
+ (store (xval $pair, 1), $baseaddr),
+ (xval $pair, 0)), 1, "_wb", PNT_WBType, PNT_WB>;
+}
+
+let params = T.Unsigned in {
+ defm vidup: vxdup_mc<(? imm_1248:$step), (? $step)>;
+ defm vddup: vxdup_mc<(? imm_1248:$step), (? $step)>;
+ defm viwdup: vxdup_mc<(? u32:$limit, imm_1248:$step), (? $limit, $step)>;
+ defm vdwdup: vxdup_mc<(? u32:$limit, imm_1248:$step), (? $limit, $step)>;
+}
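[Editorial note: a hedged C sketch of the four shapes the vxdup_mc multiclass above produces for vidupq (plain, predicated, writeback, and predicated with writeback). Names follow the definitions; verify them against the generated arm_mve.h, and the demo function is hypothetical.]
#include <arm_mve.h>

uint32x4_t vidup_demo(uint32_t base, uint32_t *wb, uint32x4_t inactive,
                      mve_pred16_t p) {
  uint32x4_t a = vidupq_n_u32(base, 4);                /* base, base+4, base+8, base+12 */
  uint32x4_t b = vidupq_m_n_u32(inactive, base, 4, p); /* predicated, no writeback */
  uint32x4_t c = vidupq_wb_u32(wb, 4);                 /* *wb advanced past the sequence */
  uint32x4_t d = vidupq_m_wb_u32(inactive, wb, 4, p);  /* predication and writeback */
  return veorq_u32(veorq_u32(a, b), veorq_u32(c, d));
}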
+
+let params = T.Int in {
+ def vmvnq: Intrinsic<Vector, (args Vector:$a),
+ (xor $a, (uint_max Vector))>;
+ defm vmvnq: IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<"mvn_predicated", [Vector, Predicate]> $a, $pred, $inactive)>;
+ def vclzq: Intrinsic<Vector, (args Vector:$a),
+ (IRIntBase<"ctlz", [Vector]> $a, (i1 0))>;
+ defm vclzq: IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<"clz_predicated", [Vector, Predicate]> $a, $pred, $inactive)>;
+}
+let params = T.Signed in {
+ def vclsq: Intrinsic<Vector, (args Vector:$a), (IRInt<"vcls", [Vector]> $a)>;
+ defm vclsq: IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<"cls_predicated", [Vector, Predicate]> $a, $pred, $inactive)>;
+
+ def vnegq: Intrinsic<Vector, (args Vector:$a),
+ (sub (zeroinit Vector), $a)>;
+ def vabsq: Intrinsic<Vector, (args Vector:$a),
+ (select (icmp_slt $a, (zeroinit Vector)),
+ (sub (zeroinit Vector), $a), $a)>;
+ def vqnegq: Intrinsic<Vector, (args Vector:$a),
+ (select (icmp_eq $a, (int_min Vector)),
+ (int_max Vector),
+ (sub (zeroinit Vector), $a))>;
+ def vqabsq: Intrinsic<Vector, (args Vector:$a),
+ (select (icmp_sgt $a, (zeroinit Vector)), $a,
+ (select (icmp_eq $a, (int_min Vector)),
+ (int_max Vector),
+ (sub (zeroinit Vector), $a)))>;
+
+ foreach name = ["qneg", "qabs"] in {
+ defm v#name#q: IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<name#"_predicated", [Vector, Predicate]> $a, $pred, $inactive),
+ 0 /* no _x variant for saturating intrinsics */>;
+ }
+}
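[Editorial note: a small hedged C illustration of the saturating codegen above. vqnegq clamps INT_MIN to INT_MAX instead of wrapping, which is exactly the (icmp_eq $a, int_min) select encoded in vqnegq/vqabsq; the demo function is made up.]
#include <arm_mve.h>
#include <stdint.h>

int8x16_t qneg_demo(void) {
  int8x16_t v = vdupq_n_s8(INT8_MIN);
  int8x16_t wrapped   = vnegq_s8(v);  /* lanes stay INT8_MIN: plain two's-complement negate */
  int8x16_t saturated = vqnegq_s8(v); /* lanes become INT8_MAX: saturating negate */
  return vaddq_s8(wrapped, saturated);
}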
+let params = !listconcat(T.Signed, T.Float) in {
+ foreach name = ["neg", "abs"] in {
+ defm v#name#q: IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<name#"_predicated", [Vector, Predicate]> $a, $pred, $inactive)>;
+ }
+}
+let params = T.Float in {
+ def vnegq_f: Intrinsic<Vector, (args Vector:$a), (fneg $a)>,
+ NameOverride<"vnegq">;
+ def vabsq_f: Intrinsic<Vector, (args Vector:$a),
+ (IRIntBase<"fabs", [Vector]> $a)>, NameOverride<"vabsq">;
}
// The bitcasting below is not overcomplicating the IR because while
@@ -145,6 +413,16 @@ multiclass VectorVectorArithmetic<string operation, dag extraArgs = (?),
extraArgs, (? $pred, $inactive)), wantXVariant>;
}
+multiclass VectorScalarArithmetic<string operation, string basename,
+ dag extraArgs = (?),
+ int wantXVariant = 1> {
+ defm "" : IntrinsicMXNameOverride<
+ Vector, (args Vector:$a, unpromoted<Scalar>:$b, Predicate:$pred),
+ !con((IRInt<operation, [Vector, Predicate]> $a, (splat $b)),
+ extraArgs, (? $pred, $inactive)), basename, wantXVariant, "_n",
+ PNT_NType, PNT_NType>;
+}
+
multiclass VectorVectorArithmeticBitcast<string operation> {
defm "" : IntrinsicMX<Vector, (args Vector:$a, Vector:$b,
Predicate:$pred),
@@ -166,13 +444,28 @@ let params = T.Usual in {
defm veorq : VectorVectorArithmeticBitcast<"eor_predicated">;
defm vornq : VectorVectorArithmeticBitcast<"orn_predicated">;
defm vorrq : VectorVectorArithmeticBitcast<"orr_predicated">;
+
+ defm : VectorScalarArithmetic<"add_predicated", "vaddq">;
+ defm : VectorScalarArithmetic<"sub_predicated", "vsubq">;
+ defm : VectorScalarArithmetic<"mul_predicated", "vmulq">;
}
-multiclass DblVectorVectorArithmetic<string operation, dag extraArgs = (?)> {
+multiclass DblVectorVectorArithmetic<string operation, dag extraArgs = (?),
+ int wantXVariant = 1> {
defm "" : IntrinsicMX<
- DblVector, (args Vector:$a, Vector:$b, Predicate:$pred),
- !con((IRInt<operation, [DblVector, Vector, Predicate]> $a, $b),
- extraArgs, (? $pred, $inactive))>;
+ DblVector, (args Vector:$a, Vector:$b, DblPredicate:$pred),
+ !con((IRInt<operation, [DblVector, Vector, DblPredicate]> $a, $b),
+ extraArgs, (? $pred, $inactive)), wantXVariant>;
+}
+
+multiclass DblVectorScalarArithmetic<string operation, string basename,
+ dag extraArgs = (?),
+ int wantXVariant = 1> {
+ defm "" : IntrinsicMXNameOverride<
+ DblVector, (args Vector:$a, unpromoted<Scalar>:$b, DblPredicate:$pred),
+ !con((IRInt<operation, [DblVector, Vector, DblPredicate]> $a, (splat $b)),
+ extraArgs, (? $pred, $inactive)), basename, wantXVariant, "_n",
+ PNT_NType, PNT_NType>;
}
// Predicated intrinsics - Int types only
@@ -188,10 +481,22 @@ let params = T.Int in {
defm vhsubq : VectorVectorArithmetic<"hsub_predicated", (? (unsignedflag Scalar))>;
defm vmullbq_int : DblVectorVectorArithmetic<"mull_int_predicated", (? (unsignedflag Scalar), (u32 0))>;
defm vmulltq_int : DblVectorVectorArithmetic<"mull_int_predicated", (? (unsignedflag Scalar), (u32 1))>;
+
+ defm : VectorScalarArithmetic<"qadd_predicated", "vqaddq", (? (unsignedflag Scalar)), 0>;
+ defm : VectorScalarArithmetic<"hadd_predicated", "vhaddq", (? (unsignedflag Scalar))>;
+ defm : VectorScalarArithmetic<"qsub_predicated", "vqsubq", (? (unsignedflag Scalar)), 0>;
+ defm : VectorScalarArithmetic<"hsub_predicated", "vhsubq", (? (unsignedflag Scalar))>;
}
let params = T.Signed in {
defm vqdmulhq : VectorVectorArithmetic<"qdmulh_predicated", (?), 0>;
defm vqrdmulhq : VectorVectorArithmetic<"qrdmulh_predicated", (?), 0>;
+ def vminaq_m: Intrinsic<UVector, (args UVector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vmina_predicated", [UVector,Predicate]> $a, $b, $pred)>;
+ def vmaxaq_m: Intrinsic<UVector, (args UVector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vmaxa_predicated", [UVector,Predicate]> $a, $b, $pred)>;
+
+ defm : VectorScalarArithmetic<"qdmulh_predicated", "vqdmulhq", (?), 0>;
+ defm : VectorScalarArithmetic<"qrdmulh_predicated", "vqrdmulhq", (?), 0>;
}
let params = T.Poly, overrideKindLetter = "p" in {
@@ -199,17 +504,74 @@ let params = T.Poly, overrideKindLetter = "p" in {
defm vmulltq_poly : DblVectorVectorArithmetic<"mull_poly_predicated", (? (u32 1))>;
}
+let params = [s16, s32] in {
+ def vqdmullbq: Intrinsic<DblVector, (args Vector:$a, Vector:$b),
+ (IRInt<"vqdmull", [DblVector, Vector]> $a, $b, 0)>;
+ def vqdmulltq: Intrinsic<DblVector, (args Vector:$a, Vector:$b),
+ (IRInt<"vqdmull", [DblVector, Vector]> $a, $b, 1)>;
+ defm vqdmullbq: DblVectorVectorArithmetic<"vqdmull_predicated", (? (u32 0)), 0>;
+ defm vqdmulltq: DblVectorVectorArithmetic<"vqdmull_predicated", (? (u32 1)), 0>;
+
+ let pnt = PNT_NType in {
+ def vqdmullbq_n: Intrinsic<DblVector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vqdmull", [DblVector, Vector]>
+ $a, (splat $b), 0)>;
+ def vqdmulltq_n: Intrinsic<DblVector, (args Vector:$a, unpromoted<Scalar>:$b),
+ (IRInt<"vqdmull", [DblVector, Vector]>
+ $a, (splat $b), 1)>;
+ }
+ defm vqdmullbq_n: DblVectorScalarArithmetic<"vqdmull_predicated",
+ "vqdmullbq", (? (u32 0)), 0>;
+ defm vqdmulltq_n: DblVectorScalarArithmetic<"vqdmull_predicated",
+ "vqdmulltq", (? (u32 1)), 0>;
+}
+
// Predicated intrinsics - Float types only
let params = T.Float in {
defm vminnmq : VectorVectorArithmetic<"min_predicated", (? (u32 0))>;
defm vmaxnmq : VectorVectorArithmetic<"max_predicated", (? (u32 0))>;
+ def vminnmaq_m: Intrinsic<Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vminnma_predicated", [Vector,Predicate]> $a, $b, $pred)>;
+ def vmaxnmaq_m: Intrinsic<Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vmaxnma_predicated", [Vector,Predicate]> $a, $b, $pred)>;
+}
+
+multiclass Reduction<Type Accumulator, string basename, list<Type> basetypes,
+ bit needSign = 0,
+ dag postCG = (seq (id $ret)),
+ dag accArg = (args Accumulator:$prev),
+ dag preCG = (seq)> {
+ defvar intArgsBase = (? $prev, $vec);
+ defvar intArgsUnpred = !con(intArgsBase,
+ !if(needSign, (? (unsignedflag Scalar)), (?)));
+ defvar intArgsPred = !con(intArgsUnpred, (? $pred));
+ defvar intUnpred = !setop(intArgsUnpred, IRInt<basename, basetypes>);
+ defvar intPred = !setop(intArgsPred, IRInt<
+ basename#"_predicated", !listconcat(basetypes, [Predicate])>);
+
+ def "": Intrinsic<
+ Accumulator, !con(accArg, (args Vector:$vec)),
+ !con(preCG, (seq intUnpred:$ret), postCG)>;
+ def _p: Intrinsic<
+ Accumulator, !con(accArg, (args Vector:$vec, Predicate:$pred)),
+ !con(preCG, (seq intPred:$ret), postCG)>;
}
let params = T.Int in {
-def vminvq: Intrinsic<Scalar, (args Scalar:$prev, Vector:$vec),
- (Scalar (IRInt<"minv", [Vector], 1> $prev, $vec))>;
-def vmaxvq: Intrinsic<Scalar, (args Scalar:$prev, Vector:$vec),
- (Scalar (IRInt<"maxv", [Vector], 1> $prev, $vec))>;
+defm vminvq: Reduction<Scalar, "minv", [Vector], 1, (seq (Scalar $ret))>;
+defm vmaxvq: Reduction<Scalar, "maxv", [Vector], 1, (seq (Scalar $ret))>;
+}
+
+let params = T.Signed in {
+defm vminavq: Reduction<UScalar, "minav", [Vector], 0, (seq (UScalar $ret))>;
+defm vmaxavq: Reduction<UScalar, "maxav", [Vector], 0, (seq (UScalar $ret))>;
+}
+
+let params = T.Float in {
+defm vminnmvq: Reduction<Scalar, "minnmv", [Scalar, Vector]>;
+defm vmaxnmvq: Reduction<Scalar, "maxnmv", [Scalar, Vector]>;
+defm vminnmavq: Reduction<Scalar, "minnmav", [Scalar, Vector]>;
+defm vmaxnmavq: Reduction<Scalar, "maxnmav", [Scalar, Vector]>;
}
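[Editorial note: a hedged C sketch of the reduction shapes the Reduction multiclass above produces. Each folds a vector into a scalar, carrying in a previous accumulator, with _p variants honouring a predicate; prototypes are assumptions to be checked against the generated header.]
#include <arm_mve.h>
#include <stdint.h>

int16_t reduce_demo(int16x8_t v, mve_pred16_t p) {
  int16_t  m  = vminvq_s16(INT16_MAX, v);      /* min over all lanes and the accumulator */
  int16_t  mp = vminvq_p_s16(INT16_MAX, v, p); /* only lanes enabled by p participate */
  uint16_t am = vminavq_s16(UINT16_MAX, v);    /* min of absolute values, unsigned result */
  return (int16_t)(m + mp + (int16_t)am);
}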
foreach half = [ "b", "t" ] in {
@@ -223,8 +585,133 @@ foreach half = [ "b", "t" ] in {
VecOf<f16>, (args VecOf<f16>:$inactive, Vector:$a, PredOf<f32>:$pred),
(IRInt<"vcvt_narrow_predicated"> $inactive, $a, halfconst, $pred)>;
} // params = [f32], pnt = PNT_None
+
+ let params = [f16], pnt = PNT_None in {
+ def vcvt#half#q_f32: Intrinsic<VecOf<f32>, (args Vector:$a),
+ (IRInt<"vcvt_widen"> $a, halfconst)>;
+ defm vcvt#half#q: IntrinsicMX<
+ VecOf<f32>, (args Vector:$a, PredOf<f32>:$pred),
+ (IRInt<"vcvt_widen_predicated"> $inactive, $a, halfconst, $pred),
+ 1, "_f32">;
+ } // params = [f16], pnt = PNT_None
} // loop over half = "b", "t"
+multiclass float_int_conversions<Type FScalar, Type IScalar, IRBuilderBase ftoi, IRBuilderBase itof> {
+ defvar FVector = VecOf<FScalar>;
+ defvar IVector = VecOf<IScalar>;
+
+ let params = [IScalar] in {
+ let pnt = PNT_2Type in {
+ def : Intrinsic<FVector, (args IVector:$a), (itof $a, FVector)>,
+ NameOverride<"vcvtq_" # FScalar>;
+ }
+ defm vcvtq: IntrinsicMX<FVector, (args IVector:$a, Predicate:$pred),
+ (IRInt<"vcvt_fp_int_predicated", [FVector, IVector, Predicate]>
+ $a, (unsignedflag IScalar), $pred, $inactive),
+ 1, "_" # FScalar, PNT_2Type, PNT_2Type>;
+ }
+ let params = [FScalar] in {
+ let pnt = PNT_None in {
+ def : Intrinsic<IVector, (args FVector:$a), (ftoi $a, IVector)>,
+ NameOverride<"vcvtq_" # IScalar>;
+
+ foreach suffix = ["a","n","p","m"] in
+ def : Intrinsic<IVector, (args FVector:$a),
+ (IRInt<"vcvt"#suffix, [IVector, FVector]>
+ (unsignedflag IScalar), $a)>,
+ NameOverride<"vcvt"#suffix#"q_" # IScalar>;
+ }
+ defm vcvtq: IntrinsicMX<IVector, (args FVector:$a, Predicate:$pred),
+ (IRInt<"vcvt_fp_int_predicated", [IVector, FVector, Predicate]>
+ $a, (unsignedflag IScalar), $pred, $inactive),
+ 1, "_" # IScalar, PNT_2Type, PNT_None>;
+
+ foreach suffix = ["a","n","p","m"] in {
+ defm "vcvt"#suffix#"q" : IntrinsicMX<
+ IVector, (args FVector:$a, Predicate:$pred),
+ (IRInt<"vcvt"#suffix#"_predicated", [IVector, FVector, Predicate]>
+ (unsignedflag IScalar), $inactive, $a, $pred),
+ 1, "_" # IScalar, PNT_2Type, PNT_None>;
+ }
+ }
+}
+
+defm "" : float_int_conversions<f32, u32, fptoui, uitofp>;
+defm "" : float_int_conversions<f16, u16, fptoui, uitofp>;
+defm "" : float_int_conversions<f32, s32, fptosi, sitofp>;
+defm "" : float_int_conversions<f16, s16, fptosi, sitofp>;
+
+multiclass vmovl<bit top> {
+ let params = [s8, u8, s16, u16] in {
+ def "": Intrinsic<DblVector, (args Vector:$a),
+ (extend (unzip $a, top), DblVector, (unsignedflag Scalar))>;
+ defm "": IntrinsicMX<DblVector, (args Vector:$a, DblPredicate:$pred),
+ (IRInt<"vmovl_predicated", [DblVector, Vector, DblPredicate]>
+ $a, (unsignedflag Scalar), top, $pred, $inactive)>;
+ }
+}
+
+defm vmovlbq: vmovl<0>;
+defm vmovltq: vmovl<1>;
+
+multiclass vmovn<bit top, dag wide_result> {
+ let params = [s16, u16, s32, u32] in {
+ def "": Intrinsic<HalfVector, (args HalfVector:$inactive, Vector:$a),
+ (trunc wide_result, HalfVector)>;
+ def _m: Intrinsic<HalfVector, (args HalfVector:$inactive, Vector:$a,
+ Predicate:$pred),
+ (IRInt<"vmovn_predicated", [HalfVector, Vector, Predicate]>
+ $inactive, $a, top, $pred)>;
+ }
+}
+
+defm vmovntq: vmovn<1, (zip (vreinterpret $inactive, Vector), $a)>;
+defm vmovnbq: vmovn<0,
+ (zip $a, (vreinterpret (vrev $inactive, (bitsize Scalar)), Vector))>;
+
+multiclass vqmovn<bit top, Type RetScalar> {
+ defvar RetVector = VecOf<RetScalar>;
+
+ let params = [s16, u16, s32, u32] in {
+ def : Intrinsic<
+ RetVector, (args RetVector:$inactive, Vector:$a),
+ (IRInt<"vqmovn", [RetVector, Vector]>
+ $inactive, $a, (unsignedflag RetScalar), (unsignedflag Scalar), top)>,
+ NameOverride<NAME>;
+ def: Intrinsic<
+ RetVector, (args RetVector:$inactive, Vector:$a, Predicate:$pred),
+ (IRInt<"vqmovn_predicated", [RetVector, Vector, Predicate]>
+ $inactive, $a, (unsignedflag RetScalar), (unsignedflag Scalar),
+ top, $pred)>,
+ NameOverride<NAME # "_m">;
+ }
+}
+
+let params = [s16, s32, u16, u32] in {
+ defm vqmovntq: vqmovn<1, HalfScalar>;
+ defm vqmovnbq: vqmovn<0, HalfScalar>;
+}
+let params = [s16, s32] in {
+ defm vqmovuntq: vqmovn<1, UHalfScalar>;
+ defm vqmovunbq: vqmovn<0, UHalfScalar>;
+}
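[Editorial note: a hedged C sketch of the narrowing families defined above. The 'b'/'t' forms write the bottom or top half-width lanes of an existing destination, and the vqmovun forms saturate signed sources into an unsigned result; the demo function is hypothetical.]
#include <arm_mve.h>

uint8x16_t narrow_demo(int16x8_t wide, uint8x16_t dest) {
  int8x16_t sdest = vreinterpretq_s8_u8(dest);
  sdest = vmovnbq_s16(sdest, wide); /* truncate into even (bottom) byte lanes */
  sdest = vmovntq_s16(sdest, wide); /* truncate into odd (top) byte lanes */
  dest = vqmovunbq_s16(dest, wide); /* saturate s16 -> u8 into the bottom lanes */
  return veorq_u8(dest, vreinterpretq_u8_s8(sdest));
}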
+
+multiclass vrnd<IRIntBase ir_int, string suffix> {
+ let params = T.Float in {
+ def "": Intrinsic<Vector, (args Vector:$a), (ir_int $a)>;
+ defm "": IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<"vrint"#suffix#"_predicated", [Vector, Predicate]>
+ $a, $pred, $inactive)>;
+ }
+}
+
+defm vrndq: vrnd<IRIntBase<"trunc", [Vector]>, "z">;
+defm vrndmq: vrnd<IRIntBase<"floor", [Vector]>, "m">;
+defm vrndpq: vrnd<IRIntBase<"ceil", [Vector]>, "p">;
+defm vrndaq: vrnd<IRIntBase<"round", [Vector]>, "a">;
+defm vrndxq: vrnd<IRIntBase<"rint", [Vector]>, "x">;
+defm vrndnq: vrnd<IRInt<"vrintn", [Vector]>, "n">;
+
multiclass compare_with_pred<string condname, dag arguments,
dag cmp, string suffix> {
// Make the predicated and unpredicated versions of a single comparison.
@@ -275,6 +762,14 @@ let params = T.Signed in {
(select (icmp_sle $a, $b), $a, $b)>;
def vmaxq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
(select (icmp_sge $a, $b), $a, $b)>;
+ def vminaq: Intrinsic<UVector, (args UVector:$a, Vector:$b),
+ (seq (select (icmp_slt $b, (zeroinit Vector)),
+ (sub (zeroinit Vector), $b), $b):$absb,
+ (select (icmp_ule $a, $absb), $a, $absb))>;
+ def vmaxaq: Intrinsic<UVector, (args UVector:$a, Vector:$b),
+ (seq (select (icmp_slt $b, (zeroinit Vector)),
+ (sub (zeroinit Vector), $b), $b):$absb,
+ (select (icmp_uge $a, $absb), $a, $absb))>;
}
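[Editorial note: a brief hedged illustration of the vminaq/vmaxaq definitions above, which compare an unsigned accumulator against the absolute value of a signed operand, lane by lane; the demo function is made up.]
#include <arm_mve.h>

uint8x16_t mina_demo(uint8x16_t acc, int8x16_t v) {
  acc = vminaq_s8(acc, v);  /* per lane: min(acc, |v|) */
  return vmaxaq_s8(acc, v); /* per lane: max(acc, |v|) */
}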
let params = T.Unsigned in {
def vminqu: Intrinsic<Vector, (args Vector:$a, Vector:$b),
@@ -286,9 +781,17 @@ let params = T.Unsigned in {
}
let params = T.Float in {
def vminnmq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
- (IRIntBase<"minnum", [Vector]> $a, $b)>;
+ (IRIntBase<"minnum", [Vector]> $a, $b)>;
def vmaxnmq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
- (IRIntBase<"maxnum", [Vector]> $a, $b)>;
+ (IRIntBase<"maxnum", [Vector]> $a, $b)>;
+ def vminnmaq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
+ (IRIntBase<"minnum", [Vector]>
+ (IRIntBase<"fabs", [Vector]> $a),
+ (IRIntBase<"fabs", [Vector]> $b))>;
+ def vmaxnmaq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
+ (IRIntBase<"maxnum", [Vector]>
+ (IRIntBase<"fabs", [Vector]> $a),
+ (IRIntBase<"fabs", [Vector]> $b))>;
}
def vpselq: Intrinsic<Vector, (args Vector:$t, Vector:$f, Predicate:$pred),
@@ -640,8 +1143,8 @@ multiclass vshll_imm<int top> {
(IRInt<"vshll_imm", [DblVector, Vector]>
$v, $sh, (unsignedflag Scalar), top)>;
defm "": IntrinsicMX<DblVector, (args Vector:$v, imm_1toN:$sh,
- Predicate:$pred),
- (IRInt<"vshll_imm_predicated", [DblVector, Vector, Predicate]>
+ DblPredicate:$pred),
+ (IRInt<"vshll_imm_predicated", [DblVector, Vector, DblPredicate]>
$v, $sh, (unsignedflag Scalar), top, $pred, $inactive), 1, "_n">;
}
}
@@ -768,27 +1271,47 @@ defm sqrshr: ScalarSaturatingShiftReg<s32, s64>;
def lsll: LongScalarShift<u64, (args s32:$sh), (IRInt<"lsll"> $lo, $hi, $sh)>;
def asrl: LongScalarShift<s64, (args s32:$sh), (IRInt<"asrl"> $lo, $hi, $sh)>;
+multiclass vadcsbc {
+ def q: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+ (seq (IRInt<NAME, [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
+ (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+ (xval $pair, 0))>;
+ def iq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+ (seq (IRInt<NAME, [Vector]> $a, $b, 0):$pair,
+ (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+ (xval $pair, 0))>;
+ def q_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+ Ptr<uint>:$carry, Predicate:$pred),
+ (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+ (shl (load $carry), 29), $pred):$pair,
+ (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+ (xval $pair, 0))>;
+ def iq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+ Ptr<uint>:$carry, Predicate:$pred),
+ (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+ 0, $pred):$pair,
+ (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+ (xval $pair, 0))>;
+}
let params = T.Int32 in {
-def vadcq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
- (seq (IRInt<"vadc", [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
- (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
- (xval $pair, 0))>;
-def vadciq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
- (seq (IRInt<"vadc", [Vector]> $a, $b, 0):$pair,
- (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
- (xval $pair, 0))>;
-def vadcq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
- Ptr<uint>:$carry, Predicate:$pred),
- (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
- (shl (load $carry), 29), $pred):$pair,
- (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
- (xval $pair, 0))>;
-def vadciq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
- Ptr<uint>:$carry, Predicate:$pred),
- (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
- 0, $pred):$pair,
- (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
- (xval $pair, 0))>;
+ defm vadc: vadcsbc;
+ defm vsbc: vadcsbc;
+}
+
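[Editorial note: a hedged C sketch of the carry-chained adds generated by the vadcsbc multiclass above. vadciq forces the carry-in to zero, while vadcq consumes the carry passed through *carry, which the codegen packs into bit 29 to mirror FPSCR.C; the demo function is hypothetical.]
#include <arm_mve.h>

uint32x4_t carry_demo(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  unsigned carry;
  uint32x4_t lo = vadciq_u32(a, b, &carry); /* carry-in forced to 0, carry-out in 'carry' */
  uint32x4_t hi = vadcq_u32(lo, c, &carry); /* uses the carry left by the previous step */
  return hi;
}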
+let params = T.Int in {
+ def vshlcq: Intrinsic<
+ Vector, (args Vector:$v, Ptr<u32>:$ps, imm_1to32:$imm),
+ (seq (load $ps):$s,
+ (IRInt<"vshlc", [Vector]> $v, $s, $imm):$pair,
+ (store (xval $pair, 0), $ps),
+ (xval $pair, 1))>;
+ def vshlcq_m: Intrinsic<
+ Vector, (args Vector:$v, Ptr<u32>:$ps, imm_1to32:$imm, Predicate:$pred),
+ (seq (load $ps):$s,
+ (IRInt<"vshlc_predicated", [Vector, Predicate]>
+ $v, $s, $imm, $pred):$pair,
+ (store (xval $pair, 0), $ps),
+ (xval $pair, 1))>;
}
multiclass VectorComplexAddPred<dag not_halving, dag angle> {
@@ -922,6 +1445,33 @@ multiclass MVEBinaryVectorHoriz64R<dag subtract, dag exchange, string xsuffix> {
"vrmlldavha">;
}
+multiclass VADDV<bit acc, bit pred, string intbase, Type Scalar> {
+ defvar accArg = !if(acc, (args Scalar:$acc), (args));
+ defvar predArg = !if(pred, (args Predicate:$pred), (args));
+ defvar intrinsic = !if(pred,
+ IRInt<intbase # "_predicated", [Vector, Predicate]>,
+ IRInt<intbase, [Vector]>);
+ defvar intCG = !con((intrinsic $v, (unsignedflag Scalar)),
+ !if(pred, (? $pred), (?)));
+ defvar accCG = !if(acc, (add intCG, $acc), intCG);
+
+ def "": Intrinsic<Scalar, !con(accArg, (args Vector:$v), predArg), accCG>;
+}
+
+let params = T.Int in {
+defm vaddvq : VADDV<0, 0, "addv", Scalar32>;
+defm vaddvaq : VADDV<1, 0, "addv", Scalar32>;
+defm vaddvq_p : VADDV<0, 1, "addv", Scalar32>;
+defm vaddvaq_p : VADDV<1, 1, "addv", Scalar32>;
+}
+
+let params = [s32, u32] in {
+defm vaddlvq : VADDV<0, 0, "addlv", Scalar64>;
+defm vaddlvaq : VADDV<1, 0, "addlv", Scalar64>;
+defm vaddlvq_p : VADDV<0, 1, "addlv", Scalar64>;
+defm vaddlvaq_p : VADDV<1, 1, "addlv", Scalar64>;
+}
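[Editorial note: a hedged C sketch of the VADDV variants instantiated above: sum across lanes into a 32-bit scalar (64-bit for the vaddlv forms), optionally adding to an accumulator and/or under predication; prototypes are assumptions.]
#include <arm_mve.h>
#include <stdint.h>

int64_t addv_demo(int8x16_t v, int32x4_t w, int32_t acc, mve_pred16_t p) {
  int32_t s  = vaddvq_s8(v);       /* sum of all 16 lanes */
  int32_t sa = vaddvaq_s8(acc, v); /* acc + sum of lanes */
  int32_t sp = vaddvq_p_s8(v, p);  /* only predicated lanes contribute */
  int64_t l  = vaddlvq_s32(w);     /* 64-bit accumulation of 32-bit lanes */
  return l + s + sa + sp;
}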
+
let params = T.Int in {
def vabavq : Intrinsic<u32, (args u32:$a, Vector:$b, Vector:$c),
(IRInt<"vabav", [Vector]> (unsignedflag Scalar), $a, $b, $c)>;
@@ -957,6 +1507,25 @@ defm vrmlsldavh : MVEBinaryVectorHoriz64R<V.True, V.False, "">;
defm vrmlsldavh : MVEBinaryVectorHoriz64R<V.True, V.True, "x">;
}
+multiclass vrev_predicated<int revsize> {
+ defm "" : IntrinsicMX<Vector, (args Vector:$a, Predicate:$pred),
+ (IRInt<"vrev_predicated", [Vector, Predicate]>
+ $a, revsize, $pred, $inactive)>;
+}
+
+let params = T.All8 in {
+ def vrev16q : Intrinsic<Vector, (args Vector:$a), (vrev $a, 16)>;
+ defm vrev16q: vrev_predicated<16>;
+}
+let params = !listconcat(T.All8, T.All16) in {
+ def vrev32q : Intrinsic<Vector, (args Vector:$a), (vrev $a, 32)>;
+ defm vrev32q: vrev_predicated<32>;
+}
+let params = T.Usual in {
+ def vrev64q : Intrinsic<Vector, (args Vector:$a), (vrev $a, 64)>;
+ defm vrev64q: vrev_predicated<64>;
+}
+
foreach desttype = T.All in {
// We want a vreinterpretq between every pair of supported vector types
// _except_ that there shouldn't be one from a type to itself.
@@ -967,7 +1536,7 @@ foreach desttype = T.All in {
!if(!eq(!cast<string>(desttype),!cast<string>(srctype)),[],[srctype])))
in {
def "vreinterpretq_" # desttype: Intrinsic<
- VecOf<desttype>, (args Vector:$x), (bitcast $x, VecOf<desttype>)>;
+ VecOf<desttype>, (args Vector:$x), (vreinterpret $x, VecOf<desttype>)>;
}
}
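[Editorial note: a minimal hedged example of the vreinterpretq family generated by the foreach above: a pure reinterpretation of the same 128 bits between two distinct vector types, now lowered through the ARMMVEVectorReinterpret helper; the demo function is made up.]
#include <arm_mve.h>

int16x8_t reinterpret_demo(uint32x4_t v) {
  return vreinterpretq_s16_u32(v); /* same bits, viewed as eight signed 16-bit lanes */
}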
@@ -1002,3 +1571,39 @@ let params = T.All in {
def vsetq_lane: Intrinsic<Vector, (args unpromoted<Scalar>:$e, Vector:$v, imm_lane:$lane),
(ielt_var $v, $e, $lane)>;
}
+
+foreach desttype = !listconcat(T.Int16, T.Int32, T.Float) in {
+ defvar is_dest_float = !eq(desttype.kind, "f");
+ defvar is_dest_unsigned = !eq(desttype.kind, "u");
+ // First immediate operand of the LLVM intrinsic
+ defvar unsigned_flag = !if(is_dest_float, (unsignedflag Scalar),
+ !if(is_dest_unsigned, V.True, V.False));
+ // For float->int conversions _n and _x_n intrinsics are not polymorphic
+ // because the signedness of the destination type cannot be inferred.
+ defvar pnt_nx = !if(is_dest_float, PNT_2Type, PNT_None);
+
+ let params = !if(is_dest_float,
+ !if(!eq(desttype.size, 16), T.Int16, T.Int32),
+ !if(!eq(desttype.size, 16), [f16], [f32])) in {
+ let pnt = pnt_nx in
+ def "vcvtq_n_"#desttype : Intrinsic<VecOf<desttype>,
+ (args Vector:$a, imm_1toN:$b),
+ (IRInt<"vcvt_fix", [VecOf<desttype>, Vector]> unsigned_flag, $a, $b)>;
+
+ defm "vcvtq" : IntrinsicMX<VecOf<desttype>,
+ (args Vector:$a, imm_1toN:$b, Predicate:$p),
+ (IRInt<"vcvt_fix_predicated", [VecOf<desttype>, Vector, Predicate]>
+ unsigned_flag, $inactive, $a, $b, $p),
+ 1, "_n_"#desttype, PNT_2Type, pnt_nx>;
+ }
+}
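[Editorial note: a hedged C sketch of the fixed-point conversions defined above. The immediate is the number of fraction bits, and as the comment in the block notes, the float-to-integer _n forms spell out the destination type because it cannot be inferred; names are assumptions to be checked against the generated header.]
#include <arm_mve.h>

float32x4_t fixpoint_demo(int32x4_t q16, mve_pred16_t p) {
  float32x4_t f = vcvtq_n_f32_s32(q16, 16);    /* treat q16 as S15.16 fixed point */
  int32x4_t   r = vcvtq_x_n_s32_f32(f, 16, p); /* back to Q16, predicated _x form */
  return vaddq_f32(f, vcvtq_n_f32_s32(r, 16));
}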
+
+let params = T.Usual in {
+let pnt = PNT_NType in
+def vbrsrq_n: Intrinsic<Vector, (args Vector:$a, s32:$b),
+ (IRInt<"vbrsr", [Vector]> $a, $b)>;
+defm vbrsrq : IntrinsicMX<Vector, (args Vector:$a, s32:$b, Predicate:$pred),
+ (IRInt<"vbrsr_predicated", [Vector, Predicate]>
+ $inactive, $a, $b, $pred), 1, "_n",
+ PNT_NType, PNT_NType>;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_mve_defs.td b/contrib/llvm-project/clang/include/clang/Basic/arm_mve_defs.td
index a9afddb57968..4038a18027f8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_mve_defs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_mve_defs.td
@@ -57,6 +57,10 @@ class CGHelperFn<string func> : IRBuilderBase {
// an argument.
let prefix = func # "(Builder, ";
}
+class CGFHelperFn<string func> : IRBuilderBase {
+ // Like CGHelperFn, but also takes the CodeGenFunction itself.
+ let prefix = func # "(Builder, this, ";
+}
def add: IRBuilder<"CreateAdd">;
def mul: IRBuilder<"CreateMul">;
def not: IRBuilder<"CreateNot">;
@@ -89,10 +93,14 @@ def ielt_var: IRBuilder<"CreateInsertElement">;
def xelt_var: IRBuilder<"CreateExtractElement">;
def trunc: IRBuilder<"CreateTrunc">;
def bitcast: IRBuilder<"CreateBitCast">;
+def vreinterpret: CGFHelperFn<"ARMMVEVectorReinterpret">;
def extend: CGHelperFn<"SignOrZeroExtend"> {
let special_params = [IRBuilderIntParam<2, "bool">];
}
def zeroinit: IRFunction<"llvm::Constant::getNullValue">;
+def int_min: CGHelperFn<"ARMMVEConstantSplat<1,0>">;
+def int_max: CGHelperFn<"ARMMVEConstantSplat<0,1>">;
+def uint_max: CGHelperFn<"ARMMVEConstantSplat<1,1>">;
def undef: IRFunction<"UndefValue::get">;
def icmp_eq: IRBuilder<"CreateICmpEQ">;
def icmp_ne: IRBuilder<"CreateICmpNE">;
@@ -112,6 +120,36 @@ def fcmp_lt: IRBuilder<"CreateFCmpOLT">;
def fcmp_le: IRBuilder<"CreateFCmpOLE">;
def splat: CGHelperFn<"ARMMVEVectorSplat">;
def select: IRBuilder<"CreateSelect">;
+def fneg: IRBuilder<"CreateFNeg">;
+def sitofp: IRBuilder<"CreateSIToFP">;
+def uitofp: IRBuilder<"CreateUIToFP">;
+def fptosi: IRBuilder<"CreateFPToSI">;
+def fptoui: IRBuilder<"CreateFPToUI">;
+def vrev: CGHelperFn<"ARMMVEVectorElementReverse"> {
+ let special_params = [IRBuilderIntParam<1, "unsigned">];
+}
+def unzip: CGHelperFn<"VectorUnzip"> {
+ let special_params = [IRBuilderIntParam<1, "bool">];
+}
+def zip: CGHelperFn<"VectorZip">;
+
+// Trivial 'codegen' function that just returns its argument. Useful
+// for wrapping up a variable name like $foo into a thing you can pass
+// around as type 'dag'.
+def id: IRBuilderBase {
+ // All the other cases of IRBuilderBase use 'prefix' to specify a function
+ // call, including the open parenthesis. MveEmitter puts the closing paren on
+ // the end. So if we _just_ specify an open paren with no function name
+ // before it, then the generated C++ code will simply wrap the input value in
+ // parentheses, returning it unchanged.
+ let prefix = "(";
+}
+
+// Helper for making boolean flags in IR
+def i1: IRBuilderBase {
+ let prefix = "llvm::ConstantInt::get(Builder.getInt1Ty(), ";
+ let special_params = [IRBuilderIntParam<0, "bool">];
+}
// A node that makes an Address out of a pointer-typed Value, by
// providing an alignment as the second argument.
@@ -162,6 +200,10 @@ def seq;
// and 0 for a signed (or floating) one.
def unsignedflag;
+// 'bitsize' also takes a scalar type, and expands into an integer
+// constant giving its size in bits.
+def bitsize;
+
// If you put CustomCodegen<"foo"> in an intrinsic's codegen field, it
// indicates that the IR generation for that intrinsic is done by handwritten
// C++ and not autogenerated at all. The effect in the MVE builtin codegen
@@ -288,11 +330,15 @@ def SScalar: Signed<Scalar>;
def SVector: VecOf<SScalar>;
// DblVector expands to a vector of scalars of size twice the size of Scalar.
+// DblPredicate expands to a predicate corresponding to DblVector
// HalfVector, similarly, expands to a vector of half-sized scalars. And
// UHalfVector is a vector of half-sized _unsigned integers_.
def DblVector: VecOf<DoubleSize<Scalar>>;
-def HalfVector: VecOf<HalfSize<Scalar>>;
-def UHalfVector: VecOf<Unsigned<HalfSize<Scalar>>>;
+def DblPredicate: PredOf<DoubleSize<Scalar>>;
+def HalfScalar: HalfSize<Scalar>;
+def HalfVector: VecOf<HalfScalar>;
+def UHalfScalar: Unsigned<HalfSize<Scalar>>;
+def UHalfVector: VecOf<UHalfScalar>;
// Expands to the 32-bit integer of the same signedness as Scalar.
def Scalar32: CopyKind<u32, Scalar>;
@@ -319,6 +365,7 @@ class IB_EltBit<int base_, Type type_ = Scalar> : ImmediateBounds {
int base = base_;
Type type = type_;
}
+def IB_ExtraArg_LaneSize;
// -----------------------------------------------------------------------------
// End-user definitions for immediate arguments.
@@ -327,11 +374,13 @@ class IB_EltBit<int base_, Type type_ = Scalar> : ImmediateBounds {
// intrinsics like vmvnq or vorrq. imm_simd_restrictive has to be an 8-bit
// value shifted left by a whole number of bytes; imm_simd_vmvn can also be of
// the form 0xXXFF for some byte value XX.
-def imm_simd_restrictive : Immediate<u32, IB_UEltValue> {
+def imm_simd_restrictive : Immediate<Scalar, IB_UEltValue> {
let extra = "ShiftedByte";
+ let extraarg = "!lanesize";
}
-def imm_simd_vmvn : Immediate<u32, IB_UEltValue> {
+def imm_simd_vmvn : Immediate<Scalar, IB_UEltValue> {
let extra = "ShiftedByteOrXXFF";
+ let extraarg = "!lanesize";
}
// imm_1toN can take any value from 1 to N inclusive, where N is the number of
@@ -356,7 +405,7 @@ def imm_lane : Immediate<sint, IB_LaneIndex>;
def imm_1to32 : Immediate<sint, IB_ConstRange<1, 32>>;
// imm_1248 can be 1, 2, 4 or 8. (e.g. vidupq)
-def imm_1248 : Immediate<u32, IB_ConstRange<1, 8>> {
+def imm_1248 : Immediate<sint, IB_ConstRange<1, 8>> {
let extra = "Power2";
}
@@ -439,9 +488,16 @@ class Intrinsic<Type ret_, dag args_, dag codegen_> {
// True if the builtin has to avoid evaluating its arguments.
bit nonEvaluating = 0;
+ // True if the intrinsic needs only the C header part (no codegen, semantic
+ // checks, etc). Used for redeclaring MVE intrinsics in the arm_cde.h header.
+ bit headerOnly = 0;
+
  // Used to override the suffix letter to make e.g. vfooq_p16
// with an override suffix letter of "p".
string overrideKindLetter = "";
+
+ // Name of the architecture extension, used in the Clang builtin name
+ string builtinExtension = "mve";
}
// Sometimes you have to use two separate Intrinsic declarations to
@@ -457,37 +513,66 @@ class NameOverride<string basename_> {
// A wrapper to define both _m and _x versions of a predicated
// intrinsic.
+//
+// We provide optional parameters to override the polymorphic name
+// types separately for the _m and _x variants, because sometimes they
+// polymorph differently (typically because the type of the inactive
+// parameter can be used as a disambiguator if it's present).
multiclass IntrinsicMX<Type rettype, dag arguments, dag cg,
int wantXVariant = 1,
string nameSuffix = "",
+ PolymorphicNameType pnt_m = PNT_Type,
PolymorphicNameType pnt_x = PNT_Type> {
// The _m variant takes an initial parameter called $inactive, which
// provides the input value of the output register, i.e. all the
// inactive lanes in the predicated operation take their values from
// this.
- def "_m" # nameSuffix:
- Intrinsic<rettype, !con((args rettype:$inactive), arguments), cg>;
+ def : Intrinsic<rettype, !con((args rettype:$inactive), arguments), cg>,
+ NameOverride<NAME # "_m" # nameSuffix> {
+ let pnt = pnt_m;
+ }
foreach unusedVar = !if(!eq(wantXVariant, 1), [1], []<int>) in {
// The _x variant leaves off that parameter, and simply uses an
// undef value of the same type.
+
+ def : Intrinsic<rettype, arguments, (seq (undef rettype):$inactive, cg)>,
+ NameOverride<NAME # "_x" # nameSuffix> {
+ let pnt = pnt_x;
+ }
+ }
+}
+
+// Same as above, but with an additional parameter 'basename' which overrides
+// the C intrinsic base name
+multiclass IntrinsicMXNameOverride<Type rettype, dag arguments, dag cg,
+ string basename, int wantXVariant = 1,
+ string nameSuffix = "",
+ PolymorphicNameType pnt_m = PNT_Type,
+ PolymorphicNameType pnt_x = PNT_Type> {
+ def "_m" # nameSuffix:
+ Intrinsic<rettype, !con((args rettype:$inactive), arguments), cg>,
+ NameOverride<basename # "_m" # nameSuffix> {
+ let pnt = pnt_m;
+ }
+
+ foreach unusedVar = !if(!eq(wantXVariant, 1), [1], []<int>) in {
def "_x" # nameSuffix:
- Intrinsic<rettype, arguments, (seq (undef rettype):$inactive, cg)> {
- // Allow overriding of the polymorphic name type, because
- // sometimes the _m and _x variants polymorph differently
- // (typically because the type of the inactive parameter can be
- // used as a disambiguator if it's present).
+ Intrinsic<rettype, arguments, (seq (undef rettype):$inactive, cg)>,
+ NameOverride<basename # "_x" # nameSuffix> {
let pnt = pnt_x;
}
}
}
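[Editorial note: a hedged C illustration of the _m/_x split these wrappers implement, using vaddq (itself generated through IntrinsicMX) as the example: the _m form supplies an explicit inactive vector, the _x form leaves the disabled lanes undefined; the demo function is hypothetical.]
#include <arm_mve.h>

int32x4_t mx_demo(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
  int32x4_t m = vaddq_m_s32(inactive, a, b, p); /* disabled lanes copied from 'inactive' */
  int32x4_t x = vaddq_x_s32(a, b, p);           /* disabled lanes have undefined contents */
  return veorq_s32(m, x);
}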
+
// -----------------------------------------------------------------------------
// Convenience lists of parameter types. 'T' is just a container record, so you
// can define a typical intrinsic with 'let Params = T.Usual', or similar,
// instead of having to repeat a long list every time.
def T {
+ list<Type> None = [Void];
list<Type> Signed = [s8, s16, s32];
list<Type> Unsigned = [u8, u16, u32];
list<Type> Int = Signed # Unsigned;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
index a4dc21b64311..d0269f31c32d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
@@ -51,39 +51,39 @@ def OP_FMLA_N : Op<(call "vfma", $p0, $p1, (dup $p2))>;
def OP_FMLS_N : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>;
def OP_MLAL_N : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MLSL_N : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>;
-def OP_MUL_LN : Op<(op "*", $p0, (splat $p1, $p2))>;
-def OP_MULX_LN : Op<(call "vmulx", $p0, (splat $p1, $p2))>;
+def OP_MUL_LN : Op<(op "*", $p0, (call_mangled "splat_lane", $p1, $p2))>;
+def OP_MULX_LN : Op<(call "vmulx", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULL_N : Op<(call "vmull", $p0, (dup $p1))>;
-def OP_MULL_LN : Op<(call "vmull", $p0, (splat $p1, $p2))>;
-def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (splat $p1, $p2))>;
-def OP_MLA_LN : Op<(op "+", $p0, (op "*", $p1, (splat $p2, $p3)))>;
-def OP_MLS_LN : Op<(op "-", $p0, (op "*", $p1, (splat $p2, $p3)))>;
-def OP_MLAL_LN : Op<(op "+", $p0, (call "vmull", $p1, (splat $p2, $p3)))>;
+def OP_MULL_LN : Op<(call "vmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
+def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (call_mangled "splat_lane", $p1, $p2))>;
+def OP_MLA_LN : Op<(op "+", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_MLS_LN : Op<(op "-", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_MLAL_LN : Op<(op "+", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1),
- (splat $p2, $p3)))>;
-def OP_MLSL_LN : Op<(op "-", $p0, (call "vmull", $p1, (splat $p2, $p3)))>;
+ (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_MLSL_LN : Op<(op "-", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1),
- (splat $p2, $p3)))>;
+ (call_mangled "splat_lane", $p2, $p3)))>;
def OP_QDMULL_N : Op<(call "vqdmull", $p0, (dup $p1))>;
-def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (splat $p1, $p2))>;
+def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0),
- (splat $p1, $p2))>;
+ (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMLAL_N : Op<(call "vqdmlal", $p0, $p1, (dup $p2))>;
-def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (splat $p2, $p3))>;
+def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
- (splat $p2, $p3))>;
+ (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSL_N : Op<(call "vqdmlsl", $p0, $p1, (dup $p2))>;
-def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (splat $p2, $p3))>;
+def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
- (splat $p2, $p3))>;
+ (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
-def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (splat $p1, $p2))>;
-def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (splat $p1, $p2))>;
+def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
+def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
def OP_QRDMLAH : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, $p2))>;
def OP_QRDMLSH : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, $p2))>;
-def OP_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, (splat $p2, $p3)))>;
-def OP_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, (splat $p2, $p3)))>;
+def OP_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_FMS_LN : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
def OP_TRN1 : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
@@ -115,7 +115,7 @@ def OP_HI : Op<(shuffle $p0, $p0, (highhalf mask0))>;
def OP_LO : Op<(shuffle $p0, $p0, (lowhalf mask0))>;
def OP_CONC : Op<(shuffle $p0, $p1, (add mask0, mask1))>;
def OP_DUP : Op<(dup $p0)>;
-def OP_DUP_LN : Op<(splat $p0, $p1)>;
+def OP_DUP_LN : Op<(call_mangled "splat_lane", $p0, $p1)>;
def OP_SEL : Op<(cast "R", (op "|",
(op "&", $p0, (cast $p0, $p1)),
(op "&", (op "~", $p0), (cast $p0, $p2))))>;
@@ -207,10 +207,10 @@ def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
def OP_DOT_LN
: Op<(call "vdot", $p0, $p1,
- (bitcast $p1, (splat(bitcast "uint32x2_t", $p2), $p3)))>;
+ (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_DOT_LNQ
: Op<(call "vdot", $p0, $p1,
- (bitcast $p1, (splat(bitcast "uint32x4_t", $p2), $p3)))>;
+ (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_FMLAL_LN : Op<(call "vfmlal_low", $p0, $p1,
(dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
@@ -221,8 +221,85 @@ def OP_FMLAL_LN_Hi : Op<(call "vfmlal_high", $p0, $p1,
def OP_FMLSL_LN_Hi : Op<(call "vfmlsl_high", $p0, $p1,
(dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
+def OP_USDOT_LN
+ : Op<(call "vusdot", $p0, $p1,
+ (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)))>;
+def OP_USDOT_LNQ
+ : Op<(call "vusdot", $p0, $p1,
+ (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)))>;
+
+// sudot splats the second vector and then calls vusdot
+def OP_SUDOT_LN
+ : Op<(call "vusdot", $p0,
+ (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)), $p1)>;
+def OP_SUDOT_LNQ
+ : Op<(call "vusdot", $p0,
+ (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)), $p1)>;
+
+def OP_BFDOT_LN
+ : Op<(call "vbfdot", $p0, $p1,
+ (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x2_t", $p2), $p3)))>;
+
+def OP_BFDOT_LNQ
+ : Op<(call "vbfdot", $p0, $p1,
+ (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x4_t", $p2), $p3)))>;
+
+def OP_BFMLALB_LN
+ : Op<(call "vbfmlalb", $p0, $p1,
+ (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
+
+def OP_BFMLALT_LN
+ : Op<(call "vbfmlalt", $p0, $p1,
+ (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
+
+def OP_VCVT_F32_BF16
+ : Op<(bitcast "R",
+ (call "vshll_n", (bitcast "int16x4_t", $p0),
+ (literal "int32_t", "16")))>;
+def OP_VCVT_F32_BF16_LO
+ : Op<(call "vcvt_f32_bf16", (call "vget_low", $p0))>;
+def OP_VCVT_F32_BF16_HI
+ : Op<(call "vcvt_f32_bf16", (call "vget_high", $p0))>;
+
+def OP_VCVT_BF16_F32_LO_A64
+ : Op<(call "__a64_vcvtq_low_bf16", $p0)>;
+def OP_VCVT_BF16_F32_A64
+ : Op<(call "vget_low", (call "__a64_vcvtq_low_bf16", $p0))>;
+
+def OP_VCVT_BF16_F32_A32
+ : Op<(call "__a32_vcvt_bf16", $p0)>;
+
+def OP_VCVT_BF16_F32_LO_A32
+ : Op<(call "vcombine", (cast "bfloat16x4_t", (literal "uint64_t", "0ULL")),
+ (call "__a32_vcvt_bf16", $p0))>;
+def OP_VCVT_BF16_F32_HI_A32
+ : Op<(call "vcombine", (call "__a32_vcvt_bf16", $p1),
+ (call "vget_low", $p0))>;
+
+def OP_CVT_F32_BF16
+ : Op<(bitcast "R", (op "<<", (bitcast "int32_t", $p0),
+ (literal "int32_t", "16")))>;
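[Editorial note: bfloat16 is the top 16 bits of an IEEE binary32 value, so the widening conversions above are just a 16-bit left shift of the raw lane bits, as the vshll_n and "<<" expansions encode. A hedged sketch, assuming a toolchain with the BF16 extension enabled; the demo function is made up.]
#include <arm_neon.h>

float32x4_t bf16_demo(bfloat16x4_t v) {
  return vcvt_f32_bf16(v); /* each lane: the bf16 bits shifted up 16, viewed as float32 */
}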
+
+//===----------------------------------------------------------------------===//
+// Auxiliary Instructions
+//===----------------------------------------------------------------------===//
+
+// Splat operation - performs a range-checked splat over a vector
+def SPLAT : WInst<"splat_lane", ".(!q)I",
+ "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl">;
+def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
+ "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
+ let isLaneQ = 1;
+}
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+ def SPLAT_BF : WInst<"splat_lane", ".(!q)I", "bQb">;
+ def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
+ let isLaneQ = 1;
+ }
+}
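[Editorial note: a hedged C view of what the *_lane operations rewritten above mean to the user. The lane operand is broadcast across a vector through the new range-checked splat_lane builtin and then combined element-wise; the demo function is hypothetical.]
#include <arm_neon.h>

int16x4_t mul_lane_demo(int16x4_t a, int16x4_t b) {
  return vmul_lane_s16(a, b, 2); /* multiply every lane of a by b[2] */
}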
+
//===----------------------------------------------------------------------===//
-// Instructions
+// Intrinsics
//===----------------------------------------------------------------------===//
////////////////////////////////////////////////////////////////////////////////
@@ -528,9 +605,16 @@ def VMULL_LANE : SOpInst<"vmull_lane", "(>Q)..I", "siUsUi", OP_MULL_LN>;
def VQDMULL_N : SOpInst<"vqdmull_n", "(>Q).1", "si", OP_QDMULL_N>;
def VQDMULL_LANE : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
def VQDMULH_N : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
-def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
def VQRDMULH_N : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;
+
+let ArchGuard = "!defined(__aarch64__)" in {
+def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
+}
+let ArchGuard = "defined(__aarch64__)" in {
+def A64_VQDMULH_LANE : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
+def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
+}
let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
@@ -587,11 +671,23 @@ def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
+
+class REINTERPRET_CROSS_SELF<string Types> :
+ NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
+ let CartesianProductWith = Types;
+}
+
+multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
+ def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
+ let CartesianProductWith = TypesB;
+ }
+ def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
+ let CartesianProductWith = TypesA;
+ }
+}
+
// E.3.31 Vector reinterpret cast operations
-def VREINTERPRET
- : NoTestOpInst<"vreinterpret", "..",
- "csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT> {
- let CartesianProductOfTypes = 1;
+def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
let ArchGuard = "!defined(__aarch64__)";
let BigEndianSafe = 1;
}
@@ -874,16 +970,22 @@ def COPY_LANE : IOpInst<"vcopy_lane", "..I.I",
def COPYQ_LANE : IOpInst<"vcopy_lane", "..IqI",
"QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
def COPY_LANEQ : IOpInst<"vcopy_laneq", "..IQI",
- "csilPcPsPlUcUsUiUlfd", OP_COPY_LN>;
+ "csilPcPsPlUcUsUiUlfd", OP_COPY_LN> {
+ let isLaneQ = 1;
+}
def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
- "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
+ "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN> {
+ let isLaneQ = 1;
+}
////////////////////////////////////////////////////////////////////////////////
// Set all lanes to same value
def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "hdQhQdPlQPl", OP_DUP_LN>;
def VDUP_LANE2: WOpInst<"vdup_laneq", ".QI",
"csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
- OP_DUP_LN>;
+ OP_DUP_LN> {
+ let isLaneQ = 1;
+}
def DUP_N : WOpInst<"vdup_n", ".1", "dQdPlQPl", OP_DUP>;
def MOV_N : WOpInst<"vmov_n", ".1", "dQdPlQPl", OP_DUP>;
@@ -899,38 +1001,60 @@ def CREATE : NoTestOpInst<"vcreate", ".(IU>)", "dPl", OP_CAST> {
////////////////////////////////////////////////////////////////////////////////
def VMLA_LANEQ : IOpInst<"vmla_laneq", "...QI",
- "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
+ "siUsUifQsQiQUsQUiQf", OP_MLA_LN> {
+ let isLaneQ = 1;
+}
def VMLS_LANEQ : IOpInst<"vmls_laneq", "...QI",
- "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
+ "siUsUifQsQiQUsQUiQf", OP_MLS_LN> {
+ let isLaneQ = 1;
+}
def VFMA_LANE : IInst<"vfma_lane", "...qI", "fdQfQd">;
def VFMA_LANEQ : IInst<"vfma_laneq", "...QI", "fdQfQd"> {
let isLaneQ = 1;
}
def VFMS_LANE : IOpInst<"vfms_lane", "...qI", "fdQfQd", OP_FMS_LN>;
-def VFMS_LANEQ : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ>;
+def VFMS_LANEQ : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ> {
+ let isLaneQ = 1;
+}
-def VMLAL_LANEQ : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN>;
+def VMLAL_LANEQ : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN> {
+ let isLaneQ = 1;
+}
def VMLAL_HIGH_LANE : SOpInst<"vmlal_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
OP_MLALHi_LN>;
def VMLAL_HIGH_LANEQ : SOpInst<"vmlal_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
- OP_MLALHi_LN>;
-def VMLSL_LANEQ : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN>;
+ OP_MLALHi_LN> {
+ let isLaneQ = 1;
+}
+def VMLSL_LANEQ : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN> {
+ let isLaneQ = 1;
+}
def VMLSL_HIGH_LANE : SOpInst<"vmlsl_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
OP_MLSLHi_LN>;
def VMLSL_HIGH_LANEQ : SOpInst<"vmlsl_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
- OP_MLSLHi_LN>;
+ OP_MLSLHi_LN> {
+ let isLaneQ = 1;
+}
-def VQDMLAL_LANEQ : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN>;
+def VQDMLAL_LANEQ : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN> {
+ let isLaneQ = 1;
+}
def VQDMLAL_HIGH_LANE : SOpInst<"vqdmlal_high_lane", "(>Q)(>Q)Q.I", "si",
OP_QDMLALHi_LN>;
def VQDMLAL_HIGH_LANEQ : SOpInst<"vqdmlal_high_laneq", "(>Q)(>Q)QQI", "si",
- OP_QDMLALHi_LN>;
-def VQDMLSL_LANEQ : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN>;
+ OP_QDMLALHi_LN> {
+ let isLaneQ = 1;
+}
+def VQDMLSL_LANEQ : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN> {
+ let isLaneQ = 1;
+}
def VQDMLSL_HIGH_LANE : SOpInst<"vqdmlsl_high_lane", "(>Q)(>Q)Q.I", "si",
OP_QDMLSLHi_LN>;
def VQDMLSL_HIGH_LANEQ : SOpInst<"vqdmlsl_high_laneq", "(>Q)(>Q)QQI", "si",
- OP_QDMLSLHi_LN>;
+ OP_QDMLSLHi_LN> {
+ let isLaneQ = 1;
+}
// Newly added double parameter for vmul_lane in aarch64
// Note: d type is handled by SCALAR_VMUL_LANE
@@ -938,31 +1062,48 @@ def VMUL_LANE_A64 : IOpInst<"vmul_lane", "..qI", "Qd", OP_MUL_LN>;
// Note: d type is handled by SCALAR_VMUL_LANEQ
def VMUL_LANEQ : IOpInst<"vmul_laneq", "..QI",
- "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN>;
-def VMULL_LANEQ : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN>;
+ "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN> {
+ let isLaneQ = 1;
+}
+def VMULL_LANEQ : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN> {
+ let isLaneQ = 1;
+}
def VMULL_HIGH_LANE : SOpInst<"vmull_high_lane", "(>Q)Q.I", "siUsUi",
OP_MULLHi_LN>;
def VMULL_HIGH_LANEQ : SOpInst<"vmull_high_laneq", "(>Q)QQI", "siUsUi",
- OP_MULLHi_LN>;
+ OP_MULLHi_LN> {
+ let isLaneQ = 1;
+}
-def VQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN>;
+def VQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN> {
+ let isLaneQ = 1;
+}
def VQDMULL_HIGH_LANE : SOpInst<"vqdmull_high_lane", "(>Q)Q.I", "si",
OP_QDMULLHi_LN>;
def VQDMULL_HIGH_LANEQ : SOpInst<"vqdmull_high_laneq", "(>Q)QQI", "si",
- OP_QDMULLHi_LN>;
-
-def VQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "..QI", "siQsQi", OP_QDMULH_LN>;
-def VQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "..QI", "siQsQi", OP_QRDMULH_LN>;
+ OP_QDMULLHi_LN> {
+ let isLaneQ = 1;
+}
+let isLaneQ = 1 in {
+def VQDMULH_LANEQ : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
+def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
+}
let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
-def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN>;
-def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN>;
+def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
+ let isLaneQ = 1;
+}
+def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
+ let isLaneQ = 1;
+}
}
// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
// Note: d type is implemented by SCALAR_VMULX_LANEQ
-def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN>;
+def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN> {
+ let isLaneQ = 1;
+}
////////////////////////////////////////////////////////////////////////////////
// Across vectors class
@@ -1095,12 +1236,9 @@ def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
-def VVREINTERPRET
- : NoTestOpInst<"vreinterpret", "..",
- "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", OP_REINT> {
- let CartesianProductOfTypes = 1;
- let BigEndianSafe = 1;
+def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)";
+ let BigEndianSafe = 1;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1372,11 +1510,15 @@ def SCALAR_UQXTN : SInst<"vqmovn", "(1<)1", "SUsSUiSUl">;
// Scalar Floating Point multiply (scalar, by element)
def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "11.I", "SfSd", OP_SCALAR_MUL_LN>;
-def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN>;
+def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN> {
+ let isLaneQ = 1;
+}
// Scalar Floating Point multiply extended (scalar, by element)
def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "11.I", "SfSd", OP_SCALAR_MULX_LN>;
-def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN>;
+def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN> {
+ let isLaneQ = 1;
+}
def SCALAR_VMUL_N : IInst<"vmul_n", "..1", "d">;
@@ -1392,48 +1534,70 @@ def SCALAR_VMUL_LANEQ : IInst<"vmul_laneq", "..QI", "d"> {
def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "d", OP_SCALAR_VMULX_LN>;
// VMULX_LANEQ d type implemented using scalar vmulx_laneq
-def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ>;
+def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ> {
+ let isLaneQ = 1;
+}
// Scalar Floating Point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANE : IInst<"vfma_lane", "111.I", "SfSd">;
-def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd">;
+def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd"> {
+ let isLaneQ = 1;
+}
// Scalar Floating Point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "111.I", "SfSd", OP_FMS_LN>;
-def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ>;
+def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ> {
+ let isLaneQ = 1;
+}
// Signed Saturating Doubling Multiply Long (scalar by element)
def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "(1>)1.I", "SsSi", OP_SCALAR_QDMULL_LN>;
-def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN>;
+def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN> {
+ let isLaneQ = 1;
+}
// Signed Saturating Doubling Multiply-Add Long (scalar by element)
def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "(1>)(1>)1.I", "SsSi">;
-def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi">;
+def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi"> {
+ let isLaneQ = 1;
+}
// Signed Saturating Doubling Multiply-Subtract Long (scalar by element)
def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "(1>)(1>)1.I", "SsSi">;
-def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi">;
+def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi"> {
+ let isLaneQ = 1;
+}
// Scalar Integer Saturating Doubling Multiply Half High (scalar by element)
def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "11.I", "SsSi", OP_SCALAR_QDMULH_LN>;
-def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN>;
+def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN> {
+ let isLaneQ = 1;
+}
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "11.I", "SsSi", OP_SCALAR_QRDMULH_LN>;
-def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN>;
+def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN> {
+ let isLaneQ = 1;
+}
let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
-def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN>;
+def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN> {
+ let isLaneQ = 1;
+}
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_QRDMLSH_LN>;
-def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN>;
+def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN> {
+ let isLaneQ = 1;
+}
}
def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
-def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
+def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs"> {
+ let isLaneQ = 1;
+}
}
// ARMv8.2-A FP16 vector intrinsics for A32/A64.
@@ -1597,36 +1761,52 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarc
// FMA lane
def VFMA_LANEH : IInst<"vfma_lane", "...qI", "hQh">;
- def VFMA_LANEQH : IInst<"vfma_laneq", "...QI", "hQh">;
+ def VFMA_LANEQH : IInst<"vfma_laneq", "...QI", "hQh"> {
+ let isLaneQ = 1;
+ }
// FMA lane with scalar argument
def FMLA_NH : SOpInst<"vfma_n", "...1", "hQh", OP_FMLA_N>;
// Scalar floating point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANEH : IInst<"vfma_lane", "111.I", "Sh">;
- def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh">;
+ def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh"> {
+ let isLaneQ = 1;
+ }
// FMS lane
def VFMS_LANEH : IOpInst<"vfms_lane", "...qI", "hQh", OP_FMS_LN>;
- def VFMS_LANEQH : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ>;
+ def VFMS_LANEQH : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ> {
+ let isLaneQ = 1;
+ }
// FMS lane with scalar argument
def FMLS_NH : SOpInst<"vfms_n", "...1", "hQh", OP_FMLS_N>;
// Scalar floating point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANEH : IOpInst<"vfms_lane", "111.I", "Sh", OP_FMS_LN>;
- def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ>;
+ def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ> {
+ let isLaneQ = 1;
+ }
// Mul lane
- def VMUL_LANEQH : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN>;
+ def VMUL_LANEQH : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN> {
+ let isLaneQ = 1;
+ }
// Scalar floating point multiply (scalar, by element)
def SCALAR_FMUL_LANEH : IOpInst<"vmul_lane", "11.I", "Sh", OP_SCALAR_MUL_LN>;
- def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN>;
+ def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN> {
+ let isLaneQ = 1;
+ }
// Mulx lane
def VMULX_LANEH : IOpInst<"vmulx_lane", "..qI", "hQh", OP_MULX_LN>;
- def VMULX_LANEQH : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN>;
+ def VMULX_LANEQH : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN> {
+ let isLaneQ = 1;
+ }
def VMULX_NH : IOpInst<"vmulx_n", "..1", "hQh", OP_MULX_N>;
// Scalar floating point mulx (scalar, by element)
def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "11.I", "Sh">;
- def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh">;
+ def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh"> {
+ let isLaneQ = 1;
+ }
// ARMv8.2-A FP16 reduction vector intrinsics.
def VMAXVH : SInst<"vmaxv", "1.", "hQh">;
@@ -1643,7 +1823,9 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarc
def VUZP2H : SOpInst<"vuzp2", "...", "hQh", OP_UZP2>;
def SCALAR_VDUP_LANEH : IInst<"vdup_lane", "1.I", "Sh">;
- def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh">;
+ def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh"> {
+ let isLaneQ = 1;
+ }
}
// v8.2-A dot product instructions.
@@ -1653,7 +1835,9 @@ let ArchGuard = "defined(__ARM_FEATURE_DOTPROD)" in {
}
let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in {
// Variants indexing into a 128-bit vector are A64 only.
- def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ>;
+ def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
+ let isLaneQ = 1;
+ }
}
// v8.2-A FP16 fused multiply-add long instructions.
@@ -1668,10 +1852,54 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)" in {
def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN_Hi>;
def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN_Hi>;
- def VFMLAL_LANEQ_LOW : SOpInst<"vfmlal_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN>;
- def VFMLSL_LANEQ_LOW : SOpInst<"vfmlsl_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN>;
- def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi>;
- def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi>;
+ def VFMLAL_LANEQ_LOW : SOpInst<"vfmlal_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN> {
+ let isLaneQ = 1;
+ }
+ def VFMLSL_LANEQ_LOW : SOpInst<"vfmlsl_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN> {
+ let isLaneQ = 1;
+ }
+ def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi> {
+ let isLaneQ = 1;
+ }
+ def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi> {
+ let isLaneQ = 1;
+ }
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_MATMUL_INT8)" in {
+ def VMMLA : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
+ def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;
+
+ def VUSDOT : SInst<"vusdot", "..(<<U)(<<)", "iQi">;
+
+ def VUSDOT_LANE : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
+ def VSUDOT_LANE : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;
+
+ let ArchGuard = "defined(__aarch64__)" in {
+ let isLaneQ = 1 in {
+ def VUSDOT_LANEQ : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
+ def VSUDOT_LANEQ : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
+ }
+ }
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+ def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
+ def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
+ def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ> {
+ let isLaneQ = 1;
+ }
+
+ def VFMMLA_BF : SInst<"vbfmmla", "..BB", "Qf">;
+
+ def VFMLALB_BF : SInst<"vbfmlalb", "..BB", "Qf">;
+ def VFMLALT_BF : SInst<"vbfmlalt", "..BB", "Qf">;
+
+ def VFMLALB_LANE_BF : SOpInst<"vbfmlalb_lane", "..B(Bq)I", "Qf", OP_BFMLALB_LN>;
+ def VFMLALB_LANEQ_BF : SOpInst<"vbfmlalb_laneq", "..B(BQ)I", "Qf", OP_BFMLALB_LN>;
+
+ def VFMLALT_LANE_BF : SOpInst<"vbfmlalt_lane", "..B(Bq)I", "Qf", OP_BFMLALT_LN>;
+ def VFMLALT_LANEQ_BF : SOpInst<"vbfmlalt_laneq", "..B(BQ)I", "Qf", OP_BFMLALT_LN>;
}
// v8.3-A Vector complex addition intrinsics
@@ -1690,4 +1918,102 @@ let ArchGuard = "defined(__ARM_FEATURE_COMPLEX)" in {
let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)" in {
def VCADDQ_ROT90_FP64 : SInst<"vcaddq_rot90", "QQQ", "d">;
def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;
-}
\ No newline at end of file
+}
+
+// V8.2-A BFloat intrinsics
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+ def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
+ let BigEndianSafe = 1;
+ }
+
+ def VDUP_N_BF : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;
+
+ def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
+ def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN> {
+ let isLaneQ = 1;
+ }
+
+ def VCOMBINE_BF : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;
+
+ def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
+ def VGET_LOW_BF : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;
+
+ def VGET_LANE_BF : IInst<"vget_lane", "1.I", "bQb">;
+ def VSET_LANE_BF : IInst<"vset_lane", ".1.I", "bQb">;
+ def SCALAR_VDUP_LANE_BF : IInst<"vdup_lane", "1.I", "Sb">;
+ def SCALAR_VDUP_LANEQ_BF : IInst<"vdup_laneq", "1QI", "Sb"> {
+ let isLaneQ = 1;
+ }
+
+ def VLD1_BF : WInst<"vld1", ".(c*!)", "bQb">;
+ def VLD2_BF : WInst<"vld2", "2(c*!)", "bQb">;
+ def VLD3_BF : WInst<"vld3", "3(c*!)", "bQb">;
+ def VLD4_BF : WInst<"vld4", "4(c*!)", "bQb">;
+
+ def VST1_BF : WInst<"vst1", "v*(.!)", "bQb">;
+ def VST2_BF : WInst<"vst2", "v*(2!)", "bQb">;
+ def VST3_BF : WInst<"vst3", "v*(3!)", "bQb">;
+ def VST4_BF : WInst<"vst4", "v*(4!)", "bQb">;
+
+ def VLD1_X2_BF : WInst<"vld1_x2", "2(c*!)", "bQb">;
+ def VLD1_X3_BF : WInst<"vld1_x3", "3(c*!)", "bQb">;
+ def VLD1_X4_BF : WInst<"vld1_x4", "4(c*!)", "bQb">;
+
+ def VST1_X2_BF : WInst<"vst1_x2", "v*(2!)", "bQb">;
+ def VST1_X3_BF : WInst<"vst1_x3", "v*(3!)", "bQb">;
+ def VST1_X4_BF : WInst<"vst1_x4", "v*(4!)", "bQb">;
+
+ def VLD1_LANE_BF : WInst<"vld1_lane", ".(c*!).I", "bQb">;
+ def VLD2_LANE_BF : WInst<"vld2_lane", "2(c*!)2I", "bQb">;
+ def VLD3_LANE_BF : WInst<"vld3_lane", "3(c*!)3I", "bQb">;
+ def VLD4_LANE_BF : WInst<"vld4_lane", "4(c*!)4I", "bQb">;
+ def VST1_LANE_BF : WInst<"vst1_lane", "v*(.!)I", "bQb">;
+ def VST2_LANE_BF : WInst<"vst2_lane", "v*(2!)I", "bQb">;
+ def VST3_LANE_BF : WInst<"vst3_lane", "v*(3!)I", "bQb">;
+ def VST4_LANE_BF : WInst<"vst4_lane", "v*(4!)I", "bQb">;
+
+ def VLD1_DUP_BF : WInst<"vld1_dup", ".(c*!)", "bQb">;
+ def VLD2_DUP_BF : WInst<"vld2_dup", "2(c*!)", "bQb">;
+ def VLD3_DUP_BF : WInst<"vld3_dup", "3(c*!)", "bQb">;
+ def VLD4_DUP_BF : WInst<"vld4_dup", "4(c*!)", "bQb">;
+
+ def VCVT_F32_BF16 : SOpInst<"vcvt_f32_bf16", "(F>)(Bq!)", "Qb", OP_VCVT_F32_BF16>;
+ def VCVT_LOW_F32_BF16 : SOpInst<"vcvt_low_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_LO>;
+ def VCVT_HIGH_F32_BF16 : SOpInst<"vcvt_high_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_HI>;
+
+ def SCALAR_CVT_BF16_F32 : SInst<"vcvth_bf16", "(1B)1", "f">;
+ def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)" in {
+ def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
+ def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
+ def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
+ def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
+ def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
+ def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
+ def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
+ def VCVT_BF16_F32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A64>;
+
+ def COPY_LANE_BF16 : IOpInst<"vcopy_lane", "..I.I", "b", OP_COPY_LN>;
+ def COPYQ_LANE_BF16 : IOpInst<"vcopy_lane", "..IqI", "Qb", OP_COPY_LN>;
+ def COPY_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..IQI", "b", OP_COPY_LN>;
+ def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)" in {
+ let BigEndianSafe = 1 in {
+ defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
+ "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
+ }
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16) && defined(__aarch64__)" in {
+ let BigEndianSafe = 1 in {
+ defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
+ "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
+ }
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
index 28b00d162a00..dd20b70433ef 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
@@ -35,7 +35,7 @@ class LOp<list<dag> ops> : Operation<ops>;
// These defs and classes are used internally to implement the SetTheory
// expansion and should be ignored.
foreach Index = 0-63 in
- def sv##Index;
+ def sv#Index;
class MaskExpand;
//===----------------------------------------------------------------------===//
@@ -60,6 +60,15 @@ def op;
// example: (call "vget_high", $p0) -> "vgetq_high_s16(__p0)"
// (assuming $p0 has type int16x8_t).
def call;
+// call_mangled - Invoke another intrinsic matching the mangled name variation
+// of the caller's base type. If there is no intrinsic defined
+// that has the variation and takes the given types, an error
+// is generated at tblgen time.
+// example: (call_mangled "vfma_lane", $p0, $p1) -> "vfma_lane(__p0, __p1)"
+// (assuming non-LaneQ caller)
+// (call_mangled "vfma_lane", $p0, $p1) -> "vfma_laneq(__p0, __p1)"
+// (assuming LaneQ caller)
+def call_mangled;
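
As an illustration only (this record is made up for the example and is not part of the patch), an Operation built on call_mangled could look like:

  def OP_EXAMPLE_LN : Op<(call_mangled "vfma_lane", $p0, $p1, $p2, $p3)>;

An intrinsic using this Operation with isLaneQ left at 0 would expand to a call to vfma_lane(__p0, __p1, __p2, __p3), while the laneq variant of the same intrinsic (isLaneQ = 1) would expand to vfma_laneq with the same argument list.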
// cast - Perform a cast to a different type. This gets emitted as a static
// C-style cast. For a pure reinterpret cast (T x = *(T*)&y), use
// "bitcast".
@@ -79,6 +88,7 @@ def call;
// - "D" - Double the number of lanes in the type.
// - "8" - Convert type to an equivalent vector of 8-bit signed
// integers.
+// - "32" - Convert type to an equivalent vector of 32-bit integers.
// example: (cast "R", "U", $p0) -> "(uint32x4_t)__p0" (assuming the return
// value is of type "int32x4_t").
// (cast $p0, "D", "8", $p1) -> "(int8x16_t)__p1" (assuming __p0
@@ -100,12 +110,6 @@ def dup;
// example: (dup_typed $p1, $p2) -> "(float16x4_t) {__p2, __p2, __p2, __p2}"
// (assuming __p1 is float16x4_t, and __p2 is a compatible scalar).
def dup_typed;
-// splat - Take a vector and a lane index, and return a vector of the same type
-// containing repeated instances of the source vector at the lane index.
-// example: (splat $p0, $p1) ->
-// "__builtin_shufflevector(__p0, __p0, __p1, __p1, __p1, __p1)"
-// (assuming __p0 has four elements).
-def splat;
// save_temp - Create a temporary (local) variable. The variable takes a name
// based on the zero'th parameter and can be referenced using
// using that name in subsequent DAGs in the same
@@ -211,6 +215,7 @@ def OP_UNAVAILABLE : Operation {
// f: float
// h: half-float
// d: double
+// b: bfloat16
//
// Typespec modifiers
// ------------------
@@ -232,6 +237,7 @@ def OP_UNAVAILABLE : Operation {
// S: change to signed integer category.
// U: change to unsigned integer category.
// F: change to floating category.
+// B: change to BFloat16
// P: change to polynomial category.
// p: change polynomial to equivalent integer category. Otherwise nop.
//
@@ -261,7 +267,6 @@ class Inst <string n, string p, string t, Operation o> {
string ArchGuard = "";
Operation Operation = o;
- bit CartesianProductOfTypes = 0;
bit BigEndianSafe = 0;
bit isShift = 0;
bit isScalarShift = 0;
@@ -283,6 +288,8 @@ class Inst <string n, string p, string t, Operation o> {
// this. Ex: vset_lane which outputs vmov instructions.
bit isHiddenWInst = 0;
bit isHiddenLInst = 0;
+
+ string CartesianProductWith = "";
}
// The following instruction classes are implemented via builtins.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
new file mode 100644
index 000000000000..19a42e79c36a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
@@ -0,0 +1,2083 @@
+//===--- arm_sve.td - ARM SVE compiler interface ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM SVE header
+// file will be generated. See:
+//
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Every intrinsic subclasses "Inst". An intrinsic has a name, a prototype and
+// a sequence of typespecs.
+//
+// The name is the base name of the intrinsic, for example "svld1". This is
+// then mangled by the tblgen backend to add type information ("svld1_s16").
+//
+// A typespec is a sequence of uppercase characters (modifiers) followed by one
+// lowercase character. A typespec encodes a particular "base type" of the
+// intrinsic.
+//
+// An example typespec is "Us" - unsigned short - svuint16_t. The available
+// typespec codes are given below.
+//
+// The string given to an Inst class is a sequence of typespecs. The intrinsic
+// is instantiated for every typespec in the sequence. For example "sdUsUd".
+//
+// The prototype is a string that defines the return type of the intrinsic
+// and the type of each argument. The return type and each argument get a
+// "modifier" that can change the "base type" of the intrinsic in some way.
+//
+// The modifier 'd' means "default" and does not modify the base type in any
+// way. The available modifiers are given below, with a short worked example
+// after the tables.
+//
+// Typespecs
+// ---------
+// c: char
+// s: short
+// i: int
+// l: long
+// f: float
+// h: half-float
+// d: double
+// b: bfloat
+
+// Typespec modifiers
+// ------------------
+// P: boolean
+// U: unsigned
+
+// Prototype modifiers
+// -------------------
+// prototype: return (arg, arg, ...)
+//
+// 2,3,4: array of default vectors
+// v: void
+// x: vector of signed integers
+// u: vector of unsigned integers
+// d: default
+// c: const pointer type
+// P: predicate type
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// R: scalar of 1/2 width element type (splat to vector type)
+// r: scalar of 1/4 width element type (splat to vector type)
+// @: unsigned scalar of 1/4 width element type (splat to vector type)
+// e: 1/2 width unsigned elements, 2x element count
+// b: 1/4 width unsigned elements, 4x element count
+// h: 1/2 width elements, 2x element count
+// q: 1/4 width elements, 4x element count
+// o: 4x width elements, 1/4 element count
+//
+// w: vector of element type promoted to 64bits, vector maintains
+// signedness of its element type.
+// f: element type promoted to uint64_t (splat to vector type)
+// j: element type promoted to 64bits (splat to vector type)
+// K: element type bitcast to a signed integer (splat to vector type)
+// L: element type bitcast to an unsigned integer (splat to vector type)
+//
+// i: constant uint64_t
+// k: int32_t
+// l: int64_t
+// m: uint32_t
+// n: uint64_t
+
+// t: svint32_t
+// z: svuint32_t
+// g: svuint64_t
+// O: svfloat16_t
+// M: svfloat32_t
+// N: svfloat64_t
+
+// J: Prefetch type (sv_prfop)
+// A: pointer to int8_t
+// B: pointer to int16_t
+// C: pointer to int32_t
+// D: pointer to int64_t
+
+// E: pointer to uint8_t
+// F: pointer to uint16_t
+// G: pointer to uint32_t
+// H: pointer to uint64_t
+
+// Q: const pointer to void
+
+// S: const pointer to int8_t
+// T: const pointer to int16_t
+// U: const pointer to int32_t
+// V: const pointer to int64_t
+//
+// W: const pointer to uint8_t
+// X: const pointer to uint16_t
+// Y: const pointer to uint32_t
+// Z: const pointer to uint64_t
+
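To make the scheme above concrete, here is a sketch of how one definition from this file expands; the C signatures are paraphrased from the ACLE naming convention rather than taken from the emitter. SVLD1 (defined below) uses prototype "dPc": return the default vector ('d'), take a predicate ('P') and a const pointer to the element type ('c'); the typespec string then stamps out one intrinsic per base type:

  def SVLD1 : MInst<"svld1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
  // typespec "s"  (short)         ~> svint16_t  svld1_s16(svbool_t pg, const int16_t *base);
  // typespec "Ul" (unsigned long) ~> svuint64_t svld1_u64(svbool_t pg, const uint64_t *base);
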
+class MergeType<int val, string suffix=""> {
+ int Value = val;
+ string Suffix = suffix;
+}
+def MergeNone : MergeType<0>;
+def MergeAny : MergeType<1, "_x">;
+def MergeOp1 : MergeType<2, "_m">;
+def MergeZero : MergeType<3, "_z">;
+def MergeAnyExp : MergeType<4, "_x">; // Use merged builtin with explicit
+def MergeZeroExp : MergeType<5, "_z">; // generation of its inactive argument.
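
The suffix is appended to the generated intrinsic name. Sketched from the ACLE naming convention (svabs is used purely as an illustration here, it is not defined in this excerpt), a merging unary operation instantiated for svint32_t would surface roughly as:

  // MergeOp1  ~> svint32_t svabs_s32_m(svint32_t inactive, svbool_t pg, svint32_t op);
  // MergeAny  ~> svint32_t svabs_s32_x(svbool_t pg, svint32_t op);
  // MergeZero ~> svint32_t svabs_s32_z(svbool_t pg, svint32_t op);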
+
+class EltType<int val> {
+ int Value = val;
+}
+def EltTyInvalid : EltType<0>;
+def EltTyInt8 : EltType<1>;
+def EltTyInt16 : EltType<2>;
+def EltTyInt32 : EltType<3>;
+def EltTyInt64 : EltType<4>;
+def EltTyFloat16 : EltType<5>;
+def EltTyFloat32 : EltType<6>;
+def EltTyFloat64 : EltType<7>;
+def EltTyBool8 : EltType<8>;
+def EltTyBool16 : EltType<9>;
+def EltTyBool32 : EltType<10>;
+def EltTyBool64 : EltType<11>;
+def EltTyBFloat16 : EltType<12>;
+
+class MemEltType<int val> {
+ int Value = val;
+}
+def MemEltTyDefault : MemEltType<0>;
+def MemEltTyInt8 : MemEltType<1>;
+def MemEltTyInt16 : MemEltType<2>;
+def MemEltTyInt32 : MemEltType<3>;
+def MemEltTyInt64 : MemEltType<4>;
+
+class FlagType<int val> {
+ int Value = val;
+}
+
+// These must be kept in sync with the flags in utils/TableGen/SveEmitter.h
+// and include/clang/Basic/TargetBuiltins.h
+def NoFlags : FlagType<0x00000000>;
+def FirstEltType : FlagType<0x00000001>;
+// : :
+// : :
+def EltTypeMask : FlagType<0x0000000f>;
+def FirstMemEltType : FlagType<0x00000010>;
+// : :
+// : :
+def MemEltTypeMask : FlagType<0x00000070>;
+def FirstMergeTypeMask : FlagType<0x00000080>;
+// : :
+// : :
+def MergeTypeMask : FlagType<0x00000380>;
+def FirstSplatOperand : FlagType<0x00000400>;
+// : :
+// These flags are used to specify which scalar operand
+// needs to be duplicated/splatted into a vector.
+// : :
+def SplatOperandMask : FlagType<0x00001C00>;
+def IsLoad : FlagType<0x00002000>;
+def IsStore : FlagType<0x00004000>;
+def IsGatherLoad : FlagType<0x00008000>;
+def IsScatterStore : FlagType<0x00010000>;
+def IsStructLoad : FlagType<0x00020000>;
+def IsStructStore : FlagType<0x00040000>;
+def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extended by default
+def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
+def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
+def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
+def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
+def IsByteIndexed : FlagType<0x01000000>;
+def IsAppendSVALL : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
+def IsInsertOp1SVALL : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
+def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
+def IsGatherPrefetch : FlagType<0x10000000>;
+def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
+def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
+def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
+def IsTupleCreate : FlagType<0x100000000>;
+def IsTupleGet : FlagType<0x200000000>;
+def IsTupleSet : FlagType<0x400000000>;
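
Reading the Mask values above, each field occupies a contiguous bit range: the element type in bits 0-3, the memory element type in bits 4-6, the merge kind in bits 7-9, the splat operand in bits 10-12, and the remaining flags as single bits. Assuming the emitter simply places each value at its First* position (an inference from the masks, not something stated in this file), a load such as SVLD1SH instantiated for svint32_t would encode roughly as:

  // EltTyInt32 (3)     -> 0x00000003
  // MemEltTyInt16 (2)  -> 0x00000020
  // MergeNone (0)      -> 0x00000000
  // IsLoad             -> 0x00002000
  //                       ----------
  //                       0x00002023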
+
+// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
+class ImmCheckType<int val> {
+ int Value = val;
+}
+def ImmCheck0_31 : ImmCheckType<0>; // 0..31 (used for e.g. predicate patterns)
+def ImmCheck1_16 : ImmCheckType<1>; // 1..16
+def ImmCheckExtract : ImmCheckType<2>; // 0..(2048/sizeinbits(elt) - 1)
+def ImmCheckShiftRight : ImmCheckType<3>; // 1..sizeinbits(elt)
+def ImmCheckShiftRightNarrow : ImmCheckType<4>; // 1..sizeinbits(elt)/2
+def ImmCheckShiftLeft : ImmCheckType<5>; // 0..(sizeinbits(elt) - 1)
+def ImmCheck0_7 : ImmCheckType<6>; // 0..7
+def ImmCheckLaneIndex : ImmCheckType<7>; // 0..(128/(1*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexCompRotate : ImmCheckType<8>; // 0..(128/(2*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexDot : ImmCheckType<9>; // 0..(128/(4*sizeinbits(elt)) - 1)
+def ImmCheckComplexRot90_270 : ImmCheckType<10>; // [90,270]
+def ImmCheckComplexRotAll90 : ImmCheckType<11>; // [0, 90, 180, 270]
+def ImmCheck0_13 : ImmCheckType<12>; // 0..13
+def ImmCheck0_1 : ImmCheckType<13>; // 0..1
+def ImmCheck0_2 : ImmCheckType<14>; // 0..2
+def ImmCheck0_3 : ImmCheckType<15>; // 0..3
+
+class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
+ int Arg = arg;
+ int EltSizeArg = eltSizeArg;
+ ImmCheckType Kind = kind;
+}
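
As a usage sketch (EXAMPLE_ASRD is illustrative and not a definition from this patch), an immediate-shift intrinsic could constrain its third operand to 1..sizeinbits(elt), taking the element size from operand 1:

  def EXAMPLE_ASRD : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_asrd",
                           [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;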
+
+class Inst<string n, string p, string t, MergeType mt, string i,
+ list<FlagType> ft, list<ImmCheck> ch, MemEltType met> {
+ string Name = n;
+ string Prototype = p;
+ string Types = t;
+ string ArchGuard = "";
+ int Merge = mt.Value;
+ string MergeSuffix = mt.Suffix;
+ string LLVMIntrinsic = i;
+ list<FlagType> Flags = ft;
+ list<ImmCheck> ImmChecks = ch;
+ int MemEltType = met.Value;
+}
+
+// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8")
+class SInst<string n, string p, string t, MergeType mt, string i = "",
+ list<FlagType> ft = [], list<ImmCheck> ch = []>
+ : Inst<n, p, t, mt, i, ft, ch, MemEltTyDefault> {
+}
+
+// MInst: Instructions which access memory
+class MInst<string n, string p, string t, list<FlagType> f,
+ MemEltType met = MemEltTyDefault, string i = "">
+ : Inst<n, p, t, MergeNone, i, f, [], met> {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Loads
+
+// Load one vector (scalar base)
+def SVLD1 : MInst<"svld1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
+def SVLD1SB : MInst<"svld1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1UB : MInst<"svld1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1SH : MInst<"svld1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1UH : MInst<"svld1uh_{d}", "dPX", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1SW : MInst<"svld1sw_{d}", "dPU", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1UW : MInst<"svld1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLD1_BF : MInst<"svld1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
+ def SVLD1_VNUM_BF : MInst<"svld1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
+}
+
+// Load one vector (scalar base, VL displacement)
+def SVLD1_VNUM : MInst<"svld1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
+def SVLD1SB_VNUM : MInst<"svld1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1UB_VNUM : MInst<"svld1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1SH_VNUM : MInst<"svld1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1UH_VNUM : MInst<"svld1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1SW_VNUM : MInst<"svld1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1UW_VNUM : MInst<"svld1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1">;
+
+// Load one vector (vector base)
+def SVLD1_GATHER_BASES_U : MInst<"svld1_gather[_{2}base]_{d}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SB_GATHER_BASES_U : MInst<"svld1sb_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UB_GATHER_BASES_U : MInst<"svld1ub_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SH_GATHER_BASES_U : MInst<"svld1sh_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UH_GATHER_BASES_U : MInst<"svld1uh_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SW_GATHER_BASES_U : MInst<"svld1sw_gather[_{2}base]_{d}", "dPu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UW_GATHER_BASES_U : MInst<"svld1uw_gather[_{2}base]_{d}", "dPu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+
+// Load one vector (scalar base, signed vector offset in bytes)
+def SVLD1_GATHER_64B_OFFSETS_S : MInst<"svld1_gather_[{3}]offset[_{d}]", "dPcx", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1_gather">;
+def SVLD1SB_GATHER_64B_OFFSETS_S : MInst<"svld1sb_gather_[{3}]offset_{d}", "dPSx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ld1_gather">;
+def SVLD1UB_GATHER_64B_OFFSETS_S : MInst<"svld1ub_gather_[{3}]offset_{d}", "dPWx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather">;
+def SVLD1SH_GATHER_64B_OFFSETS_S : MInst<"svld1sh_gather_[{3}]offset_{d}", "dPTx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ld1_gather">;
+def SVLD1UH_GATHER_64B_OFFSETS_S : MInst<"svld1uh_gather_[{3}]offset_{d}", "dPXx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather">;
+def SVLD1SW_GATHER_64B_OFFSETS_S : MInst<"svld1sw_gather_[{3}]offset_{d}", "dPUx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ld1_gather">;
+def SVLD1UW_GATHER_64B_OFFSETS_S : MInst<"svld1uw_gather_[{3}]offset_{d}", "dPYx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather">;
+
+def SVLD1_GATHER_32B_OFFSETS_S : MInst<"svld1_gather_[{3}]offset[_{d}]", "dPcx", "iUif", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1_gather_sxtw">;
+def SVLD1SB_GATHER_32B_OFFSETS_S : MInst<"svld1sb_gather_[{3}]offset_{d}", "dPSx", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ld1_gather_sxtw">;
+def SVLD1UB_GATHER_32B_OFFSETS_S : MInst<"svld1ub_gather_[{3}]offset_{d}", "dPWx", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather_sxtw">;
+def SVLD1SH_GATHER_32B_OFFSETS_S : MInst<"svld1sh_gather_[{3}]offset_{d}", "dPTx", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ld1_gather_sxtw">;
+def SVLD1UH_GATHER_32B_OFFSETS_S : MInst<"svld1uh_gather_[{3}]offset_{d}", "dPXx", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_sxtw">;
+
+// Load one vector (scalar base, unsigned vector offset in bytes)
+def SVLD1_GATHER_64B_OFFSETS_U : MInst<"svld1_gather_[{3}]offset[_{d}]", "dPcu", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1_gather">;
+def SVLD1SB_GATHER_64B_OFFSETS_U : MInst<"svld1sb_gather_[{3}]offset_{d}", "dPSu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ld1_gather">;
+def SVLD1UB_GATHER_64B_OFFSETS_U : MInst<"svld1ub_gather_[{3}]offset_{d}", "dPWu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather">;
+def SVLD1SH_GATHER_64B_OFFSETS_U : MInst<"svld1sh_gather_[{3}]offset_{d}", "dPTu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ld1_gather">;
+def SVLD1UH_GATHER_64B_OFFSETS_U : MInst<"svld1uh_gather_[{3}]offset_{d}", "dPXu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather">;
+def SVLD1SW_GATHER_64B_OFFSETS_U : MInst<"svld1sw_gather_[{3}]offset_{d}", "dPUu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ld1_gather">;
+def SVLD1UW_GATHER_64B_OFFSETS_U : MInst<"svld1uw_gather_[{3}]offset_{d}", "dPYu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather">;
+
+def SVLD1_GATHER_32B_OFFSETS_U : MInst<"svld1_gather_[{3}]offset[_{d}]", "dPcu", "iUif", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1_gather_uxtw">;
+def SVLD1SB_GATHER_32B_OFFSETS_U : MInst<"svld1sb_gather_[{3}]offset_{d}", "dPSu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ld1_gather_uxtw">;
+def SVLD1UB_GATHER_32B_OFFSETS_U : MInst<"svld1ub_gather_[{3}]offset_{d}", "dPWu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather_uxtw">;
+def SVLD1SH_GATHER_32B_OFFSETS_U : MInst<"svld1sh_gather_[{3}]offset_{d}", "dPTu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ld1_gather_uxtw">;
+def SVLD1UH_GATHER_32B_OFFSETS_U : MInst<"svld1uh_gather_[{3}]offset_{d}", "dPXu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_uxtw">;
+
+// Load one vector (vector base, signed scalar offset in bytes)
+def SVLD1_GATHER_OFFSET_S : MInst<"svld1_gather[_{2}base]_offset_{d}", "dPul", "ilUiUlfd", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SB_GATHER_OFFSET_S : MInst<"svld1sb_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UB_GATHER_OFFSET_S : MInst<"svld1ub_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SH_GATHER_OFFSET_S : MInst<"svld1sh_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UH_GATHER_OFFSET_S : MInst<"svld1uh_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SW_GATHER_OFFSET_S : MInst<"svld1sw_gather[_{2}base]_offset_{d}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UW_GATHER_OFFSET_S : MInst<"svld1uw_gather[_{2}base]_offset_{d}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+
+// Load one vector (scalar base, signed vector index)
+def SVLD1_GATHER_64B_INDICES_S : MInst<"svld1_gather_[{3}]index[_{d}]", "dPcx", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_index">;
+def SVLD1SH_GATHER_64B_INDICES_S : MInst<"svld1sh_gather_[{3}]index_{d}", "dPTx", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_index">;
+def SVLD1UH_GATHER_64B_INDICES_S : MInst<"svld1uh_gather_[{3}]index_{d}", "dPXx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_index">;
+def SVLD1SW_GATHER_64B_INDICES_S : MInst<"svld1sw_gather_[{3}]index_{d}", "dPUx", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ld1_gather_index">;
+def SVLD1UW_GATHER_64B_INDICES_S : MInst<"svld1uw_gather_[{3}]index_{d}", "dPYx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather_index">;
+
+def SVLD1_GATHER_32B_INDICES_S : MInst<"svld1_gather_[{3}]index[_{d}]", "dPcx", "iUif", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_sxtw_index">;
+def SVLD1SH_GATHER_32B_INDICES_S : MInst<"svld1sh_gather_[{3}]index_{d}", "dPTx", "iUi", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_sxtw_index">;
+def SVLD1UH_GATHER_32B_INDICES_S : MInst<"svld1uh_gather_[{3}]index_{d}", "dPXx", "iUi", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_sxtw_index">;
+
+// Load one vector (scalar base, unsigned vector index)
+def SVLD1_GATHER_64B_INDICES_U : MInst<"svld1_gather_[{3}]index[_{d}]", "dPcu", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_index">;
+def SVLD1SH_GATHER_64B_INDICES_U : MInst<"svld1sh_gather_[{3}]index_{d}", "dPTu", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_index">;
+def SVLD1UH_GATHER_64B_INDICES_U : MInst<"svld1uh_gather_[{3}]index_{d}", "dPXu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_index">;
+def SVLD1SW_GATHER_64B_INDICES_U : MInst<"svld1sw_gather_[{3}]index_{d}", "dPUu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ld1_gather_index">;
+def SVLD1UW_GATHER_64B_INDICES_U : MInst<"svld1uw_gather_[{3}]index_{d}", "dPYu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather_index">;
+
+def SVLD1_GATHER_32B_INDICES_U : MInst<"svld1_gather_[{3}]index[_{d}]", "dPcu", "iUif", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_uxtw_index">;
+def SVLD1SH_GATHER_32B_INDICES_U : MInst<"svld1sh_gather_[{3}]index_{d}", "dPTu", "iUi", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_uxtw_index">;
+def SVLD1UH_GATHER_32B_INDICES_U : MInst<"svld1uh_gather_[{3}]index_{d}", "dPXu", "iUi", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_uxtw_index">;
+
+// Load one vector (vector base, signed scalar index)
+def SVLD1_GATHER_INDEX_S : MInst<"svld1_gather[_{2}base]_index_{d}", "dPul", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SH_GATHER_INDEX_S : MInst<"svld1sh_gather[_{2}base]_index_{d}", "dPul", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UH_GATHER_INDEX_S : MInst<"svld1uh_gather[_{2}base]_index_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1SW_GATHER_INDEX_S : MInst<"svld1sw_gather[_{2}base]_index_{d}", "dPul", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+def SVLD1UW_GATHER_INDEX_S : MInst<"svld1uw_gather[_{2}base]_index_{d}", "dPul", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1_gather_scalar_offset">;
+
+
+// First-faulting load one vector (scalar base)
+def SVLDFF1 : MInst<"svldff1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+def SVLDFF1SB : MInst<"svldff1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldff1">;
+def SVLDFF1UB : MInst<"svldff1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1">;
+def SVLDFF1SH : MInst<"svldff1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldff1">;
+def SVLDFF1UH : MInst<"svldff1uh_{d}", "dPX", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1">;
+def SVLDFF1SW : MInst<"svldff1sw_{d}", "dPU", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldff1">;
+def SVLDFF1UW : MInst<"svldff1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1">;
+
+// First-faulting load one vector (scalar base, VL displacement)
+def SVLDFF1_VNUM : MInst<"svldff1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+def SVLDFF1SB_VNUM : MInst<"svldff1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldff1">;
+def SVLDFF1UB_VNUM : MInst<"svldff1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1">;
+def SVLDFF1SH_VNUM : MInst<"svldff1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldff1">;
+def SVLDFF1UH_VNUM : MInst<"svldff1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1">;
+def SVLDFF1SW_VNUM : MInst<"svldff1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldff1">;
+def SVLDFF1UW_VNUM : MInst<"svldff1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLDFF1_BF : MInst<"svldff1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+ def SVLDFF1_VNUM_BF : MInst<"svldff1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+}
+
+// First-faulting load one vector (vector base)
+def SVLDFF1_GATHER_BASES_U : MInst<"svldff1_gather[_{2}base]_{d}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SB_GATHER_BASES_U : MInst<"svldff1sb_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UB_GATHER_BASES_U : MInst<"svldff1ub_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SH_GATHER_BASES_U : MInst<"svldff1sh_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UH_GATHER_BASES_U : MInst<"svldff1uh_gather[_{2}base]_{d}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SW_GATHER_BASES_U : MInst<"svldff1sw_gather[_{2}base]_{d}", "dPu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UW_GATHER_BASES_U : MInst<"svldff1uw_gather[_{2}base]_{d}", "dPu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+
+// First-faulting load one vector (scalar base, signed vector offset in bytes)
+def SVLDFF1_GATHER_64B_OFFSETS_S : MInst<"svldff1_gather_[{3}]offset[_{d}]", "dPcx", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SB_GATHER_64B_OFFSETS_S : MInst<"svldff1sb_gather_[{3}]offset_{d}", "dPSx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UB_GATHER_64B_OFFSETS_S : MInst<"svldff1ub_gather_[{3}]offset_{d}", "dPWx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SH_GATHER_64B_OFFSETS_S : MInst<"svldff1sh_gather_[{3}]offset_{d}", "dPTx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UH_GATHER_64B_OFFSETS_S : MInst<"svldff1uh_gather_[{3}]offset_{d}", "dPXx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SW_GATHER_64B_OFFSETS_S : MInst<"svldff1sw_gather_[{3}]offset_{d}", "dPUx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UW_GATHER_64B_OFFSETS_S : MInst<"svldff1uw_gather_[{3}]offset_{d}", "dPYx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather">;
+
+def SVLDFF1_GATHER_32B_OFFSETS_S : MInst<"svldff1_gather_[{3}]offset[_{d}]", "dPcx", "iUif", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldff1_gather_sxtw">;
+def SVLDFF1SB_GATHER_32B_OFFSETS_S : MInst<"svldff1sb_gather_[{3}]offset_{d}", "dPSx", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldff1_gather_sxtw">;
+def SVLDFF1UB_GATHER_32B_OFFSETS_S : MInst<"svldff1ub_gather_[{3}]offset_{d}", "dPWx", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather_sxtw">;
+def SVLDFF1SH_GATHER_32B_OFFSETS_S : MInst<"svldff1sh_gather_[{3}]offset_{d}", "dPTx", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldff1_gather_sxtw">;
+def SVLDFF1UH_GATHER_32B_OFFSETS_S : MInst<"svldff1uh_gather_[{3}]offset_{d}", "dPXx", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_sxtw">;
+
+// First-faulting load one vector (scalar base, unsigned vector offset in bytes)
+def SVLDFF1_GATHER_64B_OFFSETS_U : MInst<"svldff1_gather_[{3}]offset[_{d}]", "dPcu", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SB_GATHER_64B_OFFSETS_U : MInst<"svldff1sb_gather_[{3}]offset_{d}", "dPSu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UB_GATHER_64B_OFFSETS_U : MInst<"svldff1ub_gather_[{3}]offset_{d}", "dPWu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SH_GATHER_64B_OFFSETS_U : MInst<"svldff1sh_gather_[{3}]offset_{d}", "dPTu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UH_GATHER_64B_OFFSETS_U : MInst<"svldff1uh_gather_[{3}]offset_{d}", "dPXu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather">;
+def SVLDFF1SW_GATHER_64B_OFFSETS_U : MInst<"svldff1sw_gather_[{3}]offset_{d}", "dPUu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldff1_gather">;
+def SVLDFF1UW_GATHER_64B_OFFSETS_U : MInst<"svldff1uw_gather_[{3}]offset_{d}", "dPYu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather">;
+
+def SVLDFF1_GATHER_32B_OFFSETS_U : MInst<"svldff1_gather_[{3}]offset[_{d}]", "dPcu", "iUif", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldff1_gather_uxtw">;
+def SVLDFF1SB_GATHER_32B_OFFSETS_U : MInst<"svldff1sb_gather_[{3}]offset_{d}", "dPSu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldff1_gather_uxtw">;
+def SVLDFF1UB_GATHER_32B_OFFSETS_U : MInst<"svldff1ub_gather_[{3}]offset_{d}", "dPWu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather_uxtw">;
+def SVLDFF1SH_GATHER_32B_OFFSETS_U : MInst<"svldff1sh_gather_[{3}]offset_{d}", "dPTu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldff1_gather_uxtw">;
+def SVLDFF1UH_GATHER_32B_OFFSETS_U : MInst<"svldff1uh_gather_[{3}]offset_{d}", "dPXu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_uxtw">;
+
+// First-faulting load one vector (vector base, signed scalar offset in bytes)
+def SVLDFF1_GATHER_OFFSET_S : MInst<"svldff1_gather[_{2}base]_offset_{d}", "dPul", "ilUiUlfd", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SB_GATHER_OFFSET_S : MInst<"svldff1sb_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UB_GATHER_OFFSET_S : MInst<"svldff1ub_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SH_GATHER_OFFSET_S : MInst<"svldff1sh_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UH_GATHER_OFFSET_S : MInst<"svldff1uh_gather[_{2}base]_offset_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SW_GATHER_OFFSET_S : MInst<"svldff1sw_gather[_{2}base]_offset_{d}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UW_GATHER_OFFSET_S : MInst<"svldff1uw_gather[_{2}base]_offset_{d}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+
+// First-faulting load one vector (scalar base, signed vector index)
+def SVLDFF1_GATHER_64B_INDICES_S : MInst<"svldff1_gather_[{3}]index[_{d}]", "dPcx", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1SH_GATHER_64B_INDICES_S : MInst<"svldff1sh_gather_[{3}]index_{d}", "dPTx", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1UH_GATHER_64B_INDICES_S : MInst<"svldff1uh_gather_[{3}]index_{d}", "dPXx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1SW_GATHER_64B_INDICES_S : MInst<"svldff1sw_gather_[{3}]index_{d}", "dPUx", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1UW_GATHER_64B_INDICES_S : MInst<"svldff1uw_gather_[{3}]index_{d}", "dPYx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_index">;
+
+def SVLDFF1_GATHER_32B_INDICES_S : MInst<"svldff1_gather_[{3}]index[_{d}]", "dPcx", "iUif", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_sxtw_index">;
+def SVLDFF1SH_GATHER_32B_INDICES_S : MInst<"svldff1sh_gather_[{3}]index_{d}", "dPTx", "iUi", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_sxtw_index">;
+def SVLDFF1UH_GATHER_32B_INDICES_S : MInst<"svldff1uh_gather_[{3}]index_{d}", "dPXx", "iUi", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_sxtw_index">;
+
+// First-faulting load one vector (scalar base, unsigned vector index)
+def SVLDFF1_GATHER_64B_INDICES_U : MInst<"svldff1_gather_[{3}]index[_{d}]", "dPcu", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1SH_GATHER_64B_INDICES_U : MInst<"svldff1sh_gather_[{3}]index_{d}", "dPTu", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1UH_GATHER_64B_INDICES_U : MInst<"svldff1uh_gather_[{3}]index_{d}", "dPXu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1SW_GATHER_64B_INDICES_U : MInst<"svldff1sw_gather_[{3}]index_{d}", "dPUu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldff1_gather_index">;
+def SVLDFF1UW_GATHER_64B_INDICES_U : MInst<"svldff1uw_gather_[{3}]index_{d}", "dPYu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_index">;
+
+def SVLDFF1_GATHER_32B_INDICES_U : MInst<"svldff1_gather_[{3}]index[_{d}]", "dPcu", "iUif", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_uxtw_index">;
+def SVLDFF1SH_GATHER_32B_INDICES_U : MInst<"svldff1sh_gather_[{3}]index_{d}", "dPTu", "iUi", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_uxtw_index">;
+def SVLDFF1UH_GATHER_32B_INDICES_U : MInst<"svldff1uh_gather_[{3}]index_{d}", "dPXu", "iUi", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_uxtw_index">;
+
+// First-faulting load one vector (vector base, signed scalar index)
+def SVLDFF1_GATHER_INDEX_S : MInst<"svldff1_gather[_{2}base]_index_{d}", "dPul", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SH_GATHER_INDEX_S : MInst<"svldff1sh_gather[_{2}base]_index_{d}", "dPul", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UH_GATHER_INDEX_S : MInst<"svldff1uh_gather[_{2}base]_index_{d}", "dPul", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1SW_GATHER_INDEX_S : MInst<"svldff1sw_gather[_{2}base]_index_{d}", "dPul", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+def SVLDFF1UW_GATHER_INDEX_S : MInst<"svldff1uw_gather[_{2}base]_index_{d}", "dPul", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
+
+// Non-faulting load one vector (scalar base)
+def SVLDNF1 : MInst<"svldnf1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+def SVLDNF1SB : MInst<"svldnf1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldnf1">;
+def SVLDNF1UB : MInst<"svldnf1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnf1">;
+def SVLDNF1SH : MInst<"svldnf1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldnf1">;
+def SVLDNF1UH : MInst<"svldnf1uh_{d}", "dPX", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnf1">;
+def SVLDNF1SW : MInst<"svldnf1sw_{d}", "dPU", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldnf1">;
+def SVLDNF1UW : MInst<"svldnf1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnf1">;
+
+// Non-faulting load one vector (scalar base, VL displacement)
+def SVLDNF1_VNUM : MInst<"svldnf1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+def SVLDNF1SB_VNUM : MInst<"svldnf1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldnf1">;
+def SVLDNF1UB_VNUM : MInst<"svldnf1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnf1">;
+def SVLDNF1SH_VNUM : MInst<"svldnf1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldnf1">;
+def SVLDNF1UH_VNUM : MInst<"svldnf1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnf1">;
+def SVLDNF1SW_VNUM : MInst<"svldnf1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldnf1">;
+def SVLDNF1UW_VNUM : MInst<"svldnf1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnf1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLDNF1_BF : MInst<"svldnf1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+ def SVLDNF1_VNUM_BF : MInst<"svldnf1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+}
+
+// Load one vector, unextended load, non-temporal (scalar base)
+def SVLDNT1 : MInst<"svldnt1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+
+// Load one vector, unextended load, non-temporal (scalar base, VL displacement)
+def SVLDNT1_VNUM : MInst<"svldnt1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLDNT1_BF : MInst<"svldnt1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+ def SVLDNT1_VNUM_BF : MInst<"svldnt1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+}
+
+// Load one quadword and replicate (scalar base)
+def SVLD1RQ : SInst<"svld1rq[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1rq">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLD1RQ_BF : SInst<"svld1rq[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1rq">;
+}
+
+multiclass StructLoad<string name, string proto, string i> {
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructLoad]>;
+ let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def: SInst<name, proto, "b", MergeNone, i, [IsStructLoad]>;
+ }
+}
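
Each use of this multiclass (see the defm records below) produces two concrete SInst records: one covering the standard element types and a second, guarded by __ARM_FEATURE_SVE_BF16, covering bfloat. Sketching the effect for SVLD2, the expansion is roughly:

  // defm SVLD2 : StructLoad<"svld2[_{2}]", "2Pc", "aarch64_sve_ld2">;
  //   ~> def ... : SInst<"svld2[_{2}]", "2Pc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld2", [IsStructLoad]>;
  //   ~> def ... : SInst<"svld2[_{2}]", "2Pc", "b", MergeNone, "aarch64_sve_ld2", [IsStructLoad]>;  // under __ARM_FEATURE_SVE_BF16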
+
+// Load N-element structure into N vectors (scalar base)
+defm SVLD2 : StructLoad<"svld2[_{2}]", "2Pc", "aarch64_sve_ld2">;
+defm SVLD3 : StructLoad<"svld3[_{2}]", "3Pc", "aarch64_sve_ld3">;
+defm SVLD4 : StructLoad<"svld4[_{2}]", "4Pc", "aarch64_sve_ld4">;
+
+// Load N-element structure into N vectors (scalar base, VL displacement)
+defm SVLD2_VNUM : StructLoad<"svld2_vnum[_{2}]", "2Pcl", "aarch64_sve_ld2">;
+defm SVLD3_VNUM : StructLoad<"svld3_vnum[_{2}]", "3Pcl", "aarch64_sve_ld3">;
+defm SVLD4_VNUM : StructLoad<"svld4_vnum[_{2}]", "4Pcl", "aarch64_sve_ld4">;
+
+// Load one octoword and replicate (scalar base)
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
+ def SVLD1RO : SInst<"svld1ro[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1ro">;
+}
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVLD1RO_BF16 : SInst<"svld1ro[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1ro">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVBFDOT : SInst<"svbfdot[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone]>;
+ def SVBFMLALB : SInst<"svbfmlalb[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone]>;
+ def SVBFMLALT : SInst<"svbfmlalt[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone]>;
+ def SVBFMMLA : SInst<"svbfmmla[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmmla", [IsOverloadNone]>;
+ def SVBFDOT_N : SInst<"svbfdot[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone]>;
+ def SVBFMLAL_N : SInst<"svbfmlalb[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone]>;
+ def SVBFMLALT_N : SInst<"svbfmlalt[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone]>;
+ def SVBFDOT_LANE : SInst<"svbfdot_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfdot_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVBFMLALB_LANE : SInst<"svbfmlalb_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfmlalb_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVBFMLALT_LANE : SInst<"svbfmlalt_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfmlalt_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Stores
+
+// Store one vector (scalar base)
+def SVST1 : MInst<"svst1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+def SVST1B_S : MInst<"svst1b[_{d}]", "vPAd", "sil", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1B_U : MInst<"svst1b[_{d}]", "vPEd", "UsUiUl", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1H_S : MInst<"svst1h[_{d}]", "vPBd", "il", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1H_U : MInst<"svst1h[_{d}]", "vPFd", "UiUl", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1W_S : MInst<"svst1w[_{d}]", "vPCd", "l", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1W_U : MInst<"svst1w[_{d}]", "vPGd", "Ul", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+
+// Store one vector (scalar base, VL displacement)
+def SVST1_VNUM : MInst<"svst1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+def SVST1B_VNUM_S : MInst<"svst1b_vnum[_{d}]", "vPAld", "sil", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1B_VNUM_U : MInst<"svst1b_vnum[_{d}]", "vPEld", "UsUiUl", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1H_VNUM_S : MInst<"svst1h_vnum[_{d}]", "vPBld", "il", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1H_VNUM_U : MInst<"svst1h_vnum[_{d}]", "vPFld", "UiUl", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1W_VNUM_S : MInst<"svst1w_vnum[_{d}]", "vPCld", "l", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1W_VNUM_U : MInst<"svst1w_vnum[_{d}]", "vPGld", "Ul", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVST1_BF : MInst<"svst1[_{d}]", "vPpd", "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+ def SVST1_VNUM_BF : MInst<"svst1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+}
+
+// Store one vector (vector base)
+def SVST1_SCATTER_BASES_U : MInst<"svst1_scatter[_{2}base_{d}]", "vPud", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1B_SCATTER_BASES_U : MInst<"svst1b_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt8, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1H_SCATTER_BASES_U : MInst<"svst1h_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1W_SCATTER_BASES_U : MInst<"svst1w_scatter[_{2}base_{d}]", "vPud", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_scalar_offset">;
+
+// Store one vector (scalar base, signed vector offset in bytes)
+def SVST1_SCATTER_64B_OFFSETS_S : MInst<"svst1_scatter_[{3}]offset[_{d}]", "vPpxd", "lUld", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1_scatter">;
+def SVST1B_SCATTER_64B_OFFSETS_SS : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPAxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter">;
+def SVST1B_SCATTER_64B_OFFSETS_SU : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPExd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter">;
+def SVST1H_SCATTER_64B_OFFSETS_SS : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPBxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter">;
+def SVST1H_SCATTER_64B_OFFSETS_SU : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPFxd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter">;
+def SVST1W_SCATTER_64B_OFFSETS_SS : MInst<"svst1w_scatter_[{3}]offset[_{d}]", "vPCxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_st1_scatter">;
+def SVST1W_SCATTER_64B_OFFSETS_SU : MInst<"svst1w_scatter_[{3}]offset[_{d}]", "vPGxd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_st1_scatter">;
+
+def SVST1_SCATTER_32B_OFFSETS_S : MInst<"svst1_scatter_[{3}]offset[_{d}]", "vPpxd", "iUif", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1_scatter_sxtw">;
+def SVST1B_SCATTER_32B_OFFSETS_SS : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPAxd", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter_sxtw">;
+def SVST1B_SCATTER_32B_OFFSETS_SU : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPExd", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter_sxtw">;
+def SVST1H_SCATTER_32B_OFFSETS_SS : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPBxd", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter_sxtw">;
+def SVST1H_SCATTER_32B_OFFSETS_SU : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPFxd", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter_sxtw">;
+
+// Store one vector (scalar base, unsigned vector offset in bytes)
+def SVST1_SCATTER_64B_OFFSETS_U : MInst<"svst1_scatter_[{3}]offset[_{d}]", "vPpud", "lUld", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1_scatter">;
+def SVST1B_SCATTER_64B_OFFSETS_US : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPAud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter">;
+def SVST1B_SCATTER_64B_OFFSETS_UU : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPEud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter">;
+def SVST1H_SCATTER_64B_OFFSETS_US : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPBud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter">;
+def SVST1H_SCATTER_64B_OFFSETS_UU : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPFud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter">;
+def SVST1W_SCATTER_64B_OFFSETS_US : MInst<"svst1w_scatter_[{3}]offset[_{d}]", "vPCud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_st1_scatter">;
+def SVST1W_SCATTER_64B_OFFSETS_UU : MInst<"svst1w_scatter_[{3}]offset[_{d}]", "vPGud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_st1_scatter">;
+
+def SVST1_SCATTER_32B_OFFSETS_U : MInst<"svst1_scatter_[{3}]offset[_{d}]", "vPpud", "iUif", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1_scatter_uxtw">;
+def SVST1B_SCATTER_32B_OFFSETS_US : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPAud", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter_uxtw">;
+def SVST1B_SCATTER_32B_OFFSETS_UU : MInst<"svst1b_scatter_[{3}]offset[_{d}]", "vPEud", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter_uxtw">;
+def SVST1H_SCATTER_32B_OFFSETS_US : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPBud", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter_uxtw">;
+def SVST1H_SCATTER_32B_OFFSETS_UU : MInst<"svst1h_scatter_[{3}]offset[_{d}]", "vPFud", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter_uxtw">;
+
+// Store one vector (vector base, signed scalar offset in bytes)
+def SVST1_SCATTER_OFFSET_S : MInst<"svst1_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUlfd", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1B_SCATTER_OFFSET_S : MInst<"svst1b_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUl", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1H_SCATTER_OFFSET_S : MInst<"svst1h_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUl", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1W_SCATTER_OFFSET_S : MInst<"svst1w_scatter[_{2}base]_offset[_{d}]", "vPuld", "lUl", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_st1_scatter_scalar_offset">;
+
+// Store one vector (scalar base, signed vector index)
+def SVST1_SCATTER_64B_INDICES_S : MInst<"svst1_scatter_[{3}]index[_{d}]", "vPpxd", "lUld", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_index">;
+def SVST1H_SCATTER_64B_INDICES_SS : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPBxd", "l", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_index">;
+def SVST1H_SCATTER_64B_INDICES_SU : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPFxd", "Ul", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_index">;
+def SVST1W_SCATTER_64B_INDICES_SS : MInst<"svst1w_scatter_[{3}]index[_{d}]", "vPCxd", "l", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_index">;
+def SVST1W_SCATTER_64B_INDICES_SU : MInst<"svst1w_scatter_[{3}]index[_{d}]", "vPGxd", "Ul", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_index">;
+
+def SVST1_SCATTER_32B_INDICES_S : MInst<"svst1_scatter_[{3}]index[_{d}]", "vPpxd", "iUif", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_sxtw_index">;
+def SVST1H_SCATTER_32B_INDICES_SS : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPBxd", "i", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_sxtw_index">;
+def SVST1H_SCATTER_32B_INDICES_SU : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPFxd", "Ui", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_sxtw_index">;
+
+// Store one vector (scalar base, unsigned vector index)
+def SVST1_SCATTER_64B_INDICES_U : MInst<"svst1_scatter_[{3}]index[_{d}]", "vPpud", "lUld", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_index">;
+def SVST1H_SCATTER_64B_INDICES_US : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPBud", "l", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_index">;
+def SVST1H_SCATTER_64B_INDICES_UU : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPFud", "Ul", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_index">;
+def SVST1W_SCATTER_64B_INDICES_US : MInst<"svst1w_scatter_[{3}]index[_{d}]", "vPCud", "l", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_index">;
+def SVST1W_SCATTER_64B_INDICES_UU : MInst<"svst1w_scatter_[{3}]index[_{d}]", "vPGud", "Ul", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_index">;
+
+def SVST1_SCATTER_32B_INDICES_U : MInst<"svst1_scatter_[{3}]index[_{d}]", "vPpud", "iUif", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_uxtw_index">;
+def SVST1H_SCATTER_32B_INDICES_US : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPBud", "i", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_uxtw_index">;
+def SVST1H_SCATTER_32B_INDICES_UU : MInst<"svst1h_scatter_[{3}]index[_{d}]", "vPFud", "Ui", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_uxtw_index">;
+
+// Store one vector (vector base, signed scalar index)
+def SVST1_SCATTER_INDEX_S : MInst<"svst1_scatter[_{2}base]_index[_{d}]", "vPuld", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1H_SCATTER_INDEX_S : MInst<"svst1h_scatter[_{2}base]_index[_{d}]", "vPuld", "ilUiUl", [IsScatterStore], MemEltTyInt16, "aarch64_sve_st1_scatter_scalar_offset">;
+def SVST1W_SCATTER_INDEX_S : MInst<"svst1w_scatter[_{2}base]_index[_{d}]", "vPuld", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_scalar_offset">;
+
+multiclass StructStore<string name, string proto, string i> {
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructStore]>;
+ let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def: SInst<name, proto, "b", MergeNone, i, [IsStructStore]>;
+ }
+}
+// Store N vectors into N-element structure (scalar base)
+defm SVST2 : StructStore<"svst2[_{d}]", "vPp2", "aarch64_sve_st2">;
+defm SVST3 : StructStore<"svst3[_{d}]", "vPp3", "aarch64_sve_st3">;
+defm SVST4 : StructStore<"svst4[_{d}]", "vPp4", "aarch64_sve_st4">;
+
+// Store N vectors into N-element structure (scalar base, VL displacement)
+defm SVST2_VNUM : StructStore<"svst2_vnum[_{d}]", "vPpl2", "aarch64_sve_st2">;
+defm SVST3_VNUM : StructStore<"svst3_vnum[_{d}]", "vPpl3", "aarch64_sve_st3">;
+defm SVST4_VNUM : StructStore<"svst4_vnum[_{d}]", "vPpl4", "aarch64_sve_st4">;
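+
+// For illustration only (not part of the generated header): a minimal C sketch
+// of the svst2 forms above, assuming <arm_sve.h>; the function name is ours.
+//   #include <arm_sve.h>
+//   void store_pairs(int32_t *dst, svint32_t even, svint32_t odd) {
+//     svbool_t pg = svptrue_b32();
+//     svst2(pg, dst, svcreate2(even, odd));          // svst2[_s32]
+//     svst2_vnum(pg, dst, 2, svcreate2(even, odd));  // VL-scaled displacement
+//   }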
+
+// Store one vector, with no truncation, non-temporal (scalar base)
+def SVSTNT1 : MInst<"svstnt1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+
+// Store one vector, with no truncation, non-temporal (scalar base, VL displacement)
+def SVSTNT1_VNUM : MInst<"svstnt1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVSTNT1_BF : MInst<"svstnt1[_{d}]", "vPpd", "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+ def SVSTNT1_VNUM_BF : MInst<"svstnt1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Prefetches
+
+// Prefetch (Scalar base)
+def SVPRFB : MInst<"svprfb", "vPcJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH : MInst<"svprfh", "vPcJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW : MInst<"svprfw", "vPcJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD : MInst<"svprfd", "vPcJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
+
+// Prefetch (Scalar base, VL displacement)
+def SVPRFB_VNUM : MInst<"svprfb_vnum", "vPclJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPclJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPclJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPclJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
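+
+// Illustrative C usage of the scalar-base prefetches above (a sketch, assuming
+// <arm_sve.h>; SV_PLDL1KEEP is one of the ACLE svprfop enumerators):
+//   #include <arm_sve.h>
+//   void warm_cache(const int8_t *p) {
+//     svbool_t pg = svptrue_b8();
+//     svprfb(pg, p, SV_PLDL1KEEP);          // prefetch one VL of bytes
+//     svprfb_vnum(pg, p, 1, SV_PLDL1KEEP);  // and the following VL-sized block
+//   }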
+
+// Prefetch (Vector bases)
+def SVPRFB_GATHER_BASES : MInst<"svprfb_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_scalar_offset">;
+def SVPRFH_GATHER_BASES : MInst<"svprfh_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_scalar_offset">;
+def SVPRFW_GATHER_BASES : MInst<"svprfw_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_scalar_offset">;
+def SVPRFD_GATHER_BASES : MInst<"svprfd_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_scalar_offset">;
+
+// Prefetch (Scalar base, Vector offsets)
+def SVPRFB_GATHER_32B_OFFSETS_S : MInst<"svprfb_gather_[{3}]offset", "vPQdJ", "i", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_sxtw_index">;
+def SVPRFH_GATHER_32B_OFFSETS_S : MInst<"svprfh_gather_[{3}]index", "vPQdJ", "i", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_sxtw_index">;
+def SVPRFW_GATHER_32B_OFFSETS_S : MInst<"svprfw_gather_[{3}]index", "vPQdJ", "i", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_sxtw_index">;
+def SVPRFD_GATHER_32B_OFFSETS_S : MInst<"svprfd_gather_[{3}]index", "vPQdJ", "i", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_sxtw_index">;
+
+def SVPRFB_GATHER_64B_OFFSETS_S : MInst<"svprfb_gather_[{3}]offset", "vPQdJ", "l", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_index">;
+def SVPRFH_GATHER_64B_OFFSETS_S : MInst<"svprfh_gather_[{3}]index", "vPQdJ", "l", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_index">;
+def SVPRFW_GATHER_64B_OFFSETS_S : MInst<"svprfw_gather_[{3}]index", "vPQdJ", "l", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_index">;
+def SVPRFD_GATHER_64B_OFFSETS_S : MInst<"svprfd_gather_[{3}]index", "vPQdJ", "l", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_index">;
+
+def SVPRFB_GATHER_32B_OFFSETS_U : MInst<"svprfb_gather_[{3}]offset", "vPQdJ", "Ui", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_uxtw_index">;
+def SVPRFH_GATHER_32B_OFFSETS_U : MInst<"svprfh_gather_[{3}]index", "vPQdJ", "Ui", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_uxtw_index">;
+def SVPRFW_GATHER_32B_OFFSETS_U : MInst<"svprfw_gather_[{3}]index", "vPQdJ", "Ui", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_uxtw_index">;
+def SVPRFD_GATHER_32B_OFFSETS_U : MInst<"svprfd_gather_[{3}]index", "vPQdJ", "Ui", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_uxtw_index">;
+
+def SVPRFB_GATHER_64B_OFFSETS_U : MInst<"svprfb_gather_[{3}]offset", "vPQdJ", "Ul", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_index">;
+def SVPRFH_GATHER_64B_OFFSETS_U : MInst<"svprfh_gather_[{3}]index", "vPQdJ", "Ul", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_index">;
+def SVPRFW_GATHER_64B_OFFSETS_U : MInst<"svprfw_gather_[{3}]index", "vPQdJ", "Ul", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_index">;
+def SVPRFD_GATHER_64B_OFFSETS_U : MInst<"svprfd_gather_[{3}]index", "vPQdJ", "Ul", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_index">;
+
+// Prefetch (Vector bases, scalar offset)
+def SVPRFB_GATHER_BASES_OFFSET : MInst<"svprfb_gather[_{2}base]_offset", "vPdlJ", "UiUl", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_scalar_offset">;
+def SVPRFH_GATHER_BASES_OFFSET : MInst<"svprfh_gather[_{2}base]_index", "vPdlJ", "UiUl", [IsGatherPrefetch], MemEltTyInt16, "aarch64_sve_prfh_gather_scalar_offset">;
+def SVPRFW_GATHER_BASES_OFFSET : MInst<"svprfw_gather[_{2}base]_index", "vPdlJ", "UiUl", [IsGatherPrefetch], MemEltTyInt32, "aarch64_sve_prfw_gather_scalar_offset">;
+def SVPRFD_GATHER_BASES_OFFSET : MInst<"svprfd_gather[_{2}base]_index", "vPdlJ", "UiUl", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_scalar_offset">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Address calculations
+
+def SVADRB : SInst<"svadrb[_{0}base]_[{2}]offset", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrb">;
+def SVADRH : SInst<"svadrh[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrh">;
+def SVADRW : SInst<"svadrw[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrw">;
+def SVADRD : SInst<"svadrd[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "aarch64_sve_adrd">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Scalar to vector
+
+def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone>;
+def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone>;
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVDUPQ_BF16 : SInst<"svdupq[_n]_{d}", "dssssssss", "b", MergeNone>;
+}
+def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone>;
+def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone>;
+
+multiclass svdup_base<string n, string p, MergeType mt, string i> {
+ def NAME : SInst<n, p, "csilUcUsUiUlhfd", mt, i>;
+ let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def _BF16: SInst<n, p, "b", mt, i>;
+ }
+}
+
+defm SVDUP : svdup_base<"svdup[_n]_{d}", "ds", MergeNone, "aarch64_sve_dup_x">;
+defm SVDUP_M : svdup_base<"svdup[_n]_{d}", "ddPs", MergeOp1, "aarch64_sve_dup">;
+defm SVDUP_X : svdup_base<"svdup[_n]_{d}", "dPs", MergeAnyExp, "aarch64_sve_dup">;
+defm SVDUP_Z : svdup_base<"svdup[_n]_{d}", "dPs", MergeZeroExp, "aarch64_sve_dup">;
+
+def SVINDEX : SInst<"svindex_{d}", "dss", "csilUcUsUiUl", MergeNone, "aarch64_sve_index">;
+
+// Integer arithmetic
+
+multiclass SInstZPZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "ddPd", types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPd", types, MergeAnyExp, intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPd", types, MergeZeroExp, intrinsic, flags>;
+}
+
+defm SVABS : SInstZPZ<"svabs", "csil", "aarch64_sve_abs">;
+defm SVNEG : SInstZPZ<"svneg", "csil", "aarch64_sve_neg">;
+
+//------------------------------------------------------------------------------
+
+multiclass SInstZPZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, intrinsic, flags>;
+
+ def _N_M : SInst<name # "[_n_{d}]", "dPda", types, MergeOp1, intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPda", types, MergeAny, intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPda", types, MergeZero, intrinsic, flags>;
+}
+
+defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd">;
+defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd">;
+defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add">;
+defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv">;
+defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv">;
+defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr">;
+defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr">;
+defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax">;
+defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax">;
+defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin">;
+defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin">;
+defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul">;
+defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh">;
+defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh">;
+defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub">;
+defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr">;
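+
+// The SInstZPZZ expansions above produce the three ACLE merging forms plus the
+// scalar-operand [_n_] overloads; a minimal C sketch, assuming <arm_sve.h>:
+//   #include <arm_sve.h>
+//   svint32_t add_forms(svbool_t pg, svint32_t a, svint32_t b) {
+//     svint32_t m = svadd_m(pg, a, b);  // inactive lanes keep a (MergeOp1)
+//     svint32_t x = svadd_x(pg, a, 1);  // scalar [_n_] overload, lanes unspecified
+//     svint32_t z = svadd_z(pg, a, b);  // inactive lanes zeroed (MergeZero)
+//     return svadd_x(pg, svadd_x(pg, m, x), z);
+//   }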
+
+//------------------------------------------------------------------------------
+
+multiclass SInstZPZZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPddd", types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPddd", types, MergeAny, intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPddd", types, MergeZero, intrinsic, flags>;
+
+ def _N_M : SInst<name # "[_n_{d}]", "dPdda", types, MergeOp1, intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdda", types, MergeAny, intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPdda", types, MergeZero, intrinsic, flags>;
+}
+
+defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad">;
+defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
+defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
+defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
+
+//------------------------------------------------------------------------------
+
+def SVDOT_S : SInst<"svdot[_{0}]", "ddqq", "il", MergeNone, "aarch64_sve_sdot">;
+def SVDOT_U : SInst<"svdot[_{0}]", "ddqq", "UiUl", MergeNone, "aarch64_sve_udot">;
+def SVQADD_S : SInst<"svqadd[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqadd_x">;
+def SVQADD_U : SInst<"svqadd[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
+def SVQSUB_S : SInst<"svqsub[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqsub_x">;
+def SVQSUB_U : SInst<"svqsub[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+
+def SVDOT_N_S : SInst<"svdot[_n_{0}]", "ddqr", "il", MergeNone, "aarch64_sve_sdot">;
+def SVDOT_N_U : SInst<"svdot[_n_{0}]", "ddqr", "UiUl", MergeNone, "aarch64_sve_udot">;
+def SVQADD_N_S : SInst<"svqadd[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqadd_x">;
+def SVQADD_N_U : SInst<"svqadd[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
+def SVQSUB_N_S : SInst<"svqsub[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqsub_x">;
+def SVQSUB_N_U : SInst<"svqsub[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+
+def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Logical operations
+
+defm SVAND : SInstZPZZ<"svand", "csilUcUsUiUl", "aarch64_sve_and">;
+defm SVBIC : SInstZPZZ<"svbic", "csilUcUsUiUl", "aarch64_sve_bic">;
+defm SVEOR : SInstZPZZ<"sveor", "csilUcUsUiUl", "aarch64_sve_eor">;
+defm SVORR : SInstZPZZ<"svorr", "csilUcUsUiUl", "aarch64_sve_orr">;
+
+defm SVCNOT : SInstZPZ<"svcnot", "csilUcUsUiUl", "aarch64_sve_cnot">;
+defm SVNOT : SInstZPZ<"svnot", "csilUcUsUiUl", "aarch64_sve_not">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Shifts
+
+multiclass SInst_SHIFT<string name, string intrinsic, string ts, string wide_ts> {
+ def _M : SInst<name # "[_{d}]", "dPdu", ts, MergeOp1, intrinsic>;
+ def _X : SInst<name # "[_{d}]", "dPdu", ts, MergeAny, intrinsic>;
+ def _Z : SInst<name # "[_{d}]", "dPdu", ts, MergeZero, intrinsic>;
+
+ def _N_M : SInst<name # "[_n_{d}]", "dPdL", ts, MergeOp1, intrinsic>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdL", ts, MergeAny, intrinsic>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPdL", ts, MergeZero, intrinsic>;
+
+ def _WIDE_M : SInst<name # "_wide[_{d}]", "dPdg", wide_ts, MergeOp1, intrinsic # "_wide">;
+ def _WIDE_X : SInst<name # "_wide[_{d}]", "dPdg", wide_ts, MergeAny, intrinsic # "_wide">;
+ def _WIDE_Z : SInst<name # "_wide[_{d}]", "dPdg", wide_ts, MergeZero, intrinsic # "_wide">;
+
+ def _WIDE_N_M : SInst<name # "_wide[_n_{d}]", "dPdf", wide_ts, MergeOp1, intrinsic # "_wide">;
+ def _WIDE_N_X : SInst<name # "_wide[_n_{d}]", "dPdf", wide_ts, MergeAny, intrinsic # "_wide">;
+ def _WIDE_N_Z : SInst<name # "_wide[_n_{d}]", "dPdf", wide_ts, MergeZero, intrinsic # "_wide">;
+}
+
+defm SVASR : SInst_SHIFT<"svasr", "aarch64_sve_asr", "csil", "csi">;
+defm SVLSL : SInst_SHIFT<"svlsl", "aarch64_sve_lsl", "csilUcUsUiUl", "csiUcUsUi">;
+defm SVLSR : SInst_SHIFT<"svlsr", "aarch64_sve_lsr", "UcUsUiUl", "UcUsUi">;
+
+def SVASRD_M : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVASRD_X : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVASRD_Z : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+
+def SVINSR : SInst<"svinsr[_n_{d}]", "dds", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_insr">;
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVINSR_BF16 : SInst<"svinsr[_n_{d}]", "dds", "b", MergeNone, "aarch64_sve_insr">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Integer reductions
+
+def SVADDV_S : SInst<"svaddv[_{d}]", "lPd", "csil", MergeNone, "aarch64_sve_saddv">;
+def SVADDV_U : SInst<"svaddv[_{d}]", "nPd", "UcUsUiUl", MergeNone, "aarch64_sve_uaddv">;
+def SVANDV : SInst<"svandv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_andv">;
+def SVEORV : SInst<"sveorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorv">;
+def SVMAXV_S : SInst<"svmaxv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_smaxv">;
+def SVMAXV_U : SInst<"svmaxv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_umaxv">;
+def SVMINV_S : SInst<"svminv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_sminv">;
+def SVMINV_U : SInst<"svminv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_uminv">;
+def SVORV : SInst<"svorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_orv">;
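+
+// Note the result widths encoded above: signed svaddv widens to int64_t ('l')
+// and unsigned to uint64_t ('n'), while the other reductions return the element
+// type ('s'). A sketch, assuming <arm_sve.h>:
+//   #include <arm_sve.h>
+//   int64_t sum_s32(svbool_t pg, svint32_t v) { return svaddv(pg, v); }
+//   int32_t max_s32(svbool_t pg, svint32_t v) { return svmaxv(pg, v); }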
+
+////////////////////////////////////////////////////////////////////////////////
+// Integer comparisons
+
+def SVCMPEQ : SInst<"svcmpeq[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
+def SVCMPNE : SInst<"svcmpne[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
+def SVCMPGE : SInst<"svcmpge[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge">;
+def SVCMPGT : SInst<"svcmpgt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt">;
+def SVCMPLE : SInst<"svcmple[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
+def SVCMPLT : SInst<"svcmplt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
+def SVCMPHI : SInst<"svcmpgt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
+def SVCMPHS : SInst<"svcmpge[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
+def SVCMPLO : SInst<"svcmplt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
+def SVCMPLS : SInst<"svcmple[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
+
+def SVCMPEQ_N : SInst<"svcmpeq[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
+def SVCMPNE_N : SInst<"svcmpne[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
+def SVCMPGE_N : SInst<"svcmpge[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge">;
+def SVCMPGT_N : SInst<"svcmpgt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt">;
+def SVCMPLE_N : SInst<"svcmple[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
+def SVCMPLT_N : SInst<"svcmplt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
+def SVCMPHS_N : SInst<"svcmpge[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
+def SVCMPHI_N : SInst<"svcmpgt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
+def SVCMPLS_N : SInst<"svcmple[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
+def SVCMPLO_N : SInst<"svcmplt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
+
+def SVCMPEQ_WIDE : SInst<"svcmpeq_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
+def SVCMPNE_WIDE : SInst<"svcmpne_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
+def SVCMPGE_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
+def SVCMPGT_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
+def SVCMPLE_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmple_wide">;
+def SVCMPLT_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
+def SVCMPHI_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
+def SVCMPHS_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
+def SVCMPLO_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
+def SVCMPLS_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
+
+def SVCMPEQ_WIDE_N : SInst<"svcmpeq_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
+def SVCMPNE_WIDE_N : SInst<"svcmpne_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
+def SVCMPGE_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
+def SVCMPGT_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
+def SVCMPLE_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmple_wide">;
+def SVCMPLT_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
+def SVCMPHS_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
+def SVCMPHI_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
+def SVCMPLO_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
+def SVCMPLS_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
+
+////////////////////////////////////////////////////////////////////////////////
+// While comparisons
+
+def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
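+
+// The classic strip-mined loop built from these while comparisons; a sketch
+// only, assuming <arm_sve.h> (function name is illustrative):
+//   #include <arm_sve.h>
+//   void vadd(int32_t *dst, const int32_t *a, const int32_t *b, int64_t n) {
+//     for (int64_t i = 0; i < n; i += svcntw()) {
+//       svbool_t pg = svwhilelt_b32(i, n);  // partial predicate on the last pass
+//       svst1(pg, dst + i, svadd_x(pg, svld1(pg, a + i), svld1(pg, b + i)));
+//     }
+//   }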
+
+////////////////////////////////////////////////////////////////////////////////
+// Counting bit
+
+multiclass SInstCLS<string name, string types, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "uuPd", types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "uPd", types, MergeAnyExp, intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "uPd", types, MergeZeroExp, intrinsic, flags>;
+}
+
+defm SVCLS : SInstCLS<"svcls", "csil", "aarch64_sve_cls">;
+defm SVCLZ : SInstCLS<"svclz", "csilUcUsUiUl", "aarch64_sve_clz">;
+defm SVCNT : SInstCLS<"svcnt", "csilUcUsUiUlhfd", "aarch64_sve_cnt">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ defm SVCNT_BF16 : SInstCLS<"svcnt", "b", "aarch64_sve_cnt">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Conversion
+
+defm SVEXTB_S : SInstZPZ<"svextb", "sil", "aarch64_sve_sxtb">;
+defm SVEXTB_U : SInstZPZ<"svextb", "UsUiUl", "aarch64_sve_uxtb">;
+defm SVEXTH_S : SInstZPZ<"svexth", "il", "aarch64_sve_sxth">;
+defm SVEXTH_U : SInstZPZ<"svexth", "UiUl", "aarch64_sve_uxth">;
+defm SVEXTW_S : SInstZPZ<"svextw", "l", "aarch64_sve_sxtw">;
+defm SVEXTW_U : SInstZPZ<"svextw", "Ul", "aarch64_sve_uxtw">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Reversal
+
+defm SVRBIT : SInstZPZ<"svrbit", "csilUcUsUiUl", "aarch64_sve_rbit">;
+defm SVREVB : SInstZPZ<"svrevb", "silUsUiUl", "aarch64_sve_revb">;
+defm SVREVH : SInstZPZ<"svrevh", "ilUiUl", "aarch64_sve_revh">;
+defm SVREVW : SInstZPZ<"svrevw", "lUl", "aarch64_sve_revw">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point arithmetic
+
+defm SVABS_F : SInstZPZ<"svabs", "hfd", "aarch64_sve_fabs">;
+defm SVNEG_F : SInstZPZ<"svneg", "hfd", "aarch64_sve_fneg">;
+
+defm SVABD_F : SInstZPZZ<"svabd", "hfd", "aarch64_sve_fabd">;
+defm SVADD_F : SInstZPZZ<"svadd", "hfd", "aarch64_sve_fadd">;
+defm SVDIV_F : SInstZPZZ<"svdiv", "hfd", "aarch64_sve_fdiv">;
+defm SVDIVR_F : SInstZPZZ<"svdivr", "hfd", "aarch64_sve_fdivr">;
+defm SVMAX_F : SInstZPZZ<"svmax", "hfd", "aarch64_sve_fmax">;
+defm SVMAXNM : SInstZPZZ<"svmaxnm","hfd", "aarch64_sve_fmaxnm">;
+defm SVMIN_F : SInstZPZZ<"svmin", "hfd", "aarch64_sve_fmin">;
+defm SVMINNM : SInstZPZZ<"svminnm","hfd", "aarch64_sve_fminnm">;
+defm SVMUL_F : SInstZPZZ<"svmul", "hfd", "aarch64_sve_fmul">;
+defm SVMULX : SInstZPZZ<"svmulx", "hfd", "aarch64_sve_fmulx">;
+defm SVSUB_F : SInstZPZZ<"svsub", "hfd", "aarch64_sve_fsub">;
+defm SVSUBR_F : SInstZPZZ<"svsubr", "hfd", "aarch64_sve_fsubr">;
+
+defm SVRECPX : SInstZPZ<"svrecpx", "hfd", "aarch64_sve_frecpx">;
+defm SVRINTA : SInstZPZ<"svrinta", "hfd", "aarch64_sve_frinta">;
+defm SVRINTI : SInstZPZ<"svrinti", "hfd", "aarch64_sve_frinti">;
+defm SVRINTM : SInstZPZ<"svrintm", "hfd", "aarch64_sve_frintm">;
+defm SVRINTN : SInstZPZ<"svrintn", "hfd", "aarch64_sve_frintn">;
+defm SVRINTP : SInstZPZ<"svrintp", "hfd", "aarch64_sve_frintp">;
+defm SVRINTX : SInstZPZ<"svrintx", "hfd", "aarch64_sve_frintx">;
+defm SVRINTZ : SInstZPZ<"svrintz", "hfd", "aarch64_sve_frintz">;
+defm SVSQRT : SInstZPZ<"svsqrt", "hfd", "aarch64_sve_fsqrt">;
+
+def SVEXPA : SInst<"svexpa[_{d}]", "du", "hfd", MergeNone, "aarch64_sve_fexpa_x">;
+def SVTMAD : SInst<"svtmad[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_ftmad_x", [], [ImmCheck<2, ImmCheck0_7>]>;
+def SVTSMUL : SInst<"svtsmul[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftsmul_x">;
+def SVTSSEL : SInst<"svtssel[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftssel_x">;
+
+def SVSCALE_M : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeOp1, "aarch64_sve_fscale">;
+def SVSCALE_X : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeAny, "aarch64_sve_fscale">;
+def SVSCALE_Z : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeZero, "aarch64_sve_fscale">;
+
+def SVSCALE_N_M : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeOp1, "aarch64_sve_fscale">;
+def SVSCALE_N_X : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeAny, "aarch64_sve_fscale">;
+def SVSCALE_N_Z : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeZero, "aarch64_sve_fscale">;
+
+defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad">;
+defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla">;
+defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls">;
+defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb">;
+defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad">;
+defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla">;
+defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls">;
+defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb">;
+
+def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCADD_X : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeAny, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCADD_Z : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeZero, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVCMLA_X : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeAny, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVCMLA_Z : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeZero, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+
+def SVCMLA_LANE : SInst<"svcmla_lane[_{d}]", "ddddii", "hf", MergeNone, "aarch64_sve_fcmla_lane", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+ ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVMLA_LANE : SInst<"svmla_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE : SInst<"svmls_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmls_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE : SInst<"svmul_lane[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_fmul_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+
+def SVRECPE : SInst<"svrecpe[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frecpe_x">;
+def SVRECPS : SInst<"svrecps[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frecps_x">;
+def SVRSQRTE : SInst<"svrsqrte[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frsqrte_x">;
+def SVRSQRTS : SInst<"svrsqrts[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frsqrts_x">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point reductions
+
+def SVFADDA : SInst<"svadda[_{d}]", "sPsd", "hfd", MergeNone, "aarch64_sve_fadda">;
+def SVFADDV : SInst<"svaddv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_faddv">;
+def SVFMAXV : SInst<"svmaxv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxv">;
+def SVFMAXNMV : SInst<"svmaxnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxnmv">;
+def SVFMINV : SInst<"svminv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminv">;
+def SVFMINNMV : SInst<"svminnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminnmv">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point comparisons
+
+def SVACGE : SInst<"svacge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge">;
+def SVACGT : SInst<"svacgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt">;
+def SVACLE : SInst<"svacle[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
+def SVACLT : SInst<"svaclt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
+def SVCMPUO : SInst<"svcmpuo[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
+
+def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge">;
+def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt">;
+def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
+def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
+def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
+
+def SVCMPEQ_F : SInst<"svcmpeq[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
+def SVCMPNE_F : SInst<"svcmpne[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpne">;
+def SVCMPGE_F : SInst<"svcmpge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge">;
+def SVCMPGT_F : SInst<"svcmpgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
+def SVCMPLE_F : SInst<"svcmple[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
+def SVCMPLT_F : SInst<"svcmplt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
+
+def SVCMPEQ_F_N : SInst<"svcmpeq[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
+def SVCMPNE_F_N : SInst<"svcmpne[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpne">;
+def SVCMPGE_F_N : SInst<"svcmpge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge">;
+def SVCMPGT_F_N : SInst<"svcmpgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
+def SVCMPLE_F_N : SInst<"svcmple[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
+def SVCMPLT_F_N : SInst<"svcmplt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point conversions
+
+multiclass SInstCvtMXZ<
+ string name, string m_types, string xz_types, string types,
+ string intrinsic, list<FlagType> flags = [IsOverloadNone]> {
+ def _M : SInst<name, m_types, types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, flags>;
+ def _Z : SInst<name, xz_types, types, MergeZeroExp, intrinsic, flags>;
+}
+
+multiclass SInstCvtMX<string name, string m_types, string xz_types,
+ string types, string intrinsic,
+ list<FlagType> flags = [IsOverloadNone]> {
+ def _M : SInst<name, m_types, types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, flags>;
+}
+
+// svcvt_s##_f16
+defm SVFCVTZS_S16_F16 : SInstCvtMXZ<"svcvt_s16[_f16]", "ddPO", "dPO", "s", "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+defm SVFCVTZS_S32_F16 : SInstCvtMXZ<"svcvt_s32[_f16]", "ddPO", "dPO", "i", "aarch64_sve_fcvtzs_i32f16">;
+defm SVFCVTZS_S64_F16 : SInstCvtMXZ<"svcvt_s64[_f16]", "ddPO", "dPO", "l", "aarch64_sve_fcvtzs_i64f16">;
+
+// svcvt_s##_f32
+defm SVFCVTZS_S32_F32 : SInstCvtMXZ<"svcvt_s32[_f32]", "ddPM", "dPM", "i", "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+defm SVFCVTZS_S64_F32 : SInstCvtMXZ<"svcvt_s64[_f32]", "ddPM", "dPM", "l", "aarch64_sve_fcvtzs_i64f32">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ defm SVCVT_BF16_F32 : SInstCvtMXZ<"svcvt_bf16[_f32]", "ddPM", "dPM", "b", "aarch64_sve_fcvt_bf16f32">;
+ def SVCVTNT_BF16_F32 : SInst<"svcvtnt_bf16[_f32]", "ddPM", "b", MergeOp1, "aarch64_sve_fcvtnt_bf16f32", [IsOverloadNone]>;
+}
+
+// svcvt_s##_f64
+defm SVFCVTZS_S32_F64 : SInstCvtMXZ<"svcvt_s32[_f64]", "ttPd", "tPd", "d", "aarch64_sve_fcvtzs_i32f64">;
+defm SVFCVTZS_S64_F64 : SInstCvtMXZ<"svcvt_s64[_f64]", "ddPN", "dPN", "l", "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+
+// svcvt_u##_f16
+defm SVFCVTZU_U16_F16 : SInstCvtMXZ<"svcvt_u16[_f16]", "ddPO", "dPO", "Us", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+defm SVFCVTZU_U32_F16 : SInstCvtMXZ<"svcvt_u32[_f16]", "ddPO", "dPO", "Ui", "aarch64_sve_fcvtzu_i32f16">;
+defm SVFCVTZU_U64_F16 : SInstCvtMXZ<"svcvt_u64[_f16]", "ddPO", "dPO", "Ul", "aarch64_sve_fcvtzu_i64f16">;
+
+// svcvt_u##_f32
+defm SVFCVTZU_U32_F32 : SInstCvtMXZ<"svcvt_u32[_f32]", "ddPM", "dPM", "Ui", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+defm SVFCVTZU_U64_F32 : SInstCvtMXZ<"svcvt_u64[_f32]", "ddPM", "dPM", "Ul", "aarch64_sve_fcvtzu_i64f32">;
+
+// svcvt_u##_f64
+defm SVFCVTZU_U32_F64 : SInstCvtMXZ<"svcvt_u32[_f64]", "zzPd", "zPd", "d", "aarch64_sve_fcvtzu_i32f64">;
+defm SVFCVTZU_U64_F64 : SInstCvtMXZ<"svcvt_u64[_f64]", "ddPN", "dPN", "Ul", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+
+// svcvt_f16_s##
+defm SVFCVTZS_F16_S16 : SInstCvtMXZ<"svcvt_f16[_s16]", "OOPd", "OPd", "s", "aarch64_sve_scvtf", [IsOverloadCvt]>;
+defm SVFCVTZS_F16_S32 : SInstCvtMXZ<"svcvt_f16[_s32]", "OOPd", "OPd", "i", "aarch64_sve_scvtf_f16i32">;
+defm SVFCVTZS_F16_S64 : SInstCvtMXZ<"svcvt_f16[_s64]", "OOPd", "OPd", "l", "aarch64_sve_scvtf_f16i64">;
+
+// svcvt_f32_s##
+defm SVFCVTZS_F32_S32 : SInstCvtMXZ<"svcvt_f32[_s32]", "MMPd", "MPd", "i", "aarch64_sve_scvtf", [IsOverloadCvt]>;
+defm SVFCVTZS_F32_S64 : SInstCvtMXZ<"svcvt_f32[_s64]", "MMPd", "MPd", "l", "aarch64_sve_scvtf_f32i64">;
+
+// svcvt_f64_s##
+defm SVFCVTZS_F64_S32 : SInstCvtMXZ<"svcvt_f64[_s32]", "ddPt", "dPt", "d", "aarch64_sve_scvtf_f64i32">;
+defm SVFCVTZS_F64_S64 : SInstCvtMXZ<"svcvt_f64[_s64]", "NNPd", "NPd", "l", "aarch64_sve_scvtf", [IsOverloadCvt]>;
+
+// svcvt_f16_u##
+defm SVFCVTZU_F16_U16 : SInstCvtMXZ<"svcvt_f16[_u16]", "OOPd", "OPd", "Us", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+defm SVFCVTZU_F16_U32 : SInstCvtMXZ<"svcvt_f16[_u32]", "OOPd", "OPd", "Ui", "aarch64_sve_ucvtf_f16i32">;
+defm SVFCVTZU_F16_U64 : SInstCvtMXZ<"svcvt_f16[_u64]", "OOPd", "OPd", "Ul", "aarch64_sve_ucvtf_f16i64">;
+
+// svcvt_f32_u##
+defm SVFCVTZU_F32_U32 : SInstCvtMXZ<"svcvt_f32[_u32]", "MMPd", "MPd", "Ui", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+defm SVFCVTZU_F32_U64 : SInstCvtMXZ<"svcvt_f32[_u64]", "MMPd", "MPd", "Ul", "aarch64_sve_ucvtf_f32i64">;
+
+// svcvt_f64_u##
+defm SVFCVTZU_F64_U32 : SInstCvtMXZ<"svcvt_f64[_u32]", "ddPz", "dPz", "d", "aarch64_sve_ucvtf_f64i32">;
+defm SVFCVTZU_F64_U64 : SInstCvtMXZ<"svcvt_f64[_u64]", "NNPd", "NPd", "Ul", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+
+// svcvt_f16_f##
+defm SVFCVT_F16_F32 : SInstCvtMXZ<"svcvt_f16[_f32]", "OOPd", "OPd", "f", "aarch64_sve_fcvt_f16f32">;
+defm SVFCVT_F16_F64 : SInstCvtMXZ<"svcvt_f16[_f64]", "OOPd", "OPd", "d", "aarch64_sve_fcvt_f16f64">;
+
+// svcvt_f32_f##
+defm SVFCVT_F32_F16 : SInstCvtMXZ<"svcvt_f32[_f16]", "ddPO", "dPO", "f", "aarch64_sve_fcvt_f32f16">;
+defm SVFCVT_F32_F64 : SInstCvtMXZ<"svcvt_f32[_f64]", "MMPd", "MPd", "d", "aarch64_sve_fcvt_f32f64">;
+
+// svcvt_f64_f##
+defm SVFCVT_F64_F16 : SInstCvtMXZ<"svcvt_f64[_f16]", "ddPO", "dPO", "d", "aarch64_sve_fcvt_f64f16">;
+defm SVFCVT_F64_F32 : SInstCvtMXZ<"svcvt_f64[_f32]", "ddPM", "dPM", "d", "aarch64_sve_fcvt_f64f32">;
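+
+// Overload resolution for these conversions drops the bracketed source suffix,
+// so only the destination type stays in the name. A sketch, assuming <arm_sve.h>:
+//   #include <arm_sve.h>
+//   svfloat32_t to_f32(svbool_t pg, svint32_t v)   { return svcvt_f32_x(pg, v); }
+//   svint32_t   to_s32(svbool_t pg, svfloat32_t v) { return svcvt_s32_z(pg, v); }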
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVCVTLT_F32 : SInstCvtMX<"svcvtlt_f32[_f16]", "ddPh", "dPh", "f", "aarch64_sve_fcvtlt_f32f16">;
+defm SVCVTLT_F64 : SInstCvtMX<"svcvtlt_f64[_f32]", "ddPh", "dPh", "d", "aarch64_sve_fcvtlt_f64f32">;
+
+defm SVCVTX_F32 : SInstCvtMXZ<"svcvtx_f32[_f64]", "MMPd", "MPd", "d", "aarch64_sve_fcvtx_f32f64">;
+
+def SVCVTNT_F32 : SInst<"svcvtnt_f16[_f32]", "hhPd", "f", MergeOp1, "aarch64_sve_fcvtnt_f16f32", [IsOverloadNone]>;
+def SVCVTNT_F64 : SInst<"svcvtnt_f32[_f64]", "hhPd", "d", MergeOp1, "aarch64_sve_fcvtnt_f32f64", [IsOverloadNone]>;
+// SVCVTNT_X : Implemented as macro by SveEmitter.cpp
+
+def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch64_sve_fcvtxnt_f32f64", [IsOverloadNone]>;
+// SVCVTXNT_X_F32 : Implemented as macro by SveEmitter.cpp
+
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Permutations and selection
+
+multiclass SVEPerm<string name, string proto, string i> {
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i>;
+ let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def: SInst<name, proto, "b", MergeNone, i>;
+ }
+}
+
+defm SVCLASTA : SVEPerm<"svclasta[_{d}]", "dPdd", "aarch64_sve_clasta">;
+defm SVCLASTA_N : SVEPerm<"svclasta[_n_{d}]", "sPsd", "aarch64_sve_clasta_n">;
+defm SVCLASTB : SVEPerm<"svclastb[_{d}]", "dPdd", "aarch64_sve_clastb">;
+defm SVCLASTB_N : SVEPerm<"svclastb[_n_{d}]", "sPsd", "aarch64_sve_clastb_n">;
+
+def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
+// Note: svdup_lane is implemented using the intrinsic for TBL to represent a
+// splat of any possible lane. It is up to LLVM to pick a more efficient
+// instruction such as DUP (indexed) if the lane index fits the range of the
+// instruction's immediate.
+def SVDUP_LANE : SInst<"svdup_lane[_{d}]", "ddL", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVDUP_LANE_BF16 :
+ SInst<"svdup_lane[_{d}]", "ddL", "b", MergeNone, "aarch64_sve_tbl">;
+}
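+
+// Example of the TBL-backed splat described above (illustrative C, assuming
+// <arm_sve.h>): broadcast lane 3 of a vector to every element.
+//   #include <arm_sve.h>
+//   svint32_t splat_lane3(svint32_t v) {
+//     return svdup_lane(v, 3);  // svdup_lane[_s32]; may lower to DUP (indexed)
+//   }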
+
+def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVDUPQ_LANE_BF16 : SInst<"svdupq_lane[_{d}]", "ddn", "b", MergeNone, "aarch64_sve_dupq_lane">;
+}
+def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
+defm SVLASTA : SVEPerm<"svlasta[_{d}]", "sPd", "aarch64_sve_lasta">;
+defm SVLASTB : SVEPerm<"svlastb[_{d}]", "sPd", "aarch64_sve_lastb">;
+def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_rev">;
+def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_sel">;
+def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_splice">;
+def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+ def SVTBL_BF16 : SInst<"svtbl[_{d}]", "ddu", "b", MergeNone, "aarch64_sve_tbl">;
+}
+
+def SVTRN1 : SInst<"svtrn1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1">;
+def SVTRN2 : SInst<"svtrn2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2">;
+def SVUNPKHI_S : SInst<"svunpkhi[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpkhi">;
+def SVUNPKHI_U : SInst<"svunpkhi[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpkhi">;
+def SVUNPKLO_S : SInst<"svunpklo[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpklo">;
+def SVUNPKLO_U : SInst<"svunpklo[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpklo">;
+def SVUZP1 : SInst<"svuzp1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2">;
+def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1">;
+def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVEXT_BF16 : SInst<"svext[_{d}]", "dddi", "b", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
+def SVREV_BF16 : SInst<"svrev[_{d}]", "dd", "b", MergeNone, "aarch64_sve_rev">;
+def SVSEL_BF16 : SInst<"svsel[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_sel">;
+def SVSPLICE_BF16 : SInst<"svsplice[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_splice">;
+def SVTRN1_BF16 : SInst<"svtrn1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1">;
+def SVTRN2_BF16 : SInst<"svtrn2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2">;
+def SVUZP1_BF16 : SInst<"svuzp1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP2_BF16 : SInst<"svuzp2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2">;
+def SVZIP1_BF16 : SInst<"svzip1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1">;
+def SVZIP2_BF16 : SInst<"svzip2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2">;
+}
+
+def SVREV_B : SInst<"svrev_{d}", "PP", "PcPsPiPl", MergeNone, "aarch64_sve_rev">;
+def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
+def SVTRN1_B : SInst<"svtrn1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn1">;
+def SVTRN2_B : SInst<"svtrn2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn2">;
+def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
+def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
+def SVUZP1_B : SInst<"svuzp1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP2_B : SInst<"svuzp2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp2">;
+def SVZIP1_B : SInst<"svzip1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip1">;
+def SVZIP2_B : SInst<"svzip2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip2">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Predicate creation
+
+def SVPFALSE : SInst<"svpfalse[_b]", "P", "", MergeNone, "", [IsOverloadNone]>;
+
+def SVPTRUE_PAT : SInst<"svptrue_pat_{d}", "PI", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue">;
+def SVPTRUE : SInst<"svptrue_{d}", "P", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsAppendSVALL]>;
+
+def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone>;
+def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone>;
+def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone>;
+def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone>;
+def SVDUP_N_B : SInst<"svdup[_n]_{d}", "Ps", "PcPsPiPl", MergeNone>;
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Predicate operations
+
+def SVAND_B_Z : SInst<"svand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_and_z">;
+def SVBIC_B_Z : SInst<"svbic[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_bic_z">;
+def SVEOR_B_Z : SInst<"sveor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_eor_z">;
+def SVMOV_B_Z : SInst<"svmov[_b]_z", "PPP", "Pc", MergeNone>; // Uses custom expansion
+def SVNAND_B_Z : SInst<"svnand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nand_z">;
+def SVNOR_B_Z : SInst<"svnor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nor_z">;
+def SVNOT_B_Z : SInst<"svnot[_b]_z", "PPP", "Pc", MergeNone>; // Uses custom expansion
+def SVORN_B_Z : SInst<"svorn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orn_z">;
+def SVORR_B_Z : SInst<"svorr[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orr_z">;
+
+def SVBRKA : SInst<"svbrka[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brka">;
+def SVBRKA_Z : SInst<"svbrka[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brka_z">;
+def SVBRKB : SInst<"svbrkb[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brkb">;
+def SVBRKB_Z : SInst<"svbrkb[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brkb_z">;
+def SVBRKN_Z : SInst<"svbrkn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkn_z">;
+def SVBRKPA_Z : SInst<"svbrkpa[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpa_z">;
+def SVBRKPB_Z : SInst<"svbrkpb[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpb_z">;
+
+def SVPFIRST : SInst<"svpfirst[_b]", "PPP", "Pc", MergeNone, "aarch64_sve_pfirst">;
+def SVPNEXT : SInst<"svpnext_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_pnext">;
+
+////////////////////////////////////////////////////////////////////////////////
+// Testing predicates
+
+def SVPTEST_ANY : SInst<"svptest_any", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_any">;
+def SVPTEST_FIRST : SInst<"svptest_first", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_first">;
+def SVPTEST_LAST : SInst<"svptest_last", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_last">;
+
+////////////////////////////////////////////////////////////////////////////////
+// FFR manipulation
+
+def SVRDFFR : SInst<"svrdffr", "P", "Pc", MergeNone, "", [IsOverloadNone]>;
+def SVRDFFR_Z : SInst<"svrdffr_z", "PP", "Pc", MergeNone, "", [IsOverloadNone]>;
+def SVSETFFR : SInst<"svsetffr", "v", "", MergeNone, "", [IsOverloadNone]>;
+def SVWRFFR : SInst<"svwrffr", "vP", "Pc", MergeNone, "", [IsOverloadNone]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Counting elements
+
+def SVCNTB_PAT : SInst<"svcntb_pat", "nI", "", MergeNone, "aarch64_sve_cntb", [IsOverloadNone]>;
+def SVCNTH_PAT : SInst<"svcnth_pat", "nI", "", MergeNone, "aarch64_sve_cnth", [IsOverloadNone]>;
+def SVCNTW_PAT : SInst<"svcntw_pat", "nI", "", MergeNone, "aarch64_sve_cntw", [IsOverloadNone]>;
+def SVCNTD_PAT : SInst<"svcntd_pat", "nI", "", MergeNone, "aarch64_sve_cntd", [IsOverloadNone]>;
+
+def SVCNTB : SInst<"svcntb", "n", "", MergeNone, "aarch64_sve_cntb", [IsAppendSVALL, IsOverloadNone]>;
+def SVCNTH : SInst<"svcnth", "n", "", MergeNone, "aarch64_sve_cnth", [IsAppendSVALL, IsOverloadNone]>;
+def SVCNTW : SInst<"svcntw", "n", "", MergeNone, "aarch64_sve_cntw", [IsAppendSVALL, IsOverloadNone]>;
+def SVCNTD : SInst<"svcntd", "n", "", MergeNone, "aarch64_sve_cntd", [IsAppendSVALL, IsOverloadNone]>;
+
+def SVCNTP : SInst<"svcntp_{d}", "nPP", "PcPsPiPl", MergeNone, "aarch64_sve_cntp">;
+def SVLEN : SInst<"svlen[_{d}]", "nd", "csilUcUsUiUlhfd", MergeNone>;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVLEN_BF16 : SInst<"svlen[_{d}]", "nd", "b", MergeNone>;
+}
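+
+// Illustrative C (a sketch, assuming <arm_sve.h>): svcntw() gives the number of
+// 32-bit elements per vector, svlen() the element count of a given vector.
+//   #include <arm_sve.h>
+//   uint64_t words_per_vector(void) { return svcntw(); }
+//   uint64_t elems_f64(svfloat64_t v) { return svlen(v); }  // same as svcntd()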
+
+////////////////////////////////////////////////////////////////////////////////
+// Saturating scalar arithmetic
+
+class sat_type<string u, string t> { string U = u; string T = t; }
+def SignedByte : sat_type<"", "c">;
+def SignedHalf : sat_type<"", "s">;
+def SignedWord : sat_type<"", "i">;
+def SignedDoubleWord : sat_type<"", "l">;
+def UnsignedByte : sat_type<"U", "Uc">;
+def UnsignedHalf : sat_type<"U", "Us">;
+def UnsignedWord : sat_type<"U", "Ui">;
+def UnsignedDoubleWord : sat_type<"U", "Ul">;
+
+multiclass SInst_SAT1<string name, string intrinsic, sat_type type> {
+ def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+}
+
+multiclass SInst_SAT2<string name, string intrinsic, sat_type type> {
+ def "" : SInst<name # "_pat[_{d}]", "ddIi", type.T, MergeNone, intrinsic, [], [ImmCheck<2, ImmCheck1_16>]>;
+ def _ALL : SInst<name # "[_{d}]", "ddi", type.T, MergeNone, intrinsic, [IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+
+ def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+}
+
+defm SVQDECB_S : SInst_SAT1<"svqdecb", "aarch64_sve_sqdecb", SignedByte>;
+defm SVQDECB_U : SInst_SAT1<"svqdecb", "aarch64_sve_uqdecb", UnsignedByte>;
+defm SVQDECH_S : SInst_SAT2<"svqdech", "aarch64_sve_sqdech", SignedHalf>;
+defm SVQDECH_U : SInst_SAT2<"svqdech", "aarch64_sve_uqdech", UnsignedHalf>;
+defm SVQDECW_S : SInst_SAT2<"svqdecw", "aarch64_sve_sqdecw", SignedWord>;
+defm SVQDECW_U : SInst_SAT2<"svqdecw", "aarch64_sve_uqdecw", UnsignedWord>;
+defm SVQDECD_S : SInst_SAT2<"svqdecd", "aarch64_sve_sqdecd", SignedDoubleWord>;
+defm SVQDECD_U : SInst_SAT2<"svqdecd", "aarch64_sve_uqdecd", UnsignedDoubleWord>;
+
+defm SVQINCB_S : SInst_SAT1<"svqincb", "aarch64_sve_sqincb", SignedByte>;
+defm SVQINCB_U : SInst_SAT1<"svqincb", "aarch64_sve_uqincb", UnsignedByte>;
+defm SVQINCH_S : SInst_SAT2<"svqinch", "aarch64_sve_sqinch", SignedHalf>;
+defm SVQINCH_U : SInst_SAT2<"svqinch", "aarch64_sve_uqinch", UnsignedHalf>;
+defm SVQINCW_S : SInst_SAT2<"svqincw", "aarch64_sve_sqincw", SignedWord>;
+defm SVQINCW_U : SInst_SAT2<"svqincw", "aarch64_sve_uqincw", UnsignedWord>;
+defm SVQINCD_S : SInst_SAT2<"svqincd", "aarch64_sve_sqincd", SignedDoubleWord>;
+defm SVQINCD_U : SInst_SAT2<"svqincd", "aarch64_sve_uqincd", UnsignedDoubleWord>;
+
+def SVQDECP_S : SInst<"svqdecp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqdecp">;
+def SVQDECP_U : SInst<"svqdecp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqdecp">;
+def SVQINCP_S : SInst<"svqincp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqincp">;
+def SVQINCP_U : SInst<"svqincp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqincp">;
+
+def SVQDECP_N_S32 : SInst<"svqdecp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n32">;
+def SVQDECP_N_S64 : SInst<"svqdecp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n64">;
+def SVQDECP_N_U32 : SInst<"svqdecp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n32">;
+def SVQDECP_N_U64 : SInst<"svqdecp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n64">;
+def SVQINCP_N_S32 : SInst<"svqincp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n32">;
+def SVQINCP_N_S64 : SInst<"svqincp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n64">;
+def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32">;
+def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_INT8)" in {
+def SVMLLA_S32 : SInst<"svmmla[_s32]", "ddqq","i", MergeNone, "aarch64_sve_smmla">;
+def SVMLLA_U32 : SInst<"svmmla[_u32]", "ddqq","Ui", MergeNone, "aarch64_sve_ummla">;
+def SVUSMLLA_S32 : SInst<"svusmmla[_s32]", "ddbq","i", MergeNone, "aarch64_sve_usmmla">;
+
+def SVUSDOT_S : SInst<"svusdot[_s32]", "ddbq", "i", MergeNone, "aarch64_sve_usdot">;
+def SVUSDOT_N_S : SInst<"svusdot[_n_s32]", "ddbr", "i", MergeNone, "aarch64_sve_usdot">;
+def SVSUDOT_S : SInst<"svsudot[_s32]", "ddqb", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT]>;
+def SVSUDOT_N_S : SInst<"svsudot[_n_s32]", "ddq@", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT]>;
+
+def SVUSDOT_LANE_S : SInst<"svusdot_lane[_s32]", "ddbqi", "i", MergeNone, "aarch64_sve_usdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVSUDOT_LANE_S : SInst<"svsudot_lane[_s32]", "ddqbi", "i", MergeNone, "aarch64_sve_sudot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP32)" in {
+def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_fmmla">;
+}
+
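A minimal sketch (not part of the patch) of the FP32 matrix multiply-accumulate defined above, assuming the ACLE spelling svmmla_f32 from <arm_sve.h> on a target with the f32mm extension; mmla_f32 is a placeholder name:

// SVMLLA_F32: "dddd" - accumulator and both operands are svfloat32_t.
svfloat32_t mmla_f32(svfloat32_t acc, svfloat32_t a, svfloat32_t b) {
  return svmmla_f32(acc, a, b);
}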
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
+def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd","d", MergeNone, "aarch64_sve_fmmla">;
+def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1q">;
+def SVTRN2Q : SInst<"svtrn2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2q">;
+def SVUZP1Q : SInst<"svuzp1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1q">;
+def SVUZP2Q : SInst<"svuzp2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2q">;
+def SVZIP1Q : SInst<"svzip1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1q">;
+def SVZIP2Q : SInst<"svzip2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2q">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVTRN1Q_BF16 : SInst<"svtrn1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1q">;
+def SVTRN2Q_BF16 : SInst<"svtrn2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2q">;
+def SVUZP1Q_BF16 : SInst<"svuzp1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1q">;
+def SVUZP2Q_BF16 : SInst<"svuzp2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2q">;
+def SVZIP1Q_BF16 : SInst<"svzip1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1q">;
+def SVZIP2Q_BF16 : SInst<"svzip2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2q">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Vector creation
+def SVUNDEF_1 : SInst<"svundef_{d}", "d", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_2 : SInst<"svundef2_{d}", "2", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_3 : SInst<"svundef3_{d}", "3", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_4 : SInst<"svundef4_{d}", "4", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+
+def SVCREATE_2 : SInst<"svcreate2[_{d}]", "2dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create2", [IsTupleCreate]>;
+def SVCREATE_3 : SInst<"svcreate3[_{d}]", "3ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create3", [IsTupleCreate]>;
+def SVCREATE_4 : SInst<"svcreate4[_{d}]", "4dddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create4", [IsTupleCreate]>;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVUNDEF_1_BF16 : SInst<"svundef_{d}", "d", "b", MergeNone, "", [IsUndef]>;
+def SVUNDEF_2_BF16 : SInst<"svundef2_{d}", "2", "b", MergeNone, "", [IsUndef]>;
+def SVUNDEF_3_BF16 : SInst<"svundef3_{d}", "3", "b", MergeNone, "", [IsUndef]>;
+def SVUNDEF_4_BF16 : SInst<"svundef4_{d}", "4", "b", MergeNone, "", [IsUndef]>;
+
+def SVCREATE_2_BF16 : SInst<"svcreate2[_{d}]", "2dd", "b", MergeNone, "aarch64_sve_tuple_create2", [IsTupleCreate]>;
+def SVCREATE_3_BF16 : SInst<"svcreate3[_{d}]", "3ddd", "b", MergeNone, "aarch64_sve_tuple_create3", [IsTupleCreate]>;
+def SVCREATE_4_BF16 : SInst<"svcreate4[_{d}]", "4dddd", "b", MergeNone, "aarch64_sve_tuple_create4", [IsTupleCreate]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Vector insertion and extraction
+def SVGET_2 : SInst<"svget2[_{d}]", "d2i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVGET_3 : SInst<"svget3[_{d}]", "d3i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVGET_4 : SInst<"svget4[_{d}]", "d4i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_3>]>;
+
+def SVSET_2 : SInst<"svset2[_{d}]", "22id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVSET_3 : SInst<"svset3[_{d}]", "33id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVSET_4 : SInst<"svset4[_{d}]", "44id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_3>]>;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVGET_2_BF16 : SInst<"svget2[_{d}]", "d2i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVGET_3_BF16 : SInst<"svget3[_{d}]", "d3i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVGET_4_BF16 : SInst<"svget4[_{d}]", "d4i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_3>]>;
+
+def SVSET_2_BF16 : SInst<"svset2[_{d}]", "22id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_1>]>;
+def SVSET_3_BF16 : SInst<"svset3[_{d}]", "33id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_2>]>;
+def SVSET_4_BF16 : SInst<"svset4[_{d}]", "44id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_3>]>;
+}
+
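A minimal sketch (not part of the patch) of the tuple create/get/set forms above as used from <arm_sve.h>; sum_pair is a placeholder name:

svfloat32_t sum_pair(svbool_t pg, svfloat32_t a, svfloat32_t b) {
  svfloat32x2_t t = svcreate2(a, b);           // SVCREATE_2: "2dd"
  svfloat32_t lo = svget2(t, 0);               // SVGET_2: immediate in [0,1]
  t = svset2(t, 1, svadd_f32_x(pg, lo, b));    // SVSET_2: immediate in [0,1]
  return svget2(t, 1);
}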
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 WhileGE/GT
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Uniform DSP operations
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd">;
+defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd">;
+defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd">;
+defm SVHADD_U : SInstZPZZ<"svhadd", "UcUsUiUl", "aarch64_sve_uhadd">;
+defm SVRHADD_S : SInstZPZZ<"svrhadd", "csli", "aarch64_sve_srhadd">;
+defm SVRHADD_U : SInstZPZZ<"svrhadd", "UcUsUiUl", "aarch64_sve_urhadd">;
+
+defm SVQSUB_S : SInstZPZZ<"svqsub", "csli", "aarch64_sve_sqsub">;
+defm SVQSUB_U : SInstZPZZ<"svqsub", "UcUsUiUl", "aarch64_sve_uqsub">;
+defm SVQSUBR_S : SInstZPZZ<"svqsubr", "csli", "aarch64_sve_sqsubr">;
+defm SVQSUBR_U : SInstZPZZ<"svqsubr", "UcUsUiUl", "aarch64_sve_uqsubr">;
+defm SVHSUB_S : SInstZPZZ<"svhsub", "csli", "aarch64_sve_shsub">;
+defm SVHSUB_U : SInstZPZZ<"svhsub", "UcUsUiUl", "aarch64_sve_uhsub">;
+defm SVHSUBR_S : SInstZPZZ<"svhsubr", "csli", "aarch64_sve_shsubr">;
+defm SVHSUBR_U : SInstZPZZ<"svhsubr", "UcUsUiUl", "aarch64_sve_uhsubr">;
+
+defm SVQABS : SInstZPZ<"svqabs", "csil", "aarch64_sve_sqabs">;
+defm SVQNEG : SInstZPZ<"svqneg", "csil", "aarch64_sve_sqneg">;
+defm SVRECPE : SInstZPZ<"svrecpe", "Ui", "aarch64_sve_urecpe">;
+defm SVRSQRTE : SInstZPZ<"svrsqrte", "Ui", "aarch64_sve_ursqrte">;
+}
+
+//------------------------------------------------------------------------------
+
+multiclass SInstZPZxZ<string name, string types, string pat_v, string pat_n, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", pat_v, types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", pat_v, types, MergeAny, intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", pat_v, types, MergeZero, intrinsic, flags>;
+
+ def _N_M : SInst<name # "[_n_{d}]", pat_n, types, MergeOp1, intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", pat_n, types, MergeAny, intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", pat_n, types, MergeZero, intrinsic, flags>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVQRSHL_S : SInstZPZxZ<"svqrshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqrshl">;
+defm SVQRSHL_U : SInstZPZxZ<"svqrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqrshl">;
+defm SVQSHL_S : SInstZPZxZ<"svqshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqshl">;
+defm SVQSHL_U : SInstZPZxZ<"svqshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqshl">;
+defm SVRSHL_S : SInstZPZxZ<"svrshl", "csil", "dPdx", "dPdK", "aarch64_sve_srshl">;
+defm SVRSHL_U : SInstZPZxZ<"svrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_urshl">;
+defm SVSQADD : SInstZPZxZ<"svsqadd", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_usqadd">;
+defm SVUQADD : SInstZPZxZ<"svuqadd", "csil", "dPdu", "dPdL", "aarch64_sve_suqadd">;
+
+def SVABA_S : SInst<"svaba[_{d}]", "dddd", "csil" , MergeNone, "aarch64_sve_saba">;
+def SVABA_U : SInst<"svaba[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
+def SVQDMULH : SInst<"svqdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqdmulh">;
+def SVQRDMULH : SInst<"svqrdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqrdmulh">;
+def SVQRDMLAH : SInst<"svqrdmlah[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlah">;
+def SVQRDMLSH : SInst<"svqrdmlsh[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlsh">;
+
+def SVABA_S_N : SInst<"svaba[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_saba">;
+def SVABA_U_N : SInst<"svaba[_n_{d}]", "ddda", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
+def SVQDMULH_N : SInst<"svqdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqdmulh">;
+def SVQRDMULH_N : SInst<"svqrdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqrdmulh">;
+def SVQRDMLAH_N : SInst<"svqrdmlah[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlah">;
+def SVQRDMLSH_N : SInst<"svqrdmlsh[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlsh">;
+
+def SVQDMULH_LANE : SInst<"svqdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqdmulh_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMULH_LANE : SInst<"svqrdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqrdmulh_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMLAH_LANE : SInst<"svqrdmlah_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlah_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQRDMLSH_LANE : SInst<"svqrdmlsh_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlsh_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+
+def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_X : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeAny, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_Z : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeZero, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVRSHR_M_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_M_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeOp1, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeAny, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeZero, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_S : SInst<"svrsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_srsra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_U : SInst<"svrsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_ursra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSLI : SInst<"svsli[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sli", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVSRA_S : SInst<"svsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_ssra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRA_U : SInst<"svsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_usra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRI : SInst<"svsri[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sri", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+}
+
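A minimal sketch (not part of the patch) of two of the uniform DSP forms above, assuming the ACLE spellings svqrdmlah and svqshlu_n_s16_x; dsp_step is a placeholder name:

svuint16_t dsp_step(svbool_t pg, svint16_t acc, svint16_t a, svint16_t b) {
  acc = svqrdmlah(acc, a, b);           // SVQRDMLAH: "dddd", unpredicated
  return svqshlu_n_s16_x(pg, acc, 3);   // SVQSHLU_X: left-shift immediate in [0,15] for 16-bit
}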
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Non-widening pairwise arithmetic
+
+multiclass SInstPairwise<string name, string types, string intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVADDP : SInstPairwise<"svaddp", "csliUcUsUiUl", "aarch64_sve_addp">;
+defm SVADDP_F : SInstPairwise<"svaddp", "hfd", "aarch64_sve_faddp">;
+defm SVMAXNMP : SInstPairwise<"svmaxnmp", "hfd", "aarch64_sve_fmaxnmp">;
+defm SVMAXP_F : SInstPairwise<"svmaxp", "hfd", "aarch64_sve_fmaxp">;
+defm SVMAXP_S : SInstPairwise<"svmaxp", "csli", "aarch64_sve_smaxp">;
+defm SVMAXP_U : SInstPairwise<"svmaxp", "UcUsUiUl", "aarch64_sve_umaxp">;
+defm SVMINNMP : SInstPairwise<"svminnmp", "hfd", "aarch64_sve_fminnmp">;
+defm SVMINP_F : SInstPairwise<"svminp", "hfd", "aarch64_sve_fminp">;
+defm SVMINP_S : SInstPairwise<"svminp", "csli", "aarch64_sve_sminp">;
+defm SVMINP_U : SInstPairwise<"svminp", "UcUsUiUl", "aarch64_sve_uminp">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Widening pairwise arithmetic
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVADALP_S_M : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeOp1, "aarch64_sve_sadalp">;
+def SVADALP_S_X : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeAny, "aarch64_sve_sadalp">;
+def SVADALP_S_Z : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeZero, "aarch64_sve_sadalp">;
+
+def SVADALP_U_M : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeOp1, "aarch64_sve_uadalp">;
+def SVADALP_U_X : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeAny, "aarch64_sve_uadalp">;
+def SVADALP_U_Z : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeZero, "aarch64_sve_uadalp">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Bitwise ternary logical instructions
+//
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVBCAX : SInst<"svbcax[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
+def SVBSL : SInst<"svbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
+def SVBSL1N : SInst<"svbsl1n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
+def SVBSL2N : SInst<"svbsl2n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
+def SVEOR3 : SInst<"sveor3[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
+def SVNBSL : SInst<"svnbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
+
+def SVBCAX_N : SInst<"svbcax[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
+def SVBSL_N : SInst<"svbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
+def SVBSL1N_N : SInst<"svbsl1n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
+def SVBSL2N_N : SInst<"svbsl2n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
+def SVEOR3_N : SInst<"sveor3[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
+def SVNBSL_N : SInst<"svnbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
+def SVXAR_N : SInst<"svxar[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_xar", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Large integer arithmetic
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVADCLB : SInst<"svadclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclb">;
+def SVADCLT : SInst<"svadclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclt">;
+def SVSBCLB : SInst<"svsbclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclb">;
+def SVSBCLT : SInst<"svsbclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclt">;
+
+def SVADCLB_N : SInst<"svadclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclb">;
+def SVADCLT_N : SInst<"svadclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclt">;
+def SVSBCLB_N : SInst<"svsbclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclb">;
+def SVSBCLT_N : SInst<"svsbclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclt">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Multiplication by indexed elements
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVMLA_LANE_2 : SInst<"svmla_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE_2 : SInst<"svmls_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mls_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "aarch64_sve_mul_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Uniform complex integer arithmetic
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVCADD : SInst<"svcadd[_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cadd_x", [], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
+def SVSQCADD : SInst<"svqcadd[_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_sqcadd_x", [], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
+def SVCMLA : SInst<"svcmla[_{d}]", "ddddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmla_x", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVCMLA_LANE_X : SInst<"svcmla_lane[_{d}]", "ddddii", "siUsUi", MergeNone, "aarch64_sve_cmla_lane_x", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+ ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVSQRDCMLAH_X : SInst<"svqrdcmlah[_{d}]", "ddddi", "csil", MergeNone, "aarch64_sve_sqrdcmlah_x", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVSQRDCMLAH_LANE_X : SInst<"svqrdcmlah_lane[_{d}]", "ddddii", "si", MergeNone, "aarch64_sve_sqrdcmlah_lane_x", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+ ImmCheck<4, ImmCheckComplexRotAll90>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Widening DSP operations
+
+multiclass SInstWideDSPAcc<string name, string types, string intrinsic> {
+ def : SInst<name # "[_{d}]", "ddhh", types, MergeNone, intrinsic>;
+ def _N : SInst<name # "[_n_{d}]", "ddhR", types, MergeNone, intrinsic>;
+}
+
+multiclass SInstWideDSPLong<string name, string types, string intrinsic> {
+ def : SInst<name # "[_{d}]", "dhh", types, MergeNone, intrinsic>;
+ def _N : SInst<name # "[_n_{d}]", "dhR", types, MergeNone, intrinsic>;
+}
+
+multiclass SInstWideDSPWide<string name, string types, string intrinsic> {
+ def : SInst<name # "[_{d}]", "ddh", types, MergeNone, intrinsic>;
+ def _N : SInst<name # "[_n_{d}]", "ddR", types, MergeNone, intrinsic>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVABALB_S : SInstWideDSPAcc<"svabalb", "sil", "aarch64_sve_sabalb">;
+defm SVABALB_U : SInstWideDSPAcc<"svabalb", "UsUiUl", "aarch64_sve_uabalb">;
+defm SVABALT_S : SInstWideDSPAcc<"svabalt", "sil", "aarch64_sve_sabalt">;
+defm SVABALT_U : SInstWideDSPAcc<"svabalt", "UsUiUl", "aarch64_sve_uabalt">;
+defm SVMLALB_S : SInstWideDSPAcc<"svmlalb", "sil", "aarch64_sve_smlalb">;
+defm SVMLALB_U : SInstWideDSPAcc<"svmlalb", "UsUiUl", "aarch64_sve_umlalb">;
+defm SVMLALT_S : SInstWideDSPAcc<"svmlalt", "sil", "aarch64_sve_smlalt">;
+defm SVMLALT_U : SInstWideDSPAcc<"svmlalt", "UsUiUl", "aarch64_sve_umlalt">;
+defm SVMLSLB_S : SInstWideDSPAcc<"svmlslb", "sil", "aarch64_sve_smlslb">;
+defm SVMLSLB_U : SInstWideDSPAcc<"svmlslb", "UsUiUl", "aarch64_sve_umlslb">;
+defm SVMLSLT_S : SInstWideDSPAcc<"svmlslt", "sil", "aarch64_sve_smlslt">;
+defm SVMLSLT_U : SInstWideDSPAcc<"svmlslt", "UsUiUl", "aarch64_sve_umlslt">;
+defm SVQDMLALB : SInstWideDSPAcc<"svqdmlalb", "sil", "aarch64_sve_sqdmlalb">;
+defm SVQDMLALT : SInstWideDSPAcc<"svqdmlalt", "sil", "aarch64_sve_sqdmlalt">;
+defm SVQDMLSLB : SInstWideDSPAcc<"svqdmlslb", "sil", "aarch64_sve_sqdmlslb">;
+defm SVQDMLSLT : SInstWideDSPAcc<"svqdmlslt", "sil", "aarch64_sve_sqdmlslt">;
+
+defm SVABDLB_S : SInstWideDSPLong<"svabdlb", "sil", "aarch64_sve_sabdlb">;
+defm SVABDLB_U : SInstWideDSPLong<"svabdlb", "UsUiUl", "aarch64_sve_uabdlb">;
+defm SVABDLT_S : SInstWideDSPLong<"svabdlt", "sil", "aarch64_sve_sabdlt">;
+defm SVABDLT_U : SInstWideDSPLong<"svabdlt", "UsUiUl", "aarch64_sve_uabdlt">;
+defm SVADDLB_S : SInstWideDSPLong<"svaddlb", "sil", "aarch64_sve_saddlb">;
+defm SVADDLB_U : SInstWideDSPLong<"svaddlb", "UsUiUl", "aarch64_sve_uaddlb">;
+defm SVADDLT_S : SInstWideDSPLong<"svaddlt", "sil", "aarch64_sve_saddlt">;
+defm SVADDLT_U : SInstWideDSPLong<"svaddlt", "UsUiUl", "aarch64_sve_uaddlt">;
+defm SVMULLB_S : SInstWideDSPLong<"svmullb", "sil", "aarch64_sve_smullb">;
+defm SVMULLB_U : SInstWideDSPLong<"svmullb", "UsUiUl", "aarch64_sve_umullb">;
+defm SVMULLT_S : SInstWideDSPLong<"svmullt", "sil", "aarch64_sve_smullt">;
+defm SVMULLT_U : SInstWideDSPLong<"svmullt", "UsUiUl", "aarch64_sve_umullt">;
+defm SVQDMULLB : SInstWideDSPLong<"svqdmullb", "sil", "aarch64_sve_sqdmullb">;
+defm SVQDMULLT : SInstWideDSPLong<"svqdmullt", "sil", "aarch64_sve_sqdmullt">;
+defm SVSUBLB_S : SInstWideDSPLong<"svsublb", "sil", "aarch64_sve_ssublb">;
+defm SVSUBLB_U : SInstWideDSPLong<"svsublb", "UsUiUl", "aarch64_sve_usublb">;
+defm SVSUBLT_S : SInstWideDSPLong<"svsublt", "sil", "aarch64_sve_ssublt">;
+defm SVSUBLT_U : SInstWideDSPLong<"svsublt", "UsUiUl", "aarch64_sve_usublt">;
+
+defm SVADDWB_S : SInstWideDSPWide<"svaddwb", "sil", "aarch64_sve_saddwb">;
+defm SVADDWB_U : SInstWideDSPWide<"svaddwb", "UsUiUl", "aarch64_sve_uaddwb">;
+defm SVADDWT_S : SInstWideDSPWide<"svaddwt", "sil", "aarch64_sve_saddwt">;
+defm SVADDWT_U : SInstWideDSPWide<"svaddwt", "UsUiUl", "aarch64_sve_uaddwt">;
+defm SVSUBWB_S : SInstWideDSPWide<"svsubwb", "sil", "aarch64_sve_ssubwb">;
+defm SVSUBWB_U : SInstWideDSPWide<"svsubwb", "UsUiUl", "aarch64_sve_usubwb">;
+defm SVSUBWT_S : SInstWideDSPWide<"svsubwt", "sil", "aarch64_sve_ssubwt">;
+defm SVSUBWT_U : SInstWideDSPWide<"svsubwt", "UsUiUl", "aarch64_sve_usubwt">;
+
+def SVSHLLB_S_N : SInst<"svshllb[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllb", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLB_U_N : SInst<"svshllb[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllb", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLT_S_N : SInst<"svshllt[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllt", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLT_U_N : SInst<"svshllt[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllt", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+
+def SVMOVLB_S_N : SInst<"svmovlb[_{d}]", "dh", "sil", MergeNone>;
+def SVMOVLB_U_N : SInst<"svmovlb[_{d}]", "dh", "UsUiUl", MergeNone>;
+def SVMOVLT_S_N : SInst<"svmovlt[_{d}]", "dh", "sil", MergeNone>;
+def SVMOVLT_U_N : SInst<"svmovlt[_{d}]", "dh", "UsUiUl", MergeNone>;
+
+def SVMLALB_S_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALB_U_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_S_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_U_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_S_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_U_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_S_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_U_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMULLB_S_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLB_U_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_S_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_U_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMLALB_LANE : SInst<"svqdmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLALT_LANE : SInst<"svqdmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLB_LANE : SInst<"svqdmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLT_LANE : SInst<"svqdmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMULLB_LANE : SInst<"svqdmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMULLT_LANE : SInst<"svqdmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+}
+
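A minimal sketch (not part of the patch) of a widening multiply-accumulate with a lane selector, assuming the ACLE spelling svmlalb_lane_s32; widen_mla is a placeholder name:

svint32_t widen_mla(svint32_t acc, svint16_t a, svint16_t b) {
  // SVMLALB_S_LANE: "ddhhi"; ImmCheckLaneIndex limits the index to the
  // 16-bit element count of a 128-bit segment, i.e. [0,7] here.
  return svmlalb_lane_s32(acc, a, b, 3);
}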
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Narrowing DSP operations
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVADDHNB : SInst<"svaddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnb">;
+def SVADDHNT : SInst<"svaddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnt">;
+def SVRADDHNB : SInst<"svraddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb">;
+def SVRADDHNT : SInst<"svraddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt">;
+def SVRSUBHNB : SInst<"svrsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb">;
+def SVRSUBHNT : SInst<"svrsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt">;
+def SVSUBHNB : SInst<"svsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnb">;
+def SVSUBHNT : SInst<"svsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnt">;
+
+def SVADDHNB_N : SInst<"svaddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_addhnb">;
+def SVADDHNT_N : SInst<"svaddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_addhnt">;
+def SVRADDHNB_N : SInst<"svraddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb">;
+def SVRADDHNT_N : SInst<"svraddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt">;
+def SVRSUBHNB_N : SInst<"svrsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb">;
+def SVRSUBHNT_N : SInst<"svrsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt">;
+def SVSUBHNB_N : SInst<"svsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_subhnb">;
+def SVSUBHNT_N : SInst<"svsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_subhnt">;
+
+def SVSHRNB : SInst<"svshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVRSHRNB : SInst<"svrshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRUNB : SInst<"svqshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqshrunb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRUNB : SInst<"svqrshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqrshrunb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRNB_S : SInst<"svqshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRNB_U : SInst<"svqshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRNB_S : SInst<"svqrshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqrshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRNB_U : SInst<"svqrshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+
+def SVSHRNT : SInst<"svshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVRSHRNT : SInst<"svrshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRUNT : SInst<"svqshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqshrunt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRUNT : SInst<"svqrshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqrshrunt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRNT_S : SInst<"svqshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRNT_U : SInst<"svqshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRNT_S : SInst<"svqrshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqrshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRNT_U : SInst<"svqrshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+}
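A minimal sketch (not part of the patch) of the bottom/top narrowing pair above, assuming the ACLE spellings svshrnb/svshrnt; narrow_halves is a placeholder name:

svint16_t narrow_halves(svint32_t lo, svint32_t hi) {
  svint16_t r = svshrnb(lo, 4);   // SVSHRNB: writes the even lanes, shift in [1,16] for 32->16
  return svshrnt(r, hi, 4);       // SVSHRNT: fills the odd lanes of r
}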
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Unary narrowing operations
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVQXTNB_S : SInst<"svqxtnb[_{d}]", "hd", "sil", MergeNone, "aarch64_sve_sqxtnb">;
+def SVQXTNB_U : SInst<"svqxtnb[_{d}]", "hd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnb">;
+def SVQXTUNB_S : SInst<"svqxtunb[_{d}]", "ed", "sil", MergeNone, "aarch64_sve_sqxtunb">;
+
+def SVQXTNT_S : SInst<"svqxtnt[_{d}]", "hhd", "sil", MergeNone, "aarch64_sve_sqxtnt">;
+def SVQXTNT_U : SInst<"svqxtnt[_{d}]", "hhd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnt">;
+def SVQXTUNT_S : SInst<"svqxtunt[_{d}]", "eed", "sil", MergeNone, "aarch64_sve_sqxtunt">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Widening complex integer arithmetic
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVADDLBT : SInstWideDSPLong<"svaddlbt", "sil", "aarch64_sve_saddlbt">;
+defm SVSUBLBT : SInstWideDSPLong<"svsublbt", "sil", "aarch64_sve_ssublbt">;
+defm SVSUBLTB : SInstWideDSPLong<"svsubltb", "sil", "aarch64_sve_ssubltb">;
+
+defm SVQDMLALBT : SInstWideDSPAcc<"svqdmlalbt", "sil", "aarch64_sve_sqdmlalbt">;
+defm SVQDMLSLBT : SInstWideDSPAcc<"svqdmlslbt", "sil", "aarch64_sve_sqdmlslbt">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Non-temporal gather/scatter
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+// Non-temporal gather load one vector (vector base)
+def SVLDNT1_GATHER_BASES_U : MInst<"svldnt1_gather[_{2}base]_{0}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SB_GATHER_BASES_U : MInst<"svldnt1sb_gather[_{2}base]_{0}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UB_GATHER_BASES_U : MInst<"svldnt1ub_gather[_{2}base]_{0}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SH_GATHER_BASES_U : MInst<"svldnt1sh_gather[_{2}base]_{0}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UH_GATHER_BASES_U : MInst<"svldnt1uh_gather[_{2}base]_{0}", "dPu", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SW_GATHER_BASES_U : MInst<"svldnt1sw_gather[_{2}base]_{0}", "dPu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UW_GATHER_BASES_U : MInst<"svldnt1uw_gather[_{2}base]_{0}", "dPu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+
+// Non-temporal gather load one vector (scalar base, signed vector offset in bytes)
+def SVLDNT1_GATHER_64B_OFFSETS_S : MInst<"svldnt1_gather_[{3}]offset[_{0}]", "dPcx", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SB_GATHER_64B_OFFSETS_S : MInst<"svldnt1sb_gather_[{3}]offset_{0}", "dPSx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UB_GATHER_64B_OFFSETS_S : MInst<"svldnt1ub_gather_[{3}]offset_{0}", "dPWx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SH_GATHER_64B_OFFSETS_S : MInst<"svldnt1sh_gather_[{3}]offset_{0}", "dPTx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UH_GATHER_64B_OFFSETS_S : MInst<"svldnt1uh_gather_[{3}]offset_{0}", "dPXx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SW_GATHER_64B_OFFSETS_S : MInst<"svldnt1sw_gather_[{3}]offset_{0}", "dPUx", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UW_GATHER_64B_OFFSETS_S : MInst<"svldnt1uw_gather_[{3}]offset_{0}", "dPYx", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather">;
+
+// Non-temporal gather load one vector (scalar base, unsigned vector offset in bytes)
+def SVLDNT1_GATHER_64B_OFFSETS_U : MInst<"svldnt1_gather_[{3}]offset[_{0}]", "dPcu", "lUld", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SB_GATHER_64B_OFFSETS_U : MInst<"svldnt1sb_gather_[{3}]offset_{0}", "dPSu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UB_GATHER_64B_OFFSETS_U : MInst<"svldnt1ub_gather_[{3}]offset_{0}", "dPWu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SH_GATHER_64B_OFFSETS_U : MInst<"svldnt1sh_gather_[{3}]offset_{0}", "dPTu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UH_GATHER_64B_OFFSETS_U : MInst<"svldnt1uh_gather_[{3}]offset_{0}", "dPXu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1SW_GATHER_64B_OFFSETS_U : MInst<"svldnt1sw_gather_[{3}]offset_{0}", "dPUu", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldnt1_gather">;
+def SVLDNT1UW_GATHER_64B_OFFSETS_U : MInst<"svldnt1uw_gather_[{3}]offset_{0}", "dPYu", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather">;
+
+def SVLDNT1_GATHER_32B_OFFSETS_U : MInst<"svldnt1_gather_[{3}]offset[_{0}]", "dPcu", "iUif", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldnt1_gather_uxtw">;
+def SVLDNT1SB_GATHER_32B_OFFSETS_U : MInst<"svldnt1sb_gather_[{3}]offset_{0}", "dPSu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldnt1_gather_uxtw">;
+def SVLDNT1UB_GATHER_32B_OFFSETS_U : MInst<"svldnt1ub_gather_[{3}]offset_{0}", "dPWu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnt1_gather_uxtw">;
+def SVLDNT1SH_GATHER_32B_OFFSETS_U : MInst<"svldnt1sh_gather_[{3}]offset_{0}", "dPTu", "iUi", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldnt1_gather_uxtw">;
+def SVLDNT1UH_GATHER_32B_OFFSETS_U : MInst<"svldnt1uh_gather_[{3}]offset_{0}", "dPXu", "iUi", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_uxtw">;
+
+// Non-temporal gather load one vector (vector base, scalar offset in bytes)
+def SVLDNT1_GATHER_OFFSET_S : MInst<"svldnt1_gather[_{2}base]_offset_{0}", "dPul", "ilUiUlfd", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SB_GATHER_OFFSET_S : MInst<"svldnt1sb_gather[_{2}base]_offset_{0}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt8, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UB_GATHER_OFFSET_S : MInst<"svldnt1ub_gather[_{2}base]_offset_{0}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SH_GATHER_OFFSET_S : MInst<"svldnt1sh_gather[_{2}base]_offset_{0}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UH_GATHER_OFFSET_S : MInst<"svldnt1uh_gather[_{2}base]_offset_{0}", "dPul", "ilUiUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SW_GATHER_OFFSET_S : MInst<"svldnt1sw_gather[_{2}base]_offset_{0}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UW_GATHER_OFFSET_S : MInst<"svldnt1uw_gather[_{2}base]_offset_{0}", "dPul", "lUl", [IsGatherLoad, IsByteIndexed, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+
+// Non-temporal gather load one vector (scalar base, signed vector index)
+def SVLDNT1_GATHER_64B_INDICES_S : MInst<"svldnt1_gather_[{3}]index[_{0}]", "dPcx", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1SH_GATHER_64B_INDICES_S : MInst<"svldnt1sh_gather_[{3}]index_{0}", "dPTx", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1UH_GATHER_64B_INDICES_S : MInst<"svldnt1uh_gather_[{3}]index_{0}", "dPXx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1SW_GATHER_64B_INDICES_S : MInst<"svldnt1sw_gather_[{3}]index_{0}", "dPUx", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1UW_GATHER_64B_INDICES_S : MInst<"svldnt1uw_gather_[{3}]index_{0}", "dPYx", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather_index">;
+
+// Non-temporal gather load one vector (scalar base, unsigned vector index)
+def SVLDNT1_GATHER_64B_INDICES_U : MInst<"svldnt1_gather_[{3}]index[_{0}]", "dPcu", "lUld", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1SH_GATHER_64B_INDICES_U : MInst<"svldnt1sh_gather_[{3}]index_{0}", "dPTu", "lUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1UH_GATHER_64B_INDICES_U : MInst<"svldnt1uh_gather_[{3}]index_{0}", "dPXu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1SW_GATHER_64B_INDICES_U : MInst<"svldnt1sw_gather_[{3}]index_{0}", "dPUu", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldnt1_gather_index">;
+def SVLDNT1UW_GATHER_64B_INDICES_U : MInst<"svldnt1uw_gather_[{3}]index_{0}", "dPYu", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather_index">;
+
+// Non-temporal gather load one vector (vector base, signed scalar index)
+def SVLDNT1_GATHER_INDEX_S : MInst<"svldnt1_gather[_{2}base]_index_{0}", "dPul", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SH_GATHER_INDEX_S : MInst<"svldnt1sh_gather[_{2}base]_index_{0}", "dPul", "ilUiUl", [IsGatherLoad], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UH_GATHER_INDEX_S : MInst<"svldnt1uh_gather[_{2}base]_index_{0}", "dPul", "ilUiUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1SW_GATHER_INDEX_S : MInst<"svldnt1sw_gather[_{2}base]_index_{0}", "dPul", "lUl", [IsGatherLoad], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+def SVLDNT1UW_GATHER_INDEX_S : MInst<"svldnt1uw_gather[_{2}base]_index_{0}", "dPul", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnt1_gather_scalar_offset">;
+
+// Non-temporal scatter store one vector (vector base)
+def SVSTNT1_SCATTER_BASES_U : MInst<"svstnt1_scatter[_{2}base_{d}]", "vPud", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1B_SCATTER_BASES_U : MInst<"svstnt1b_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt8, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1H_SCATTER_BASES_U : MInst<"svstnt1h_scatter[_{2}base_{d}]", "vPud", "ilUiUl", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1W_SCATTER_BASES_U : MInst<"svstnt1w_scatter[_{2}base_{d}]", "vPud", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_scalar_offset">;
+
+// Non-temporal scatter store one vector (scalar base, signed vector offset in bytes)
+def SVSTNT1_SCATTER_64B_OFFSETS_S : MInst<"svstnt1_scatter_[{3}]offset[_{d}]", "vPpxd", "lUld", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1B_SCATTER_64B_OFFSETS_SS : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPAxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1B_SCATTER_64B_OFFSETS_SU : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPExd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1H_SCATTER_64B_OFFSETS_SS : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPBxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1H_SCATTER_64B_OFFSETS_SU : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPFxd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1W_SCATTER_64B_OFFSETS_SS : MInst<"svstnt1w_scatter_[{3}]offset[_{d}]", "vPCxd", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1W_SCATTER_64B_OFFSETS_SU : MInst<"svstnt1w_scatter_[{3}]offset[_{d}]", "vPGxd", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_stnt1_scatter">;
+
+// Non-temporal scatter store one vector (scalar base, unsigned vector offset in bytes)
+def SVSTNT1_SCATTER_64B_OFFSETS_U : MInst<"svstnt1_scatter_[{3}]offset[_{d}]", "vPpud", "lUld", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1B_SCATTER_64B_OFFSETS_US : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPAud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1B_SCATTER_64B_OFFSETS_UU : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPEud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1H_SCATTER_64B_OFFSETS_US : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPBud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1H_SCATTER_64B_OFFSETS_UU : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPFud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1W_SCATTER_64B_OFFSETS_US : MInst<"svstnt1w_scatter_[{3}]offset[_{d}]", "vPCud", "l", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_stnt1_scatter">;
+def SVSTNT1W_SCATTER_64B_OFFSETS_UU : MInst<"svstnt1w_scatter_[{3}]offset[_{d}]", "vPGud", "Ul", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_stnt1_scatter">;
+
+def SVSTNT1_SCATTER_32B_OFFSETS_U : MInst<"svstnt1_scatter_[{3}]offset[_{d}]", "vPpud", "iUif", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_stnt1_scatter_uxtw">;
+def SVSTNT1B_SCATTER_32B_OFFSETS_US : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPAud", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter_uxtw">;
+def SVSTNT1B_SCATTER_32B_OFFSETS_UU : MInst<"svstnt1b_scatter_[{3}]offset[_{d}]", "vPEud", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter_uxtw">;
+def SVSTNT1H_SCATTER_32B_OFFSETS_US : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPBud", "i", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter_uxtw">;
+def SVSTNT1H_SCATTER_32B_OFFSETS_UU : MInst<"svstnt1h_scatter_[{3}]offset[_{d}]", "vPFud", "Ui", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter_uxtw">;
+
+// Non-temporal scatter store one vector (vector base, scalar offset in bytes)
+def SVSTNT1_SCATTER_OFFSET_S : MInst<"svstnt1_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUlfd", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1B_SCATTER_OFFSET_S : MInst<"svstnt1b_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUl", [IsScatterStore, IsByteIndexed], MemEltTyInt8, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1H_SCATTER_OFFSET_S : MInst<"svstnt1h_scatter[_{2}base]_offset[_{d}]", "vPuld", "ilUiUl", [IsScatterStore, IsByteIndexed], MemEltTyInt16, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1W_SCATTER_OFFSET_S : MInst<"svstnt1w_scatter[_{2}base]_offset[_{d}]", "vPuld", "lUl", [IsScatterStore, IsByteIndexed], MemEltTyInt32, "aarch64_sve_stnt1_scatter_scalar_offset">;
+
+// Non-temporal scatter store one vector (scalar base, signed vector index)
+def SVSTNT1_SCATTER_INDICES_S : MInst<"svstnt1_scatter_[{3}]index[_{d}]", "vPpxd", "lUld", [IsScatterStore], MemEltTyDefault, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1H_SCATTER_INDICES_SS : MInst<"svstnt1h_scatter_[{3}]index[_{d}]", "vPBxd", "l", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1H_SCATTER_INDICES_SU : MInst<"svstnt1h_scatter_[{3}]index[_{d}]", "vPFxd", "Ul", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1W_SCATTER_INDICES_SS : MInst<"svstnt1w_scatter_[{3}]index[_{d}]", "vPCxd", "l", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1W_SCATTER_INDICES_SU : MInst<"svstnt1w_scatter_[{3}]index[_{d}]", "vPGxd", "Ul", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_index">;
+
+// Non-temporal scatter store one vector (scalar base, unsigned vector index)
+def SVSTNT1_SCATTER_INDICES_U : MInst<"svstnt1_scatter_[{3}]index[_{d}]", "vPpud", "lUld", [IsScatterStore], MemEltTyDefault, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1H_SCATTER_INDICES_US : MInst<"svstnt1h_scatter_[{3}]index[_{d}]", "vPBud", "l", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1H_SCATTER_INDICES_UU : MInst<"svstnt1h_scatter_[{3}]index[_{d}]", "vPFud", "Ul", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1W_SCATTER_INDICES_US : MInst<"svstnt1w_scatter_[{3}]index[_{d}]", "vPCud", "l", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_index">;
+def SVSTNT1W_SCATTER_INDICES_UU : MInst<"svstnt1w_scatter_[{3}]index[_{d}]", "vPGud", "Ul", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_index">;
+
+// Non-temporal scatter store one vector (vector base, signed scalar index)
+def SVSTNT1_SCATTER_INDEX_S : MInst<"svstnt1_scatter[_{2}base]_index[_{d}]", "vPuld", "ilUiUlfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1H_SCATTER_INDEX_S : MInst<"svstnt1h_scatter[_{2}base]_index[_{d}]", "vPuld", "ilUiUl", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_scalar_offset">;
+def SVSTNT1W_SCATTER_INDEX_S : MInst<"svstnt1w_scatter[_{2}base]_index[_{d}]", "vPuld", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_scalar_offset">;
+}
+
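A minimal sketch (not part of the patch) pairing one of the non-temporal gathers above with its matching scatter, assuming the ACLE spellings from <arm_sve.h>; copy_nt is a placeholder name:

void copy_nt(svbool_t pg, const int64_t *src, int64_t *dst, svint64_t offsets) {
  // SVLDNT1_GATHER_64B_OFFSETS_S: scalar base plus signed vector byte offsets.
  svint64_t v = svldnt1_gather_s64offset_s64(pg, src, offsets);
  // SVSTNT1_SCATTER_64B_OFFSETS_S: the same addressing mode for the store.
  svstnt1_scatter_s64offset_s64(pg, dst, offsets, v);
}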
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Polynomial arithmetic
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVEORBT : SInst<"sveorbt[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt">;
+def SVEORBT_N : SInst<"sveorbt[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt">;
+def SVEORTB : SInst<"sveortb[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb">;
+def SVEORTB_N : SInst<"sveortb[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb">;
+def SVPMUL : SInst<"svpmul[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_pmul">;
+def SVPMUL_N : SInst<"svpmul[_n_{d}]", "dda", "Uc", MergeNone, "aarch64_sve_pmul">;
+def SVPMULLB : SInst<"svpmullb[_{d}]", "dhh", "UsUl", MergeNone>;
+def SVPMULLB_N : SInst<"svpmullb[_n_{d}]", "dhR", "UsUl", MergeNone>;
+def SVPMULLB_PAIR : SInst<"svpmullb_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullb_pair">;
+def SVPMULLB_PAIR_N : SInst<"svpmullb_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullb_pair">;
+def SVPMULLT : SInst<"svpmullt[_{d}]", "dhh", "UsUl", MergeNone>;
+def SVPMULLT_N : SInst<"svpmullt[_n_{d}]", "dhR", "UsUl", MergeNone>;
+def SVPMULLT_PAIR : SInst<"svpmullt_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
+def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Complex integer dot product
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVCDOT : SInst<"svcdot[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_cdot", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVCDOT_LANE : SInst<"svcdot_lane[_{d}]", "ddqqii", "il", MergeNone, "aarch64_sve_cdot_lane", [], [ImmCheck<4, ImmCheckComplexRotAll90>,
+ ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Floating-point widening multiply-accumulate
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVMLALB_F : SInst<"svmlalb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalb">;
+def SVMLALB_F_N : SInst<"svmlalb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalb">;
+def SVMLALB_F_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_F : SInst<"svmlalt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalt">;
+def SVMLALT_F_N : SInst<"svmlalt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalt">;
+def SVMLALT_F_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_F : SInst<"svmlslb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslb">;
+def SVMLSLB_F_N : SInst<"svmlslb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslb">;
+def SVMLSLB_F_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_F : SInst<"svmlslt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslt">;
+def SVMLSLT_F_N : SInst<"svmlslt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslt">;
+def SVMLSLT_F_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Floating-point integer binary logarithm
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVLOGB_M : SInst<"svlogb[_{d}]", "xxPd", "hfd", MergeOp1, "aarch64_sve_flogb">;
+def SVLOGB_X : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeAnyExp, "aarch64_sve_flogb">;
+def SVLOGB_Z : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeZeroExp, "aarch64_sve_flogb">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Vector Histogram count
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVHISTCNT : SInst<"svhistcnt[_{d}]_z", "uPdd", "ilUiUl", MergeNone, "aarch64_sve_histcnt">;
+def SVHISTSEG : SInst<"svhistseg[_{d}]", "udd", "cUc", MergeNone, "aarch64_sve_histseg">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Character match
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVMATCH : SInst<"svmatch[_{d}]", "PPdd", "csUcUs", MergeNone, "aarch64_sve_match">;
+def SVNMATCH : SInst<"svnmatch[_{d}]", "PPdd", "csUcUs", MergeNone, "aarch64_sve_nmatch">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Contiguous conflict detection
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILERW_B : SInst<"svwhilerw[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilerw_b", [IsOverloadWhileRW]>;
+def SVWHILERW_H : SInst<"svwhilerw[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW]>;
+def SVWHILERW_S : SInst<"svwhilerw[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilerw_s", [IsOverloadWhileRW]>;
+def SVWHILERW_D : SInst<"svwhilerw[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilerw_d", [IsOverloadWhileRW]>;
+
+def SVWHILEWR_B : SInst<"svwhilewr[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilewr_b", [IsOverloadWhileRW]>;
+def SVWHILEWR_H : SInst<"svwhilewr[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW]>;
+def SVWHILEWR_S : SInst<"svwhilewr[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilewr_s", [IsOverloadWhileRW]>;
+def SVWHILEWR_D : SInst<"svwhilewr[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilewr_d", [IsOverloadWhileRW]>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)" in {
+def SVWHILERW_H_BF16 : SInst<"svwhilerw[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW]>;
+def SVWHILEWR_H_BF16 : SInst<"svwhilewr[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW]>;
+}
+
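A minimal sketch (not part of the patch) of the conflict-detection predicates above, assuming the ACLE spelling svwhilerw_f32; safe_lanes is a placeholder name:

svbool_t safe_lanes(const float *src, const float *dst) {
  // SVWHILERW_S: all-true when the pointers are far enough apart, otherwise
  // only the lanes that can be processed before a read-after-write hazard.
  return svwhilerw_f32(src, dst);
}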
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Extended table lookup/permute
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVTBL2 : SInst<"svtbl2[_{d}]", "d2u", "csilUcUsUiUlhfd", MergeNone>;
+def SVTBX : SInst<"svtbx[_{d}]", "dddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbx">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVTBL2_BF16 : SInst<"svtbl2[_{d}]", "d2u", "b", MergeNone>;
+def SVTBX_BF16 : SInst<"svtbx[_{d}]", "dddu", "b", MergeNone, "aarch64_sve_tbx">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Optional
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2_AES)" in {
+def SVAESD : SInst<"svaesd[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aesd", [IsOverloadNone]>;
+def SVAESIMC : SInst<"svaesimc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesimc", [IsOverloadNone]>;
+def SVAESE : SInst<"svaese[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aese", [IsOverloadNone]>;
+def SVAESMC : SInst<"svaesmc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesmc", [IsOverloadNone]>;
+
+def SVPMULLB_PAIR_U64 : SInst<"svpmullb_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullb_pair">;
+def SVPMULLB_PAIR_N_U64 : SInst<"svpmullb_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullb_pair">;
+
+def SVPMULLT_PAIR_U64 : SInst<"svpmullt_pair[_{d}]", "ddd", "Ul", MergeNone, "aarch64_sve_pmullt_pair">;
+def SVPMULLT_PAIR_N_U64 : SInst<"svpmullt_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullt_pair">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2_SHA3)" in {
+def SVRAX1 : SInst<"svrax1[_{d}]", "ddd", "lUl", MergeNone, "aarch64_sve_rax1", [IsOverloadNone]>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2_SM4)" in {
+def SVSM4E : SInst<"svsm4e[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4e", [IsOverloadNone]>;
+def SVSM4EKEY : SInst<"svsm4ekey[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4ekey", [IsOverloadNone]>;
+}
+
+let ArchGuard = "__ARM_FEATURE_SVE2_BITPERM" in {
+def SVBDEP : SInst<"svbdep[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
+def SVBDEP_N : SInst<"svbdep[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
+def SVBEXT : SInst<"svbext[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bext_x">;
+def SVBEXT_N : SInst<"svbext[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bext_x">;
+def SVBGRP : SInst<"svbgrp[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
+def SVBGRP_N : SInst<"svbgrp[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
+}
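
The SInst records above are what the arm_sve.h generator turns into ACLE intrinsics. Purely as an illustrative sketch (not part of this patch), and assuming a toolchain configured for SVE2 (e.g. -march=armv8-a+sve2), a couple of the records surface in user code like this:

#include <arm_sve.h>
#include <cstdint>

#if defined(__ARM_FEATURE_SVE2)
// SVHISTCNT: per-lane histogram count, zeroing inactive lanes.
// SVWHILERW_S: predicate covering lanes before the first read/write hazard.
svbool_t sve2_demo(svbool_t pg, svint32_t a, svint32_t b,
                   const int32_t *src, const int32_t *dst) {
  svuint32_t counts = svhistcnt_s32_z(pg, a, b);
  (void)counts;
  return svwhilerw_s32(src, dst);
}
#endif
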
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
index 2a41ab9eece7..eaf5a3d5aad7 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -88,6 +88,7 @@ private:
Kind TheKind;
bool PaddingInReg : 1;
bool InAllocaSRet : 1; // isInAlloca()
+ bool InAllocaIndirect : 1; // isInAlloca()
bool IndirectByVal : 1; // isIndirect()
bool IndirectRealign : 1; // isIndirect()
bool SRetAfterThis : 1; // isIndirect()
@@ -110,8 +111,8 @@ private:
public:
ABIArgInfo(Kind K = Direct)
- : TypeData(nullptr), PaddingType(nullptr), DirectOffset(0),
- TheKind(K), PaddingInReg(false), InAllocaSRet(false),
+ : TypeData(nullptr), PaddingType(nullptr), DirectOffset(0), TheKind(K),
+ PaddingInReg(false), InAllocaSRet(false), InAllocaIndirect(false),
IndirectByVal(false), IndirectRealign(false), SRetAfterThis(false),
InReg(false), CanBeFlattened(false), SignExt(false) {}
@@ -185,9 +186,10 @@ public:
AI.setInReg(true);
return AI;
}
- static ABIArgInfo getInAlloca(unsigned FieldIndex) {
+ static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect = false) {
auto AI = ABIArgInfo(InAlloca);
AI.setInAllocaFieldIndex(FieldIndex);
+ AI.setInAllocaIndirect(Indirect);
return AI;
}
static ABIArgInfo getExpand() {
@@ -380,6 +382,15 @@ public:
AllocaFieldIndex = FieldIndex;
}
+ unsigned getInAllocaIndirect() const {
+ assert(isInAlloca() && "Invalid kind!");
+ return InAllocaIndirect;
+ }
+ void setInAllocaIndirect(bool Indirect) {
+ assert(isInAlloca() && "Invalid kind!");
+ InAllocaIndirect = Indirect;
+ }
+
/// Return true if this field of an inalloca struct should be returned
/// to implement a struct return calling convention.
bool getInAllocaSRet() const {
@@ -497,6 +508,9 @@ class CGFunctionInfo final
/// Whether this is a chain call.
unsigned ChainCall : 1;
+ /// Whether this function is a CMSE nonsecure call
+ unsigned CmseNSCall : 1;
+
/// Whether this function is noreturn.
unsigned NoReturn : 1;
@@ -587,6 +601,8 @@ public:
bool isChainCall() const { return ChainCall; }
+ bool isCmseNSCall() const { return CmseNSCall; }
+
bool isNoReturn() const { return NoReturn; }
/// In ARC, whether this function retains its return value. This
@@ -624,7 +640,8 @@ public:
FunctionType::ExtInfo getExtInfo() const {
return FunctionType::ExtInfo(isNoReturn(), getHasRegParm(), getRegParm(),
getASTCallingConvention(), isReturnsRetained(),
- isNoCallerSavedRegs(), isNoCfCheck());
+ isNoCallerSavedRegs(), isNoCfCheck(),
+ isCmseNSCall());
}
CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
@@ -665,6 +682,7 @@ public:
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
ID.AddBoolean(NoCfCheck);
+ ID.AddBoolean(CmseNSCall);
ID.AddInteger(Required.getOpaqueData());
ID.AddBoolean(HasExtParameterInfos);
if (HasExtParameterInfos) {
@@ -692,6 +710,7 @@ public:
ID.AddBoolean(info.getHasRegParm());
ID.AddInteger(info.getRegParm());
ID.AddBoolean(info.getNoCfCheck());
+ ID.AddBoolean(info.getCmseNSCall());
ID.AddInteger(required.getOpaqueData());
ID.AddBoolean(!paramInfos.empty());
if (!paramInfos.empty()) {
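
As a hedged sketch of the new knob (not code taken from this change): ABI-lowering code can now mark an inalloca slot as holding a pointer to the argument rather than the argument itself, roughly as below. The helper name is hypothetical and the surrounding CodeGen context is assumed.

#include "clang/CodeGen/CGFunctionInfo.h"
#include <cassert>

// Hypothetical helper: build an ABIArgInfo whose inalloca slot is indirect.
static clang::CodeGen::ABIArgInfo makeIndirectInAlloca(unsigned FieldIndex) {
  using clang::CodeGen::ABIArgInfo;
  ABIArgInfo AI = ABIArgInfo::getInAlloca(FieldIndex, /*Indirect=*/true);
  assert(AI.isInAlloca() && AI.getInAllocaIndirect());
  return AI;
}
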
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
index 31f0cea57232..3c745fadbe78 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
@@ -25,18 +25,24 @@
#include "clang/AST/CanonicalType.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/ABI.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/IR/BasicBlock.h"
namespace llvm {
- class DataLayout;
- class Module;
- class Function;
- class FunctionType;
- class Type;
+class AttrBuilder;
+class Constant;
+class DataLayout;
+class Module;
+class Function;
+class FunctionType;
+class Type;
}
namespace clang {
class ASTContext;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
class CXXRecordDecl;
class CXXMethodDecl;
class CodeGenOptions;
@@ -44,12 +50,23 @@ class CoverageSourceInfo;
class DiagnosticsEngine;
class HeaderSearchOptions;
class ObjCMethodDecl;
+class ObjCProtocolDecl;
class PreprocessorOptions;
namespace CodeGen {
class CGFunctionInfo;
class CodeGenModule;
+/// Additional implicit arguments to add to a constructor argument list.
+struct ImplicitCXXConstructorArgs {
+ /// Implicit arguments to add before the explicit arguments, but after the
+ /// `*this` argument (which always comes first).
+ SmallVector<llvm::Value *, 1> Prefix;
+
+ /// Implicit arguments to add after the explicit arguments.
+ SmallVector<llvm::Value *, 1> Suffix;
+};
+
const CGFunctionInfo &arrangeObjCMessageSendSignature(CodeGenModule &CGM,
const ObjCMethodDecl *MD,
QualType receiverType);
@@ -71,6 +88,17 @@ const CGFunctionInfo &arrangeFreeFunctionCall(CodeGenModule &CGM,
FunctionType::ExtInfo info,
RequiredArgs args);
+/// Returns the implicit arguments to add to a complete, non-delegating C++
+/// constructor call.
+ImplicitCXXConstructorArgs
+getImplicitCXXConstructorArgs(CodeGenModule &CGM, const CXXConstructorDecl *D);
+
+llvm::Value *
+getCXXDestructorImplicitParam(CodeGenModule &CGM, llvm::BasicBlock *InsertBlock,
+ llvm::BasicBlock::iterator InsertPoint,
+ const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating);
+
/// Returns null if the function type is incomplete and can't be lowered.
llvm::FunctionType *convertFreeFunctionType(CodeGenModule &CGM,
const FunctionDecl *FD);
@@ -84,6 +112,25 @@ llvm::Type *convertTypeForMemory(CodeGenModule &CGM, QualType T);
unsigned getLLVMFieldNumber(CodeGenModule &CGM,
const RecordDecl *RD, const FieldDecl *FD);
+/// Given the language and code-generation options that Clang was configured
+/// with, set the default LLVM IR attributes for a function definition.
+/// The attributes set here are mostly global target-configuration and
+/// pipeline-configuration options like the target CPU, variant stack
+/// rules, whether to optimize for size, and so on. This is useful for
+/// frontends (such as Swift) that generally intend to interoperate with
+/// C code and rely on Clang's target configuration logic.
+///
+/// As a general rule, this function assumes that meaningful attributes
+/// haven't already been added to the builder. It won't intentionally
+/// displace any existing attributes, but it also won't check to avoid
+/// overwriting them. Callers should generally apply customizations after
+/// making this call.
+///
+/// This function assumes that the caller is not defining a function that
+/// requires special no-builtin treatment.
+void addDefaultFunctionDefinitionAttributes(CodeGenModule &CGM,
+ llvm::AttrBuilder &attrs);
+
/// Returns the default constructor for a C struct with non-trivially copyable
/// fields, generating it if necessary. The returned function uses the `cdecl`
/// calling convention, returns void, and takes a single argument that is a
@@ -137,6 +184,13 @@ llvm::Function *getNonTrivialCStructDestructor(CodeGenModule &CGM,
CharUnits DstAlignment,
bool IsVolatile, QualType QT);
+/// Get a pointer to a protocol object for the given declaration, emitting it if
+/// it hasn't already been emitted in this translation unit. Note that the ABI
+/// for emitting a protocol reference in code (e.g. for a protocol expression)
+/// in most runtimes is not as simple as just materializing a pointer to this
+/// object.
+llvm::Constant *emitObjCProtocolObject(CodeGenModule &CGM,
+ const ObjCProtocolDecl *p);
} // end namespace CodeGen
} // end namespace clang
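
A hedged usage sketch for the new addDefaultFunctionDefinitionAttributes() entry point: the wrapper function below is illustrative, and the attribute-application calls are the llvm::Function/AttrBuilder API contemporary with this snapshot, not something introduced by the patch.

#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"

// Apply Clang's default definition attributes (target CPU, stack protector
// rules, size optimization, ...) to a function emitted by another frontend.
void applyClangDefaultAttrs(clang::CodeGen::CodeGenModule &CGM,
                            llvm::Function &Fn) {
  llvm::AttrBuilder Attrs; // start from an empty builder, customize afterwards
  clang::CodeGen::addDefaultFunctionDefinitionAttributes(CGM, Attrs);
  Fn.addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
}
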
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
index fd07e91ba6ae..88e357a0c29c 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
@@ -226,6 +226,13 @@ public:
add(getRelativeOffset(type, target));
}
+ /// Same as addRelativeOffset(), but instead relative to an element in this
+ /// aggregate, identified by its index.
+ void addRelativeOffsetToPosition(llvm::IntegerType *type,
+ llvm::Constant *target, size_t position) {
+ add(getRelativeOffsetToPosition(type, target, position));
+ }
+
/// Add a relative offset to the target address, plus a small
/// constant offset. This is primarily useful when the relative
/// offset is known to be a multiple of (say) four and therefore
@@ -298,10 +305,18 @@ public:
/// position to be filled. This is computed with an indexed
/// getelementptr rather than by computing offsets.
///
- /// The returned pointer will have type T*, where T is the given
- /// position.
+ /// The returned pointer will have type T*, where T is the given type. This
+ /// type can differ from the type of the actual element.
llvm::Constant *getAddrOfCurrentPosition(llvm::Type *type);
+ /// Produce an address which points to a position in the aggregate being
+ /// constructed. This is computed with an indexed getelementptr rather than by
+ /// computing offsets.
+ ///
+ /// The returned pointer will have type T*, where T is the given type. This
+ /// type can differ from the type of the actual element.
+ llvm::Constant *getAddrOfPosition(llvm::Type *type, size_t position);
+
llvm::ArrayRef<llvm::Constant*> getGEPIndicesToCurrentPosition(
llvm::SmallVectorImpl<llvm::Constant*> &indices) {
getGEPIndicesTo(indices, Builder.Buffer.size());
@@ -319,6 +334,10 @@ private:
llvm::Constant *getRelativeOffset(llvm::IntegerType *offsetType,
llvm::Constant *target);
+ llvm::Constant *getRelativeOffsetToPosition(llvm::IntegerType *offsetType,
+ llvm::Constant *target,
+ size_t position);
+
CharUnits getOffsetFromGlobalTo(size_t index) const;
};
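
A minimal sketch of how the new addRelativeOffsetToPosition() helper might be used; the descriptor layout, names, and Target constant are illustrative only, and CGM is the usual CodeGenModule passed in by the caller.

#include "clang/AST/CharUnits.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"

// Emit { i32 version, i32 flags, i32 offset-of-Target-relative-to-field-0 }.
llvm::GlobalVariable *emitDescriptor(clang::CodeGen::CodeGenModule &CGM,
                                     llvm::IntegerType *Int32Ty,
                                     llvm::Constant *Target) {
  clang::CodeGen::ConstantInitBuilder Builder(CGM);
  auto Fields = Builder.beginStruct();
  Fields.addInt(Int32Ty, 1); // position 0: version
  Fields.addInt(Int32Ty, 0); // position 1: flags
  // position 2: offset of Target measured from field 0, not from this field.
  Fields.addRelativeOffsetToPosition(Int32Ty, Target, /*position=*/0);
  return Fields.finishAndCreateGlobal(
      "descriptor", clang::CharUnits::fromQuantity(4), /*constant=*/true);
}
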
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitFuture.h b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitFuture.h
index b08f52872290..452ba36d2087 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitFuture.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitFuture.h
@@ -35,7 +35,7 @@ struct PointerLikeTypeTraits< ::clang::CodeGen::ConstantInitBuilderBase*> {
static inline void *getAsVoidPointer(T p) { return p; }
static inline T getFromVoidPointer(void *p) {return static_cast<T>(p);}
- enum { NumLowBitsAvailable = 2 };
+ static constexpr int NumLowBitsAvailable = 2;
};
}
@@ -79,10 +79,8 @@ public:
result.Data = PairTy::getFromOpaqueValue(value);
return result;
}
- enum {
- NumLowBitsAvailable =
- llvm::PointerLikeTypeTraits<PairTy>::NumLowBitsAvailable
- };
+ static constexpr int NumLowBitsAvailable =
+ llvm::PointerLikeTypeTraits<PairTy>::NumLowBitsAvailable;
};
} // end namespace CodeGen
@@ -100,7 +98,7 @@ struct PointerLikeTypeTraits< ::clang::CodeGen::ConstantInitFuture> {
static inline T getFromVoidPointer(void *p) {
return T::getFromOpaqueValue(p);
}
- enum { NumLowBitsAvailable = T::NumLowBitsAvailable };
+ static constexpr int NumLowBitsAvailable = T::NumLowBitsAvailable;
};
} // end namespace llvm
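
The hunks above switch NumLowBitsAvailable from an anonymous enum to `static constexpr int`, giving it a real integer type in constant expressions. A small self-contained sketch of the same idiom for a hypothetical type (MyNode is made up for illustration):

#include "llvm/Support/PointerLikeTypeTraits.h"

struct MyNode { int Data; };

namespace llvm {
template <> struct PointerLikeTypeTraits<MyNode *> {
  static void *getAsVoidPointer(MyNode *P) { return P; }
  static MyNode *getFromVoidPointer(void *P) {
    return static_cast<MyNode *>(P);
  }
  // MyNode pointers are at least 4-byte aligned, so two low bits are free.
  static constexpr int NumLowBitsAvailable = 2;
};
} // namespace llvm
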
diff --git a/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h b/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
index 4d2b7109c62a..027c6f16430b 100644
--- a/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
+++ b/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/Path.h"
namespace clang {
class CompilerInstance;
@@ -47,7 +48,12 @@ enum class index_error_code {
triple_mismatch,
lang_mismatch,
lang_dialect_mismatch,
- load_threshold_reached
+ load_threshold_reached,
+ invocation_list_ambiguous,
+ invocation_list_file_not_found,
+ invocation_list_empty,
+ invocation_list_wrong_format,
+ invocation_list_lookup_unsuccessful
};
class IndexError : public llvm::ErrorInfo<IndexError> {
@@ -78,7 +84,8 @@ private:
};
/// This function parses an index file that determines which
-/// translation unit contains which definition.
+/// translation unit contains which definition. The IndexPath is not prefixed
+/// with CTUDir, so an absolute path is expected for consistent results.
///
/// The index file format is the following:
/// each line consists of an USR and a filepath separated by a space.
@@ -86,17 +93,27 @@ private:
/// \return Returns a map where the USR is the key and the filepath is the value
/// or an error.
llvm::Expected<llvm::StringMap<std::string>>
-parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir);
+parseCrossTUIndex(StringRef IndexPath);
std::string createCrossTUIndexString(const llvm::StringMap<std::string> &Index);
+using InvocationListTy = llvm::StringMap<llvm::SmallVector<std::string, 32>>;
+/// Parse the YAML formatted invocation list file content \p FileContent.
+/// The format is expected to be a mapping from from absolute source file
+/// paths in the filesystem to a list of command-line parts, which
+/// constitute the invocation needed to compile that file. That invocation
+/// will be used to produce the AST of the TU.
+llvm::Expected<InvocationListTy> parseInvocationList(
+ StringRef FileContent,
+ llvm::sys::path::Style PathStyle = llvm::sys::path::Style::posix);
+
// Returns true if the variable or any field of a record variable is const.
bool containsConst(const VarDecl *VD, const ASTContext &ACtx);
/// This class is used for tools that requires cross translation
/// unit capability.
///
-/// This class can load definitions from external AST files.
+/// This class can load definitions from external AST sources.
/// The loaded definition will be merged back to the original AST using the
/// AST Importer.
/// In order to use this class, an index file is required that describes
@@ -116,7 +133,7 @@ public:
/// the current translation unit. A function definition with the same
/// declaration will be looked up in the index file which should be in the
/// \p CrossTUDir directory, called \p IndexName. In case the declaration is
- /// found in the index the corresponding AST file will be loaded and the
+ /// found in the index the corresponding AST will be loaded and the
/// definition will be merged into the original AST using the AST Importer.
///
/// \return The declaration with the definition will be returned.
@@ -136,7 +153,7 @@ public:
/// A definition with the same declaration will be looked up in the
/// index file which should be in the \p CrossTUDir directory, called
/// \p IndexName. In case the declaration is found in the index the
- /// corresponding AST file will be loaded. If the number of TUs imported
+ /// corresponding AST will be loaded. If the number of TUs imported
/// reaches \p CTULoadTreshold, no loading is performed.
///
/// \return Returns a pointer to the ASTUnit that contains the definition of
@@ -209,14 +226,43 @@ private:
/// imported the FileID.
ImportedFileIDMap ImportedFileIDs;
- /// Functor for loading ASTUnits from AST-dump files.
- class ASTFileLoader {
+ using LoadResultTy = llvm::Expected<std::unique_ptr<ASTUnit>>;
+
+ /// Loads ASTUnits from AST-dumps or source-files.
+ class ASTLoader {
public:
- ASTFileLoader(const CompilerInstance &CI);
- std::unique_ptr<ASTUnit> operator()(StringRef ASTFilePath);
+ ASTLoader(CompilerInstance &CI, StringRef CTUDir,
+ StringRef InvocationListFilePath);
+
+ /// Load the ASTUnit by its identifier found in the index file. If the
+    /// identifier is suffixed with '.ast' it is considered a dump. Otherwise
+    /// it is treated as a source file and parsed on demand. Relative paths are
+ /// prefixed with CTUDir.
+ LoadResultTy load(StringRef Identifier);
+
+ /// Lazily initialize the invocation list information, which is needed for
+ /// on-demand parsing.
+ llvm::Error lazyInitInvocationList();
private:
- const CompilerInstance &CI;
+ /// The style used for storage and lookup of filesystem paths.
+ /// Defaults to posix.
+ const llvm::sys::path::Style PathStyle = llvm::sys::path::Style::posix;
+
+ /// Loads an AST from a pch-dump.
+ LoadResultTy loadFromDump(StringRef Identifier);
+ /// Loads an AST from a source-file.
+ LoadResultTy loadFromSource(StringRef Identifier);
+
+ CompilerInstance &CI;
+ StringRef CTUDir;
+ /// The path to the file containing the invocation list, which is in YAML
+ /// format, and contains a mapping from source files to compiler invocations
+ /// that produce the AST used for analysis.
+ StringRef InvocationListFilePath;
+  /// In case of on-demand parsing, the invocations for parsing the source
+  /// files are stored here.
+ llvm::Optional<InvocationListTy> InvocationList;
};
/// Maintain number of AST loads and check for reaching the load limit.
@@ -242,7 +288,7 @@ private:
/// are the concerns of ASTUnitStorage class.
class ASTUnitStorage {
public:
- ASTUnitStorage(const CompilerInstance &CI);
+ ASTUnitStorage(CompilerInstance &CI);
/// Loads an ASTUnit for a function.
///
/// \param FunctionName USR name of the function.
@@ -287,18 +333,17 @@ private:
using IndexMapTy = BaseMapTy<std::string>;
IndexMapTy NameFileMap;
- ASTFileLoader FileAccessor;
+ /// Loads the AST based on the identifier found in the index.
+ ASTLoader Loader;
- /// Limit the number of loaded ASTs. Used to limit the memory usage of the
- /// CrossTranslationUnitContext.
- /// The ASTUnitStorage has the knowledge about if the AST to load is
- /// actually loaded or returned from cache. This information is needed to
- /// maintain the counter.
+  /// Limit the number of loaded ASTs. It is used to limit the memory usage
+  /// of the CrossTranslationUnitContext. The ASTUnitStorage knows whether
+  /// the AST to be loaded is actually loaded or returned from the cache.
+  /// This information is needed to maintain the counter.
ASTLoadGuard LoadGuard;
};
ASTUnitStorage ASTStorage;
-
};
} // namespace cross_tu
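
A hedged example of the on-demand-parsing invocation list consumed by the new parseInvocationList(): a YAML mapping from absolute source paths to the command line used to compile each file. The path and flags below are made up for illustration.

#include "clang/CrossTU/CrossTranslationUnit.h"
#include "llvm/ADT/StringRef.h"

llvm::Expected<clang::cross_tu::InvocationListTy> parseExampleInvocationList() {
  // One entry: compile /project/src/foo.cpp with the listed command parts.
  llvm::StringRef Content = "/project/src/foo.cpp:\n"
                            "  - clang++\n"
                            "  - -c\n"
                            "  - /project/src/foo.cpp\n";
  return clang::cross_tu::parseInvocationList(Content);
}
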
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Action.h b/contrib/llvm-project/clang/include/clang/Driver/Action.h
index 8ccbb6c2bbfa..27c95c6f89d4 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Action.h
@@ -73,9 +73,10 @@ public:
OffloadBundlingJobClass,
OffloadUnbundlingJobClass,
OffloadWrapperJobClass,
+ StaticLibJobClass,
JobClassFirst = PreprocessJobClass,
- JobClassLast = OffloadWrapperJobClass
+ JobClassLast = StaticLibJobClass
};
// The offloading kind determines if this action is binded to a particular
@@ -637,6 +638,17 @@ public:
}
};
+class StaticLibJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ StaticLibJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == StaticLibJobClass;
+ }
+};
+
} // namespace driver
} // namespace clang
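
For completeness, a minimal sketch of how driver code might recognize the new action kind through LLVM-style RTTI, which is what the classof() override above enables; the Action pointer is assumed to come from an existing action graph.

#include "clang/Driver/Action.h"
#include "llvm/Support/Casting.h"

bool producesStaticLib(const clang::driver::Action *A) {
  return llvm::isa<clang::driver::StaticLibJobAction>(A);
}
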
diff --git a/contrib/llvm-project/clang/include/clang/Driver/CC1Options.td b/contrib/llvm-project/clang/include/clang/Driver/CC1Options.td
deleted file mode 100644
index d1f5ec5a3d4c..000000000000
--- a/contrib/llvm-project/clang/include/clang/Driver/CC1Options.td
+++ /dev/null
@@ -1,931 +0,0 @@
-//===--- CC1Options.td - Options for clang -cc1 ---------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the options accepted by clang -cc1 and clang -cc1as.
-//
-//===----------------------------------------------------------------------===//
-
-let Flags = [CC1Option, NoDriverOption] in {
-
-//===----------------------------------------------------------------------===//
-// Target Options
-//===----------------------------------------------------------------------===//
-
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-
-def target_cpu : Separate<["-"], "target-cpu">,
- HelpText<"Target a specific cpu type">;
-def target_feature : Separate<["-"], "target-feature">,
- HelpText<"Target specific attributes">;
-def triple : Separate<["-"], "triple">,
- HelpText<"Specify target triple (e.g. i686-apple-darwin9)">;
-def target_abi : Separate<["-"], "target-abi">,
- HelpText<"Target a particular ABI type">;
-def target_sdk_version_EQ : Joined<["-"], "target-sdk-version=">,
- HelpText<"The version of target SDK used for compilation">;
-
-}
-
-def target_linker_version : Separate<["-"], "target-linker-version">,
- HelpText<"Target linker version">;
-def triple_EQ : Joined<["-"], "triple=">, Alias<triple>;
-def mfpmath : Separate<["-"], "mfpmath">,
- HelpText<"Which unit to use for fp math">;
-
-def fpadding_on_unsigned_fixed_point : Flag<["-"], "fpadding-on-unsigned-fixed-point">,
- HelpText<"Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">;
-def fno_padding_on_unsigned_fixed_point : Flag<["-"], "fno-padding-on-unsigned-fixed-point">;
-
-//===----------------------------------------------------------------------===//
-// Analyzer Options
-//===----------------------------------------------------------------------===//
-
-def analysis_UnoptimizedCFG : Flag<["-"], "unoptimized-cfg">,
- HelpText<"Generate unoptimized CFGs for all analyses">;
-def analysis_CFGAddImplicitDtors : Flag<["-"], "cfg-add-implicit-dtors">,
- HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
-
-def analyzer_store : Separate<["-"], "analyzer-store">,
- HelpText<"Source Code Analysis - Abstract Memory Store Models">;
-def analyzer_store_EQ : Joined<["-"], "analyzer-store=">, Alias<analyzer_store>;
-
-def analyzer_constraints : Separate<["-"], "analyzer-constraints">,
- HelpText<"Source Code Analysis - Symbolic Constraint Engines">;
-def analyzer_constraints_EQ : Joined<["-"], "analyzer-constraints=">,
- Alias<analyzer_constraints>;
-
-def analyzer_output : Separate<["-"], "analyzer-output">,
- HelpText<"Source Code Analysis - Output Options">;
-def analyzer_output_EQ : Joined<["-"], "analyzer-output=">,
- Alias<analyzer_output>;
-
-def analyzer_purge : Separate<["-"], "analyzer-purge">,
- HelpText<"Source Code Analysis - Dead Symbol Removal Frequency">;
-def analyzer_purge_EQ : Joined<["-"], "analyzer-purge=">, Alias<analyzer_purge>;
-
-def analyzer_opt_analyze_headers : Flag<["-"], "analyzer-opt-analyze-headers">,
- HelpText<"Force the static analyzer to analyze functions defined in header files">;
-def analyzer_opt_analyze_nested_blocks : Flag<["-"], "analyzer-opt-analyze-nested-blocks">,
- HelpText<"Analyze the definitions of blocks in addition to functions">;
-def analyzer_display_progress : Flag<["-"], "analyzer-display-progress">,
- HelpText<"Emit verbose output about the analyzer's progress">;
-def analyze_function : Separate<["-"], "analyze-function">,
- HelpText<"Run analysis on specific function (for C++ include parameters in name)">;
-def analyze_function_EQ : Joined<["-"], "analyze-function=">, Alias<analyze_function>;
-def trim_egraph : Flag<["-"], "trim-egraph">,
- HelpText<"Only show error-related paths in the analysis graph">;
-def analyzer_viz_egraph_graphviz : Flag<["-"], "analyzer-viz-egraph-graphviz">,
- HelpText<"Display exploded graph using GraphViz">;
-def analyzer_dump_egraph : Separate<["-"], "analyzer-dump-egraph">,
- HelpText<"Dump exploded graph to the specified file">;
-def analyzer_dump_egraph_EQ : Joined<["-"], "analyzer-dump-egraph=">, Alias<analyzer_dump_egraph>;
-
-def analyzer_inline_max_stack_depth : Separate<["-"], "analyzer-inline-max-stack-depth">,
- HelpText<"Bound on stack depth while inlining (4 by default)">;
-def analyzer_inline_max_stack_depth_EQ : Joined<["-"], "analyzer-inline-max-stack-depth=">,
- Alias<analyzer_inline_max_stack_depth>;
-
-def analyzer_inlining_mode : Separate<["-"], "analyzer-inlining-mode">,
- HelpText<"Specify the function selection heuristic used during inlining">;
-def analyzer_inlining_mode_EQ : Joined<["-"], "analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
-
-def analyzer_disable_retry_exhausted : Flag<["-"], "analyzer-disable-retry-exhausted">,
- HelpText<"Do not re-analyze paths leading to exhausted nodes with a different strategy (may decrease code coverage)">;
-
-def analyzer_max_loop : Separate<["-"], "analyzer-max-loop">,
- HelpText<"The maximum number of times the analyzer will go through a loop">;
-def analyzer_stats : Flag<["-"], "analyzer-stats">,
- HelpText<"Print internal analyzer statistics.">;
-
-def analyzer_checker : Separate<["-"], "analyzer-checker">,
- HelpText<"Choose analyzer checkers to enable">,
- ValuesCode<[{
- const char *Values =
- #define GET_CHECKERS
- #define CHECKER(FULLNAME, CLASS, HT, DOC_URI, IS_HIDDEN) FULLNAME ","
- #include "clang/StaticAnalyzer/Checkers/Checkers.inc"
- #undef GET_CHECKERS
- #define GET_PACKAGES
- #define PACKAGE(FULLNAME) FULLNAME ","
- #include "clang/StaticAnalyzer/Checkers/Checkers.inc"
- #undef GET_PACKAGES
- ;
- }]>;
-def analyzer_checker_EQ : Joined<["-"], "analyzer-checker=">,
- Alias<analyzer_checker>;
-
-def analyzer_disable_checker : Separate<["-"], "analyzer-disable-checker">,
- HelpText<"Choose analyzer checkers to disable">;
-def analyzer_disable_checker_EQ : Joined<["-"], "analyzer-disable-checker=">,
- Alias<analyzer_disable_checker>;
-
-def analyzer_disable_all_checks : Flag<["-"], "analyzer-disable-all-checks">,
- HelpText<"Disable all static analyzer checks">;
-
-def analyzer_checker_help : Flag<["-"], "analyzer-checker-help">,
- HelpText<"Display the list of analyzer checkers that are available">;
-
-def analyzer_checker_help_alpha : Flag<["-"], "analyzer-checker-help-alpha">,
- HelpText<"Display the list of in development analyzer checkers. These "
- "are NOT considered safe, they are unstable and will emit incorrect "
- "reports. Enable ONLY FOR DEVELOPMENT purposes">;
-
-def analyzer_checker_help_developer : Flag<["-"], "analyzer-checker-help-developer">,
- HelpText<"Display the list of developer-only checkers such as modeling "
- "and debug checkers">;
-
-def analyzer_config_help : Flag<["-"], "analyzer-config-help">,
- HelpText<"Display the list of -analyzer-config options. These are meant for "
- "development purposes only!">;
-
-def analyzer_list_enabled_checkers : Flag<["-"], "analyzer-list-enabled-checkers">,
- HelpText<"Display the list of enabled analyzer checkers">;
-
-def analyzer_config : Separate<["-"], "analyzer-config">,
- HelpText<"Choose analyzer options to enable">;
-
-def analyzer_checker_option_help : Flag<["-"], "analyzer-checker-option-help">,
- HelpText<"Display the list of checker and package options">;
-
-def analyzer_checker_option_help_alpha : Flag<["-"], "analyzer-checker-option-help-alpha">,
- HelpText<"Display the list of in development checker and package options. "
- "These are NOT considered safe, they are unstable and will emit "
- "incorrect reports. Enable ONLY FOR DEVELOPMENT purposes">;
-
-def analyzer_checker_option_help_developer : Flag<["-"], "analyzer-checker-option-help-developer">,
- HelpText<"Display the list of checker and package options meant for "
- "development purposes only">;
-
-def analyzer_config_compatibility_mode : Separate<["-"], "analyzer-config-compatibility-mode">,
- HelpText<"Don't emit errors on invalid analyzer-config inputs">;
-
-def analyzer_config_compatibility_mode_EQ : Joined<["-"], "analyzer-config-compatibility-mode=">,
- Alias<analyzer_config_compatibility_mode>;
-
-def analyzer_werror : Flag<["-"], "analyzer-werror">,
- HelpText<"Emit analyzer results as errors rather than warnings">;
-
-//===----------------------------------------------------------------------===//
-// Migrator Options
-//===----------------------------------------------------------------------===//
-def migrator_no_nsalloc_error : Flag<["-"], "no-ns-alloc-error">,
- HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">;
-
-def migrator_no_finalize_removal : Flag<["-"], "no-finalize-removal">,
- HelpText<"Do not remove finalize method in gc mode">;
-
-//===----------------------------------------------------------------------===//
-// CodeGen Options
-//===----------------------------------------------------------------------===//
-
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
-def debug_info_macro : Flag<["-"], "debug-info-macro">,
- HelpText<"Emit macro debug information">;
-def default_function_attr : Separate<["-"], "default-function-attr">,
- HelpText<"Apply given attribute to all functions">;
-def dwarf_version_EQ : Joined<["-"], "dwarf-version=">;
-def debugger_tuning_EQ : Joined<["-"], "debugger-tuning=">;
-def dwarf_debug_flags : Separate<["-"], "dwarf-debug-flags">,
- HelpText<"The string to embed in the Dwarf debug flags record.">;
-def record_command_line : Separate<["-"], "record-command-line">,
- HelpText<"The string to embed in the .LLVM.command.line section.">;
-def compress_debug_sections : Flag<["-", "--"], "compress-debug-sections">,
- HelpText<"DWARF debug sections compression">;
-def compress_debug_sections_EQ : Joined<["-", "--"], "compress-debug-sections=">,
- HelpText<"DWARF debug sections compression type">;
-def mno_exec_stack : Flag<["-"], "mnoexecstack">,
- HelpText<"Mark the file as not needing an executable stack">;
-def massembler_no_warn : Flag<["-"], "massembler-no-warn">,
- HelpText<"Make assembler not emit warnings">;
-def massembler_fatal_warnings : Flag<["-"], "massembler-fatal-warnings">,
- HelpText<"Make assembler warnings fatal">;
-def mrelax_relocations : Flag<["--"], "mrelax-relocations">,
- HelpText<"Use relaxable elf relocations">;
-def msave_temp_labels : Flag<["-"], "msave-temp-labels">,
- HelpText<"Save temporary labels in the symbol table. "
- "Note this may change .s semantics and shouldn't generally be used "
- "on compiler-generated code.">;
-def mrelocation_model : Separate<["-"], "mrelocation-model">,
- HelpText<"The relocation model to use">, Values<"static,pic,ropi,rwpi,ropi-rwpi,dynamic-no-pic">;
-def fno_math_builtin : Flag<["-"], "fno-math-builtin">,
- HelpText<"Disable implicit builtin knowledge of math functions">;
-}
-
-def disable_llvm_verifier : Flag<["-"], "disable-llvm-verifier">,
- HelpText<"Don't run the LLVM IR verifier pass">;
-def disable_llvm_passes : Flag<["-"], "disable-llvm-passes">,
- HelpText<"Use together with -emit-llvm to get pristine LLVM IR from the "
- "frontend by not running any LLVM passes at all">;
-def disable_llvm_optzns : Flag<["-"], "disable-llvm-optzns">,
- Alias<disable_llvm_passes>;
-def disable_lifetimemarkers : Flag<["-"], "disable-lifetime-markers">,
- HelpText<"Disable lifetime-markers emission even when optimizations are "
- "enabled">;
-def disable_O0_optnone : Flag<["-"], "disable-O0-optnone">,
- HelpText<"Disable adding the optnone attribute to functions at O0">;
-def disable_red_zone : Flag<["-"], "disable-red-zone">,
- HelpText<"Do not emit code that uses the red zone.">;
-def dwarf_column_info : Flag<["-"], "dwarf-column-info">,
- HelpText<"Turn on column location information.">;
-def dwarf_ext_refs : Flag<["-"], "dwarf-ext-refs">,
- HelpText<"Generate debug info with external references to clang modules"
- " or precompiled headers">;
-def dwarf_explicit_import : Flag<["-"], "dwarf-explicit-import">,
- HelpText<"Generate explicit import from anonymous namespace to containing"
- " scope">;
-def debug_forward_template_params : Flag<["-"], "debug-forward-template-params">,
- HelpText<"Emit complete descriptions of template parameters in forward"
- " declarations">;
-def fforbid_guard_variables : Flag<["-"], "fforbid-guard-variables">,
- HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
-def no_implicit_float : Flag<["-"], "no-implicit-float">,
- HelpText<"Don't generate implicit floating point instructions">;
-def fdump_vtable_layouts : Flag<["-"], "fdump-vtable-layouts">,
- HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">;
-def fmerge_functions : Flag<["-"], "fmerge-functions">,
- HelpText<"Permit merging of identical functions when optimizing.">;
-def femit_coverage_notes : Flag<["-"], "femit-coverage-notes">,
- HelpText<"Emit a gcov coverage notes file when compiling.">;
-def femit_coverage_data: Flag<["-"], "femit-coverage-data">,
- HelpText<"Instrument the program to emit gcov coverage data when run.">;
-def coverage_data_file : Separate<["-"], "coverage-data-file">,
- HelpText<"Emit coverage data to this filename.">;
-def coverage_data_file_EQ : Joined<["-"], "coverage-data-file=">,
- Alias<coverage_data_file>;
-def coverage_notes_file : Separate<["-"], "coverage-notes-file">,
- HelpText<"Emit coverage notes to this filename.">;
-def coverage_notes_file_EQ : Joined<["-"], "coverage-notes-file=">,
- Alias<coverage_notes_file>;
-def coverage_cfg_checksum : Flag<["-"], "coverage-cfg-checksum">,
- HelpText<"Emit CFG checksum for functions in .gcno files.">;
-def coverage_no_function_names_in_data : Flag<["-"], "coverage-no-function-names-in-data">,
- HelpText<"Emit function names in .gcda files.">;
-def coverage_exit_block_before_body : Flag<["-"], "coverage-exit-block-before-body">,
- HelpText<"Emit the exit block before the body blocks in .gcno files.">;
-def coverage_version_EQ : Joined<["-"], "coverage-version=">,
- HelpText<"Four-byte version string for gcov files.">;
-def test_coverage : Flag<["-"], "test-coverage">,
- HelpText<"Do not generate coverage files or remove coverage changes from IR">;
-def dump_coverage_mapping : Flag<["-"], "dump-coverage-mapping">,
- HelpText<"Dump the coverage mapping records, for testing">;
-def fuse_register_sized_bitfield_access: Flag<["-"], "fuse-register-sized-bitfield-access">,
- HelpText<"Use register sized accesses to bit-fields, when possible.">;
-def relaxed_aliasing : Flag<["-"], "relaxed-aliasing">,
- HelpText<"Turn off Type Based Alias Analysis">;
-def no_struct_path_tbaa : Flag<["-"], "no-struct-path-tbaa">,
- HelpText<"Turn off struct-path aware Type Based Alias Analysis">;
-def new_struct_path_tbaa : Flag<["-"], "new-struct-path-tbaa">,
- HelpText<"Enable enhanced struct-path aware Type Based Alias Analysis">;
-def masm_verbose : Flag<["-"], "masm-verbose">,
- HelpText<"Generate verbose assembly output">;
-def mcode_model : Separate<["-"], "mcode-model">,
- HelpText<"The code model to use">, Values<"tiny,small,kernel,medium,large">;
-def mdebug_pass : Separate<["-"], "mdebug-pass">,
- HelpText<"Enable additional debug output">;
-def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
- HelpText<"Specify which frame pointers to retain (all, non-leaf, none).">, Values<"all,non-leaf,none">;
-def mdisable_tail_calls : Flag<["-"], "mdisable-tail-calls">,
- HelpText<"Disable tail call optimization, keeping the call stack accurate">;
-def menable_no_infinities : Flag<["-"], "menable-no-infs">,
- HelpText<"Allow optimization to assume there are no infinities.">;
-def menable_no_nans : Flag<["-"], "menable-no-nans">,
- HelpText<"Allow optimization to assume there are no NaNs.">;
-def menable_unsafe_fp_math : Flag<["-"], "menable-unsafe-fp-math">,
- HelpText<"Allow unsafe floating-point math optimizations which may decrease "
- "precision">;
-def mreassociate : Flag<["-"], "mreassociate">,
- HelpText<"Allow reassociation transformations for floating-point instructions">;
-def mabi_EQ_ieeelongdouble : Flag<["-"], "mabi=ieeelongdouble">,
- HelpText<"Use IEEE 754 quadruple-precision for long double">;
-def mfloat_abi : Separate<["-"], "mfloat-abi">,
- HelpText<"The float ABI to use">;
-def mtp : Separate<["-"], "mtp">,
- HelpText<"Mode for reading thread pointer">;
-def mlimit_float_precision : Separate<["-"], "mlimit-float-precision">,
- HelpText<"Limit float precision to the given value">;
-def split_stacks : Flag<["-"], "split-stacks">,
- HelpText<"Try to use a split stack if possible.">;
-def mno_zero_initialized_in_bss : Flag<["-"], "mno-zero-initialized-in-bss">,
- HelpText<"Do not put zero initialized data in the BSS">;
-def mregparm : Separate<["-"], "mregparm">,
- HelpText<"Limit the number of registers available for integer arguments">;
-def munwind_tables : Flag<["-"], "munwind-tables">,
- HelpText<"Generate unwinding tables for all functions">;
-def mconstructor_aliases : Flag<["-"], "mconstructor-aliases">,
- HelpText<"Emit complete constructors and destructors as aliases when possible">;
-def mlink_bitcode_file : Separate<["-"], "mlink-bitcode-file">,
- HelpText<"Link the given bitcode file before performing optimizations.">;
-def mlink_builtin_bitcode : Separate<["-"], "mlink-builtin-bitcode">,
- HelpText<"Link and internalize needed symbols from the given bitcode file "
- "before performing optimizations.">;
-def mlink_cuda_bitcode : Separate<["-"], "mlink-cuda-bitcode">,
- Alias<mlink_builtin_bitcode>;
-def vectorize_loops : Flag<["-"], "vectorize-loops">,
- HelpText<"Run the Loop vectorization passes">;
-def vectorize_slp : Flag<["-"], "vectorize-slp">,
- HelpText<"Run the SLP vectorization passes">;
-def dependent_lib : Joined<["--"], "dependent-lib=">,
- HelpText<"Add dependent library">;
-def linker_option : Joined<["--"], "linker-option=">,
- HelpText<"Add linker option">;
-def fsanitize_coverage_type : Joined<["-"], "fsanitize-coverage-type=">,
- HelpText<"Sanitizer coverage type">;
-def fsanitize_coverage_indirect_calls
- : Flag<["-"], "fsanitize-coverage-indirect-calls">,
- HelpText<"Enable sanitizer coverage for indirect calls">;
-def fsanitize_coverage_trace_bb
- : Flag<["-"], "fsanitize-coverage-trace-bb">,
- HelpText<"Enable basic block tracing in sanitizer coverage">;
-def fsanitize_coverage_trace_cmp
- : Flag<["-"], "fsanitize-coverage-trace-cmp">,
- HelpText<"Enable cmp instruction tracing in sanitizer coverage">;
-def fsanitize_coverage_trace_div
- : Flag<["-"], "fsanitize-coverage-trace-div">,
- HelpText<"Enable div instruction tracing in sanitizer coverage">;
-def fsanitize_coverage_trace_gep
- : Flag<["-"], "fsanitize-coverage-trace-gep">,
- HelpText<"Enable gep instruction tracing in sanitizer coverage">;
-def fsanitize_coverage_8bit_counters
- : Flag<["-"], "fsanitize-coverage-8bit-counters">,
- HelpText<"Enable frequency counters in sanitizer coverage">;
-def fsanitize_coverage_inline_8bit_counters
- : Flag<["-"], "fsanitize-coverage-inline-8bit-counters">,
- HelpText<"Enable inline 8-bit counters in sanitizer coverage">;
-def fsanitize_coverage_pc_table
- : Flag<["-"], "fsanitize-coverage-pc-table">,
- HelpText<"Create a table of coverage-instrumented PCs">;
-def fsanitize_coverage_trace_pc
- : Flag<["-"], "fsanitize-coverage-trace-pc">,
- HelpText<"Enable PC tracing in sanitizer coverage">;
-def fsanitize_coverage_trace_pc_guard
- : Flag<["-"], "fsanitize-coverage-trace-pc-guard">,
- HelpText<"Enable PC tracing with guard in sanitizer coverage">;
-def fsanitize_coverage_no_prune
- : Flag<["-"], "fsanitize-coverage-no-prune">,
- HelpText<"Disable coverage pruning (i.e. instrument all blocks/edges)">;
-def fsanitize_coverage_stack_depth
- : Flag<["-"], "fsanitize-coverage-stack-depth">,
- HelpText<"Enable max stack depth tracing">;
-def fpatchable_function_entry_offset_EQ
- : Joined<["-"], "fpatchable-function-entry-offset=">, MetaVarName<"<M>">,
- HelpText<"Generate M NOPs before function entry">;
-def fprofile_instrument_EQ : Joined<["-"], "fprofile-instrument=">,
- HelpText<"Enable PGO instrumentation. The accepted value is clang, llvm, "
- "or none">, Values<"none,clang,llvm">;
-def fprofile_instrument_path_EQ : Joined<["-"], "fprofile-instrument-path=">,
- HelpText<"Generate instrumented code to collect execution counts into "
- "<file> (overridden by LLVM_PROFILE_FILE env var)">;
-def fprofile_instrument_use_path_EQ :
- Joined<["-"], "fprofile-instrument-use-path=">,
- HelpText<"Specify the profile path in PGO use compilation">;
-def flto_visibility_public_std:
- Flag<["-"], "flto-visibility-public-std">,
- HelpText<"Use public LTO visibility for classes in std and stdext namespaces">;
-def flto_unit: Flag<["-"], "flto-unit">,
- HelpText<"Emit IR to support LTO unit features (CFI, whole program vtable opt)">;
-def fno_lto_unit: Flag<["-"], "fno-lto-unit">;
-def femit_debug_entry_values : Flag<["-"], "femit-debug-entry-values">,
- HelpText<"Enables debug info about call site parameter's entry values">;
-def fdebug_pass_manager : Flag<["-"], "fdebug-pass-manager">,
- HelpText<"Prints debug information for the new pass manager">;
-def fno_debug_pass_manager : Flag<["-"], "fno-debug-pass-manager">,
- HelpText<"Disables debug printing for the new pass manager">;
-// The driver option takes the key as a parameter to the -msign-return-address=
-// and -mbranch-protection= options, but CC1 has a separate option so we
-// don't have to parse the parameter twice.
-def msign_return_address_key_EQ : Joined<["-"], "msign-return-address-key=">,
- Values<"a_key,b_key">;
-def mbranch_target_enforce : Flag<["-"], "mbranch-target-enforce">;
-def fno_dllexport_inlines : Flag<["-"], "fno-dllexport-inlines">;
-def cfguard_no_checks : Flag<["-"], "cfguard-no-checks">,
- HelpText<"Emit Windows Control Flow Guard tables only (no checks)">;
-def cfguard : Flag<["-"], "cfguard">,
- HelpText<"Emit Windows Control Flow Guard tables and checks">;
-
-//===----------------------------------------------------------------------===//
-// Dependency Output Options
-//===----------------------------------------------------------------------===//
-
-def sys_header_deps : Flag<["-"], "sys-header-deps">,
- HelpText<"Include system headers in dependency output">;
-def module_file_deps : Flag<["-"], "module-file-deps">,
- HelpText<"Include module files in dependency output">;
-def header_include_file : Separate<["-"], "header-include-file">,
- HelpText<"Filename (or -) to write header include output to">;
-def show_includes : Flag<["--"], "show-includes">,
- HelpText<"Print cl.exe style /showIncludes to stdout">;
-
-//===----------------------------------------------------------------------===//
-// Diagnostic Options
-//===----------------------------------------------------------------------===//
-
-def diagnostic_log_file : Separate<["-"], "diagnostic-log-file">,
- HelpText<"Filename (or -) to log diagnostics to">;
-def diagnostic_serialized_file : Separate<["-"], "serialize-diagnostic-file">,
- MetaVarName<"<filename>">,
- HelpText<"File for serializing diagnostics in a binary format">;
-
-def fdiagnostics_format : Separate<["-"], "fdiagnostics-format">,
- HelpText<"Change diagnostic formatting to match IDE and command line tools">, Values<"clang,msvc,msvc-fallback,vi">;
-def fdiagnostics_show_category : Separate<["-"], "fdiagnostics-show-category">,
- HelpText<"Print diagnostic category">, Values<"none,id,name">;
-def fno_diagnostics_use_presumed_location : Flag<["-"], "fno-diagnostics-use-presumed-location">,
- HelpText<"Ignore #line directives when displaying diagnostic locations">;
-def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
- HelpText<"Set the tab stop distance.">;
-def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">;
-def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">;
-def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">;
-def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
-def fspell_checking_limit : Separate<["-"], "fspell-checking-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit).">;
-def fcaret_diagnostics_max_lines :
- Separate<["-"], "fcaret-diagnostics-max-lines">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of source lines to show in a caret diagnostic">;
-def fmessage_length : Separate<["-"], "fmessage-length">, MetaVarName<"<N>">,
- HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">;
-def verify_EQ : CommaJoined<["-"], "verify=">,
- MetaVarName<"<prefixes>">,
- HelpText<"Verify diagnostic output using comment directives that start with"
- " prefixes in the comma-separated sequence <prefixes>">;
-def verify : Flag<["-"], "verify">,
- HelpText<"Equivalent to -verify=expected">;
-def verify_ignore_unexpected : Flag<["-"], "verify-ignore-unexpected">,
- HelpText<"Ignore unexpected diagnostic messages">;
-def verify_ignore_unexpected_EQ : CommaJoined<["-"], "verify-ignore-unexpected=">,
- HelpText<"Ignore unexpected diagnostic messages">;
-def Wno_rewrite_macros : Flag<["-"], "Wno-rewrite-macros">,
- HelpText<"Silence ObjC rewriting warnings">;
-
-//===----------------------------------------------------------------------===//
-// Frontend Options
-//===----------------------------------------------------------------------===//
-
-// This isn't normally used, it is just here so we can parse a
-// CompilerInvocation out of a driver-derived argument vector.
-def cc1 : Flag<["-"], "cc1">;
-def cc1as : Flag<["-"], "cc1as">;
-
-def ast_merge : Separate<["-"], "ast-merge">,
- MetaVarName<"<ast file>">,
- HelpText<"Merge the given AST file into the translation unit being compiled.">;
-def aux_triple : Separate<["-"], "aux-triple">,
- HelpText<"Auxiliary target triple.">;
-def code_completion_at : Separate<["-"], "code-completion-at">,
- MetaVarName<"<file>:<line>:<column>">,
- HelpText<"Dump code-completion information at a location">;
-def remap_file : Separate<["-"], "remap-file">,
- MetaVarName<"<from>;<to>">,
- HelpText<"Replace the contents of the <from> file with the contents of the <to> file">;
-def code_completion_at_EQ : Joined<["-"], "code-completion-at=">,
- Alias<code_completion_at>;
-def code_completion_macros : Flag<["-"], "code-completion-macros">,
- HelpText<"Include macros in code-completion results">;
-def code_completion_patterns : Flag<["-"], "code-completion-patterns">,
- HelpText<"Include code patterns in code-completion results">;
-def no_code_completion_globals : Flag<["-"], "no-code-completion-globals">,
- HelpText<"Do not include global declarations in code-completion results.">;
-def no_code_completion_ns_level_decls : Flag<["-"], "no-code-completion-ns-level-decls">,
- HelpText<"Do not include declarations inside namespaces (incl. global namespace) in the code-completion results.">;
-def code_completion_brief_comments : Flag<["-"], "code-completion-brief-comments">,
- HelpText<"Include brief documentation comments in code-completion results.">;
-def code_completion_with_fixits : Flag<["-"], "code-completion-with-fixits">,
- HelpText<"Include code completion results which require small fix-its.">;
-def disable_free : Flag<["-"], "disable-free">,
- HelpText<"Disable freeing of memory on exit">;
-def discard_value_names : Flag<["-"], "discard-value-names">,
- HelpText<"Discard value names in LLVM IR">;
-def load : Separate<["-"], "load">, MetaVarName<"<dsopath>">,
- HelpText<"Load the named plugin (dynamic shared object)">;
-def plugin : Separate<["-"], "plugin">, MetaVarName<"<name>">,
- HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
-def plugin_arg : JoinedAndSeparate<["-"], "plugin-arg-">,
- MetaVarName<"<name> <arg>">,
- HelpText<"Pass <arg> to plugin <name>">;
-def add_plugin : Separate<["-"], "add-plugin">, MetaVarName<"<name>">,
- HelpText<"Use the named plugin action in addition to the default action">;
-def ast_dump_filter : Separate<["-"], "ast-dump-filter">,
- MetaVarName<"<dump_filter>">,
- HelpText<"Use with -ast-dump or -ast-print to dump/print only AST declaration"
- " nodes having a certain substring in a qualified name. Use"
- " -ast-list to list all filterable declaration node names.">;
-def fno_modules_global_index : Flag<["-"], "fno-modules-global-index">,
- HelpText<"Do not automatically generate or update the global module index">;
-def fno_modules_error_recovery : Flag<["-"], "fno-modules-error-recovery">,
- HelpText<"Do not automatically import modules for error recovery">;
-def fmodule_map_file_home_is_cwd : Flag<["-"], "fmodule-map-file-home-is-cwd">,
- HelpText<"Use the current working directory as the home directory of "
- "module maps specified by -fmodule-map-file=<FILE>">;
-def fmodule_feature : Separate<["-"], "fmodule-feature">,
- MetaVarName<"<feature>">,
- HelpText<"Enable <feature> in module map requires declarations">;
-def fmodules_embed_file_EQ : Joined<["-"], "fmodules-embed-file=">,
- MetaVarName<"<file>">,
- HelpText<"Embed the contents of the specified file into the module file "
- "being compiled.">;
-def fmodules_embed_all_files : Joined<["-"], "fmodules-embed-all-files">,
- HelpText<"Embed the contents of all files read by this compilation into "
- "the produced module file.">;
-def fmodules_local_submodule_visibility :
- Flag<["-"], "fmodules-local-submodule-visibility">,
- HelpText<"Enforce name visibility rules across submodules of the same "
- "top-level module.">;
-def fmodules_codegen :
- Flag<["-"], "fmodules-codegen">,
- HelpText<"Generate code for uses of this module that assumes an explicit "
- "object file will be built for the module">;
-def fmodules_debuginfo :
- Flag<["-"], "fmodules-debuginfo">,
- HelpText<"Generate debug info for types in an object file built from this "
- "module and do not generate them elsewhere">;
-def fmodule_format_EQ : Joined<["-"], "fmodule-format=">,
- HelpText<"Select the container format for clang modules and PCH. "
- "Supported options are 'raw' and 'obj'.">;
-def ftest_module_file_extension_EQ :
- Joined<["-"], "ftest-module-file-extension=">,
- HelpText<"introduce a module file extension for testing purposes. "
- "The argument is parsed as blockname:major:minor:hashed:user info">;
-def fconcepts_ts : Flag<["-"], "fconcepts-ts">,
- HelpText<"Enable C++ Extensions for Concepts. (deprecated - use -std=c++2a)">;
-def fno_concept_satisfaction_caching : Flag<["-"],
- "fno-concept-satisfaction-caching">,
- HelpText<"Disable satisfaction caching for C++2a Concepts.">;
-
-let Group = Action_Group in {
-
-def Eonly : Flag<["-"], "Eonly">,
- HelpText<"Just run preprocessor, no output (for timings)">;
-def dump_raw_tokens : Flag<["-"], "dump-raw-tokens">,
- HelpText<"Lex file in raw mode and dump raw tokens">;
-def analyze : Flag<["-"], "analyze">,
- HelpText<"Run static analysis engine">;
-def dump_tokens : Flag<["-"], "dump-tokens">,
- HelpText<"Run preprocessor, dump internal rep of tokens">;
-def init_only : Flag<["-"], "init-only">,
- HelpText<"Only execute frontend initialization">;
-def fixit : Flag<["-"], "fixit">,
- HelpText<"Apply fix-it advice to the input source">;
-def fixit_EQ : Joined<["-"], "fixit=">,
- HelpText<"Apply fix-it advice creating a file with the given suffix">;
-def print_preamble : Flag<["-"], "print-preamble">,
- HelpText<"Print the \"preamble\" of a file, which is a candidate for implicit"
- " precompiled headers.">;
-def emit_html : Flag<["-"], "emit-html">,
- HelpText<"Output input source as HTML">;
-def ast_print : Flag<["-"], "ast-print">,
- HelpText<"Build ASTs and then pretty-print them">;
-def ast_list : Flag<["-"], "ast-list">,
- HelpText<"Build ASTs and print the list of declaration node qualified names">;
-def ast_dump : Flag<["-"], "ast-dump">,
- HelpText<"Build ASTs and then debug dump them">;
-def ast_dump_EQ : Joined<["-"], "ast-dump=">,
- HelpText<"Build ASTs and then debug dump them in the specified format. "
- "Supported formats include: default, json">;
-def ast_dump_all : Flag<["-"], "ast-dump-all">,
- HelpText<"Build ASTs and then debug dump them, forcing deserialization">;
-def ast_dump_all_EQ : Joined<["-"], "ast-dump-all=">,
- HelpText<"Build ASTs and then debug dump them in the specified format, "
- "forcing deserialization. Supported formats include: default, json">;
-def templight_dump : Flag<["-"], "templight-dump">,
- HelpText<"Dump templight information to stdout">;
-def ast_dump_lookups : Flag<["-"], "ast-dump-lookups">,
- HelpText<"Build ASTs and then debug dump their name lookup tables">;
-def ast_view : Flag<["-"], "ast-view">,
- HelpText<"Build ASTs and view them with GraphViz">;
-def emit_module : Flag<["-"], "emit-module">,
- HelpText<"Generate pre-compiled module file from a module map">;
-def emit_module_interface : Flag<["-"], "emit-module-interface">,
- HelpText<"Generate pre-compiled module file from a C++ module interface">;
-def emit_header_module : Flag<["-"], "emit-header-module">,
- HelpText<"Generate pre-compiled module file from a set of header files">;
-def emit_pch : Flag<["-"], "emit-pch">,
- HelpText<"Generate pre-compiled header file">;
-def emit_llvm_bc : Flag<["-"], "emit-llvm-bc">,
- HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
-def emit_llvm_only : Flag<["-"], "emit-llvm-only">,
- HelpText<"Build ASTs and convert to LLVM, discarding output">;
-def emit_codegen_only : Flag<["-"], "emit-codegen-only">,
- HelpText<"Generate machine code, but discard output">;
-def emit_obj : Flag<["-"], "emit-obj">,
- HelpText<"Emit native object files">;
-def rewrite_test : Flag<["-"], "rewrite-test">,
- HelpText<"Rewriter playground">;
-def rewrite_macros : Flag<["-"], "rewrite-macros">,
- HelpText<"Expand macros without full preprocessing">;
-def migrate : Flag<["-"], "migrate">,
- HelpText<"Migrate source code">;
-def compiler_options_dump : Flag<["-"], "compiler-options-dump">,
- HelpText<"Dump the compiler configuration options">;
-def print_dependency_directives_minimized_source : Flag<["-"],
- "print-dependency-directives-minimized-source">,
- HelpText<"Print the output of the dependency directives source minimizer">;
-}
-
-def emit_llvm_uselists : Flag<["-"], "emit-llvm-uselists">,
- HelpText<"Preserve order of LLVM use-lists when serializing">;
-def no_emit_llvm_uselists : Flag<["-"], "no-emit-llvm-uselists">,
- HelpText<"Don't preserve order of LLVM use-lists when serializing">;
-
-def mt_migrate_directory : Separate<["-"], "mt-migrate-directory">,
- HelpText<"Directory for temporary files produced during ARC or ObjC migration">;
-def arcmt_check : Flag<["-"], "arcmt-check">,
- HelpText<"Check for ARC migration issues that need manual handling">;
-def arcmt_modify : Flag<["-"], "arcmt-modify">,
- HelpText<"Apply modifications to files to conform to ARC">;
-def arcmt_migrate : Flag<["-"], "arcmt-migrate">,
- HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-
-def opt_record_file : Separate<["-"], "opt-record-file">,
- HelpText<"File name to use for YAML optimization record output">;
-def opt_record_passes : Separate<["-"], "opt-record-passes">,
- HelpText<"Only record remark information for passes whose names match the given regular expression">;
-def opt_record_format : Separate<["-"], "opt-record-format">,
- HelpText<"The format used for serializing remarks (default: YAML)">;
-
-def print_stats : Flag<["-"], "print-stats">,
- HelpText<"Print performance metrics and statistics">;
-def stats_file : Joined<["-"], "stats-file=">,
- HelpText<"Filename to write statistics to">;
-def fdump_record_layouts : Flag<["-"], "fdump-record-layouts">,
- HelpText<"Dump record layout information">;
-def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
- HelpText<"Dump record layout information in a simple form used for testing">;
-def fix_what_you_can : Flag<["-"], "fix-what-you-can">,
- HelpText<"Apply fix-it advice even in the presence of unfixable errors">;
-def fix_only_warnings : Flag<["-"], "fix-only-warnings">,
- HelpText<"Apply fix-it advice only for warnings, not errors">;
-def fixit_recompile : Flag<["-"], "fixit-recompile">,
- HelpText<"Apply fix-it changes and recompile">;
-def fixit_to_temp : Flag<["-"], "fixit-to-temporary">,
- HelpText<"Apply fix-it changes to temporary files">;
-
-def foverride_record_layout_EQ : Joined<["-"], "foverride-record-layout=">,
- HelpText<"Override record layouts with those in the given file">;
-def pch_through_header_EQ : Joined<["-"], "pch-through-header=">,
- HelpText<"Stop PCH generation after including this file. When using a PCH, "
- "skip tokens until after this file is included.">;
-def pch_through_hdrstop_create : Flag<["-"], "pch-through-hdrstop-create">,
- HelpText<"When creating a PCH, stop PCH generation after #pragma hdrstop.">;
-def pch_through_hdrstop_use : Flag<["-"], "pch-through-hdrstop-use">,
- HelpText<"When using a PCH, skip tokens until after a #pragma hdrstop.">;
-def fno_pch_timestamp : Flag<["-"], "fno-pch-timestamp">,
- HelpText<"Disable inclusion of timestamp in precompiled headers">;
-def building_pch_with_obj : Flag<["-"], "building-pch-with-obj">,
- HelpText<"This compilation is part of building a PCH with corresponding object file.">;
-
-def aligned_alloc_unavailable : Flag<["-"], "faligned-alloc-unavailable">,
- HelpText<"Aligned allocation/deallocation functions are unavailable">;
-
-//===----------------------------------------------------------------------===//
-// Language Options
-//===----------------------------------------------------------------------===//
-
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-
-def version : Flag<["-"], "version">,
- HelpText<"Print the compiler version">;
-def main_file_name : Separate<["-"], "main-file-name">,
- HelpText<"Main file name to use for debug info and source if missing">;
-def split_dwarf_output : Separate<["-"], "split-dwarf-output">,
- HelpText<"File name to use for split dwarf debug info output">;
-
-}
-
-def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">,
- HelpText<"Weakly link in the blocks runtime">;
-def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">,
- HelpText<"Assume all functions with C linkage do not unwind">;
-def split_dwarf_file : Separate<["-"], "split-dwarf-file">,
- HelpText<"Name of the split dwarf debug info file to encode in the object file">;
-def fno_wchar : Flag<["-"], "fno-wchar">,
- HelpText<"Disable C++ builtin type wchar_t">;
-def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
- MetaVarName<"<class name>">,
- HelpText<"Specify the class to use for constant Objective-C string objects.">;
-def fobjc_arc_cxxlib_EQ : Joined<["-"], "fobjc-arc-cxxlib=">,
- HelpText<"Objective-C++ Automatic Reference Counting standard library kind">, Values<"libc++,libstdc++,none">;
-def fobjc_runtime_has_weak : Flag<["-"], "fobjc-runtime-has-weak">,
- HelpText<"The target Objective-C runtime supports ARC weak operations">;
-def fobjc_dispatch_method_EQ : Joined<["-"], "fobjc-dispatch-method=">,
- HelpText<"Objective-C dispatch method to use">, Values<"legacy,non-legacy,mixed">;
-def disable_objc_default_synthesize_properties : Flag<["-"], "disable-objc-default-synthesize-properties">,
- HelpText<"disable the default synthesis of Objective-C properties">;
-def fencode_extended_block_signature : Flag<["-"], "fencode-extended-block-signature">,
- HelpText<"enable extended encoding of block type signature">;
-def function_alignment : Separate<["-"], "function-alignment">,
- HelpText<"default alignment for functions">;
-def pic_level : Separate<["-"], "pic-level">,
- HelpText<"Value for __PIC__">;
-def pic_is_pie : Flag<["-"], "pic-is-pie">,
- HelpText<"File is for a position independent executable">;
-def fno_validate_pch : Flag<["-"], "fno-validate-pch">,
- HelpText<"Disable validation of precompiled headers">;
-def fallow_pch_with_errors : Flag<["-"], "fallow-pch-with-compiler-errors">,
- HelpText<"Accept a PCH file that was created with compiler errors">;
-def dump_deserialized_pch_decls : Flag<["-"], "dump-deserialized-decls">,
- HelpText<"Dump declarations that are deserialized from PCH, for testing">;
-def error_on_deserialized_pch_decl : Separate<["-"], "error-on-deserialized-decl">,
- HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
-def error_on_deserialized_pch_decl_EQ : Joined<["-"], "error-on-deserialized-decl=">,
- Alias<error_on_deserialized_pch_decl>;
-def static_define : Flag<["-"], "static-define">,
- HelpText<"Should __STATIC__ be defined">;
-def stack_protector : Separate<["-"], "stack-protector">,
- HelpText<"Enable stack protectors">;
-def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
- HelpText<"Lower bound for a buffer to be considered for stack protection">;
-def fvisibility : Separate<["-"], "fvisibility">,
- HelpText<"Default type and symbol visibility">;
-def ftype_visibility : Separate<["-"], "ftype-visibility">,
- HelpText<"Default type visibility">;
-def fapply_global_visibility_to_externs : Flag<["-"], "fapply-global-visibility-to-externs">,
- HelpText<"Apply global symbol visibility to external declarations without an explicit visibility">;
-def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
- HelpText<"Maximum depth of recursive template instantiation">;
-def foperator_arrow_depth : Separate<["-"], "foperator-arrow-depth">,
- HelpText<"Maximum number of 'operator->'s to call for a member access">;
-def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
- HelpText<"Maximum depth of recursive constexpr function calls">;
-def fconstexpr_steps : Separate<["-"], "fconstexpr-steps">,
- HelpText<"Maximum number of steps in constexpr function evaluation">;
-def fbracket_depth : Separate<["-"], "fbracket-depth">,
- HelpText<"Maximum nesting level for parentheses, brackets, and braces">;
-def fconst_strings : Flag<["-"], "fconst-strings">,
- HelpText<"Use a const qualified type for string literals in C and ObjC">;
-def fno_const_strings : Flag<["-"], "fno-const-strings">,
- HelpText<"Don't use a const qualified type for string literals in C and ObjC">;
-def fno_bitfield_type_align : Flag<["-"], "fno-bitfield-type-align">,
- HelpText<"Ignore bit-field types when aligning structures">;
-def ffake_address_space_map : Flag<["-"], "ffake-address-space-map">,
- HelpText<"Use a fake address space map; OpenCL testing purposes only">;
-def faddress_space_map_mangling_EQ : Joined<["-"], "faddress-space-map-mangling=">, MetaVarName<"<yes|no|target>">,
- HelpText<"Set the mode for address space map based mangling; OpenCL testing purposes only">;
-def funknown_anytype : Flag<["-"], "funknown-anytype">,
- HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
-def fdebugger_support : Flag<["-"], "fdebugger-support">,
- HelpText<"Enable special debugger support behavior">;
-def fdebugger_cast_result_to_id : Flag<["-"], "fdebugger-cast-result-to-id">,
- HelpText<"Enable casting unknown expression results to id">;
-def fdebugger_objc_literal : Flag<["-"], "fdebugger-objc-literal">,
- HelpText<"Enable special debugger support for Objective-C subscripting and literals">;
-def fdeprecated_macro : Flag<["-"], "fdeprecated-macro">,
- HelpText<"Defines the __DEPRECATED macro">;
-def fno_deprecated_macro : Flag<["-"], "fno-deprecated-macro">,
- HelpText<"Undefines the __DEPRECATED macro">;
-def fobjc_subscripting_legacy_runtime : Flag<["-"], "fobjc-subscripting-legacy-runtime">,
- HelpText<"Allow Objective-C array and dictionary subscripting in legacy runtime">;
-def vtordisp_mode_EQ : Joined<["-"], "vtordisp-mode=">,
- HelpText<"Control vtordisp placement on win32 targets">;
-def fnative_half_type: Flag<["-"], "fnative-half-type">,
- HelpText<"Use the native half type for __fp16 instead of promoting to float">;
-def fnative_half_arguments_and_returns : Flag<["-"], "fnative-half-arguments-and-returns">,
- HelpText<"Use the native __fp16 type for arguments and returns (and skip ABI-specific lowering)">;
-def fallow_half_arguments_and_returns : Flag<["-"], "fallow-half-arguments-and-returns">,
- HelpText<"Allow function arguments and returns of type half">;
-def fdefault_calling_conv_EQ : Joined<["-"], "fdefault-calling-conv=">,
- HelpText<"Set default calling convention">, Values<"cdecl,fastcall,stdcall,vectorcall,regcall">;
-def finclude_default_header : Flag<["-"], "finclude-default-header">,
- HelpText<"Include default header file for OpenCL">;
-def fdeclare_opencl_builtins : Flag<["-"], "fdeclare-opencl-builtins">,
- HelpText<"Add OpenCL builtin function declarations (experimental)">;
-def fpreserve_vec3_type : Flag<["-"], "fpreserve-vec3-type">,
- HelpText<"Preserve 3-component vector type">;
-def fwchar_type_EQ : Joined<["-"], "fwchar-type=">,
- HelpText<"Select underlying type for wchar_t">, Values<"char,short,int">;
-def fsigned_wchar : Flag<["-"], "fsigned-wchar">,
- HelpText<"Use a signed type for wchar_t">;
-def fno_signed_wchar : Flag<["-"], "fno-signed-wchar">,
- HelpText<"Use an unsigned type for wchar_t">;
-
-// FIXME: Remove these entirely once functionality/tests have been excised.
-def fobjc_gc_only : Flag<["-"], "fobjc-gc-only">, Group<f_Group>,
- HelpText<"Use GC exclusively for Objective-C related memory management">;
-def fobjc_gc : Flag<["-"], "fobjc-gc">, Group<f_Group>,
- HelpText<"Enable Objective-C garbage collection">;
-
-//===----------------------------------------------------------------------===//
-// Header Search Options
-//===----------------------------------------------------------------------===//
-
-def nostdsysteminc : Flag<["-"], "nostdsysteminc">,
- HelpText<"Disable standard system #include directories">;
-def fdisable_module_hash : Flag<["-"], "fdisable-module-hash">,
- HelpText<"Disable the module hash">;
-def fmodules_hash_content : Flag<["-"], "fmodules-hash-content">,
- HelpText<"Enable hashing the content of a module file">;
-def fmodules_strict_context_hash : Flag<["-"], "fmodules-strict-context-hash">,
- HelpText<"Enable hashing of all compiler options that could impact the "
- "semantics of a module in an implicit build">;
-def c_isystem : JoinedOrSeparate<["-"], "c-isystem">, MetaVarName<"<directory>">,
- HelpText<"Add directory to the C SYSTEM include search path">;
-def objc_isystem : JoinedOrSeparate<["-"], "objc-isystem">,
- MetaVarName<"<directory>">,
- HelpText<"Add directory to the ObjC SYSTEM include search path">;
-def objcxx_isystem : JoinedOrSeparate<["-"], "objcxx-isystem">,
- MetaVarName<"<directory>">,
- HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
-def internal_isystem : JoinedOrSeparate<["-"], "internal-isystem">,
- MetaVarName<"<directory>">,
- HelpText<"Add directory to the internal system include search path; these "
- "are assumed to not be user-provided and are used to model system "
- "and standard headers' paths.">;
-def internal_externc_isystem : JoinedOrSeparate<["-"], "internal-externc-isystem">,
- MetaVarName<"<directory>">,
- HelpText<"Add directory to the internal system include search path with "
- "implicit extern \"C\" semantics; these are assumed to not be "
- "user-provided and are used to model system and standard headers' "
- "paths.">;
-
-//===----------------------------------------------------------------------===//
-// Preprocessor Options
-//===----------------------------------------------------------------------===//
-
-def chain_include : Separate<["-"], "chain-include">, MetaVarName<"<file>">,
- HelpText<"Include and chain a header file after turning it into PCH">;
-def preamble_bytes_EQ : Joined<["-"], "preamble-bytes=">,
- HelpText<"Assume that the precompiled header is a precompiled preamble "
- "covering the first N bytes of the main file">;
-def detailed_preprocessing_record : Flag<["-"], "detailed-preprocessing-record">,
- HelpText<"include a detailed record of preprocessing actions">;
-def setup_static_analyzer : Flag<["-"], "setup-static-analyzer">,
- HelpText<"Set up preprocessor for static analyzer (done automatically when static analyzer is run).">;
-def disable_pragma_debug_crash : Flag<["-"], "disable-pragma-debug-crash">,
- HelpText<"Disable any #pragma clang __debug that can lead to crashing behavior. This is meant for testing.">;
-
-//===----------------------------------------------------------------------===//
-// OpenCL Options
-//===----------------------------------------------------------------------===//
-
-def cl_ext_EQ : CommaJoined<["-"], "cl-ext=">,
- HelpText<"OpenCL only. Enable or disable OpenCL extensions. The argument is a comma-separated sequence of one or more extension names, each prefixed by '+' or '-'.">;
-
-//===----------------------------------------------------------------------===//
-// CUDA Options
-//===----------------------------------------------------------------------===//
-
-def fcuda_is_device : Flag<["-"], "fcuda-is-device">,
- HelpText<"Generate code for CUDA device">;
-def fcuda_include_gpubinary : Separate<["-"], "fcuda-include-gpubinary">,
- HelpText<"Incorporate CUDA device-side binary into host object file.">;
-def fcuda_allow_variadic_functions : Flag<["-"], "fcuda-allow-variadic-functions">,
- HelpText<"Allow variadic functions in CUDA device code.">;
-def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr">,
- HelpText<"Don't treat unattributed constexpr functions as __host__ __device__.">;
-
-//===----------------------------------------------------------------------===//
-// OpenMP Options
-//===----------------------------------------------------------------------===//
-
-def fopenmp_is_device : Flag<["-"], "fopenmp-is-device">,
- HelpText<"Generate code only for an OpenMP target device.">;
-def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
- HelpText<"Path to the IR file produced by the frontend for the host.">;
-
-//===----------------------------------------------------------------------===//
-// SYCL Options
-//===----------------------------------------------------------------------===//
-
-def fsycl_is_device : Flag<["-"], "fsycl-is-device">,
- HelpText<"Generate code for SYCL device.">;
-
-} // let Flags = [CC1Option]
-
-//===----------------------------------------------------------------------===//
-// cc1as-only Options
-//===----------------------------------------------------------------------===//
-
-let Flags = [CC1AsOption, NoDriverOption] in {
-
-// Language Options
-def n : Flag<["-"], "n">,
- HelpText<"Don't automatically start assembly file with a text section">;
-
-// Frontend Options
-def filetype : Separate<["-"], "filetype">,
- HelpText<"Specify the output file type ('asm', 'null', or 'obj')">;
-
-// Transliterate Options
-def output_asm_variant : Separate<["-"], "output-asm-variant">,
- HelpText<"Select the asm variant index to use for output">;
-def show_encoding : Flag<["-"], "show-encoding">,
- HelpText<"Show instruction encoding information in transliterate mode">;
-def show_inst : Flag<["-"], "show-inst">,
- HelpText<"Show internal instruction representation in transliterate mode">;
-
-// Assemble Options
-def dwarf_debug_producer : Separate<["-"], "dwarf-debug-producer">,
- HelpText<"The string to embed in the Dwarf debug AT_producer record.">;
-
-def defsym : Separate<["-"], "defsym">,
- HelpText<"Define a value for a symbol">;
-} // let Flags = [CC1AsOption]
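
[Editor's note, not part of the diff: the removed cc1 definitions above are consumed by clang's option-parsing code. Below is a minimal sketch of how a Separate option such as -ftemplate-depth is typically read back; Opts, Args, Diags and the generated OPT_ftemplate_depth identifier are assumed from CompilerInvocation-style code, and 1024 is only an illustrative default.]

    // Sketch only: "-ftemplate-depth N" arrives as two arguments and is read
    // back as an integer, falling back to the default when absent.
    Opts.InstantiationDepth =
        getLastArgIntValue(Args, OPT_ftemplate_depth, /*Default=*/1024, Diags);
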
diff --git a/contrib/llvm-project/clang/include/clang/Driver/CLCompatOptions.td b/contrib/llvm-project/clang/include/clang/Driver/CLCompatOptions.td
deleted file mode 100644
index 50d4622009c9..000000000000
--- a/contrib/llvm-project/clang/include/clang/Driver/CLCompatOptions.td
+++ /dev/null
@@ -1,466 +0,0 @@
-//===--- CLCompatOptions.td - Options for clang-cl ------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the options accepted by clang-cl.
-//
-//===----------------------------------------------------------------------===//
-
-def cl_Group : OptionGroup<"<clang-cl options>">, Flags<[CLOption]>,
- HelpText<"CL.EXE COMPATIBILITY OPTIONS">;
-
-def cl_compile_Group : OptionGroup<"<clang-cl compile-only options>">,
- Group<cl_Group>;
-
-def cl_ignored_Group : OptionGroup<"<clang-cl ignored options>">,
- Group<cl_Group>;
-
-class CLFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLCompileFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_compile_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLIgnoredFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_ignored_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLCompileJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_compile_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLIgnoredJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_ignored_Group>, Flags<[CLOption, DriverOption, HelpHidden]>;
-
-class CLJoinedOrSeparate<string name> : Option<["/", "-"], name,
- KIND_JOINED_OR_SEPARATE>, Group<cl_Group>, Flags<[CLOption, DriverOption]>;
-
-class CLCompileJoinedOrSeparate<string name> : Option<["/", "-"], name,
- KIND_JOINED_OR_SEPARATE>, Group<cl_compile_Group>,
- Flags<[CLOption, DriverOption]>;
-
-class CLRemainingArgsJoined<string name> : Option<["/", "-"], name,
- KIND_REMAINING_ARGS_JOINED>, Group<cl_Group>, Flags<[CLOption, DriverOption]>;
-
-// Aliases:
-// (We don't put any of these in cl_compile_Group as the options they alias are
-// already in the right group.)
-
-def _SLASH_Brepro : CLFlag<"Brepro">,
- HelpText<"Do not write current time into COFF output (breaks link.exe /incremental)">,
- Alias<mno_incremental_linker_compatible>;
-def _SLASH_Brepro_ : CLFlag<"Brepro-">,
- HelpText<"Write current time into COFF output (default)">,
- Alias<mincremental_linker_compatible>;
-def _SLASH_C : CLFlag<"C">,
- HelpText<"Do not discard comments when preprocessing">, Alias<C>;
-def _SLASH_c : CLFlag<"c">, HelpText<"Compile only">, Alias<c>;
-def _SLASH_d1PP : CLFlag<"d1PP">,
- HelpText<"Retain macro definitions in /E mode">, Alias<dD>;
-def _SLASH_d1reportAllClassLayout : CLFlag<"d1reportAllClassLayout">,
- HelpText<"Dump record layout information">,
- Alias<Xclang>, AliasArgs<["-fdump-record-layouts"]>;
-def _SLASH_diagnostics_caret : CLFlag<"diagnostics:caret">,
- HelpText<"Enable caret and column diagnostics (default)">;
-def _SLASH_diagnostics_column : CLFlag<"diagnostics:column">,
- HelpText<"Disable caret diagnostics but keep column info">;
-def _SLASH_diagnostics_classic : CLFlag<"diagnostics:classic">,
- HelpText<"Disable column and caret diagnostics">;
-def _SLASH_D : CLJoinedOrSeparate<"D">, HelpText<"Define macro">,
- MetaVarName<"<macro[=value]>">, Alias<D>;
-def _SLASH_E : CLFlag<"E">, HelpText<"Preprocess to stdout">, Alias<E>;
-def _SLASH_fp_except : CLFlag<"fp:except">, HelpText<"">, Alias<ftrapping_math>;
-def _SLASH_fp_except_ : CLFlag<"fp:except-">,
- HelpText<"">, Alias<fno_trapping_math>;
-def _SLASH_fp_fast : CLFlag<"fp:fast">, HelpText<"">, Alias<ffast_math>;
-def _SLASH_fp_precise : CLFlag<"fp:precise">,
- HelpText<"">, Alias<fno_fast_math>;
-def _SLASH_fp_strict : CLFlag<"fp:strict">, HelpText<"">, Alias<fno_fast_math>;
-def _SLASH_GA : CLFlag<"GA">, Alias<ftlsmodel_EQ>, AliasArgs<["local-exec"]>,
- HelpText<"Assume thread-local variables are defined in the executable">;
-def _SLASH_GR : CLFlag<"GR">, HelpText<"Emit RTTI data (default)">;
-def _SLASH_GR_ : CLFlag<"GR-">, HelpText<"Do not emit RTTI data">;
-def _SLASH_GF : CLIgnoredFlag<"GF">,
- HelpText<"Enable string pooling (default)">;
-def _SLASH_GF_ : CLFlag<"GF-">, HelpText<"Disable string pooling">,
- Alias<fwritable_strings>;
-def _SLASH_GS : CLFlag<"GS">,
- HelpText<"Enable buffer security check (default)">;
-def _SLASH_GS_ : CLFlag<"GS-">, HelpText<"Disable buffer security check">;
-def : CLFlag<"Gs">, HelpText<"Use stack probes (default)">,
- Alias<mstack_probe_size>, AliasArgs<["4096"]>;
-def _SLASH_Gs : CLJoined<"Gs">,
- HelpText<"Set stack probe size (default 4096)">, Alias<mstack_probe_size>;
-def _SLASH_Gy : CLFlag<"Gy">, HelpText<"Put each function in its own section">,
- Alias<ffunction_sections>;
-def _SLASH_Gy_ : CLFlag<"Gy-">,
- HelpText<"Do not put each function in its own section (default)">,
- Alias<fno_function_sections>;
-def _SLASH_Gw : CLFlag<"Gw">, HelpText<"Put each data item in its own section">,
- Alias<fdata_sections>;
-def _SLASH_Gw_ : CLFlag<"Gw-">,
- HelpText<"Do not put each data item in its own section (default)">,
- Alias<fno_data_sections>;
-def _SLASH_help : CLFlag<"help">, Alias<help>,
- HelpText<"Display available options">;
-def _SLASH_HELP : CLFlag<"HELP">, Alias<help>;
-def _SLASH_I : CLJoinedOrSeparate<"I">,
- HelpText<"Add directory to include search path">, MetaVarName<"<dir>">,
- Alias<I>;
-def _SLASH_J : CLFlag<"J">, HelpText<"Make char type unsigned">,
- Alias<funsigned_char>;
-
-// The _SLASH_O option handles all the /O flags, but we also provide separate
-// aliased options to provide separate help messages.
-def _SLASH_O : CLJoined<"O">,
- HelpText<"Set multiple /O flags at once; e.g. '/O2y-' for '/O2 /Oy-'">,
- MetaVarName<"<flags>">;
-def : CLFlag<"O1">, Alias<_SLASH_O>, AliasArgs<["1"]>,
- HelpText<"Optimize for size (like /Og /Os /Oy /Ob2 /GF /Gy)">;
-def : CLFlag<"O2">, Alias<_SLASH_O>, AliasArgs<["2"]>,
- HelpText<"Optimize for speed (like /Og /Oi /Ot /Oy /Ob2 /GF /Gy)">;
-def : CLFlag<"Ob0">, Alias<_SLASH_O>, AliasArgs<["b0"]>,
- HelpText<"Disable function inlining">;
-def : CLFlag<"Ob1">, Alias<_SLASH_O>, AliasArgs<["b1"]>,
- HelpText<"Only inline functions explicitly or implicitly marked inline">;
-def : CLFlag<"Ob2">, Alias<_SLASH_O>, AliasArgs<["b2"]>,
- HelpText<"Inline functions as deemed beneficial by the compiler">;
-def : CLFlag<"Od">, Alias<_SLASH_O>, AliasArgs<["d"]>,
- HelpText<"Disable optimization">;
-def : CLFlag<"Og">, Alias<_SLASH_O>, AliasArgs<["g"]>,
- HelpText<"No effect">;
-def : CLFlag<"Oi">, Alias<_SLASH_O>, AliasArgs<["i"]>,
- HelpText<"Enable use of builtin functions">;
-def : CLFlag<"Oi-">, Alias<_SLASH_O>, AliasArgs<["i-"]>,
- HelpText<"Disable use of builtin functions">;
-def : CLFlag<"Os">, Alias<_SLASH_O>, AliasArgs<["s"]>,
- HelpText<"Optimize for size">;
-def : CLFlag<"Ot">, Alias<_SLASH_O>, AliasArgs<["t"]>,
- HelpText<"Optimize for speed">;
-def : CLFlag<"Ox">, Alias<_SLASH_O>, AliasArgs<["x"]>,
- HelpText<"Deprecated (like /Og /Oi /Ot /Oy /Ob2); use /O2">;
-def : CLFlag<"Oy">, Alias<_SLASH_O>, AliasArgs<["y"]>,
- HelpText<"Enable frame pointer omission (x86 only)">;
-def : CLFlag<"Oy-">, Alias<_SLASH_O>, AliasArgs<["y-"]>,
- HelpText<"Disable frame pointer omission (x86 only, default)">;
-
-def _SLASH_QUESTION : CLFlag<"?">, Alias<help>,
- HelpText<"Display available options">;
-def _SLASH_Qvec : CLFlag<"Qvec">,
- HelpText<"Enable the loop vectorization passes">, Alias<fvectorize>;
-def _SLASH_Qvec_ : CLFlag<"Qvec-">,
- HelpText<"Disable the loop vectorization passes">, Alias<fno_vectorize>;
-def _SLASH_showIncludes : CLFlag<"showIncludes">,
- HelpText<"Print info about included files to stderr">,
- Alias<show_includes>;
-def _SLASH_showFilenames : CLFlag<"showFilenames">,
- HelpText<"Print the name of each compiled file">;
-def _SLASH_showFilenames_ : CLFlag<"showFilenames-">,
- HelpText<"Do not print the name of each compiled file (default)">;
-def _SLASH_source_charset : CLCompileJoined<"source-charset:">,
- HelpText<"Set source encoding, supports only UTF-8">,
- Alias<finput_charset_EQ>;
-def _SLASH_execution_charset : CLCompileJoined<"execution-charset:">,
- HelpText<"Set runtime encoding, supports only UTF-8">,
- Alias<fexec_charset_EQ>;
-def _SLASH_std : CLCompileJoined<"std:">,
- HelpText<"Set C++ version (c++14,c++17,c++latest)">;
-def _SLASH_U : CLJoinedOrSeparate<"U">, HelpText<"Undefine macro">,
- MetaVarName<"<macro>">, Alias<U>;
-def _SLASH_validate_charset : CLFlag<"validate-charset">,
- Alias<W_Joined>, AliasArgs<["invalid-source-encoding"]>;
-def _SLASH_validate_charset_ : CLFlag<"validate-charset-">,
- Alias<W_Joined>, AliasArgs<["no-invalid-source-encoding"]>;
-def _SLASH_W0 : CLFlag<"W0">, HelpText<"Disable all warnings">, Alias<w>;
-def _SLASH_W1 : CLFlag<"W1">, HelpText<"Enable -Wall">, Alias<Wall>;
-def _SLASH_W2 : CLFlag<"W2">, HelpText<"Enable -Wall">, Alias<Wall>;
-def _SLASH_W3 : CLFlag<"W3">, HelpText<"Enable -Wall">, Alias<Wall>;
-def _SLASH_W4 : CLFlag<"W4">, HelpText<"Enable -Wall and -Wextra">, Alias<WCL4>;
-def _SLASH_Wall : CLFlag<"Wall">, HelpText<"Enable -Weverything">,
- Alias<W_Joined>, AliasArgs<["everything"]>;
-def _SLASH_WX : CLFlag<"WX">, HelpText<"Treat warnings as errors">,
- Alias<W_Joined>, AliasArgs<["error"]>;
-def _SLASH_WX_ : CLFlag<"WX-">,
- HelpText<"Do not treat warnings as errors (default)">,
- Alias<W_Joined>, AliasArgs<["no-error"]>;
-def _SLASH_w_flag : CLFlag<"w">, HelpText<"Disable all warnings">, Alias<w>;
-def _SLASH_wd4005 : CLFlag<"wd4005">, Alias<W_Joined>,
- AliasArgs<["no-macro-redefined"]>;
-def _SLASH_wd4018 : CLFlag<"wd4018">, Alias<W_Joined>,
- AliasArgs<["no-sign-compare"]>;
-def _SLASH_wd4100 : CLFlag<"wd4100">, Alias<W_Joined>,
- AliasArgs<["no-unused-parameter"]>;
-def _SLASH_wd4910 : CLFlag<"wd4910">, Alias<W_Joined>,
- AliasArgs<["no-dllexport-explicit-instantiation-decl"]>;
-def _SLASH_wd4996 : CLFlag<"wd4996">, Alias<W_Joined>,
- AliasArgs<["no-deprecated-declarations"]>;
-def _SLASH_vd : CLJoined<"vd">, HelpText<"Control vtordisp placement">,
- Alias<vtordisp_mode_EQ>;
-def _SLASH_X : CLFlag<"X">,
- HelpText<"Do not add %INCLUDE% to include search path">, Alias<nostdlibinc>;
-def _SLASH_Zc_sizedDealloc : CLFlag<"Zc:sizedDealloc">,
- HelpText<"Enable C++14 sized global deallocation functions">,
- Alias<fsized_deallocation>;
-def _SLASH_Zc_sizedDealloc_ : CLFlag<"Zc:sizedDealloc-">,
- HelpText<"Disable C++14 sized global deallocation functions">,
- Alias<fno_sized_deallocation>;
-def _SLASH_Zc_alignedNew : CLFlag<"Zc:alignedNew">,
- HelpText<"Enable C++17 aligned allocation functions">,
- Alias<faligned_allocation>;
-def _SLASH_Zc_alignedNew_ : CLFlag<"Zc:alignedNew-">,
- HelpText<"Disable C++17 aligned allocation functions">,
- Alias<fno_aligned_allocation>;
-def _SLASH_Zc_char8_t : CLFlag<"Zc:char8_t">,
- HelpText<"Enable char8_t from C++2a">,
- Alias<fchar8__t>;
-def _SLASH_Zc_char8_t_ : CLFlag<"Zc:char8_t-">,
- HelpText<"Disable char8_t from c++2a">,
- Alias<fno_char8__t>;
-def _SLASH_Zc_strictStrings : CLFlag<"Zc:strictStrings">,
- HelpText<"Treat string literals as const">, Alias<W_Joined>,
- AliasArgs<["error=c++11-compat-deprecated-writable-strings"]>;
-def _SLASH_Zc_threadSafeInit : CLFlag<"Zc:threadSafeInit">,
- HelpText<"Enable thread-safe initialization of static variables">,
- Alias<fthreadsafe_statics>;
-def _SLASH_Zc_threadSafeInit_ : CLFlag<"Zc:threadSafeInit-">,
- HelpText<"Disable thread-safe initialization of static variables">,
- Alias<fno_threadsafe_statics>;
-def _SLASH_Zc_trigraphs : CLFlag<"Zc:trigraphs">,
- HelpText<"Enable trigraphs">, Alias<ftrigraphs>;
-def _SLASH_Zc_trigraphs_off : CLFlag<"Zc:trigraphs-">,
- HelpText<"Disable trigraphs (default)">, Alias<fno_trigraphs>;
-def _SLASH_Zc_twoPhase : CLFlag<"Zc:twoPhase">,
- HelpText<"Enable two-phase name lookup in templates">,
- Alias<fno_delayed_template_parsing>;
-def _SLASH_Zc_twoPhase_ : CLFlag<"Zc:twoPhase-">,
- HelpText<"Disable two-phase name lookup in templates (default)">,
- Alias<fdelayed_template_parsing>;
-def _SLASH_Z7 : CLFlag<"Z7">,
- HelpText<"Enable CodeView debug information in object files">;
-def _SLASH_Zd : CLFlag<"Zd">,
- HelpText<"Emit debug line number tables only">;
-def _SLASH_Zi : CLFlag<"Zi">, Alias<_SLASH_Z7>,
- HelpText<"Like /Z7">;
-def _SLASH_Zp : CLJoined<"Zp">,
- HelpText<"Set default maximum struct packing alignment">,
- Alias<fpack_struct_EQ>;
-def _SLASH_Zp_flag : CLFlag<"Zp">,
- HelpText<"Set default maximum struct packing alignment to 1">,
- Alias<fpack_struct_EQ>, AliasArgs<["1"]>;
-def _SLASH_Zs : CLFlag<"Zs">, HelpText<"Syntax-check only">,
- Alias<fsyntax_only>;
-def _SLASH_openmp_ : CLFlag<"openmp-">,
- HelpText<"Disable OpenMP support">, Alias<fno_openmp>;
-def _SLASH_openmp : CLFlag<"openmp">, HelpText<"Enable OpenMP support">,
- Alias<fopenmp>;
-def _SLASH_openmp_experimental : CLFlag<"openmp:experimental">,
- HelpText<"Enable OpenMP support with experimental SIMD support">,
- Alias<fopenmp>;
-
-// Non-aliases:
-
-def _SLASH_arch : CLCompileJoined<"arch:">,
- HelpText<"Set architecture for code generation">;
-
-def _SLASH_M_Group : OptionGroup<"</M group>">, Group<cl_compile_Group>;
-def _SLASH_volatile_Group : OptionGroup<"</volatile group>">,
- Group<cl_compile_Group>;
-
-def _SLASH_EH : CLJoined<"EH">, HelpText<"Set exception handling model">;
-def _SLASH_EP : CLFlag<"EP">,
- HelpText<"Disable linemarker output and preprocess to stdout">;
-def _SLASH_FA : CLFlag<"FA">,
- HelpText<"Output assembly code file during compilation">;
-def _SLASH_Fa : CLJoined<"Fa">,
- HelpText<"Set assembly output file name (with /FA)">,
- MetaVarName<"<file or dir/>">;
-def _SLASH_fallback : CLCompileFlag<"fallback">,
- HelpText<"Fall back to cl.exe if clang-cl fails to compile">;
-def _SLASH_FI : CLJoinedOrSeparate<"FI">,
- HelpText<"Include file before parsing">, Alias<include_>;
-def _SLASH_Fe : CLJoined<"Fe">,
- HelpText<"Set output executable file name">,
- MetaVarName<"<file or dir/>">;
-def _SLASH_Fi : CLCompileJoined<"Fi">,
- HelpText<"Set preprocess output file name (with /P)">,
- MetaVarName<"<file>">;
-def _SLASH_Fo : CLCompileJoined<"Fo">,
- HelpText<"Set output object file (with /c)">,
- MetaVarName<"<file or dir/>">;
-def _SLASH_guard : CLJoined<"guard:">,
- HelpText<"Enable Control Flow Guard with /guard:cf, or only the table with /guard:cf,nochecks">;
-def _SLASH_GX : CLFlag<"GX">,
- HelpText<"Deprecated; use /EHsc">;
-def _SLASH_GX_ : CLFlag<"GX-">,
- HelpText<"Deprecated (like not passing /EH)">;
-def _SLASH_imsvc : CLJoinedOrSeparate<"imsvc">,
- HelpText<"Add <dir> to system include search path, as if in %INCLUDE%">,
- MetaVarName<"<dir>">;
-def _SLASH_LD : CLFlag<"LD">, HelpText<"Create DLL">;
-def _SLASH_LDd : CLFlag<"LDd">, HelpText<"Create debug DLL">;
-def _SLASH_link : CLRemainingArgsJoined<"link">,
- HelpText<"Forward options to the linker">, MetaVarName<"<options>">;
-def _SLASH_MD : Option<["/", "-"], "MD", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, DriverOption]>, HelpText<"Use DLL run-time">;
-def _SLASH_MDd : Option<["/", "-"], "MDd", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, DriverOption]>, HelpText<"Use DLL debug run-time">;
-def _SLASH_MT : Option<["/", "-"], "MT", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, DriverOption]>, HelpText<"Use static run-time">;
-def _SLASH_MTd : Option<["/", "-"], "MTd", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, DriverOption]>, HelpText<"Use static debug run-time">;
-def _SLASH_o : CLJoinedOrSeparate<"o">,
- HelpText<"Deprecated (set output file name); use /Fe or /Fe">,
- MetaVarName<"<file or dir/>">;
-def _SLASH_P : CLFlag<"P">, HelpText<"Preprocess to file">;
-def _SLASH_Tc : CLCompileJoinedOrSeparate<"Tc">,
- HelpText<"Treat <file> as C source file">, MetaVarName<"<file>">;
-def _SLASH_TC : CLCompileFlag<"TC">, HelpText<"Treat all source files as C">;
-def _SLASH_Tp : CLCompileJoinedOrSeparate<"Tp">,
- HelpText<"Treat <file> as C++ source file">, MetaVarName<"<file>">;
-def _SLASH_TP : CLCompileFlag<"TP">, HelpText<"Treat all source files as C++">;
-def _SLASH_volatile_iso : Option<["/", "-"], "volatile:iso", KIND_FLAG>,
- Group<_SLASH_volatile_Group>, Flags<[CLOption, DriverOption]>,
- HelpText<"Volatile loads and stores have standard semantics">;
-def _SLASH_vmb : CLFlag<"vmb">,
- HelpText<"Use a best-case representation method for member pointers">;
-def _SLASH_vmg : CLFlag<"vmg">,
- HelpText<"Use a most-general representation for member pointers">;
-def _SLASH_vms : CLFlag<"vms">,
- HelpText<"Set the default most-general representation to single inheritance">;
-def _SLASH_vmm : CLFlag<"vmm">,
- HelpText<"Set the default most-general representation to "
- "multiple inheritance">;
-def _SLASH_vmv : CLFlag<"vmv">,
- HelpText<"Set the default most-general representation to "
- "virtual inheritance">;
-def _SLASH_volatile_ms : Option<["/", "-"], "volatile:ms", KIND_FLAG>,
- Group<_SLASH_volatile_Group>, Flags<[CLOption, DriverOption]>,
- HelpText<"Volatile loads and stores have acquire and release semantics">;
-def _SLASH_clang : CLJoined<"clang:">,
- HelpText<"Pass <arg> to the clang driver">, MetaVarName<"<arg>">;
-def _SLASH_Zl : CLFlag<"Zl">,
- HelpText<"Do not let object file auto-link default libraries">;
-
-def _SLASH_Yc : CLJoined<"Yc">,
- HelpText<"Generate a pch file for all code up to and including <filename>">,
- MetaVarName<"<filename>">;
-def _SLASH_Yu : CLJoined<"Yu">,
- HelpText<"Load a pch file and use it instead of all code up to "
- "and including <filename>">,
- MetaVarName<"<filename>">;
-def _SLASH_Y_ : CLFlag<"Y-">,
- HelpText<"Disable precompiled headers, overrides /Yc and /Yu">;
-def _SLASH_Zc_dllexportInlines : CLFlag<"Zc:dllexportInlines">,
- HelpText<"dllexport/dllimport inline member functions of dllexport/import classes (default)">;
-def _SLASH_Zc_dllexportInlines_ : CLFlag<"Zc:dllexportInlines-">,
- HelpText<"Do not dllexport/dllimport inline member functions of dllexport/import classes">;
-def _SLASH_Fp : CLJoined<"Fp">,
- HelpText<"Set pch file name (with /Yc and /Yu)">, MetaVarName<"<file>">;
-
-def _SLASH_Gd : CLFlag<"Gd">,
- HelpText<"Set __cdecl as a default calling convention">;
-def _SLASH_Gr : CLFlag<"Gr">,
- HelpText<"Set __fastcall as a default calling convention">;
-def _SLASH_Gz : CLFlag<"Gz">,
- HelpText<"Set __stdcall as a default calling convention">;
-def _SLASH_Gv : CLFlag<"Gv">,
- HelpText<"Set __vectorcall as a default calling convention">;
-def _SLASH_Gregcall : CLFlag<"Gregcall">,
- HelpText<"Set __regcall as a default calling convention">;
-
-// Ignored:
-
-def _SLASH_analyze_ : CLIgnoredFlag<"analyze-">;
-def _SLASH_bigobj : CLIgnoredFlag<"bigobj">;
-def _SLASH_cgthreads : CLIgnoredJoined<"cgthreads">;
-def _SLASH_d2FastFail : CLIgnoredFlag<"d2FastFail">;
-def _SLASH_d2Zi_PLUS : CLIgnoredFlag<"d2Zi+">;
-def _SLASH_errorReport : CLIgnoredJoined<"errorReport">;
-def _SLASH_FC : CLIgnoredFlag<"FC">;
-def _SLASH_Fd : CLIgnoredJoined<"Fd">;
-def _SLASH_FS : CLIgnoredFlag<"FS">;
-def _SLASH_JMC : CLIgnoredFlag<"JMC">;
-def _SLASH_kernel_ : CLIgnoredFlag<"kernel-">;
-def _SLASH_nologo : CLIgnoredFlag<"nologo">;
-def _SLASH_permissive_ : CLIgnoredFlag<"permissive-">;
-def _SLASH_RTC : CLIgnoredJoined<"RTC">;
-def _SLASH_sdl : CLIgnoredFlag<"sdl">;
-def _SLASH_sdl_ : CLIgnoredFlag<"sdl-">;
-def _SLASH_utf8 : CLIgnoredFlag<"utf-8">,
- HelpText<"Set source and runtime encoding to UTF-8 (default)">;
-def _SLASH_w : CLIgnoredJoined<"w">;
-def _SLASH_Zc___cplusplus : CLIgnoredFlag<"Zc:__cplusplus">;
-def _SLASH_Zc_auto : CLIgnoredFlag<"Zc:auto">;
-def _SLASH_Zc_forScope : CLIgnoredFlag<"Zc:forScope">;
-def _SLASH_Zc_inline : CLIgnoredFlag<"Zc:inline">;
-def _SLASH_Zc_rvalueCast : CLIgnoredFlag<"Zc:rvalueCast">;
-def _SLASH_Zc_ternary : CLIgnoredFlag<"Zc:ternary">;
-def _SLASH_Zc_wchar_t : CLIgnoredFlag<"Zc:wchar_t">;
-def _SLASH_ZH_MD5 : CLIgnoredFlag<"ZH:MD5">;
-def _SLASH_ZH_SHA1 : CLIgnoredFlag<"ZH:SHA1">;
-def _SLASH_ZH_SHA_256 : CLIgnoredFlag<"ZH:SHA_256">;
-def _SLASH_Zm : CLIgnoredJoined<"Zm">;
-def _SLASH_Zo : CLIgnoredFlag<"Zo">;
-def _SLASH_Zo_ : CLIgnoredFlag<"Zo-">;
-
-
-// Unsupported:
-
-def _SLASH_await : CLFlag<"await">;
-def _SLASH_constexpr : CLJoined<"constexpr:">;
-def _SLASH_AI : CLJoinedOrSeparate<"AI">;
-def _SLASH_Bt : CLFlag<"Bt">;
-def _SLASH_Bt_plus : CLFlag<"Bt+">;
-def _SLASH_clr : CLJoined<"clr">;
-def _SLASH_d2 : CLJoined<"d2">;
-def _SLASH_doc : CLJoined<"doc">;
-def _SLASH_FA_joined : CLJoined<"FA">;
-def _SLASH_favor : CLJoined<"favor">;
-def _SLASH_F : CLJoinedOrSeparate<"F">;
-def _SLASH_Fm : CLJoined<"Fm">;
-def _SLASH_Fr : CLJoined<"Fr">;
-def _SLASH_FR : CLJoined<"FR">;
-def _SLASH_FU : CLJoinedOrSeparate<"FU">;
-def _SLASH_Fx : CLFlag<"Fx">;
-def _SLASH_G1 : CLFlag<"G1">;
-def _SLASH_G2 : CLFlag<"G2">;
-def _SLASH_Ge : CLFlag<"Ge">;
-def _SLASH_Gh : CLFlag<"Gh">;
-def _SLASH_GH : CLFlag<"GH">;
-def _SLASH_GL : CLFlag<"GL">;
-def _SLASH_GL_ : CLFlag<"GL-">;
-def _SLASH_Gm : CLFlag<"Gm">;
-def _SLASH_Gm_ : CLFlag<"Gm-">;
-def _SLASH_GT : CLFlag<"GT">;
-def _SLASH_GZ : CLFlag<"GZ">;
-def _SLASH_H : CLFlag<"H">;
-def _SLASH_homeparams : CLFlag<"homeparams">;
-def _SLASH_hotpatch : CLFlag<"hotpatch">;
-def _SLASH_kernel : CLFlag<"kernel">;
-def _SLASH_LN : CLFlag<"LN">;
-def _SLASH_MP : CLJoined<"MP">;
-def _SLASH_Qfast_transcendentals : CLFlag<"Qfast_transcendentals">;
-def _SLASH_QIfist : CLFlag<"QIfist">;
-def _SLASH_Qimprecise_fwaits : CLFlag<"Qimprecise_fwaits">;
-def _SLASH_Qpar : CLFlag<"Qpar">;
-def _SLASH_Qpar_report : CLJoined<"Qpar-report">;
-def _SLASH_Qsafe_fp_loads : CLFlag<"Qsafe_fp_loads">;
-def _SLASH_Qspectre : CLFlag<"Qspectre">;
-def _SLASH_Qvec_report : CLJoined<"Qvec-report">;
-def _SLASH_u : CLFlag<"u">;
-def _SLASH_V : CLFlag<"V">;
-def _SLASH_WL : CLFlag<"WL">;
-def _SLASH_Wp64 : CLFlag<"Wp64">;
-def _SLASH_Yd : CLFlag<"Yd">;
-def _SLASH_Yl : CLJoined<"Yl">;
-def _SLASH_Za : CLFlag<"Za">;
-def _SLASH_Zc : CLJoined<"Zc:">;
-def _SLASH_Ze : CLFlag<"Ze">;
-def _SLASH_Zg : CLFlag<"Zg">;
-def _SLASH_ZI : CLFlag<"ZI">;
-def _SLASH_ZW : CLJoined<"ZW">;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Distro.h b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
index d382cf77a8b2..038d4ce75d80 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Distro.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
@@ -67,6 +67,7 @@ public:
UbuntuDisco,
UbuntuEoan,
UbuntuFocal,
+ UbuntuGroovy,
UnknownDistro
};
@@ -120,7 +121,7 @@ public:
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuHardy && DistroVal <= UbuntuFocal;
+ return DistroVal >= UbuntuHardy && DistroVal <= UbuntuGroovy;
}
bool IsAlpineLinux() const {
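
[Editor's note: IsUbuntu() above is a pure enumerator-range check, so it stays correct only if each new Ubuntu release (here UbuntuGroovy) is appended in order, immediately before UnknownDistro, and the upper bound is bumped with it. A small self-contained sketch of the idiom, with illustrative names rather than the real class:]

    enum DistroType { UbuntuHardy, /* ...releases in order... */ UbuntuFocal,
                      UbuntuGroovy, UnknownDistro };

    bool isUbuntu(DistroType D) {
      // Valid only while the Ubuntu enumerators stay contiguous and ordered.
      return D >= UbuntuHardy && D <= UbuntuGroovy;
    }
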
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Driver.h b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
index 6c3feaba0568..dc18f1314f81 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
@@ -340,9 +340,7 @@ public:
return InstalledDir.c_str();
return Dir.c_str();
}
- void setInstalledDir(StringRef Value) {
- InstalledDir = Value;
- }
+ void setInstalledDir(StringRef Value) { InstalledDir = std::string(Value); }
bool isSaveTempsEnabled() const { return SaveTemps != SaveTempsNone; }
bool isSaveTempsObj() const { return SaveTemps == SaveTempsObj; }
@@ -550,6 +548,9 @@ public:
/// handle this action.
bool ShouldUseFlangCompiler(const JobAction &JA) const;
+ /// ShouldEmitStaticLibrary - Should the linker emit a static library.
+ bool ShouldEmitStaticLibrary(const llvm::opt::ArgList &Args) const;
+
/// Returns true if we are performing any kind of LTO.
bool isUsingLTO() const { return LTOMode != LTOK_None; }
@@ -620,7 +621,8 @@ public:
static bool GetReleaseVersion(StringRef Str,
MutableArrayRef<unsigned> Digits);
/// Compute the default -fmodule-cache-path.
- static void getDefaultModuleCachePath(SmallVectorImpl<char> &Result);
+ /// \return True if the system provides a default cache directory.
+ static bool getDefaultModuleCachePath(SmallVectorImpl<char> &Result);
};
/// \return True if the last defined optimization level is -Ofast.
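
[Editor's note: a hedged sketch of how a caller might use the new bool result of getDefaultModuleCachePath; Args and CmdArgs are assumed driver-tool context that is not shown in this diff.]

    llvm::SmallString<128> Path;
    if (clang::driver::Driver::getDefaultModuleCachePath(Path))
      CmdArgs.push_back(Args.MakeArgString("-fmodules-cache-path=" + Path));
    // A false return now signals that the host has no suitable per-user cache
    // directory, so the implicit module cache path can simply be omitted.
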
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Job.h b/contrib/llvm-project/clang/include/clang/Driver/Job.h
index 9a3cad23363b..6173b9d314b4 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Job.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Job.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/Program.h"
#include <memory>
#include <string>
#include <utility>
@@ -36,6 +37,69 @@ struct CrashReportInfo {
: Filename(Filename), VFSPath(VFSPath) {}
};
+// Encodes the kind of response file supported for a command invocation.
+// Response files are necessary if the command line gets too large, requiring
+// the arguments to be transferred to a file.
+struct ResponseFileSupport {
+ enum ResponseFileKind {
+ // Provides full support for response files, which means we can transfer
+ // all tool input arguments to a file.
+ RF_Full,
+ // Input file names can live in a file, but flags can't. This is a special
+ // case for old versions of Apple's ld64.
+ RF_FileList,
+ // Does not support response files: all arguments must be passed via
+ // command line.
+ RF_None
+ };
+ /// The level of support for response files.
+ ResponseFileKind ResponseKind;
+
+ /// The encoding to use when writing response files on Windows. Ignored on
+ /// other host OSes.
+ ///
+ /// Windows use cases: - GCC and Binutils on mingw only accept ANSI response
+ /// files encoded with the system current code page.
+ /// - MSVC's CL.exe and LINK.exe accept UTF16 on Windows.
+ /// - Clang accepts both UTF8 and UTF16.
+ ///
+ /// FIXME: When GNU tools learn how to parse UTF16 on Windows, we should
+ /// always use UTF16 for Windows, which is the Windows official encoding for
+ /// international characters.
+ llvm::sys::WindowsEncodingMethod ResponseEncoding;
+
+ /// What prefix to use for the command-line argument when passing a response
+ /// file.
+ const char *ResponseFlag;
+
+ /// Returns a ResponseFileSupport indicating that response files are not
+ /// supported.
+ static constexpr ResponseFileSupport None() {
+ return {RF_None, llvm::sys::WEM_UTF8, nullptr};
+ }
+
+ /// Returns a ResponseFileSupport indicating that response files are
+ /// supported, using the @file syntax. On windows, the file is written in the
+ /// UTF8 encoding. On other OSes, no re-encoding occurs.
+ static constexpr ResponseFileSupport AtFileUTF8() {
+ return {RF_Full, llvm::sys::WEM_UTF8, "@"};
+ }
+
+ /// Returns a ResponseFileSupport indicating that response files are
+ /// supported, using the @file syntax. On windows, the file is written in the
+ /// current ANSI code-page encoding. On other OSes, no re-encoding occurs.
+ static constexpr ResponseFileSupport AtFileCurCP() {
+ return {RF_Full, llvm::sys::WEM_CurrentCodePage, "@"};
+ }
+
+ /// Returns a ResponseFileSupport indicating that response files are
+ /// supported, using the @file syntax. On windows, the file is written in the
+ /// UTF-16 encoding. On other OSes, no re-encoding occurs.
+ static constexpr ResponseFileSupport AtFileUTF16() {
+ return {RF_Full, llvm::sys::WEM_UTF16, "@"};
+ }
+};
+
/// Command - An executable path/name and argument vector to
/// execute.
class Command {
@@ -45,6 +109,9 @@ class Command {
/// Tool - The tool which caused the creation of this job.
const Tool &Creator;
+ /// Whether and how to generate response files if the arguments are too long.
+ ResponseFileSupport ResponseSupport;
+
/// The executable to run.
const char *Executable;
@@ -89,7 +156,8 @@ public:
/// Whether the command will be executed in this process or not.
bool InProcess = false;
- Command(const Action &Source, const Tool &Creator, const char *Executable,
+ Command(const Action &Source, const Tool &Creator,
+ ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs);
// FIXME: This really shouldn't be copyable, but is currently copied in some
@@ -109,11 +177,16 @@ public:
/// getCreator - Return the Tool which caused the creation of this job.
const Tool &getCreator() const { return Creator; }
+ /// Returns the kind of response file supported by the current invocation.
+ const ResponseFileSupport &getResponseFileSupport() {
+ return ResponseSupport;
+ }
+
/// Set to pass arguments via a response file when launching the command
void setResponseFile(const char *FileName);
- /// Set an input file list, necessary if we need to use a response file but
- /// the tool being called only supports input files lists.
+ /// Set an input file list, necessary if you specified an RF_FileList response
+ /// file support.
void setInputFileList(llvm::opt::ArgStringList List) {
InputFileList = std::move(List);
}
@@ -128,9 +201,6 @@ public:
const llvm::opt::ArgStringList &getArguments() const { return Arguments; }
- /// Print a command argument, and optionally quote it.
- static void printArg(llvm::raw_ostream &OS, StringRef Arg, bool Quote);
-
protected:
/// Optionally print the filenames to be compiled
void PrintFileNames() const;
@@ -139,7 +209,8 @@ protected:
/// Use the CC1 tool callback when available, to avoid creating a new process
class CC1Command : public Command {
public:
- CC1Command(const Action &Source, const Tool &Creator, const char *Executable,
+ CC1Command(const Action &Source, const Tool &Creator,
+ ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs);
@@ -157,7 +228,7 @@ public:
class FallbackCommand : public Command {
public:
FallbackCommand(const Action &Source_, const Tool &Creator_,
- const char *Executable_,
+ ResponseFileSupport ResponseSupport, const char *Executable_,
const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_);
@@ -176,6 +247,7 @@ private:
class ForceSuccessCommand : public Command {
public:
ForceSuccessCommand(const Action &Source_, const Tool &Creator_,
+ ResponseFileSupport ResponseSupport,
const char *Executable_,
const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs);
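
[Editor's note: a sketch of the call-site shape implied by the new Command constructor; JA, Exec, CmdArgs, Inputs and the Compilation C are assumed context from a Tool::ConstructJob-style method and are not part of this diff.]

    // A tool whose arguments can all be passed through an @file advertises
    // that capability when it creates the job:
    C.addCommand(std::make_unique<Command>(
        JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
    // Tools that cannot use response files pass ResponseFileSupport::None();
    // ld64-style tools would build an RF_FileList value by hand and then call
    // setInputFileList() as described above.
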
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
index abf0d5fa6ea2..cf2dbf6ff58a 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
@@ -91,7 +91,7 @@ public:
/// otherwise '-print-multi-lib' will not emit them correctly.
Multilib &flag(StringRef F) {
assert(F.front() == '+' || F.front() == '-');
- Flags.push_back(F);
+ Flags.push_back(std::string(F));
return *this;
}
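
[Editor's note: this change, like setInstalledDir() earlier in the diff, appears to track LLVM making StringRef's conversion to std::string explicit. A small self-contained illustration of the resulting call-site pattern (illustrative function, not part of the diff):]

    #include "llvm/ADT/StringRef.h"
    #include <string>
    #include <vector>

    // StringRef no longer converts implicitly to std::string, so call sites
    // construct the string explicitly (or use F.str()).
    void appendFlag(std::vector<std::string> &Flags, llvm::StringRef F) {
      Flags.push_back(std::string(F));
    }
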
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index a30caaa9c50b..f4556c15d744 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -124,6 +124,9 @@ def pedantic_Group : OptionGroup<"<pedantic group>">, Group<f_Group>,
def opencl_Group : OptionGroup<"<opencl group>">, Group<f_Group>,
DocName<"OpenCL flags">;
+def sycl_Group : OptionGroup<"<SYCL group>">, Group<f_Group>,
+ DocName<"SYCL flags">;
+
def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>,
DocName<"Target-dependent compilation options">;
@@ -148,6 +151,10 @@ def m_ppc_Features_Group : OptionGroup<"<ppc features group>">,
Group<m_Group>, DocName<"PowerPC">;
def m_wasm_Features_Group : OptionGroup<"<wasm features group>">,
Group<m_Group>, DocName<"WebAssembly">;
+// The features added by this group will not be added to target features.
+// These are explicitly handled.
+def m_wasm_Features_Driver_Group : OptionGroup<"<wasm driver features group>">,
+ Group<m_Group>, DocName<"WebAssembly Driver">;
def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
def m_riscv_Features_Group : OptionGroup<"<riscv features group>">,
@@ -219,6 +226,27 @@ def mno_mpx : Flag<["-"], "mno-mpx">, Group<clang_ignored_legacy_options_Group>;
def clang_ignored_gcc_optimization_f_Group : OptionGroup<
"<clang_ignored_gcc_optimization_f_Group>">, Group<f_Group>, Flags<[Ignored]>;
+// A boolean option which is opt-in in CC1. The positive option exists in CC1 and
+// Args.hasArg(OPT_ffoo) is used to check that the flag is enabled.
+// This is useful if the option is usually disabled.
+multiclass OptInFFlag<string name, string pos_prefix, string neg_prefix="",
+ string help="", list<OptionFlag> flags=[]> {
+ def f#NAME : Flag<["-"], "f"#name>, Flags<!listconcat([CC1Option], flags)>,
+ Group<f_Group>, HelpText<!strconcat(pos_prefix, help)>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<flags>,
+ Group<f_Group>, HelpText<!strconcat(neg_prefix, help)>;
+}
+
+// A boolean option which is opt-out in CC1. The negative option exists in CC1 and
+// Args.hasArg(OPT_fno_foo) is used to check that the flag is disabled.
+multiclass OptOutFFlag<string name, string pos_prefix, string neg_prefix,
+ string help="", list<OptionFlag> flags=[]> {
+ def f#NAME : Flag<["-"], "f"#name>, Flags<flags>,
+ Group<f_Group>, HelpText<!strconcat(pos_prefix, help)>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<!listconcat([CC1Option], flags)>,
+ Group<f_Group>, HelpText<!strconcat(neg_prefix, help)>;
+}
+
/////////
// Options
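
[Editor's note: a hedged sketch of the check described in the OptInFFlag comment above; Opts, Args and OPT_fgpu_rdc are assumed from CompilerInvocation-style code and from the TableGen-generated IDs for the defm uses later in this diff.]

    // Only the positive spelling of an opt-in flag is a CC1 option, so CC1
    // simply tests for its presence; absence means "disabled".
    Opts.GPURelocatableDeviceCode = Args.hasArg(OPT_fgpu_rdc);
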
@@ -397,7 +425,7 @@ def ObjCXX : Flag<["-"], "ObjC++">, Flags<[DriverOption]>,
def ObjC : Flag<["-"], "ObjC">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C inputs">;
def O : Joined<["-"], "O">, Group<O_Group>, Flags<[CC1Option]>;
-def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias<O>, AliasArgs<["2"]>;
+def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias<O>, AliasArgs<["1"]>;
def Ofast : Joined<["-"], "Ofast">, Group<O_Group>, Flags<[CC1Option]>;
def P : Flag<["-"], "P">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Disable linemarker output in -E mode">;
@@ -455,6 +483,9 @@ def Wnonportable_cfstrings : Joined<["-"], "Wnonportable-cfstrings">, Group<W_Gr
def Wp_COMMA : CommaJoined<["-"], "Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
MetaVarName<"<arg>">, Group<Preprocessor_Group>;
+def Wundef_prefix_EQ : CommaJoined<["-"], "Wundef-prefix=">, Group<W_value_Group>,
+ Flags<[CC1Option, CoreOption, HelpHidden]>, MetaVarName<"<arg>">,
+ HelpText<"Enable warnings for undefined macros with a prefix in the comma separated list <arg>">;
def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption]>,
@@ -463,6 +494,10 @@ def Xanalyzer : Separate<["-"], "Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">,
Group<StaticAnalyzer_Group>;
def Xarch__ : JoinedAndSeparate<["-"], "Xarch_">, Flags<[DriverOption]>;
+def Xarch_host : Separate<["-"], "Xarch_host">, Flags<[DriverOption]>,
+ HelpText<"Pass <arg> to the CUDA/HIP host compilation">, MetaVarName<"<arg>">;
+def Xarch_device : Separate<["-"], "Xarch_device">, Flags<[DriverOption]>,
+ HelpText<"Pass <arg> to the CUDA/HIP device compilation">, MetaVarName<"<arg>">;
def Xassembler : Separate<["-"], "Xassembler">,
HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">,
Group<CompileOnly_Group>;
@@ -523,7 +558,7 @@ def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>,
HelpText<"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary.">;
def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,clc++,CLC++">;
-def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>,
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Specify that single precision floating-point divide and sqrt used in the program source are correctly rounded.">;
@@ -549,6 +584,9 @@ def c : Flag<["-"], "c">, Flags<[DriverOption]>, Group<Action_Group>,
def fconvergent_functions : Flag<["-"], "fconvergent-functions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assume functions may be convergent">;
+def gpu_use_aux_triple_only : Flag<["--"], "gpu-use-aux-triple-only">,
+ InternalDriverOpt, HelpText<"Prepare '-aux-triple' only without populating "
+ "'-aux-target-cpu' and '-aux-target-feature'.">;
def cuda_device_only : Flag<["--"], "cuda-device-only">,
HelpText<"Compile CUDA code for device only">;
def cuda_host_only : Flag<["--"], "cuda-host-only">,
@@ -561,13 +599,19 @@ def cuda_include_ptx_EQ : Joined<["--"], "cuda-include-ptx=">, Flags<[DriverOpti
HelpText<"Include PTX for the following GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
def no_cuda_include_ptx_EQ : Joined<["--"], "no-cuda-include-ptx=">, Flags<[DriverOption]>,
HelpText<"Do not include PTX for the following GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
+def offload_arch_EQ : Joined<["--"], "offload-arch=">, Flags<[DriverOption]>,
+ HelpText<"CUDA/HIP offloading device architecture (e.g. sm_35, gfx906). May be specified more than once.">;
def cuda_gpu_arch_EQ : Joined<["--"], "cuda-gpu-arch=">, Flags<[DriverOption]>,
- HelpText<"CUDA GPU architecture (e.g. sm_35). May be specified more than once.">;
+ Alias<offload_arch_EQ>;
def hip_link : Flag<["--"], "hip-link">,
HelpText<"Link clang-offload-bundler bundles for HIP">;
-def no_cuda_gpu_arch_EQ : Joined<["--"], "no-cuda-gpu-arch=">, Flags<[DriverOption]>,
- HelpText<"Remove GPU architecture (e.g. sm_35) from the list of GPUs to compile for. "
+def no_offload_arch_EQ : Joined<["--"], "no-offload-arch=">, Flags<[DriverOption]>,
+ HelpText<"Remove CUDA/HIP offloading device architecture (e.g. sm_35, gfx906) from the list of devices to compile for. "
"'all' resets the list to its default value.">;
+def emit_static_lib : Flag<["--"], "emit-static-lib">,
+ HelpText<"Enable linker job to emit a static library.">;
+def no_cuda_gpu_arch_EQ : Joined<["--"], "no-cuda-gpu-arch=">, Flags<[DriverOption]>,
+ Alias<no_offload_arch_EQ>;
def cuda_noopt_device_debug : Flag<["--"], "cuda-noopt-device-debug">,
HelpText<"Enable device-side debug info generation. Disables ptxas optimizations.">;
def no_cuda_version_check : Flag<["--"], "no-cuda-version-check">,
@@ -581,31 +625,31 @@ def cuda_path_ignore_env : Flag<["--"], "cuda-path-ignore-env">, Group<i_Group>,
def ptxas_path_EQ : Joined<["--"], "ptxas-path=">, Group<i_Group>,
HelpText<"Path to ptxas (used for compiling CUDA code)">;
def fcuda_flush_denormals_to_zero : Flag<["-"], "fcuda-flush-denormals-to-zero">,
- Flags<[CC1Option]>, HelpText<"Flush denormal floating point values to zero in CUDA device mode.">;
+ HelpText<"Flush denormal floating point values to zero in CUDA device mode.">;
def fno_cuda_flush_denormals_to_zero : Flag<["-"], "fno-cuda-flush-denormals-to-zero">;
-def fcuda_approx_transcendentals : Flag<["-"], "fcuda-approx-transcendentals">,
- Flags<[CC1Option]>, HelpText<"Use approximate transcendental functions">;
-def fno_cuda_approx_transcendentals : Flag<["-"], "fno-cuda-approx-transcendentals">;
-def fgpu_rdc : Flag<["-"], "fgpu-rdc">, Flags<[CC1Option]>,
- HelpText<"Generate relocatable device code, also known as separate compilation mode.">;
-def fno_gpu_rdc : Flag<["-"], "fno-gpu-rdc">;
+defm cuda_approx_transcendentals : OptInFFlag<"cuda-approx-transcendentals", "Use", "Don't use",
+ " approximate transcendental functions">;
+defm gpu_rdc : OptInFFlag<"gpu-rdc",
+ "Generate relocatable device code, also known as separate compilation mode", "", "">;
def : Flag<["-"], "fcuda-rdc">, Alias<fgpu_rdc>;
def : Flag<["-"], "fno-cuda-rdc">, Alias<fno_gpu_rdc>;
-def fcuda_short_ptr : Flag<["-"], "fcuda-short-ptr">, Flags<[CC1Option]>,
- HelpText<"Use 32-bit pointers for accessing const/local/shared address spaces.">;
-def fno_cuda_short_ptr : Flag<["-"], "fno-cuda-short-ptr">;
-def hip_device_lib_path_EQ : Joined<["--"], "hip-device-lib-path=">, Group<Link_Group>,
- HelpText<"HIP device library path">;
+defm cuda_short_ptr : OptInFFlag<"cuda-short-ptr",
+ "Use 32-bit pointers for accessing const/local/shared address spaces">;
+def rocm_path_EQ : Joined<["--"], "rocm-path=">, Group<i_Group>,
+ HelpText<"ROCm installation path, used for finding and automatically linking required bitcode libraries.">;
+def rocm_device_lib_path_EQ : Joined<["--"], "rocm-device-lib-path=">, Group<Link_Group>,
+ HelpText<"ROCm device library path. Alternative to rocm-path.">;
+def : Joined<["--"], "hip-device-lib-path=">, Alias<rocm_device_lib_path_EQ>;
def hip_device_lib_EQ : Joined<["--"], "hip-device-lib=">, Group<Link_Group>,
HelpText<"HIP device library">;
+def hip_version_EQ : Joined<["--"], "hip-version=">,
+ HelpText<"HIP version in the format of major.minor.patch">;
def fhip_dump_offload_linker_script : Flag<["-"], "fhip-dump-offload-linker-script">,
Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>;
-def fhip_new_launch_api : Flag<["-"], "fhip-new-launch-api">,
- Flags<[CC1Option]>, HelpText<"Use new kernel launching API for HIP.">;
-def fno_hip_new_launch_api : Flag<["-"], "fno-hip-new-launch-api">;
-def fgpu_allow_device_init : Flag<["-"], "fgpu-allow-device-init">,
- Flags<[CC1Option]>, HelpText<"Allow device side init function in HIP">;
-def fno_gpu_allow_device_init : Flag<["-"], "fno-gpu-allow-device-init">;
+defm hip_new_launch_api : OptInFFlag<"hip-new-launch-api",
+ "Use", "Don't use", " new kernel launching API for HIP">;
+defm gpu_allow_device_init : OptInFFlag<"gpu-allow-device-init",
+ "Allow", "Don't allow", " device side init function in HIP">;
def gpu_max_threads_per_block_EQ : Joined<["--"], "gpu-max-threads-per-block=">,
Flags<[CC1Option]>,
HelpText<"Default max threads per block for kernel launch bounds for HIP">;
@@ -646,14 +690,17 @@ def emit_merged_ifs : Flag<["-"], "emit-merged-ifs">,
def interface_stub_version_EQ : JoinedOrSeparate<["-"], "interface-stub-version=">, Flags<[CC1Option]>;
def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
def e : JoinedOrSeparate<["-"], "e">, Group<Link_Group>;
+def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Max total number of preprocessed tokens for -Wmax-tokens.">;
def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
def fno_PIE : Flag<["-"], "fno-PIE">, Group<f_Group>;
-def faccess_control : Flag<["-"], "faccess-control">, Group<f_Group>;
+defm access_control : OptOutFFlag<"no-access-control", "", "Disable C++ access control">;
def falign_functions : Flag<["-"], "falign-functions">, Group<f_Group>;
def falign_functions_EQ : Joined<["-"], "falign-functions=">, Group<f_Group>;
def fno_align_functions: Flag<["-"], "fno-align-functions">, Group<f_Group>;
+defm allow_editor_placeholders : OptInFFlag<"allow-editor-placeholders", "Treat editor placeholders as valid source code">;
def fallow_unsupported : Flag<["-"], "fallow-unsupported">, Group<f_Group>;
def fapple_kext : Flag<["-"], "fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use Apple's kernel extensions ABI">;
@@ -666,8 +713,7 @@ def static_libsan : Flag<["-"], "static-libsan">,
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
-def fasm_blocks : Flag<["-"], "fasm-blocks">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_asm_blocks : Flag<["-"], "fno-asm-blocks">, Group<f_Group>;
+defm asm_blocks : OptInFFlag<"asm-blocks", "">;
def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
@@ -682,17 +728,10 @@ def fno_double_square_bracket_attributes : Flag<[ "-" ], "fno-double-square-brac
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Disable '[[]]' attributes in all C and C++ language modes">;
-def fautolink : Flag <["-"], "fautolink">, Group<f_Group>;
-def fno_autolink : Flag <["-"], "fno-autolink">, Group<f_Group>,
- Flags<[DriverOption, CC1Option]>,
- HelpText<"Disable generation of linker directives for automatic library linking">;
+defm autolink : OptOutFFlag<"autolink", "", "Disable generation of linker directives for automatic library linking">;
// C++ Coroutines TS
-def fcoroutines_ts : Flag <["-"], "fcoroutines-ts">, Group<f_Group>,
- Flags<[DriverOption, CC1Option]>,
- HelpText<"Enable support for the C++ Coroutines TS">;
-def fno_coroutines_ts : Flag <["-"], "fno-coroutines-ts">, Group<f_Group>,
- Flags<[DriverOption]>;
+defm coroutines_ts : OptInFFlag<"coroutines-ts", "Enable support for the C++ Coroutines TS">;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
Group<f_Group>, Flags<[DriverOption, CC1Option, CC1AsOption]>, MetaVarName<"<option>">,
@@ -703,10 +742,7 @@ def fembed_bitcode : Flag<["-"], "fembed-bitcode">, Group<f_Group>,
def fembed_bitcode_marker : Flag<["-"], "fembed-bitcode-marker">,
Alias<fembed_bitcode_EQ>, AliasArgs<["marker"]>,
HelpText<"Embed placeholder LLVM IR data as a marker">;
-def fgnu_inline_asm : Flag<["-"], "fgnu-inline-asm">, Group<f_Group>, Flags<[DriverOption]>;
-def fno_gnu_inline_asm : Flag<["-"], "fno-gnu-inline-asm">, Group<f_Group>,
- Flags<[DriverOption, CC1Option]>,
- HelpText<"Disable GNU style inline asm">;
+defm gnu_inline_asm : OptOutFFlag<"gnu-inline-asm", "", "Disable GNU style inline asm">;
def fprofile_sample_use : Flag<["-"], "fprofile-sample-use">, Group<f_Group>,
Flags<[CoreOption]>;
@@ -740,12 +776,8 @@ def fdebug_compilation_dir : Separate<["-"], "fdebug-compilation-dir">,
def fdebug_compilation_dir_EQ : Joined<["-"], "fdebug-compilation-dir=">,
Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
Alias<fdebug_compilation_dir>;
-def fdebug_info_for_profiling : Flag<["-"], "fdebug-info-for-profiling">,
- Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Emit extra debug info to make sample profile more accurate.">;
-def fno_debug_info_for_profiling : Flag<["-"], "fno-debug-info-for-profiling">,
- Group<f_Group>, Flags<[DriverOption]>,
- HelpText<"Do not emit extra debug info for sample profiler.">;
+defm debug_info_for_profiling : OptInFFlag<"debug-info-for-profiling",
+ "Emit extra debug info to make sample profile more accurate">;
def fprofile_instr_generate : Flag<["-"], "fprofile-instr-generate">,
Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
@@ -762,12 +794,9 @@ def fprofile_remapping_file_EQ : Joined<["-"], "fprofile-remapping-file=">,
HelpText<"Use the remappings described in <file> to match the profile data against names in the program">;
def fprofile_remapping_file : Separate<["-"], "fprofile-remapping-file">,
Group<f_Group>, Flags<[CoreOption]>, Alias<fprofile_remapping_file_EQ>;
-def fcoverage_mapping : Flag<["-"], "fcoverage-mapping">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>,
- HelpText<"Generate coverage mapping to enable code coverage analysis">;
-def fno_coverage_mapping : Flag<["-"], "fno-coverage-mapping">,
- Group<f_Group>, Flags<[DriverOption, CoreOption]>,
- HelpText<"Disable code coverage analysis">;
+defm coverage_mapping : OptInFFlag<"coverage-mapping",
+ "Generate coverage mapping to enable code coverage analysis", "Disable code coverage analysis", "",
+ [CoreOption]>;
def fprofile_generate : Flag<["-"], "fprofile-generate">,
Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
@@ -806,30 +835,26 @@ def forder_file_instrumentation : Flag<["-"], "forder-file-instrumentation">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Generate instrumented code to collect order file into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
-def faddrsig : Flag<["-"], "faddrsig">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
- HelpText<"Emit an address-significance table">;
-def fno_addrsig : Flag<["-"], "fno-addrsig">, Group<f_Group>, Flags<[CoreOption]>,
- HelpText<"Don't emit an address-significance table">;
-def fblocks : Flag<["-"], "fblocks">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
- HelpText<"Enable the 'blocks' language feature">;
+defm addrsig : OptInFFlag<"addrsig", "Emit", "Don't emit", " an address-significance table", [CoreOption]>;
+defm blocks : OptInFFlag<"blocks", "Enable the 'blocks' language feature", "", "", [CoreOption]>;
def fbootclasspath_EQ : Joined<["-"], "fbootclasspath=">, Group<f_Group>;
def fborland_extensions : Flag<["-"], "fborland-extensions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Accept non-standard constructs supported by the Borland compiler">;
def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>, Flags<[CoreOption]>;
def fbuiltin_module_map : Flag <["-"], "fbuiltin-module-map">, Group<f_Group>,
Flags<[DriverOption]>, HelpText<"Load the clang builtins module map file.">;
-def fcaret_diagnostics : Flag<["-"], "fcaret-diagnostics">, Group<f_Group>;
+defm caret_diagnostics : OptOutFFlag<"caret-diagnostics", "", "">;
def fclang_abi_compat_EQ : Joined<["-"], "fclang-abi-compat=">, Group<f_clang_Group>,
Flags<[CC1Option]>, MetaVarName<"<version>">, Values<"<major>.<minor>,latest">,
HelpText<"Attempt to match the ABI of Clang <version>">;
def fclasspath_EQ : Joined<["-"], "fclasspath=">, Group<f_Group>;
-def fcolor_diagnostics : Flag<["-"], "fcolor-diagnostics">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>, HelpText<"Use colors in diagnostics">;
+defm color_diagnostics : OptInFFlag<"color-diagnostics", "Enable", "Disable", " colors in diagnostics", [CoreOption]>;
def fdiagnostics_color : Flag<["-"], "fdiagnostics-color">, Group<f_Group>,
Flags<[CoreOption, DriverOption]>;
def fdiagnostics_color_EQ : Joined<["-"], "fdiagnostics-color=">, Group<f_Group>;
def fansi_escape_codes : Flag<["-"], "fansi-escape-codes">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>, HelpText<"Use ANSI escape codes for diagnostics">;
+ Flags<[CoreOption, CC1Option]>, HelpText<"Use ANSI escape codes for diagnostics">,
+ MarshallingInfoFlag<"DiagnosticOpts->UseANSIEscapeCodes", "false">;
def fcomment_block_commands : CommaJoined<["-"], "fcomment-block-commands=">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Treat each comma separated argument in <arg> as a documentation comment block command">,
MetaVarName<"<arg>">;
@@ -840,7 +865,8 @@ def fno_record_command_line : Flag<["-"], "fno-record-command-line">,
Group<f_clang_Group>;
def : Flag<["-"], "frecord-gcc-switches">, Alias<frecord_command_line>;
def : Flag<["-"], "fno-record-gcc-switches">, Alias<fno_record_command_line>;
-def fcommon : Flag<["-"], "fcommon">, Group<f_Group>;
+def fcommon : Flag<["-"], "fcommon">, Group<f_Group>,
+ Flags<[CoreOption, CC1Option]>, HelpText<"Place uninitialized global variables in a common block">;
def fcompile_resource_EQ : Joined<["-"], "fcompile-resource=">, Group<f_Group>;
def fcomplete_member_pointers : Flag<["-"], "fcomplete-member-pointers">, Group<f_clang_Group>,
Flags<[CoreOption, CC1Option]>,
@@ -862,8 +888,7 @@ def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_
HelpText<"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash">;
def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>;
def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
-def fcxx_exceptions: Flag<["-"], "fcxx-exceptions">, Group<f_Group>,
- HelpText<"Enable C++ exceptions">, Flags<[CC1Option]>;
+defm cxx_exceptions: OptInFFlag<"cxx-exceptions", "Enable C++ exceptions">;
def fcxx_modules : Flag <["-"], "fcxx-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fdebug_pass_arguments : Flag<["-"], "fdebug-pass-arguments">, Group<f_Group>;
@@ -882,9 +907,9 @@ def fdiagnostics_hotness_threshold_EQ : Joined<["-"], "fdiagnostics-hotness-thre
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<number>">,
HelpText<"Prevent optimization remarks from being output if they do not have at least this profile count">;
def fdiagnostics_show_option : Flag<["-"], "fdiagnostics-show-option">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Print option name with mappable diagnostics">;
+ HelpText<"Print option name with mappable diagnostics">;
def fdiagnostics_show_note_include_stack : Flag<["-"], "fdiagnostics-show-note-include-stack">,
- Group<f_Group>, Flags<[CC1Option]>, HelpText<"Display include stacks for diagnostic notes">;
+ Group<f_Group>, Flags<[CC1Option]>, HelpText<"Display include stacks for diagnostic notes">;
def fdiagnostics_format_EQ : Joined<["-"], "fdiagnostics-format=">, Group<f_clang_Group>;
def fdiagnostics_show_category_EQ : Joined<["-"], "fdiagnostics-show-category=">, Group<f_clang_Group>;
def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tree">,
@@ -900,8 +925,7 @@ def fdollars_in_identifiers : Flag<["-"], "fdollars-in-identifiers">, Group<f_Gr
HelpText<"Allow '$' in identifiers">, Flags<[CC1Option]>;
def fdwarf2_cfi_asm : Flag<["-"], "fdwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
def fno_dwarf2_cfi_asm : Flag<["-"], "fno-dwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
-def fdwarf_directory_asm : Flag<["-"], "fdwarf-directory-asm">, Group<f_Group>;
-def fno_dwarf_directory_asm : Flag<["-"], "fno-dwarf-directory-asm">, Group<f_Group>, Flags<[CC1Option]>;
+defm dwarf_directory_asm : OptOutFFlag<"dwarf-directory-asm", "", "">;
def felide_constructors : Flag<["-"], "felide-constructors">, Group<f_Group>;
def fno_elide_type : Flag<["-"], "fno-elide-type">, Group<f_Group>,
Flags<[CC1Option]>,
@@ -914,8 +938,7 @@ def femulated_tls : Flag<["-"], "femulated-tls">, Group<f_Group>, Flags<[CC1Opti
def fno_emulated_tls : Flag<["-"], "fno-emulated-tls">, Group<f_Group>, Flags<[CC1Option]>;
def fencoding_EQ : Joined<["-"], "fencoding=">, Group<f_Group>;
def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>, Flags<[CoreOption]>;
-def fexceptions : Flag<["-"], "fexceptions">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable support for exception handling">;
+defm exceptions : OptInFFlag<"exceptions", "Enable", "Disable", " support for exception handling">;
def fdwarf_exceptions : Flag<["-"], "fdwarf-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use DWARF style exceptions">;
def fsjlj_exceptions : Flag<["-"], "fsjlj-exceptions">, Group<f_Group>,
@@ -924,6 +947,8 @@ def fseh_exceptions : Flag<["-"], "fseh-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use SEH style exceptions">;
def fwasm_exceptions : Flag<["-"], "fwasm-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use WebAssembly style exceptions">;
+def fignore_exceptions : Flag<["-"], "fignore-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable support for ignoring exception handling constructs">;
def fexcess_precision_EQ : Joined<["-"], "fexcess-precision=">,
Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fexpensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
@@ -939,37 +964,17 @@ def ffp_model_EQ : Joined<["-"], "ffp-model=">, Group<f_Group>, Flags<[DriverOpt
HelpText<"Controls the semantics of floating-point calculations.">;
def ffp_exception_behavior_EQ : Joined<["-"], "ffp-exception-behavior=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specifies the exception behavior of floating-point operations.">;
-def ffast_math : Flag<["-"], "ffast-math">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Allow aggressive, lossy floating-point optimizations">;
-def fno_fast_math : Flag<["-"], "fno-fast-math">, Group<f_Group>;
-def fmath_errno : Flag<["-"], "fmath-errno">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Require math functions to indicate errors by setting errno">;
-def fno_math_errno : Flag<["-"], "fno-math-errno">, Group<f_Group>;
+defm fast_math : OptInFFlag<"fast-math", "Allow aggressive, lossy floating-point optimizations">;
+defm math_errno : OptInFFlag<"math-errno", "Require math functions to indicate errors by setting errno">;
def fbracket_depth_EQ : Joined<["-"], "fbracket-depth=">, Group<f_Group>, Flags<[CoreOption]>;
def fsignaling_math : Flag<["-"], "fsignaling-math">, Group<f_Group>;
def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
-def fjump_tables : Flag<["-"], "fjump-tables">, Group<f_Group>;
-def fno_jump_tables : Flag<["-"], "fno-jump-tables">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Do not use jump tables for lowering switches">;
-def fforce_enable_int128 : Flag<["-"], "fforce-enable-int128">,
- Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable support for int128_t type">;
-def fno_force_enable_int128 : Flag<["-"], "fno-force-enable-int128">,
- Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Disable support for int128_t type">;
-def fkeep_static_consts : Flag<["-"], "fkeep-static-consts">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Keep static const variables even if unused">;
-def ffixed_point : Flag<["-"], "ffixed-point">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Enable fixed point types">;
-def fno_fixed_point : Flag<["-"], "fno-fixed-point">, Group<f_Group>,
- HelpText<"Disable fixed point types">;
-def fcxx_static_destructors : Flag<["-"], "fc++-static-destructors">,
- Group<f_Group>,
- HelpText<"Enable C++ static destructor registration (the default)">;
-def fno_cxx_static_destructors : Flag<["-"], "fno-c++-static-destructors">,
- Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Disable C++ static destructor registration">;
+defm jump_tables : OptOutFFlag<"jump-tables", "Use", "Do not use", " jump tables for lowering switches">;
+defm force_enable_int128 : OptInFFlag<"force-enable-int128", "Enable", "Disable", " support for int128_t type">;
+defm keep_static_consts : OptInFFlag<"keep-static-consts", "Keep", "Don't keep", " static const variables if unused", [DriverOption]>;
+defm fixed_point : OptInFFlag<"fixed-point", "Enable", "Disable", " fixed point types">;
+defm cxx_static_destructors : OptOutFFlag<"c++-static-destructors", "",
+ "Disable C++ static destructor registration">;
def fsymbol_partition_EQ : Joined<["-"], "fsymbol-partition=">, Group<f_Group>,
Flags<[CC1Option]>;
@@ -1000,7 +1005,19 @@ def fno_sanitize_coverage
: CommaJoined<["-"], "fno-sanitize-coverage=">,
Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
HelpText<"Disable specified features of coverage instrumentation for "
- "Sanitizers">, Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters">;
+ "Sanitizers">, Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters,inline-bool-flag">;
+def fsanitize_coverage_allowlist : Joined<["-"], "fsanitize-coverage-allowlist=">,
+ Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
+ HelpText<"Restrict sanitizer coverage instrumentation exclusively to modules and functions that match the provided special case list, except the blocked ones">;
+def : Joined<["-"], "fsanitize-coverage-whitelist=">,
+ Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>, Alias<fsanitize_coverage_allowlist>,
+ HelpText<"Deprecated, use -fsanitize-coverage-allowlist= instead">;
+def fsanitize_coverage_blocklist : Joined<["-"], "fsanitize-coverage-blocklist=">,
+ Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
+ HelpText<"Disable sanitizer coverage instrumentation for modules and functions that match the provided special case list, even the allowed ones">;
+def : Joined<["-"], "fsanitize-coverage-blacklist=">,
+ Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>, Alias<fsanitize_coverage_blocklist>,
+ HelpText<"Deprecated, use -fsanitize-coverage-blocklist= instead">;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
@@ -1056,27 +1073,35 @@ def fsanitize_hwaddress_abi_EQ
: Joined<["-"], "fsanitize-hwaddress-abi=">,
Group<f_clang_Group>,
HelpText<"Select the HWAddressSanitizer ABI to target (interceptor or platform, default interceptor). This option is currently unused.">;
-def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>;
-def fno_sanitize_recover : Flag<["-"], "fno-sanitize-recover">,
- Flags<[CoreOption, DriverOption]>,
- Group<f_clang_Group>;
def fsanitize_recover_EQ : CommaJoined<["-"], "fsanitize-recover=">,
Group<f_clang_Group>,
HelpText<"Enable recovery for specified sanitizers">;
-def fno_sanitize_recover_EQ
- : CommaJoined<["-"], "fno-sanitize-recover=">,
- Group<f_clang_Group>,
- Flags<[CoreOption, DriverOption]>,
- HelpText<"Disable recovery for specified sanitizers">;
+def fno_sanitize_recover_EQ : CommaJoined<["-"], "fno-sanitize-recover=">,
+ Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
+ HelpText<"Disable recovery for specified sanitizers">;
+def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>,
+ Alias<fsanitize_recover_EQ>, AliasArgs<["all"]>;
+def fno_sanitize_recover : Flag<["-"], "fno-sanitize-recover">,
+ Flags<[CoreOption, DriverOption]>, Group<f_clang_Group>,
+ Alias<fno_sanitize_recover_EQ>, AliasArgs<["all"]>;
def fsanitize_trap_EQ : CommaJoined<["-"], "fsanitize-trap=">, Group<f_clang_Group>,
HelpText<"Enable trapping for specified sanitizers">;
def fno_sanitize_trap_EQ : CommaJoined<["-"], "fno-sanitize-trap=">, Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable trapping for specified sanitizers">;
-def fsanitize_undefined_trap_on_error : Flag<["-"], "fsanitize-undefined-trap-on-error">,
- Group<f_clang_Group>;
-def fno_sanitize_undefined_trap_on_error : Flag<["-"], "fno-sanitize-undefined-trap-on-error">,
- Group<f_clang_Group>;
+def fsanitize_trap : Flag<["-"], "fsanitize-trap">, Group<f_clang_Group>,
+ Alias<fsanitize_trap_EQ>, AliasArgs<["all"]>,
+ HelpText<"Enable trapping for all sanitizers">;
+def fno_sanitize_trap : Flag<["-"], "fno-sanitize-trap">, Group<f_clang_Group>,
+ Alias<fno_sanitize_trap_EQ>, AliasArgs<["all"]>,
+ Flags<[CoreOption, DriverOption]>,
+ HelpText<"Disable trapping for all sanitizers">;
+def fsanitize_undefined_trap_on_error
+ : Flag<["-"], "fsanitize-undefined-trap-on-error">, Group<f_clang_Group>,
+ Alias<fsanitize_trap_EQ>, AliasArgs<["undefined"]>;
+def fno_sanitize_undefined_trap_on_error
+ : Flag<["-"], "fno-sanitize-undefined-trap-on-error">, Group<f_clang_Group>,
+ Alias<fno_sanitize_trap_EQ>, AliasArgs<["undefined"]>;
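[Editor's note, not part of the diff] The rewritten records above turn the bare `-fsanitize-recover`, `-fsanitize-trap`, and `-fsanitize-undefined-trap-on-error` flags into aliases: `Alias<opt>` combined with `AliasArgs<[...]>` makes the driver treat the flag exactly as if the aliased option had been passed with those arguments (for example, `-fsanitize-trap` is parsed as `-fsanitize-trap=all`). A hypothetical sketch of the same idiom, with invented option names:

// Hypothetical records for illustration only.
def fexample_checks_EQ : CommaJoined<["-"], "fexample-checks=">, Group<f_clang_Group>,
  HelpText<"Enable the named example checks">;
def fexample_checks : Flag<["-"], "fexample-checks">, Group<f_clang_Group>,
  Alias<fexample_checks_EQ>, AliasArgs<["all"]>; // parsed as -fexample-checks=all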
def fsanitize_minimal_runtime : Flag<["-"], "fsanitize-minimal-runtime">,
Group<f_clang_Group>;
def fno_sanitize_minimal_runtime : Flag<["-"], "fno-sanitize-minimal-runtime">,
@@ -1173,30 +1198,19 @@ def ffp_contract : Joined<["-"], "ffp-contract=">, Group<f_Group>,
" | on (according to FP_CONTRACT pragma) | off (never fuse). Default"
" is 'fast' for CUDA/HIP and 'on' otherwise.">, Values<"fast,on,off">;
-def fstrict_float_cast_overflow : Flag<["-"],
- "fstrict-float-cast-overflow">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Assume that overflowing float-to-int casts are undefined (default)">;
-def fno_strict_float_cast_overflow : Flag<["-"],
- "fno-strict-float-cast-overflow">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Relax language rules and try to match the behavior of the target's native float-to-int conversion instructions">;
+defm strict_float_cast_overflow : OptOutFFlag<"strict-float-cast-overflow",
+ "Assume that overflowing float-to-int casts are undefined (default)",
+ "Relax language rules and try to match the behavior of the target's native float-to-int conversion instructions">;
def ffor_scope : Flag<["-"], "ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
-def frewrite_includes : Flag<["-"], "frewrite-includes">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fno_rewrite_includes : Flag<["-"], "fno-rewrite-includes">, Group<f_Group>;
-
-def frewrite_imports : Flag<["-"], "frewrite-imports">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fno_rewrite_imports : Flag<["-"], "fno-rewrite-imports">, Group<f_Group>;
+defm rewrite_imports : OptInFFlag<"rewrite-imports", "">;
+defm rewrite_includes : OptInFFlag<"rewrite-includes", "">;
-def fdelete_null_pointer_checks : Flag<["-"],
- "fdelete-null-pointer-checks">, Group<f_Group>,
- HelpText<"Treat usage of null pointers as undefined behavior.">;
-def fno_delete_null_pointer_checks : Flag<["-"],
- "fno-delete-null-pointer-checks">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Do not treat usage of null pointers as undefined behavior.">;
+defm delete_null_pointer_checks : OptOutFFlag<"delete-null-pointer-checks",
+ "Treat usage of null pointers as undefined behavior (default)",
+ "Do not treat usage of null pointers as undefined behavior">;
def frewrite_map_file : Separate<["-"], "frewrite-map-file">,
Group<f_Group>,
@@ -1205,9 +1219,7 @@ def frewrite_map_file_EQ : Joined<["-"], "frewrite-map-file=">,
Group<f_Group>,
Flags<[DriverOption]>;
-def fuse_line_directives : Flag<["-"], "fuse-line-directives">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fno_use_line_directives : Flag<["-"], "fno-use-line-directives">, Group<f_Group>;
+defm use_line_directives : OptInFFlag<"use-line-directives", "Use #line in preprocessed output">;
def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">;
@@ -1216,9 +1228,7 @@ def fgnuc_version_EQ : Joined<["-"], "fgnuc-version=">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>;
def fgnu_keywords : Flag<["-"], "fgnu-keywords">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow GNU-extension keywords regardless of language standard">;
-def fgnu89_inline : Flag<["-"], "fgnu89-inline">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Use the gnu89 inline semantics">;
-def fno_gnu89_inline : Flag<["-"], "fno-gnu89-inline">, Group<f_Group>;
+defm gnu89_inline : OptInFFlag<"gnu89-inline", "Use the gnu89 inline semantics">;
def fgnu_runtime : Flag<["-"], "fgnu-runtime">, Group<f_Group>,
HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">, Flags<[CC1Option]>;
@@ -1230,11 +1240,16 @@ def finline_functions : Flag<["-"], "finline-functions">, Group<f_clang_Group>,
def finline_hint_functions: Flag<["-"], "finline-hint-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Inline functions which are (explicitly or implicitly) marked inline">;
def finline : Flag<["-"], "finline">, Group<clang_ignored_f_Group>;
+def fglobal_isel : Flag<["-"], "fglobal-isel">, Group<f_clang_Group>,
+ HelpText<"Enables the global instruction selector">;
def fexperimental_isel : Flag<["-"], "fexperimental-isel">, Group<f_clang_Group>,
- HelpText<"Enables the experimental global instruction selector">;
+ Alias<fglobal_isel>;
def fexperimental_new_pass_manager : Flag<["-"], "fexperimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Enables an experimental new pass manager in LLVM.">;
+def fexperimental_strict_floating_point : Flag<["-"], "fexperimental-strict-floating-point">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+ HelpText<"Enables experimental strict floating point in LLVM.">;
def finput_charset_EQ : Joined<["-"], "finput-charset=">, Group<f_Group>;
def fexec_charset_EQ : Joined<["-"], "fexec-charset=">, Group<f_Group>;
def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
@@ -1249,11 +1264,7 @@ def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>, Flags<[CoreO
Alias<fcf_protection_EQ>, AliasArgs<["full"]>,
HelpText<"Enable cf-protection in 'full' mode">;
-def fxray_instrument : Flag<["-"], "fxray-instrument">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Generate XRay instrumentation sleds on function entry and exit">;
-def fnoxray_instrument : Flag<["-"], "fno-xray-instrument">, Group<f_Group>,
- Flags<[CC1Option]>;
+defm xray_instrument : OptInFFlag<"xray-instrument", "Generate XRay instrumentation sleds on function entry and exit">;
def fxray_instruction_threshold_EQ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold=">,
@@ -1280,17 +1291,16 @@ def fxray_modes :
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"List of modes to link in by default into XRay instrumented binaries.">;
-def fxray_always_emit_customevents : Flag<["-"], "fxray-always-emit-customevents">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Determine whether to always emit __xray_customevent(...) calls even if the function it appears in is not always instrumented.">;
-def fnoxray_always_emit_customevents : Flag<["-"], "fno-xray-always-emit-customevents">, Group<f_Group>,
- Flags<[CC1Option]>;
+defm xray_always_emit_customevents : OptInFFlag<"xray-always-emit-customevents",
+ "Always emit __xray_customevent(...) calls even if the containing function is not always instrumented">;
-def fxray_always_emit_typedevents : Flag<["-"], "fxray-always-emit-typedevents">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Determine whether to always emit __xray_typedevent(...) calls even if the function it appears in is not always instrumented.">;
-def fnoxray_always_emit_typedevents : Flag<["-"], "fno-xray-always-emit-typedevents">, Group<f_Group>,
- Flags<[CC1Option]>;
+defm xray_always_emit_typedevents : OptInFFlag<"xray-always-emit-typedevents",
+ "Always emit __xray_typedevent(...) calls even if the containing function is not always instrumented">;
+
+defm xray_ignore_loops : OptInFFlag<"xray-ignore-loops",
+ "Don't instrument functions with loops unless they also meet the minimum function size">;
+defm xray_function_index : OptOutFFlag<"xray-function-index", "",
+ "Omit function index section at the expense of single-function patching performance">;
def fxray_link_deps : Flag<["-"], "fxray-link-deps">, Group<f_Group>,
Flags<[CC1Option]>,
@@ -1301,7 +1311,7 @@ def fnoxray_link_deps : Flag<["-"], "fnoxray-link-deps">, Group<f_Group>,
def fxray_instrumentation_bundle :
JoinedOrSeparate<["-"], "fxray-instrumentation-bundle=">,
Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Select which XRay instrumentation points to emit. Options: all, none, function, custom. Default is 'all'.">;
+ HelpText<"Select which XRay instrumentation points to emit. Options: all, none, function-entry, function-exit, function, custom. Default is 'all'. 'function' includes both 'function-entry' and 'function-exit'.">;
def ffine_grained_bitfield_accesses : Flag<["-"],
"ffine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
@@ -1310,6 +1320,13 @@ def fno_fine_grained_bitfield_accesses : Flag<["-"],
"fno-fine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Use large-integer access for consecutive bitfield runs.">;
+def fexperimental_relative_cxx_abi_vtables : Flag<["-"], "fexperimental-relative-c++-abi-vtables">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use the experimental C++ class ABI for classes with virtual tables">;
+def fno_experimental_relative_cxx_abi_vtables : Flag<["-"], "fno-experimental-relative-c++-abi-vtables">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Do not use the experimental C++ class ABI for classes with virtual tables">;
+
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions_EQ : Joined<["-"], "flax-vector-conversions=">, Group<f_Group>,
HelpText<"Enable implicit vector bit-casts">, Values<"none,integer,all">, Flags<[CC1Option]>;
@@ -1339,7 +1356,8 @@ def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
Group<f_Group>, Flags<[DriverOption, CoreOption]>;
def fmerge_all_constants : Flag<["-"], "fmerge-all-constants">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Allow merging of constants">;
-def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>;
+def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Format message diagnostics so that they fit within N columns">;
def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
@@ -1415,6 +1433,13 @@ def fpch_validate_input_files_content:
def fno_pch_validate_input_files_content:
Flag <["-"], "fno_pch-validate-input-files-content">,
Group<f_Group>, Flags<[DriverOption]>;
+def fpch_instantiate_templates:
+ Flag <["-"], "fpch-instantiate-templates">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Instantiate templates already while building a PCH">;
+def fno_pch_instantiate_templates:
+ Flag <["-"], "fno-pch-instantiate-templates">,
+ Group<f_Group>, Flags<[CC1Option]>;
def fmodules : Flag <["-"], "fmodules">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
@@ -1431,6 +1456,8 @@ def fmodule_name_EQ : Joined<["-"], "fmodule-name=">, Group<f_Group>,
def fmodule_name : Separate<["-"], "fmodule-name">, Alias<fmodule_name_EQ>;
def fmodule_implementation_of : Separate<["-"], "fmodule-implementation-of">,
Flags<[CC1Option]>, Alias<fmodule_name_EQ>;
+def fsystem_module : Flag<["-"], "fsystem-module">, Flags<[CC1Option]>,
+ HelpText<"Build this module as a system module. Only used with -emit-module">;
def fmodule_map_file : Joined<["-"], "fmodule-map-file=">,
Group<f_Group>, Flags<[DriverOption,CC1Option]>, MetaVarName<"<file>">,
HelpText<"Load this module map file">;
@@ -1456,24 +1483,17 @@ def fmudflapth : Flag<["-"], "fmudflapth">, Group<f_Group>;
def fmudflap : Flag<["-"], "fmudflap">, Group<f_Group>;
def fnested_functions : Flag<["-"], "fnested-functions">, Group<f_Group>;
def fnext_runtime : Flag<["-"], "fnext-runtime">, Group<f_Group>;
-def fno_access_control : Flag<["-"], "fno-access-control">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Disable C++ access control">;
def fno_apple_pragma_pack : Flag<["-"], "fno-apple-pragma-pack">, Group<f_Group>;
def fno_asm : Flag<["-"], "fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<["-"], "fno-asynchronous-unwind-tables">, Group<f_Group>;
def fno_assume_sane_operator_new : Flag<["-"], "fno-assume-sane-operator-new">, Group<f_Group>,
HelpText<"Don't assume that C++'s global operator new can't alias any pointer">,
Flags<[CC1Option]>;
-def fno_blocks : Flag<["-"], "fno-blocks">, Group<f_Group>, Flags<[CoreOption]>;
def fno_borland_extensions : Flag<["-"], "fno-borland-extensions">, Group<f_Group>;
def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable implicit builtin knowledge of functions">;
def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable implicit builtin knowledge of a specific function">;
-def fno_caret_diagnostics : Flag<["-"], "fno-caret-diagnostics">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fno_color_diagnostics : Flag<["-"], "fno-color-diagnostics">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>;
def fno_diagnostics_color : Flag<["-"], "fno-diagnostics-color">, Group<f_Group>,
Flags<[CoreOption, DriverOption]>;
def fno_common : Flag<["-"], "fno-common">, Group<f_Group>, Flags<[CC1Option]>,
@@ -1481,13 +1501,12 @@ def fno_common : Flag<["-"], "fno-common">, Group<f_Group>, Flags<[CC1Option]>,
def fno_constant_cfstrings : Flag<["-"], "fno-constant-cfstrings">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Disable creation of CodeFoundation-type constant strings">;
-def fno_cxx_exceptions: Flag<["-"], "fno-cxx-exceptions">, Group<f_Group>;
def fno_cxx_modules : Flag <["-"], "fno-cxx-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_diagnostics_fixit_info : Flag<["-"], "fno-diagnostics-fixit-info">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include fixit information in diagnostics">;
def fno_diagnostics_show_hotness : Flag<["-"], "fno-diagnostics-show-hotness">, Group<f_Group>;
-def fno_diagnostics_show_option : Flag<["-"], "fno-diagnostics-show-option">, Group<f_Group>;
+def fno_diagnostics_show_option : Flag<["-"], "fno-diagnostics-show-option">, Group<f_Group>, Flags<[CC1Option]>;
def fno_diagnostics_show_note_include_stack : Flag<["-"], "fno-diagnostics-show-note-include-stack">,
Flags<[CC1Option]>, Group<f_Group>;
def fdigraphs : Flag<["-"], "fdigraphs">, Group<f_Group>, Flags<[CC1Option]>,
@@ -1501,12 +1520,13 @@ def fno_dollars_in_identifiers : Flag<["-"], "fno-dollars-in-identifiers">, Grou
def fno_elide_constructors : Flag<["-"], "fno-elide-constructors">, Group<f_Group>,
HelpText<"Disable C++ copy constructor elision">, Flags<[CC1Option]>;
def fno_eliminate_unused_debug_symbols : Flag<["-"], "fno-eliminate-unused-debug-symbols">, Group<f_Group>;
-def fno_exceptions : Flag<["-"], "fno-exceptions">, Group<f_Group>;
def fno_gnu_keywords : Flag<["-"], "fno-gnu-keywords">, Group<f_Group>, Flags<[CC1Option]>;
def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
+def fno_global_isel : Flag<["-"], "fno-global-isel">, Group<f_clang_Group>,
+ HelpText<"Disables the global instruction selector">;
def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_Group>,
- HelpText<"Disables the experimental global instruction selector">;
+ Alias<fno_global_isel>;
def fno_experimental_new_pass_manager : Flag<["-"], "fno-experimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Disables an experimental new pass manager in LLVM.">;
@@ -1546,13 +1566,7 @@ def fno_operator_names : Flag<["-"], "fno-operator-names">, Group<f_Group>,
HelpText<"Do not treat C++ operator name keywords as synonyms for operators">,
Flags<[CC1Option]>;
def fno_pascal_strings : Flag<["-"], "fno-pascal-strings">, Group<f_Group>;
-def fno_rtti : Flag<["-"], "fno-rtti">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Disable generation of rtti information">;
-def fno_rtti_data : Flag<["-"], "fno-rtti-data">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Control emission of RTTI data">;
def fno_short_enums : Flag<["-"], "fno-short-enums">, Group<f_Group>;
-def fno_show_column : Flag<["-"], "fno-show-column">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Do not include column number on diagnostics">;
def fno_show_source_location : Flag<["-"], "fno-show-source-location">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include source location information with diagnostics">;
def fdiagnostics_absolute_paths : Flag<["-"], "fdiagnostics-absolute-paths">, Group<f_Group>,
@@ -1578,14 +1592,11 @@ def fno_use_cxa_atexit : Flag<["-"], "fno-use-cxa-atexit">, Group<f_Group>, Flag
HelpText<"Don't use __cxa_atexit for calling destructors">;
def fno_register_global_dtors_with_atexit : Flag<["-"], "fno-register-global-dtors-with-atexit">, Group<f_Group>,
HelpText<"Don't use atexit or __cxa_atexit to register global destructors">;
-def fno_use_init_array : Flag<["-"], "fno-use-init-array">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Don't use .init_array instead of .ctors">;
def fno_unit_at_a_time : Flag<["-"], "fno-unit-at-a-time">, Group<f_Group>;
def fno_unwind_tables : Flag<["-"], "fno-unwind-tables">, Group<f_Group>;
-def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>;
+def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>, Flags<[CC1Option]>;
def fno_working_directory : Flag<["-"], "fno-working-directory">, Group<f_Group>;
def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
-def fno_zero_initialized_in_bss : Flag<["-"], "fno-zero-initialized-in-bss">, Group<f_Group>;
def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fno_objc_arc : Flag<["-"], "fno-objc-arc">, Group<f_Group>;
@@ -1686,6 +1697,12 @@ def fopenmp_optimistic_collapse : Flag<["-"], "fopenmp-optimistic-collapse">, Gr
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fno_openmp_optimistic_collapse : Flag<["-"], "fno-openmp-optimistic-collapse">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>;
+def fopenmp_cuda_parallel_target_regions : Flag<["-"], "fopenmp-cuda-parallel-target-regions">, Group<f_Group>,
+ Flags<[CC1Option, NoArgumentUnused, HelpHidden]>,
+ HelpText<"Support parallel execution of target regions on Cuda-based devices.">;
+def fno_openmp_cuda_parallel_target_regions : Flag<["-"], "fno-openmp-cuda-parallel-target-regions">, Group<f_Group>,
+ Flags<[NoArgumentUnused, HelpHidden]>,
+ HelpText<"Support only serial execution of target regions on Cuda-based devices.">;
def static_openmp: Flag<["-"], "static-openmp">,
HelpText<"Use the static host OpenMP runtime while linking.">;
def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
@@ -1715,29 +1732,25 @@ def fpic : Flag<["-"], "fpic">, Group<f_Group>;
def fno_pic : Flag<["-"], "fno-pic">, Group<f_Group>;
def fpie : Flag<["-"], "fpie">, Group<f_Group>;
def fno_pie : Flag<["-"], "fno-pie">, Group<f_Group>;
-def fplt : Flag<["-"], "fplt">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Use the PLT to make function calls">;
-def fno_plt : Flag<["-"], "fno-plt">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Do not use the PLT to make function calls">;
-def fropi : Flag<["-"], "fropi">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_ropi : Flag<["-"], "fno-ropi">, Group<f_Group>;
-def frwpi : Flag<["-"], "frwpi">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_rwpi : Flag<["-"], "fno-rwpi">, Group<f_Group>;
+defm plt : OptOutFFlag<"plt", "",
+ "Use GOT indirection instead of PLT to make external function calls (x86 only)">;
+defm ropi : OptInFFlag<"ropi", "Generate read-only position independent code (ARM only)">;
+defm rwpi : OptInFFlag<"rwpi", "Generate read-write position independent code (ARM only)">;
def fplugin_EQ : Joined<["-"], "fplugin=">, Group<f_Group>, Flags<[DriverOption]>, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
def fpass_plugin_EQ : Joined<["-"], "fpass-plugin=">,
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<dsopath>">,
HelpText<"Load pass plugin from a dynamic shared object file (only with new pass manager).">;
-def fpreserve_as_comments : Flag<["-"], "fpreserve-as-comments">, Group<f_Group>;
-def fno_preserve_as_comments : Flag<["-"], "fno-preserve-as-comments">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Do not preserve comments in inline assembly">;
+defm preserve_as_comments : OptOutFFlag<"preserve-as-comments", "",
+ "Do not preserve comments in inline assembly">;
def fprofile_arcs : Flag<["-"], "fprofile-arcs">, Group<f_Group>;
def fno_profile_arcs : Flag<["-"], "fno-profile-arcs">, Group<f_Group>;
def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return small structs in registers">;
-def frtti : Flag<["-"], "frtti">, Group<f_Group>;
+defm rtti : OptOutFFlag<"rtti", "", "Disable generation of rtti information">;
+defm rtti_data : OptOutFFlag<"rtti-data", "", "Disable generation of RTTI data">;
def : Flag<["-"], "fsched-interblock">, Group<clang_ignored_f_Group>;
def fshort_enums : Flag<["-"], "fshort-enums">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
@@ -1752,17 +1765,19 @@ def fno_short_wchar : Flag<["-"], "fno-short-wchar">, Group<f_Group>,
def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Which overload candidates to show when overload resolution fails: "
"best|all; defaults to all">, Values<"best,all">;
-def fshow_column : Flag<["-"], "fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
+defm show_column : OptOutFFlag<"show-column", "", "Do not include column number on diagnostics">;
def fshow_source_location : Flag<["-"], "fshow-source-location">, Group<f_Group>;
def fspell_checking : Flag<["-"], "fspell-checking">, Group<f_Group>;
def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>;
def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
-def fsigned_char : Flag<["-"], "fsigned-char">, Group<f_Group>;
-def fno_signed_char : Flag<["-"], "fno-signed-char">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Char is unsigned">;
+defm signed_char : OptOutFFlag<"signed-char", "char is signed", "char is unsigned">;
def fsplit_stack : Flag<["-"], "fsplit-stack">, Group<f_Group>;
def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>,
HelpText<"Enable stack protectors for all functions">;
+def fstack_clash_protection : Flag<["-"], "fstack-clash-protection">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable stack clash protection">;
+def fno_stack_clash_protection : Flag<["-"], "fno-stack-clash-protection">, Group<f_Group>,
+ HelpText<"Disable stack clash protection">;
def fstack_protector_strong : Flag<["-"], "fstack-protector-strong">, Group<f_Group>,
HelpText<"Enable stack protectors for some functions vulnerable to stack smashing. "
"Compared to -fstack-protector, this uses a stronger heuristic "
@@ -1780,6 +1795,8 @@ def ftrivial_auto_var_init : Joined<["-"], "ftrivial-auto-var-init=">, Group<f_G
def enable_trivial_var_init_zero : Flag<["-"], "enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">,
Flags<[CC1Option, CoreOption]>,
HelpText<"Trivial automatic variable initialization to zero is only here for benchmarks, it'll eventually be removed, and I'm OK with that because I'm only using it to benchmark">;
+def ftrivial_auto_var_init_stop_after : Joined<["-"], "ftrivial-auto-var-init-stop-after=">, Group<f_Group>,
+ Flags<[CC1Option, CoreOption]>, HelpText<"Stop initializing trivial automatic stack variables after the specified number of instances">;
def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Emit full debug info for all types used by the program">;
def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
@@ -1827,6 +1844,7 @@ def foptimization_record_passes_EQ : Joined<["-"], "foptimization-record-passes=
MetaVarName<"<regex>">;
def ftest_coverage : Flag<["-"], "ftest-coverage">, Group<f_Group>;
+def fno_test_coverage : Flag<["-"], "fno-test-coverage">, Group<f_Group>;
def fvectorize : Flag<["-"], "fvectorize">, Group<f_Group>,
HelpText<"Enable the loop vectorization passes">;
def fno_vectorize : Flag<["-"], "fno-vectorize">, Group<f_Group>;
@@ -1875,10 +1893,7 @@ def funroll_loops : Flag<["-"], "funroll-loops">, Group<f_Group>,
HelpText<"Turn on loop unroller">, Flags<[CC1Option]>;
def fno_unroll_loops : Flag<["-"], "fno-unroll-loops">, Group<f_Group>,
HelpText<"Turn off loop unroller">, Flags<[CC1Option]>;
-def freroll_loops : Flag<["-"], "freroll-loops">, Group<f_Group>,
- HelpText<"Turn on loop reroller">, Flags<[CC1Option]>;
-def fno_reroll_loops : Flag<["-"], "fno-reroll-loops">, Group<f_Group>,
- HelpText<"Turn off loop reroller">;
+defm reroll_loops : OptInFFlag<"reroll-loops", "Turn on loop reroller">;
def ftrigraphs : Flag<["-"], "ftrigraphs">, Group<f_Group>,
HelpText<"Process trigraph sequences">, Flags<[CC1Option]>;
def fno_trigraphs : Flag<["-"], "fno-trigraphs">, Group<f_Group>,
@@ -1890,10 +1905,10 @@ def funwind_tables : Flag<["-"], "funwind-tables">, Group<f_Group>;
def fuse_cxa_atexit : Flag<["-"], "fuse-cxa-atexit">, Group<f_Group>;
def fregister_global_dtors_with_atexit : Flag<["-"], "fregister-global-dtors-with-atexit">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use atexit or __cxa_atexit to register global destructors">;
-def fuse_init_array : Flag<["-"], "fuse-init-array">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Use .init_array instead of .ctors">;
+defm use_init_array : OptOutFFlag<"use-init-array", "", "Use .ctors/.dtors instead of .init_array/.fini_array">;
def fno_var_tracking : Flag<["-"], "fno-var-tracking">, Group<clang_ignored_f_Group>;
-def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>;
+def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>,
+ HelpText<"Generate verbose assembly output">;
def dA : Flag<["-"], "dA">, Alias<fverbose_asm>;
def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>,
HelpText<"Set the default symbol visibility for all global declarations">, Values<"hidden,default">;
@@ -1905,76 +1920,51 @@ def fvisibility_ms_compat : Flag<["-"], "fvisibility-ms-compat">, Group<f_Group>
"variables 'hidden' visibility by default">;
def fvisibility_global_new_delete_hidden : Flag<["-"], "fvisibility-global-new-delete-hidden">, Group<f_Group>,
HelpText<"Give global C++ operator new and delete declarations hidden visibility">, Flags<[CC1Option]>;
-def fwhole_program_vtables : Flag<["-"], "fwhole-program-vtables">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>,
- HelpText<"Enables whole-program vtable optimization. Requires -flto">;
-def fno_whole_program_vtables : Flag<["-"], "fno-whole-program-vtables">, Group<f_Group>,
- Flags<[CoreOption]>;
-def fsplit_lto_unit : Flag<["-"], "fsplit-lto-unit">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>,
- HelpText<"Enables splitting of the LTO unit.">;
-def fno_split_lto_unit : Flag<["-"], "fno-split-lto-unit">, Group<f_Group>,
- Flags<[CoreOption]>;
-def fforce_emit_vtables : Flag<["-"], "fforce-emit-vtables">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Emits more virtual tables to improve devirtualization">;
-def fno_force_emit_vtables : Flag<["-"], "fno-force-emit-vtables">, Group<f_Group>,
- Flags<[CoreOption]>;
-
-def fvirtual_function_elimination : Flag<["-"], "fvirtual-function-elimination">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>,
- HelpText<"Enables dead virtual function elimination optimization. Requires -flto=full">;
-def fno_virtual_function_elimination : Flag<["-"], "fno-virtual-function_elimination">, Group<f_Group>,
- Flags<[CoreOption]>;
+defm whole_program_vtables : OptInFFlag<"whole-program-vtables",
+ "Enables whole-program vtable optimization. Requires -flto", "", "", [CoreOption]>;
+defm split_lto_unit : OptInFFlag<"split-lto-unit",
+ "Enables splitting of the LTO unit", "", "", [CoreOption]>;
+defm force_emit_vtables : OptInFFlag<"force-emit-vtables",
+ "Emits more virtual tables to improve devirtualization", "", "", [CoreOption]>;
+defm virtual_function_elimination : OptInFFlag<"virtual-function-elimination",
+ "Enables dead virtual function elimination optimization. Requires -flto=full", "", "", [CoreOption]>;
def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Treat signed integer overflow as two's complement">;
def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Store string literals as writable data">;
-def fzero_initialized_in_bss : Flag<["-"], "fzero-initialized-in-bss">, Group<f_Group>;
-def ffunction_sections : Flag<["-"], "ffunction-sections">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Place each function in its own section (ELF Only)">;
-def fno_function_sections : Flag<["-"], "fno-function-sections">,
- Group<f_Group>, Flags<[CC1Option]>;
-def fdata_sections : Flag <["-"], "fdata-sections">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Place each data in its own section (ELF Only)">;
-def fno_data_sections : Flag <["-"], "fno-data-sections">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fstack_size_section : Flag<["-"], "fstack-size-section">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Emit section containing metadata on function stack sizes">;
-def fno_stack_size_section : Flag<["-"], "fno-stack-size-section">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Don't emit section containing metadata on function stack sizes">;
-
-def funique_section_names : Flag <["-"], "funique-section-names">,
- Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Use unique names for text and data sections (ELF Only)">;
-def fno_unique_section_names : Flag <["-"], "fno-unique-section-names">,
- Group<f_Group>, Flags<[CC1Option]>;
-
-def fstrict_return : Flag<["-"], "fstrict-return">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Always treat control flow paths that fall off the end of a "
- "non-void function as unreachable">;
-def fno_strict_return : Flag<["-"], "fno-strict-return">, Group<f_Group>,
- Flags<[CC1Option]>;
+defm zero_initialized_in_bss : OptOutFFlag<"zero-initialized-in-bss", "", "Don't place zero initialized data in BSS">;
+defm function_sections : OptInFFlag<"function-sections", "Place each function in its own section">;
+def fbasic_block_sections_EQ : Joined<["-"], "fbasic-block-sections=">, Group<f_Group>,
+ Flags<[CC1Option, CC1AsOption]>,
+ HelpText<"Place each function's basic blocks in unique sections (ELF Only) : all | labels | none | list=<file>">,
+ DocBrief<[{Generate labels for each basic block or place each basic block or a subset of basic blocks in its own section.}]>,
+ Values<"all,labels,none,list=">;
+defm data_sections : OptInFFlag<"data-sections", "Place each data in its own section">;
+defm stack_size_section : OptInFFlag<"stack-size-section", "Emit section containing metadata on function stack sizes">;
+
+defm unique_basic_block_section_names : OptInFFlag<"unique-basic-block-section-names",
+ "Use unique names for basic block sections (ELF Only)">;
+defm unique_internal_linkage_names : OptInFFlag<"unique-internal-linkage-names",
+ "Uniqueify Internal Linkage Symbol Names by appending the MD5 hash of the module path">;
+defm unique_section_names : OptOutFFlag<"unique-section-names",
+ "", "Don't use unique names for text and data sections">;
+
+defm strict_return : OptOutFFlag<"strict-return", "",
+ "Don't treat control flow paths that fall off the end of a non-void function as unreachable">;
+
+def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
+ Flags<[CC1Option]>,
+ HelpText<"Enable matrix data type and related builtin functions">;
-def fallow_editor_placeholders : Flag<["-"], "fallow-editor-placeholders">,
- Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Treat editor placeholders as valid source code">;
-def fno_allow_editor_placeholders : Flag<["-"],
- "fno-allow-editor-placeholders">, Group<f_Group>;
def fdebug_types_section: Flag <["-"], "fdebug-types-section">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Place debug types in their own section (ELF Only)">;
-def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>,
- Flags<[CC1Option]>;
-def fdebug_ranges_base_address: Flag <["-"], "fdebug-ranges-base-address">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Use DWARF base address selection entries in debug_ranges">;
-def fno_debug_ranges_base_address: Flag <["-"], "fno-debug-ranges-base-address">, Group<f_Group>,
- Flags<[CC1Option]>;
+ HelpText<"Place debug types in their own section (ELF Only)">;
+def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>;
+defm debug_ranges_base_address : OptInFFlag<"debug-ranges-base-address",
+ "Use DWARF base address selection entries in .debug_ranges">;
def fsplit_dwarf_inlining: Flag <["-"], "fsplit-dwarf-inlining">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Provide minimal debug info in the object/executable to facilitate online symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF">;
+ HelpText<"Provide minimal debug info in the object/executable to facilitate online symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF">;
def fno_split_dwarf_inlining: Flag<["-"], "fno-split-dwarf-inlining">, Group<f_Group>,
Flags<[CC1Option]>;
def fdebug_default_version: Joined<["-"], "fdebug-default-version=">, Group<f_Group>,
@@ -1984,15 +1974,12 @@ def fdebug_prefix_map_EQ
Flags<[CC1Option,CC1AsOption]>,
HelpText<"remap file source paths in debug info">;
def ffile_prefix_map_EQ
- : Joined<["-"], "ffile-prefix-map=">, Group<f_Group>, Flags<[CC1Option]>,
+ : Joined<["-"], "ffile-prefix-map=">, Group<f_Group>,
HelpText<"remap file source paths in debug info and predefined preprocessor macros">;
def fmacro_prefix_map_EQ
: Joined<["-"], "fmacro-prefix-map=">, Group<Preprocessor_Group>, Flags<[CC1Option]>,
HelpText<"remap file source paths in predefined preprocessor macros">;
-def fforce_dwarf_frame : Flag<["-"], "fforce-dwarf-frame">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Always emit a debug frame section">;
-def fno_force_dwarf_frame : Flag<["-"], "fno-force-dwarf-frame">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Don't always emit a debug frame section">;
+defm force_dwarf_frame : OptInFFlag<"force-dwarf-frame", "Always emit a debug frame section">;
def g_Flag : Flag<["-"], "g">, Group<g_Group>,
HelpText<"Generate source-level debug information">;
def gline_tables_only : Flag<["-"], "gline-tables-only">, Group<gN_Group>,
@@ -2051,15 +2038,15 @@ def : Flag<["-"], "gno-record-gcc-switches">, Alias<gno_record_command_line>;
def gstrict_dwarf : Flag<["-"], "gstrict-dwarf">, Group<g_flags_Group>;
def gno_strict_dwarf : Flag<["-"], "gno-strict-dwarf">, Group<g_flags_Group>;
def gcolumn_info : Flag<["-"], "gcolumn-info">, Group<g_flags_Group>, Flags<[CoreOption]>;
-def gno_column_info : Flag<["-"], "gno-column-info">, Group<g_flags_Group>, Flags<[CoreOption]>;
+def gno_column_info : Flag<["-"], "gno-column-info">, Group<g_flags_Group>, Flags<[CoreOption, CC1Option]>;
def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>;
def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group<g_flags_Group>,
HelpText<"Set DWARF fission mode to either 'split' or 'single'">,
Values<"split,single">;
def ggnu_pubnames : Flag<["-"], "ggnu-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
-def gno_gnu_pubnames : Flag<["-"], "gno-gnu-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
+def gno_gnu_pubnames : Flag<["-"], "gno-gnu-pubnames">, Group<g_flags_Group>;
def gpubnames : Flag<["-"], "gpubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
-def gno_pubnames : Flag<["-"], "gno-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
+def gno_pubnames : Flag<["-"], "gno-pubnames">, Group<g_flags_Group>;
def gdwarf_aranges : Flag<["-"], "gdwarf-aranges">, Group<g_flags_Group>;
def gmodules : Flag <["-"], "gmodules">, Group<gN_Group>,
HelpText<"Generate debug info with external references to clang modules"
@@ -2076,6 +2063,10 @@ def gno_embed_source : Flag<["-"], "gno-embed-source">, Group<g_flags_Group>,
def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names">;
def help : Flag<["-", "--"], "help">, Flags<[CC1Option,CC1AsOption]>,
HelpText<"Display available options">;
+def ibuiltininc : Flag<["-"], "ibuiltininc">,
+ HelpText<"Enable builtin #include directories even when -nostdinc is used "
+ "before or after -ibuiltininc. "
+ "Using -nobuiltininc after the option disables it">;
def index_header_map : Flag<["-"], "index-header-map">, Flags<[CC1Option]>,
HelpText<"Make the next included directory (-I or -F) an indexer header map">;
def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
@@ -2146,13 +2137,19 @@ def mno_iamcu : Flag<["-"], "mno-iamcu">, Group<m_Group>, Flags<[DriverOption, C
def malign_functions_EQ : Joined<["-"], "malign-functions=">, Group<clang_ignored_m_Group>;
def malign_loops_EQ : Joined<["-"], "malign-loops=">, Group<clang_ignored_m_Group>;
def malign_jumps_EQ : Joined<["-"], "malign-jumps=">, Group<clang_ignored_m_Group>;
-def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>;
-def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>;
-def malign_branch_prefix_size_EQ : Joined<["-"], "malign-branch-prefix-size=">, Group<m_Group>;
-def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Flags<[DriverOption]>, Group<m_Group>;
+def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>, Flags<[DriverOption]>,
+ HelpText<"Specify types of branches to align">;
+def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>, Flags<[DriverOption]>,
+ HelpText<"Specify the boundary's size to align branches">;
+def mpad_max_prefix_size_EQ : Joined<["-"], "mpad-max-prefix-size=">, Group<m_Group>, Flags<[DriverOption]>,
+ HelpText<"Specify maximum number of prefixes to use for padding">;
+def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Flags<[DriverOption]>, Group<m_Group>,
+ HelpText<"Align selected branches (fused, jcc, jmp) within 32-byte boundary">;
def mfancy_math_387 : Flag<["-"], "mfancy-math-387">, Group<clang_ignored_m_Group>;
def mlong_calls : Flag<["-"], "mlong-calls">, Group<m_Group>,
HelpText<"Generate branches with extended addressability, usually via indirect jumps.">;
+def mdouble_EQ : Joined<["-"], "mdouble=">, Group<m_Group>, Values<"32,64">, Flags<[CC1Option]>,
+ HelpText<"Force double to be 32 bits or 64 bits">;
def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
DocName<"Long double flags">,
DocBrief<[{Selects the long double implementation}]>;
@@ -2181,7 +2178,7 @@ def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-versio
def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_simulator_version_min_EQ>;
def march_EQ : Joined<["-"], "march=">, Group<m_Group>, Flags<[CoreOption]>;
def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[DriverOption]>;
-def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>;
+def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>, Flags<[CC1Option]>;
def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Specify bit size of immediate TLS offsets (AArch64 ELF only): "
"12 (for 4KB) | 24 (for 16MB, default) | 32 (for 4GB) | 48 (for 256TB, needs -mcmodel=large)">;
@@ -2275,11 +2272,19 @@ def mlvi_cfi : Flag<["-"], "mlvi-cfi">, Group<m_Group>, Flags<[CoreOption,Driver
HelpText<"Enable only control-flow mitigations for Load Value Injection (LVI)">;
def mno_lvi_cfi : Flag<["-"], "mno-lvi-cfi">, Group<m_Group>, Flags<[CoreOption,DriverOption]>,
HelpText<"Disable control-flow mitigations for Load Value Injection (LVI)">;
+def m_seses : Flag<["-"], "mseses">, Group<m_Group>, Flags<[CoreOption, DriverOption]>,
+ HelpText<"Enable speculative execution side effect suppression (SESES). "
+ "Includes LVI control flow integrity mitigations">;
+def mno_seses : Flag<["-"], "mno-seses">, Group<m_Group>, Flags<[CoreOption, DriverOption]>,
+ HelpText<"Disable speculative execution side effect suppression (SESES)">;
def mrelax : Flag<["-"], "mrelax">, Group<m_riscv_Features_Group>,
HelpText<"Enable linker relaxation">;
def mno_relax : Flag<["-"], "mno-relax">, Group<m_riscv_Features_Group>,
HelpText<"Disable linker relaxation">;
+def msmall_data_limit_EQ : Joined<["-"], "msmall-data-limit=">, Group<m_Group>,
+ Alias<G>,
+ HelpText<"Put global and static data smaller than the limit into a special section">;
def msave_restore : Flag<["-"], "msave-restore">, Group<m_riscv_Features_Group>,
HelpText<"Enable using library calls for save and restore">;
def mno_save_restore : Flag<["-"], "mno-save-restore">, Group<m_riscv_Features_Group>,
@@ -2290,6 +2295,8 @@ def mcmodel_EQ_medlow : Flag<["-"], "mcmodel=medlow">, Group<m_riscv_Features_Gr
def mcmodel_EQ_medany : Flag<["-"], "mcmodel=medany">, Group<m_riscv_Features_Group>,
Flags<[CC1Option]>, Alias<mcmodel_EQ>, AliasArgs<["medium"]>,
HelpText<"Equivalent to -mcmodel=medium, compatible with RISC-V gcc.">;
+def menable_experimental_extensions : Flag<["-"], "menable-experimental-extensions">, Group<m_Group>,
+ HelpText<"Enable use of experimental RISC-V extensions.">;
def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
@@ -2316,6 +2323,9 @@ def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_
def mcmse : Flag<["-"], "mcmse">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">;
+def ForceAAPCSBitfieldLoad : Flag<["-"], "fAAPCSBitfieldLoad">, Group<m_arm_Features_Group>,
+ Flags<[DriverOption,CC1Option]>,
+ HelpText<"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only).">;
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;
@@ -2327,7 +2337,7 @@ def mno_fix_cortex_a53_835769 : Flag<["-"], "mno-fix-cortex-a53-835769">,
HelpText<"Don't workaround Cortex-A53 erratum 835769 (AArch64 only)">;
foreach i = {1-31} in
def ffixed_x#i : Flag<["-"], "ffixed-x"#i>, Group<m_Group>,
- HelpText<"Reserve the "#i#" register (AArch64/RISC-V only)">;
+ HelpText<"Reserve the x"#i#" register (AArch64/RISC-V only)">;
foreach i = {8-15,18} in
def fcall_saved_x#i : Flag<["-"], "fcall-saved-x"#i>, Group<m_aarch64_Features_Group>,
@@ -2339,6 +2349,9 @@ def msign_return_address_EQ : Joined<["-"], "msign-return-address=">,
def mbranch_protection_EQ : Joined<["-"], "mbranch-protection=">,
HelpText<"Enforce targets of indirect branches and function returns">;
+def mharden_sls_EQ : Joined<["-"], "mharden-sls=">,
+ HelpText<"Select straight-line speculation hardening scope">;
+
def msimd128 : Flag<["-"], "msimd128">, Group<m_wasm_Features_Group>;
def munimplemented_simd128 : Flag<["-"], "munimplemented-simd128">, Group<m_wasm_Features_Group>;
def mno_unimplemented_simd128 : Flag<["-"], "mno-unimplemented-simd128">, Group<m_wasm_Features_Group>;
@@ -2361,6 +2374,9 @@ def mtail_call : Flag<["-"], "mtail-call">, Group<m_wasm_Features_Group>;
def mno_tail_call : Flag<["-"], "mno-tail-call">, Group<m_wasm_Features_Group>;
def mreference_types : Flag<["-"], "mreference-types">, Group<m_wasm_Features_Group>;
def mno_reference_types : Flag<["-"], "mno-reference-types">, Group<m_wasm_Features_Group>;
+def mexec_model_EQ : Joined<["-"], "mexec-model=">, Group<m_wasm_Features_Driver_Group>,
+ Values<"command,reactor">,
+ HelpText<"Execution model (WebAssembly only)">;
def mamdgpu_debugger_abi : Joined<["-"], "mamdgpu-debugger-abi=">,
Flags<[HelpHidden]>,
@@ -2395,6 +2411,8 @@ def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[DriverOption]>;
def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[DriverOption]>;
def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
+def mpcrel: Flag<["-"], "mpcrel">, Group<m_ppc_Features_Group>;
+def mno_pcrel: Flag<["-"], "mno-pcrel">, Group<m_ppc_Features_Group>;
def mspe : Flag<["-"], "mspe">, Group<m_ppc_Features_Group>;
def mno_spe : Flag<["-"], "mno-spe">, Group<m_ppc_Features_Group>;
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
@@ -2408,6 +2426,10 @@ def mpower9_vector : Flag<["-"], "mpower9-vector">,
Group<m_ppc_Features_Group>;
def mno_power9_vector : Flag<["-"], "mno-power9-vector">,
Group<m_ppc_Features_Group>;
+def mpower10_vector : Flag<["-"], "mpower10-vector">,
+ Group<m_ppc_Features_Group>;
+def mno_power10_vector : Flag<["-"], "mno-power10-vector">,
+ Group<m_ppc_Features_Group>;
def mpower8_crypto : Flag<["-"], "mcrypto">,
Group<m_ppc_Features_Group>;
def mnopower8_crypto : Flag<["-"], "mno-crypto">,
@@ -2457,10 +2479,7 @@ def msvr4_struct_return : Flag<["-"], "msvr4-struct-return">,
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
-def fzvector : Flag<["-"], "fzvector">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable System z vector language extension">;
-def fno_zvector : Flag<["-"], "fno-zvector">, Group<f_Group>,
- Flags<[CC1Option]>;
+defm zvector : OptInFFlag<"zvector", "Enable System z vector language extension">;
def mzvector : Flag<["-"], "mzvector">, Alias<fzvector>;
def mno_zvector : Flag<["-"], "mno-zvector">, Alias<fno_zvector>;
@@ -2661,7 +2680,8 @@ def muclibc : Flag<["-"], "muclibc">, Group<m_libc_Group>, Flags<[HelpHidden]>;
def module_file_info : Flag<["-"], "module-file-info">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Provide information about a particular module file">;
def mthumb : Flag<["-"], "mthumb">, Group<m_Group>;
-def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>;
+def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>,
+ HelpText<"Accepted for compatibility with GCC. Currently has no effect.">;
def multi__module : Flag<["-"], "multi_module">;
def multiply__defined__unused : Separate<["-"], "multiply_defined_unused">;
def multiply__defined : Separate<["-"], "multiply_defined">;
@@ -2674,7 +2694,8 @@ def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable builtin #include directories">;
-def nocudainc : Flag<["-"], "nocudainc">;
+def nogpuinc : Flag<["-"], "nogpuinc">;
+def : Flag<["-"], "nocudainc">, Alias<nogpuinc>;
def nogpulib : Flag<["-"], "nogpulib">,
HelpText<"Do not link device library for CUDA/HIP device compilation">;
def : Flag<["-"], "nocudalib">, Alias<nogpulib>;
@@ -2728,6 +2749,8 @@ def print_resource_dir : Flag<["-", "--"], "print-resource-dir">,
HelpText<"Print the resource directory pathname">;
def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
+def print_targets : Flag<["-", "--"], "print-targets">,
+ HelpText<"Print the registered targets">;
def private__bundle : Flag<["-"], "private_bundle">;
def pthreads : Flag<["-"], "pthreads">;
def pthread : Flag<["-"], "pthread">, Flags<[CC1Option]>,
@@ -2864,11 +2887,7 @@ def x : JoinedOrSeparate<["-"], "x">, Flags<[DriverOption,CC1Option]>,
MetaVarName<"<language>">;
def y : Joined<["-"], "y">;
-def fintegrated_as : Flag<["-"], "fintegrated-as">, Flags<[DriverOption]>,
- Group<f_Group>, HelpText<"Enable the integrated assembler">;
-def fno_integrated_as : Flag<["-"], "fno-integrated-as">,
- Flags<[CC1Option, DriverOption]>, Group<f_Group>,
- HelpText<"Disable the integrated assembler">;
+defm integrated_as : OptOutFFlag<"integrated-as", "Enable the integrated assembler", "Disable the integrated assembler">;
def fintegrated_cc1 : Flag<["-"], "fintegrated-cc1">,
Flags<[CoreOption, DriverOption]>, Group<f_Group>,
@@ -3013,6 +3032,10 @@ def mv65 : Flag<["-"], "mv65">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv65"]>;
def mv66 : Flag<["-"], "mv66">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv66"]>;
+def mv67 : Flag<["-"], "mv67">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv67"]>;
+def mv67t : Flag<["-"], "mv67t">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv67t"]>;
def mhexagon_hvx : Flag<["-"], "mhvx">, Group<m_hexagon_Features_HVX_Group>,
HelpText<"Enable Hexagon Vector eXtensions">;
def mhexagon_hvx_EQ : Joined<["-"], "mhvx=">,
@@ -3055,6 +3078,12 @@ def m3dnow : Flag<["-"], "m3dnow">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<["-"], "mno-3dnow">, Group<m_x86_Features_Group>;
def m3dnowa : Flag<["-"], "m3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
+def mamx_bf16 : Flag<["-"], "mamx-bf16">, Group<m_x86_Features_Group>;
+def mno_amx_bf16 : Flag<["-"], "mno-amx-bf16">, Group<m_x86_Features_Group>;
+def mamx_int8 : Flag<["-"], "mamx-int8">, Group<m_x86_Features_Group>;
+def mno_amx_int8 : Flag<["-"], "mno-amx-int8">, Group<m_x86_Features_Group>;
+def mamx_tile : Flag<["-"], "mamx-tile">, Group<m_x86_Features_Group>;
+def mno_amx_tile : Flag<["-"], "mno-amx-tile">, Group<m_x86_Features_Group>;
def msse : Flag<["-"], "msse">, Group<m_x86_Features_Group>;
def mno_sse : Flag<["-"], "mno-sse">, Group<m_x86_Features_Group>;
def msse2 : Flag<["-"], "msse2">, Group<m_x86_Features_Group>;
@@ -3180,12 +3209,16 @@ def mrdseed : Flag<["-"], "mrdseed">, Group<m_x86_Features_Group>;
def mno_rdseed : Flag<["-"], "mno-rdseed">, Group<m_x86_Features_Group>;
def msahf : Flag<["-"], "msahf">, Group<m_x86_Features_Group>;
def mno_sahf : Flag<["-"], "mno-sahf">, Group<m_x86_Features_Group>;
+def mserialize : Flag<["-"], "mserialize">, Group<m_x86_Features_Group>;
+def mno_serialize : Flag<["-"], "mno-serialize">, Group<m_x86_Features_Group>;
def msgx : Flag<["-"], "msgx">, Group<m_x86_Features_Group>;
def mno_sgx : Flag<["-"], "mno-sgx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mno_sha : Flag<["-"], "mno-sha">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
+def mtsxldtrk : Flag<["-"], "mtsxldtrk">, Group<m_x86_Features_Group>;
+def mno_tsxldtrk : Flag<["-"], "mno-tsxldtrk">, Group<m_x86_Features_Group>;
def mvaes : Flag<["-"], "mvaes">, Group<m_x86_Features_Group>;
def mno_vaes : Flag<["-"], "mno-vaes">, Group<m_x86_Features_Group>;
def mvpclmulqdq : Flag<["-"], "mvpclmulqdq">, Group<m_x86_Features_Group>;
@@ -3230,11 +3263,9 @@ def Z_reserved_lib_cckext : Flag<["-"], "Z-reserved-lib-cckext">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
// Ignored options
-// FIXME: multiclasess produce suffixes, not prefixes. This is fine for now
-// since it is only used in ignored options.
multiclass BooleanFFlag<string name> {
- def _f : Flag<["-"], "f"#name>;
- def _fno : Flag<["-"], "fno-"#name>;
+ def f#NAME : Flag<["-"], "f"#name>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>;
}
defm : BooleanFFlag<"keep-inline-functions">, Group<clang_ignored_gcc_optimization_f_Group>;
@@ -3284,7 +3315,8 @@ defm inline_small_functions : BooleanFFlag<"inline-small-functions">,
defm ipa_cp : BooleanFFlag<"ipa-cp">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm ivopts : BooleanFFlag<"ivopts">, Group<clang_ignored_gcc_optimization_f_Group>;
-def : Flag<["-"], "fno-semantic-interposition">, Group<clang_ignored_f_Group>;
+def fsemantic_interposition : Flag<["-"], "fsemantic-interposition">, Group<f_Group>, Flags<[CC1Option]>;
+def fno_semantic_interposition: Flag<["-"], "fno-semantic-interposition">, Group<f_Group>, Flags<[CC1Option]>;
defm non_call_exceptions : BooleanFFlag<"non-call-exceptions">, Group<clang_ignored_f_Group>;
defm peel_loops : BooleanFFlag<"peel-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm permissive : BooleanFFlag<"permissive">, Group<clang_ignored_f_Group>;
@@ -3311,9 +3343,6 @@ defm strength_reduce :
defm tls_model : BooleanFFlag<"tls-model">, Group<clang_ignored_f_Group>;
defm tracer : BooleanFFlag<"tracer">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_dce : BooleanFFlag<"tree-dce">, Group<clang_ignored_gcc_optimization_f_Group>;
-defm tree_loop_im : BooleanFFlag<"tree_loop_im">, Group<clang_ignored_gcc_optimization_f_Group>;
-defm tree_loop_ivcanon : BooleanFFlag<"tree_loop_ivcanon">, Group<clang_ignored_gcc_optimization_f_Group>;
-defm tree_loop_linear : BooleanFFlag<"tree_loop_linear">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_salias : BooleanFFlag<"tree-salias">, Group<clang_ignored_f_Group>;
defm tree_ter : BooleanFFlag<"tree-ter">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_vectorizer_verbose : BooleanFFlag<"tree-vectorizer-verbose">, Group<clang_ignored_f_Group>;
@@ -3405,7 +3434,1413 @@ defm stack_arrays : BooleanFFlag<"stack-arrays">, Group<gfortran_Group>;
defm underscoring : BooleanFFlag<"underscoring">, Group<gfortran_Group>;
defm whole_file : BooleanFFlag<"whole-file">, Group<gfortran_Group>;
+// C++ SYCL options
+def fsycl : Flag<["-"], "fsycl">, Group<sycl_Group>, Flags<[CC1Option, CoreOption]>,
+ HelpText<"Enable SYCL kernels compilation for device">;
+def fno_sycl : Flag<["-"], "fno-sycl">, Group<sycl_Group>, Flags<[CoreOption]>,
+ HelpText<"Disable SYCL kernels compilation for device">;
+def sycl_std_EQ : Joined<["-"], "sycl-std=">, Group<sycl_Group>, Flags<[CC1Option, NoArgumentUnused, CoreOption]>,
+ HelpText<"SYCL language standard to compile for.">, Values<"2017, 121, 1.2.1, sycl-1.2.1">;
+
+//===----------------------------------------------------------------------===//
+// CC1 Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1Option, NoDriverOption] in {
+
+//===----------------------------------------------------------------------===//
+// Target Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
+
+def target_cpu : Separate<["-"], "target-cpu">,
+ HelpText<"Target a specific cpu type">;
+def target_feature : Separate<["-"], "target-feature">,
+ HelpText<"Target specific attributes">;
+def triple : Separate<["-"], "triple">,
+ HelpText<"Specify target triple (e.g. i686-apple-darwin9)">,
+ MarshallingInfoString<"TargetOpts->Triple", "llvm::Triple::normalize(llvm::sys::getDefaultTargetTriple())", "std::string">,
+ AlwaysEmit, Normalizer<"normalizeTriple">, DenormalizeString;
+def target_abi : Separate<["-"], "target-abi">,
+ HelpText<"Target a particular ABI type">;
+def target_sdk_version_EQ : Joined<["-"], "target-sdk-version=">,
+ HelpText<"The version of target SDK used for compilation">;
+
+}
+
+def target_linker_version : Separate<["-"], "target-linker-version">,
+ HelpText<"Target linker version">;
+def triple_EQ : Joined<["-"], "triple=">, Alias<triple>;
+def mfpmath : Separate<["-"], "mfpmath">,
+ HelpText<"Which unit to use for fp math">;
+
+def fpadding_on_unsigned_fixed_point : Flag<["-"], "fpadding-on-unsigned-fixed-point">,
+ HelpText<"Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">;
+def fno_padding_on_unsigned_fixed_point : Flag<["-"], "fno-padding-on-unsigned-fixed-point">;
+
+//===----------------------------------------------------------------------===//
+// Analyzer Options
+//===----------------------------------------------------------------------===//
+
+def analysis_UnoptimizedCFG : Flag<["-"], "unoptimized-cfg">,
+ HelpText<"Generate unoptimized CFGs for all analyses">;
+def analysis_CFGAddImplicitDtors : Flag<["-"], "cfg-add-implicit-dtors">,
+ HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
+
+def analyzer_store : Separate<["-"], "analyzer-store">,
+ HelpText<"Source Code Analysis - Abstract Memory Store Models">;
+def analyzer_store_EQ : Joined<["-"], "analyzer-store=">, Alias<analyzer_store>;
+
+def analyzer_constraints : Separate<["-"], "analyzer-constraints">,
+ HelpText<"Source Code Analysis - Symbolic Constraint Engines">;
+def analyzer_constraints_EQ : Joined<["-"], "analyzer-constraints=">,
+ Alias<analyzer_constraints>;
+
+def analyzer_output : Separate<["-"], "analyzer-output">,
+ HelpText<"Source Code Analysis - Output Options">;
+def analyzer_output_EQ : Joined<["-"], "analyzer-output=">,
+ Alias<analyzer_output>;
+
+def analyzer_purge : Separate<["-"], "analyzer-purge">,
+ HelpText<"Source Code Analysis - Dead Symbol Removal Frequency">;
+def analyzer_purge_EQ : Joined<["-"], "analyzer-purge=">, Alias<analyzer_purge>;
+
+def analyzer_opt_analyze_headers : Flag<["-"], "analyzer-opt-analyze-headers">,
+ HelpText<"Force the static analyzer to analyze functions defined in header files">;
+def analyzer_opt_analyze_nested_blocks : Flag<["-"], "analyzer-opt-analyze-nested-blocks">,
+ HelpText<"Analyze the definitions of blocks in addition to functions">;
+def analyzer_display_progress : Flag<["-"], "analyzer-display-progress">,
+ HelpText<"Emit verbose output about the analyzer's progress">;
+def analyze_function : Separate<["-"], "analyze-function">,
+ HelpText<"Run analysis on specific function (for C++ include parameters in name)">;
+def analyze_function_EQ : Joined<["-"], "analyze-function=">, Alias<analyze_function>;
+def trim_egraph : Flag<["-"], "trim-egraph">,
+ HelpText<"Only show error-related paths in the analysis graph">;
+def analyzer_viz_egraph_graphviz : Flag<["-"], "analyzer-viz-egraph-graphviz">,
+ HelpText<"Display exploded graph using GraphViz">;
+def analyzer_dump_egraph : Separate<["-"], "analyzer-dump-egraph">,
+ HelpText<"Dump exploded graph to the specified file">;
+def analyzer_dump_egraph_EQ : Joined<["-"], "analyzer-dump-egraph=">, Alias<analyzer_dump_egraph>;
+
+def analyzer_inline_max_stack_depth : Separate<["-"], "analyzer-inline-max-stack-depth">,
+ HelpText<"Bound on stack depth while inlining (4 by default)">;
+def analyzer_inline_max_stack_depth_EQ : Joined<["-"], "analyzer-inline-max-stack-depth=">,
+ Alias<analyzer_inline_max_stack_depth>;
+
+def analyzer_inlining_mode : Separate<["-"], "analyzer-inlining-mode">,
+ HelpText<"Specify the function selection heuristic used during inlining">;
+def analyzer_inlining_mode_EQ : Joined<["-"], "analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
+
+def analyzer_disable_retry_exhausted : Flag<["-"], "analyzer-disable-retry-exhausted">,
+ HelpText<"Do not re-analyze paths leading to exhausted nodes with a different strategy (may decrease code coverage)">;
+
+def analyzer_max_loop : Separate<["-"], "analyzer-max-loop">,
+ HelpText<"The maximum number of times the analyzer will go through a loop">;
+def analyzer_stats : Flag<["-"], "analyzer-stats">,
+ HelpText<"Print internal analyzer statistics.">;
+
+def analyzer_checker : Separate<["-"], "analyzer-checker">,
+ HelpText<"Choose analyzer checkers to enable">,
+ ValuesCode<[{
+ const char *Values =
+ #define GET_CHECKERS
+ #define CHECKER(FULLNAME, CLASS, HT, DOC_URI, IS_HIDDEN) FULLNAME ","
+ #include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+ #undef GET_CHECKERS
+ #define GET_PACKAGES
+ #define PACKAGE(FULLNAME) FULLNAME ","
+ #include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+ #undef GET_PACKAGES
+ ;
+ }]>;
+def analyzer_checker_EQ : Joined<["-"], "analyzer-checker=">,
+ Alias<analyzer_checker>;
+
+def analyzer_disable_checker : Separate<["-"], "analyzer-disable-checker">,
+ HelpText<"Choose analyzer checkers to disable">;
+def analyzer_disable_checker_EQ : Joined<["-"], "analyzer-disable-checker=">,
+ Alias<analyzer_disable_checker>;
+
+def analyzer_disable_all_checks : Flag<["-"], "analyzer-disable-all-checks">,
+ HelpText<"Disable all static analyzer checks">;
+
+def analyzer_checker_help : Flag<["-"], "analyzer-checker-help">,
+ HelpText<"Display the list of analyzer checkers that are available">;
+
+def analyzer_checker_help_alpha : Flag<["-"], "analyzer-checker-help-alpha">,
+ HelpText<"Display the list of in development analyzer checkers. These "
+ "are NOT considered safe, they are unstable and will emit incorrect "
+ "reports. Enable ONLY FOR DEVELOPMENT purposes">;
+
+def analyzer_checker_help_developer : Flag<["-"], "analyzer-checker-help-developer">,
+ HelpText<"Display the list of developer-only checkers such as modeling "
+ "and debug checkers">;
-include "CC1Options.td"
+def analyzer_config_help : Flag<["-"], "analyzer-config-help">,
+ HelpText<"Display the list of -analyzer-config options. These are meant for "
+ "development purposes only!">;
-include "CLCompatOptions.td"
+def analyzer_list_enabled_checkers : Flag<["-"], "analyzer-list-enabled-checkers">,
+ HelpText<"Display the list of enabled analyzer checkers">;
+
+def analyzer_config : Separate<["-"], "analyzer-config">,
+ HelpText<"Choose analyzer options to enable">;
+
+def analyzer_checker_option_help : Flag<["-"], "analyzer-checker-option-help">,
+ HelpText<"Display the list of checker and package options">;
+
+def analyzer_checker_option_help_alpha : Flag<["-"], "analyzer-checker-option-help-alpha">,
+ HelpText<"Display the list of in development checker and package options. "
+ "These are NOT considered safe, they are unstable and will emit "
+ "incorrect reports. Enable ONLY FOR DEVELOPMENT purposes">;
+
+def analyzer_checker_option_help_developer : Flag<["-"], "analyzer-checker-option-help-developer">,
+ HelpText<"Display the list of checker and package options meant for "
+ "development purposes only">;
+
+def analyzer_config_compatibility_mode : Separate<["-"], "analyzer-config-compatibility-mode">,
+ HelpText<"Don't emit errors on invalid analyzer-config inputs">;
+
+def analyzer_config_compatibility_mode_EQ : Joined<["-"], "analyzer-config-compatibility-mode=">,
+ Alias<analyzer_config_compatibility_mode>;
+
+def analyzer_werror : Flag<["-"], "analyzer-werror">,
+ HelpText<"Emit analyzer results as errors rather than warnings">;
+
+//===----------------------------------------------------------------------===//
+// Migrator Options
+//===----------------------------------------------------------------------===//
+def migrator_no_nsalloc_error : Flag<["-"], "no-ns-alloc-error">,
+ HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">;
+
+def migrator_no_finalize_removal : Flag<["-"], "no-finalize-removal">,
+ HelpText<"Do not remove finalize method in gc mode">;
+
+//===----------------------------------------------------------------------===//
+// CodeGen Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
+def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
+def debug_info_macro : Flag<["-"], "debug-info-macro">,
+ HelpText<"Emit macro debug information">;
+def default_function_attr : Separate<["-"], "default-function-attr">,
+ HelpText<"Apply given attribute to all functions">;
+def dwarf_version_EQ : Joined<["-"], "dwarf-version=">;
+def debugger_tuning_EQ : Joined<["-"], "debugger-tuning=">;
+def dwarf_debug_flags : Separate<["-"], "dwarf-debug-flags">,
+ HelpText<"The string to embed in the Dwarf debug flags record.">;
+def record_command_line : Separate<["-"], "record-command-line">,
+ HelpText<"The string to embed in the .LLVM.command.line section.">;
+def compress_debug_sections : Flag<["-", "--"], "compress-debug-sections">,
+ HelpText<"DWARF debug sections compression">;
+def compress_debug_sections_EQ : Joined<["-", "--"], "compress-debug-sections=">,
+ HelpText<"DWARF debug sections compression type">;
+def mno_exec_stack : Flag<["-"], "mnoexecstack">,
+ HelpText<"Mark the file as not needing an executable stack">;
+def massembler_no_warn : Flag<["-"], "massembler-no-warn">,
+ HelpText<"Make assembler not emit warnings">;
+def massembler_fatal_warnings : Flag<["-"], "massembler-fatal-warnings">,
+ HelpText<"Make assembler warnings fatal">;
+def mrelax_relocations : Flag<["--"], "mrelax-relocations">,
+ HelpText<"Use relaxable elf relocations">;
+def msave_temp_labels : Flag<["-"], "msave-temp-labels">,
+ HelpText<"Save temporary labels in the symbol table. "
+ "Note this may change .s semantics and shouldn't generally be used "
+ "on compiler-generated code.">;
+def mrelocation_model : Separate<["-"], "mrelocation-model">,
+ HelpText<"The relocation model to use">, Values<"static,pic,ropi,rwpi,ropi-rwpi,dynamic-no-pic">,
+ NormalizedValuesScope<"llvm::Reloc">,
+ NormalizedValues<["Static", "PIC_", "ROPI", "RWPI", "ROPI_RWPI", "DynamicNoPIC"]>,
+ MarshallingInfoString<"CodeGenOpts.RelocationModel", "PIC_", "Model">,
+ AutoNormalizeEnum;
+def fno_math_builtin : Flag<["-"], "fno-math-builtin">,
+ HelpText<"Disable implicit builtin knowledge of math functions">;
+}
+
+def disable_llvm_verifier : Flag<["-"], "disable-llvm-verifier">,
+ HelpText<"Don't run the LLVM IR verifier pass">;
+def disable_llvm_passes : Flag<["-"], "disable-llvm-passes">,
+ HelpText<"Use together with -emit-llvm to get pristine LLVM IR from the "
+ "frontend by not running any LLVM passes at all">;
+def disable_llvm_optzns : Flag<["-"], "disable-llvm-optzns">,
+ Alias<disable_llvm_passes>;
+def disable_lifetimemarkers : Flag<["-"], "disable-lifetime-markers">,
+ HelpText<"Disable lifetime-markers emission even when optimizations are "
+ "enabled">;
+def disable_O0_optnone : Flag<["-"], "disable-O0-optnone">,
+ HelpText<"Disable adding the optnone attribute to functions at O0">;
+def disable_red_zone : Flag<["-"], "disable-red-zone">,
+ HelpText<"Do not emit code that uses the red zone.">;
+def dwarf_ext_refs : Flag<["-"], "dwarf-ext-refs">,
+ HelpText<"Generate debug info with external references to clang modules"
+ " or precompiled headers">;
+def dwarf_explicit_import : Flag<["-"], "dwarf-explicit-import">,
+ HelpText<"Generate explicit import from anonymous namespace to containing"
+ " scope">;
+def debug_forward_template_params : Flag<["-"], "debug-forward-template-params">,
+ HelpText<"Emit complete descriptions of template parameters in forward"
+ " declarations">;
+def fforbid_guard_variables : Flag<["-"], "fforbid-guard-variables">,
+ HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
+def no_implicit_float : Flag<["-"], "no-implicit-float">,
+ HelpText<"Don't generate implicit floating point instructions">;
+def fdump_vtable_layouts : Flag<["-"], "fdump-vtable-layouts">,
+ HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">;
+def fmerge_functions : Flag<["-"], "fmerge-functions">,
+ HelpText<"Permit merging of identical functions when optimizing.">;
+def femit_coverage_notes : Flag<["-"], "femit-coverage-notes">,
+ HelpText<"Emit a gcov coverage notes file when compiling.">;
+def femit_coverage_data: Flag<["-"], "femit-coverage-data">,
+ HelpText<"Instrument the program to emit gcov coverage data when run.">;
+def coverage_data_file : Separate<["-"], "coverage-data-file">,
+ HelpText<"Emit coverage data to this filename.">;
+def coverage_data_file_EQ : Joined<["-"], "coverage-data-file=">,
+ Alias<coverage_data_file>;
+def coverage_notes_file : Separate<["-"], "coverage-notes-file">,
+ HelpText<"Emit coverage notes to this filename.">;
+def coverage_notes_file_EQ : Joined<["-"], "coverage-notes-file=">,
+ Alias<coverage_notes_file>;
+def coverage_version_EQ : Joined<["-"], "coverage-version=">,
+ HelpText<"Four-byte version string for gcov files.">;
+def test_coverage : Flag<["-"], "test-coverage">,
+ HelpText<"Do not generate coverage files or remove coverage changes from IR">;
+def dump_coverage_mapping : Flag<["-"], "dump-coverage-mapping">,
+ HelpText<"Dump the coverage mapping records, for testing">;
+def fuse_register_sized_bitfield_access: Flag<["-"], "fuse-register-sized-bitfield-access">,
+ HelpText<"Use register sized accesses to bit-fields, when possible.">;
+def relaxed_aliasing : Flag<["-"], "relaxed-aliasing">,
+ HelpText<"Turn off Type Based Alias Analysis">;
+def no_struct_path_tbaa : Flag<["-"], "no-struct-path-tbaa">,
+ HelpText<"Turn off struct-path aware Type Based Alias Analysis">;
+def new_struct_path_tbaa : Flag<["-"], "new-struct-path-tbaa">,
+ HelpText<"Enable enhanced struct-path aware Type Based Alias Analysis">;
+def mdebug_pass : Separate<["-"], "mdebug-pass">,
+ HelpText<"Enable additional debug output">;
+def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
+ HelpText<"Specify which frame pointers to retain (all, non-leaf, none).">, Values<"all,non-leaf,none">;
+def mdisable_tail_calls : Flag<["-"], "mdisable-tail-calls">,
+ HelpText<"Disable tail call optimization, keeping the call stack accurate">;
+def menable_no_infinities : Flag<["-"], "menable-no-infs">,
+ HelpText<"Allow optimization to assume there are no infinities.">;
+def menable_no_nans : Flag<["-"], "menable-no-nans">,
+ HelpText<"Allow optimization to assume there are no NaNs.">;
+def menable_unsafe_fp_math : Flag<["-"], "menable-unsafe-fp-math">,
+ HelpText<"Allow unsafe floating-point math optimizations which may decrease "
+ "precision">;
+def mreassociate : Flag<["-"], "mreassociate">,
+ HelpText<"Allow reassociation transformations for floating-point instructions">;
+def mabi_EQ_ieeelongdouble : Flag<["-"], "mabi=ieeelongdouble">,
+ HelpText<"Use IEEE 754 quadruple-precision for long double">;
+def mfloat_abi : Separate<["-"], "mfloat-abi">,
+ HelpText<"The float ABI to use">;
+def mtp : Separate<["-"], "mtp">,
+ HelpText<"Mode for reading thread pointer">;
+def mlimit_float_precision : Separate<["-"], "mlimit-float-precision">,
+ HelpText<"Limit float precision to the given value">;
+def split_stacks : Flag<["-"], "split-stacks">,
+ HelpText<"Try to use a split stack if possible.">;
+def mregparm : Separate<["-"], "mregparm">,
+ HelpText<"Limit the number of registers available for integer arguments">;
+def msmall_data_limit : Separate<["-"], "msmall-data-limit">,
+ HelpText<"Put global and static data smaller than the limit into a special section">;
+def munwind_tables : Flag<["-"], "munwind-tables">,
+ HelpText<"Generate unwinding tables for all functions">;
+def mconstructor_aliases : Flag<["-"], "mconstructor-aliases">,
+ HelpText<"Emit complete constructors and destructors as aliases when possible">;
+def mlink_bitcode_file : Separate<["-"], "mlink-bitcode-file">,
+ HelpText<"Link the given bitcode file before performing optimizations.">;
+def mlink_builtin_bitcode : Separate<["-"], "mlink-builtin-bitcode">,
+ HelpText<"Link and internalize needed symbols from the given bitcode file "
+ "before performing optimizations.">;
+def mlink_cuda_bitcode : Separate<["-"], "mlink-cuda-bitcode">,
+ Alias<mlink_builtin_bitcode>;
+def vectorize_loops : Flag<["-"], "vectorize-loops">,
+ HelpText<"Run the Loop vectorization passes">;
+def vectorize_slp : Flag<["-"], "vectorize-slp">,
+ HelpText<"Run the SLP vectorization passes">;
+def dependent_lib : Joined<["--"], "dependent-lib=">,
+ HelpText<"Add dependent library">;
+def linker_option : Joined<["--"], "linker-option=">,
+ HelpText<"Add linker option">;
+def fsanitize_coverage_type : Joined<["-"], "fsanitize-coverage-type=">,
+ HelpText<"Sanitizer coverage type">;
+def fsanitize_coverage_indirect_calls
+ : Flag<["-"], "fsanitize-coverage-indirect-calls">,
+ HelpText<"Enable sanitizer coverage for indirect calls">;
+def fsanitize_coverage_trace_bb
+ : Flag<["-"], "fsanitize-coverage-trace-bb">,
+ HelpText<"Enable basic block tracing in sanitizer coverage">;
+def fsanitize_coverage_trace_cmp
+ : Flag<["-"], "fsanitize-coverage-trace-cmp">,
+ HelpText<"Enable cmp instruction tracing in sanitizer coverage">;
+def fsanitize_coverage_trace_div
+ : Flag<["-"], "fsanitize-coverage-trace-div">,
+ HelpText<"Enable div instruction tracing in sanitizer coverage">;
+def fsanitize_coverage_trace_gep
+ : Flag<["-"], "fsanitize-coverage-trace-gep">,
+ HelpText<"Enable gep instruction tracing in sanitizer coverage">;
+def fsanitize_coverage_8bit_counters
+ : Flag<["-"], "fsanitize-coverage-8bit-counters">,
+ HelpText<"Enable frequency counters in sanitizer coverage">;
+def fsanitize_coverage_inline_8bit_counters
+ : Flag<["-"], "fsanitize-coverage-inline-8bit-counters">,
+ HelpText<"Enable inline 8-bit counters in sanitizer coverage">;
+def fsanitize_coverage_inline_bool_flag
+ : Flag<["-"], "fsanitize-coverage-inline-bool-flag">,
+ HelpText<"Enable inline bool flag in sanitizer coverage">;
+def fsanitize_coverage_pc_table
+ : Flag<["-"], "fsanitize-coverage-pc-table">,
+ HelpText<"Create a table of coverage-instrumented PCs">;
+def fsanitize_coverage_trace_pc
+ : Flag<["-"], "fsanitize-coverage-trace-pc">,
+ HelpText<"Enable PC tracing in sanitizer coverage">;
+def fsanitize_coverage_trace_pc_guard
+ : Flag<["-"], "fsanitize-coverage-trace-pc-guard">,
+ HelpText<"Enable PC tracing with guard in sanitizer coverage">;
+def fsanitize_coverage_no_prune
+ : Flag<["-"], "fsanitize-coverage-no-prune">,
+ HelpText<"Disable coverage pruning (i.e. instrument all blocks/edges)">;
+def fsanitize_coverage_stack_depth
+ : Flag<["-"], "fsanitize-coverage-stack-depth">,
+ HelpText<"Enable max stack depth tracing">;
+def fpatchable_function_entry_offset_EQ
+ : Joined<["-"], "fpatchable-function-entry-offset=">, MetaVarName<"<M>">,
+ HelpText<"Generate M NOPs before function entry">;
+def fprofile_instrument_EQ : Joined<["-"], "fprofile-instrument=">,
+ HelpText<"Enable PGO instrumentation. The accepted value is clang, llvm, "
+ "or none">, Values<"none,clang,llvm">;
+def fprofile_instrument_path_EQ : Joined<["-"], "fprofile-instrument-path=">,
+ HelpText<"Generate instrumented code to collect execution counts into "
+ "<file> (overridden by LLVM_PROFILE_FILE env var)">;
+def fprofile_instrument_use_path_EQ :
+ Joined<["-"], "fprofile-instrument-use-path=">,
+ HelpText<"Specify the profile path in PGO use compilation">;
+def flto_visibility_public_std:
+ Flag<["-"], "flto-visibility-public-std">,
+ HelpText<"Use public LTO visibility for classes in std and stdext namespaces">;
+def flto_unit: Flag<["-"], "flto-unit">,
+ HelpText<"Emit IR to support LTO unit features (CFI, whole program vtable opt)">;
+def fno_lto_unit: Flag<["-"], "fno-lto-unit">;
+def fdebug_pass_manager : Flag<["-"], "fdebug-pass-manager">,
+ HelpText<"Prints debug information for the new pass manager">;
+def fno_debug_pass_manager : Flag<["-"], "fno-debug-pass-manager">,
+ HelpText<"Disables debug printing for the new pass manager">;
+// The driver option takes the key as a parameter to the -msign-return-address=
+// and -mbranch-protection= options, but CC1 has a separate option so we
+// don't have to parse the parameter twice.
+def msign_return_address_key_EQ : Joined<["-"], "msign-return-address-key=">,
+ Values<"a_key,b_key">;
+def mbranch_target_enforce : Flag<["-"], "mbranch-target-enforce">;
+def fno_dllexport_inlines : Flag<["-"], "fno-dllexport-inlines">;
+def cfguard_no_checks : Flag<["-"], "cfguard-no-checks">,
+ HelpText<"Emit Windows Control Flow Guard tables only (no checks)">;
+def cfguard : Flag<["-"], "cfguard">,
+ HelpText<"Emit Windows Control Flow Guard tables and checks">;
+
+def fdenormal_fp_math_f32_EQ : Joined<["-"], "fdenormal-fp-math-f32=">,
+ Group<f_Group>;
+
+//===----------------------------------------------------------------------===//
+// Dependency Output Options
+//===----------------------------------------------------------------------===//
+
+def sys_header_deps : Flag<["-"], "sys-header-deps">,
+ HelpText<"Include system headers in dependency output">;
+def module_file_deps : Flag<["-"], "module-file-deps">,
+ HelpText<"Include module files in dependency output">;
+def header_include_file : Separate<["-"], "header-include-file">,
+ HelpText<"Filename (or -) to write header include output to">;
+def show_includes : Flag<["--"], "show-includes">,
+ HelpText<"Print cl.exe style /showIncludes to stdout">;
+
+//===----------------------------------------------------------------------===//
+// Diagnostic Options
+//===----------------------------------------------------------------------===//
+
+def diagnostic_log_file : Separate<["-"], "diagnostic-log-file">,
+ HelpText<"Filename (or -) to log diagnostics to">;
+def diagnostic_serialized_file : Separate<["-"], "serialize-diagnostic-file">,
+ MetaVarName<"<filename>">,
+ HelpText<"File for serializing diagnostics in a binary format">;
+
+def fdiagnostics_format : Separate<["-"], "fdiagnostics-format">,
+ HelpText<"Change diagnostic formatting to match IDE and command line tools">, Values<"clang,msvc,msvc-fallback,vi">;
+def fdiagnostics_show_category : Separate<["-"], "fdiagnostics-show-category">,
+ HelpText<"Print diagnostic category">, Values<"none,id,name">;
+def fno_diagnostics_use_presumed_location : Flag<["-"], "fno-diagnostics-use-presumed-location">,
+ HelpText<"Ignore #line directives when displaying diagnostic locations">;
+def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
+ HelpText<"Set the tab stop distance.">;
+def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">;
+def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">;
+def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">;
+def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
+def fspell_checking_limit : Separate<["-"], "fspell-checking-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit).">;
+def fcaret_diagnostics_max_lines :
+ Separate<["-"], "fcaret-diagnostics-max-lines">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of source lines to show in a caret diagnostic">;
+def verify_EQ : CommaJoined<["-"], "verify=">,
+ MetaVarName<"<prefixes>">,
+ HelpText<"Verify diagnostic output using comment directives that start with"
+ " prefixes in the comma-separated sequence <prefixes>">;
+def verify : Flag<["-"], "verify">,
+ HelpText<"Equivalent to -verify=expected">;
+def verify_ignore_unexpected : Flag<["-"], "verify-ignore-unexpected">,
+ HelpText<"Ignore unexpected diagnostic messages">;
+def verify_ignore_unexpected_EQ : CommaJoined<["-"], "verify-ignore-unexpected=">,
+ HelpText<"Ignore unexpected diagnostic messages">;
+def Wno_rewrite_macros : Flag<["-"], "Wno-rewrite-macros">,
+ HelpText<"Silence ObjC rewriting warnings">;
+
+//===----------------------------------------------------------------------===//
+// Frontend Options
+//===----------------------------------------------------------------------===//
+
+// This isn't normally used, it is just here so we can parse a
+// CompilerInvocation out of a driver-derived argument vector.
+def cc1 : Flag<["-"], "cc1">;
+def cc1as : Flag<["-"], "cc1as">;
+
+def ast_merge : Separate<["-"], "ast-merge">,
+ MetaVarName<"<ast file>">,
+ HelpText<"Merge the given AST file into the translation unit being compiled.">;
+def aux_target_cpu : Separate<["-"], "aux-target-cpu">,
+ HelpText<"Target a specific auxiliary cpu type">;
+def aux_target_feature : Separate<["-"], "aux-target-feature">,
+ HelpText<"Target specific auxiliary attributes">;
+def aux_triple : Separate<["-"], "aux-triple">,
+ HelpText<"Auxiliary target triple.">;
+def code_completion_at : Separate<["-"], "code-completion-at">,
+ MetaVarName<"<file>:<line>:<column>">,
+ HelpText<"Dump code-completion information at a location">;
+def remap_file : Separate<["-"], "remap-file">,
+ MetaVarName<"<from>;<to>">,
+ HelpText<"Replace the contents of the <from> file with the contents of the <to> file">;
+def code_completion_at_EQ : Joined<["-"], "code-completion-at=">,
+ Alias<code_completion_at>;
+def code_completion_macros : Flag<["-"], "code-completion-macros">,
+ HelpText<"Include macros in code-completion results">;
+def code_completion_patterns : Flag<["-"], "code-completion-patterns">,
+ HelpText<"Include code patterns in code-completion results">;
+def no_code_completion_globals : Flag<["-"], "no-code-completion-globals">,
+ HelpText<"Do not include global declarations in code-completion results.">;
+def no_code_completion_ns_level_decls : Flag<["-"], "no-code-completion-ns-level-decls">,
+ HelpText<"Do not include declarations inside namespaces (incl. global namespace) in the code-completion results.">;
+def code_completion_brief_comments : Flag<["-"], "code-completion-brief-comments">,
+ HelpText<"Include brief documentation comments in code-completion results.">;
+def code_completion_with_fixits : Flag<["-"], "code-completion-with-fixits">,
+ HelpText<"Include code completion results which require small fix-its.">;
+def disable_free : Flag<["-"], "disable-free">,
+ HelpText<"Disable freeing of memory on exit">;
+def discard_value_names : Flag<["-"], "discard-value-names">,
+ HelpText<"Discard value names in LLVM IR">;
+def load : Separate<["-"], "load">, MetaVarName<"<dsopath>">,
+ HelpText<"Load the named plugin (dynamic shared object)">;
+def plugin : Separate<["-"], "plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
+def plugin_arg : JoinedAndSeparate<["-"], "plugin-arg-">,
+ MetaVarName<"<name> <arg>">,
+ HelpText<"Pass <arg> to plugin <name>">;
+def add_plugin : Separate<["-"], "add-plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action in addition to the default action">;
+def ast_dump_filter : Separate<["-"], "ast-dump-filter">,
+ MetaVarName<"<dump_filter>">,
+ HelpText<"Use with -ast-dump or -ast-print to dump/print only AST declaration"
+ " nodes having a certain substring in a qualified name. Use"
+ " -ast-list to list all filterable declaration node names.">;
+def fno_modules_global_index : Flag<["-"], "fno-modules-global-index">,
+ HelpText<"Do not automatically generate or update the global module index">;
+def fno_modules_error_recovery : Flag<["-"], "fno-modules-error-recovery">,
+ HelpText<"Do not automatically import modules for error recovery">;
+def fmodule_map_file_home_is_cwd : Flag<["-"], "fmodule-map-file-home-is-cwd">,
+ HelpText<"Use the current working directory as the home directory of "
+ "module maps specified by -fmodule-map-file=<FILE>">;
+def fmodule_feature : Separate<["-"], "fmodule-feature">,
+ MetaVarName<"<feature>">,
+ HelpText<"Enable <feature> in module map requires declarations">;
+def fmodules_embed_file_EQ : Joined<["-"], "fmodules-embed-file=">,
+ MetaVarName<"<file>">,
+ HelpText<"Embed the contents of the specified file into the module file "
+ "being compiled.">;
+def fmodules_embed_all_files : Joined<["-"], "fmodules-embed-all-files">,
+ HelpText<"Embed the contents of all files read by this compilation into "
+ "the produced module file.">;
+def fmodules_local_submodule_visibility :
+ Flag<["-"], "fmodules-local-submodule-visibility">,
+ HelpText<"Enforce name visibility rules across submodules of the same "
+ "top-level module.">;
+def fmodules_codegen :
+ Flag<["-"], "fmodules-codegen">,
+ HelpText<"Generate code for uses of this module that assumes an explicit "
+ "object file will be built for the module">;
+def fmodules_debuginfo :
+ Flag<["-"], "fmodules-debuginfo">,
+ HelpText<"Generate debug info for types in an object file built from this "
+ "module and do not generate them elsewhere">;
+def fmodule_format_EQ : Joined<["-"], "fmodule-format=">,
+ HelpText<"Select the container format for clang modules and PCH. "
+ "Supported options are 'raw' and 'obj'.">;
+def ftest_module_file_extension_EQ :
+ Joined<["-"], "ftest-module-file-extension=">,
+ HelpText<"introduce a module file extension for testing purposes. "
+ "The argument is parsed as blockname:major:minor:hashed:user info">;
+def fconcepts_ts : Flag<["-"], "fconcepts-ts">,
+ HelpText<"Enable C++ Extensions for Concepts. (deprecated - use -std=c++2a)">;
+def fno_concept_satisfaction_caching : Flag<["-"],
+ "fno-concept-satisfaction-caching">,
+ HelpText<"Disable satisfaction caching for C++2a Concepts.">;
+
+def frecovery_ast : Flag<["-"], "frecovery-ast">,
+ HelpText<"Preserve expressions in AST rather than dropping them when "
+ "encountering semantic errors">;
+def fno_recovery_ast : Flag<["-"], "fno-recovery-ast">;
+def frecovery_ast_type : Flag<["-"], "frecovery-ast-type">,
+ HelpText<"Preserve the type for recovery expressions when possible "
+ "(experimental)">;
+def fno_recovery_ast_type : Flag<["-"], "fno-recovery-ast-type">;
+
+let Group = Action_Group in {
+
+def Eonly : Flag<["-"], "Eonly">,
+ HelpText<"Just run preprocessor, no output (for timings)">;
+def dump_raw_tokens : Flag<["-"], "dump-raw-tokens">,
+ HelpText<"Lex file in raw mode and dump raw tokens">;
+def analyze : Flag<["-"], "analyze">,
+ HelpText<"Run static analysis engine">;
+def dump_tokens : Flag<["-"], "dump-tokens">,
+ HelpText<"Run preprocessor, dump internal rep of tokens">;
+def init_only : Flag<["-"], "init-only">,
+ HelpText<"Only execute frontend initialization">;
+def fixit : Flag<["-"], "fixit">,
+ HelpText<"Apply fix-it advice to the input source">;
+def fixit_EQ : Joined<["-"], "fixit=">,
+ HelpText<"Apply fix-it advice creating a file with the given suffix">;
+def print_preamble : Flag<["-"], "print-preamble">,
+ HelpText<"Print the \"preamble\" of a file, which is a candidate for implicit"
+ " precompiled headers.">;
+def emit_html : Flag<["-"], "emit-html">,
+ HelpText<"Output input source as HTML">;
+def ast_print : Flag<["-"], "ast-print">,
+ HelpText<"Build ASTs and then pretty-print them">;
+def ast_list : Flag<["-"], "ast-list">,
+ HelpText<"Build ASTs and print the list of declaration node qualified names">;
+def ast_dump : Flag<["-"], "ast-dump">,
+ HelpText<"Build ASTs and then debug dump them">;
+def ast_dump_EQ : Joined<["-"], "ast-dump=">,
+ HelpText<"Build ASTs and then debug dump them in the specified format. "
+ "Supported formats include: default, json">;
+def ast_dump_all : Flag<["-"], "ast-dump-all">,
+ HelpText<"Build ASTs and then debug dump them, forcing deserialization">;
+def ast_dump_all_EQ : Joined<["-"], "ast-dump-all=">,
+ HelpText<"Build ASTs and then debug dump them in the specified format, "
+ "forcing deserialization. Supported formats include: default, json">;
+def ast_dump_decl_types : Flag<["-"], "ast-dump-decl-types">,
+ HelpText<"Include declaration types in AST dumps">;
+def templight_dump : Flag<["-"], "templight-dump">,
+ HelpText<"Dump templight information to stdout">;
+def ast_dump_lookups : Flag<["-"], "ast-dump-lookups">,
+ HelpText<"Build ASTs and then debug dump their name lookup tables">;
+def ast_view : Flag<["-"], "ast-view">,
+ HelpText<"Build ASTs and view them with GraphViz">;
+def emit_module : Flag<["-"], "emit-module">,
+ HelpText<"Generate pre-compiled module file from a module map">;
+def emit_module_interface : Flag<["-"], "emit-module-interface">,
+ HelpText<"Generate pre-compiled module file from a C++ module interface">;
+def emit_header_module : Flag<["-"], "emit-header-module">,
+ HelpText<"Generate pre-compiled module file from a set of header files">;
+def emit_pch : Flag<["-"], "emit-pch">,
+ HelpText<"Generate pre-compiled header file">;
+def emit_llvm_bc : Flag<["-"], "emit-llvm-bc">,
+ HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
+def emit_llvm_only : Flag<["-"], "emit-llvm-only">,
+ HelpText<"Build ASTs and convert to LLVM, discarding output">;
+def emit_codegen_only : Flag<["-"], "emit-codegen-only">,
+ HelpText<"Generate machine code, but discard output">;
+def emit_obj : Flag<["-"], "emit-obj">,
+ HelpText<"Emit native object files">;
+def rewrite_test : Flag<["-"], "rewrite-test">,
+ HelpText<"Rewriter playground">;
+def rewrite_macros : Flag<["-"], "rewrite-macros">,
+ HelpText<"Expand macros without full preprocessing">;
+def migrate : Flag<["-"], "migrate">,
+ HelpText<"Migrate source code">;
+def compiler_options_dump : Flag<["-"], "compiler-options-dump">,
+ HelpText<"Dump the compiler configuration options">;
+def print_dependency_directives_minimized_source : Flag<["-"],
+ "print-dependency-directives-minimized-source">,
+ HelpText<"Print the output of the dependency directives source minimizer">;
+}
+
+def emit_llvm_uselists : Flag<["-"], "emit-llvm-uselists">,
+ HelpText<"Preserve order of LLVM use-lists when serializing">;
+def no_emit_llvm_uselists : Flag<["-"], "no-emit-llvm-uselists">,
+ HelpText<"Don't preserve order of LLVM use-lists when serializing">;
+
+def mt_migrate_directory : Separate<["-"], "mt-migrate-directory">,
+ HelpText<"Directory for temporary files produced during ARC or ObjC migration">;
+def arcmt_check : Flag<["-"], "arcmt-check">,
+ HelpText<"Check for ARC migration issues that need manual handling">;
+def arcmt_modify : Flag<["-"], "arcmt-modify">,
+ HelpText<"Apply modifications to files to conform to ARC">;
+def arcmt_migrate : Flag<["-"], "arcmt-migrate">,
+ HelpText<"Apply modifications and produces temporary files that conform to ARC">;
+
+def opt_record_file : Separate<["-"], "opt-record-file">,
+ HelpText<"File name to use for YAML optimization record output">;
+def opt_record_passes : Separate<["-"], "opt-record-passes">,
+ HelpText<"Only record remark information for passes whose names match the given regular expression">;
+def opt_record_format : Separate<["-"], "opt-record-format">,
+ HelpText<"The format used for serializing remarks (default: YAML)">;
+
+def print_stats : Flag<["-"], "print-stats">,
+ HelpText<"Print performance metrics and statistics">;
+def stats_file : Joined<["-"], "stats-file=">,
+ HelpText<"Filename to write statistics to">;
+def fdump_record_layouts : Flag<["-"], "fdump-record-layouts">,
+ HelpText<"Dump record layout information">;
+def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
+ HelpText<"Dump record layout information in a simple form used for testing">;
+def fix_what_you_can : Flag<["-"], "fix-what-you-can">,
+ HelpText<"Apply fix-it advice even in the presence of unfixable errors">;
+def fix_only_warnings : Flag<["-"], "fix-only-warnings">,
+ HelpText<"Apply fix-it advice only for warnings, not errors">;
+def fixit_recompile : Flag<["-"], "fixit-recompile">,
+ HelpText<"Apply fix-it changes and recompile">;
+def fixit_to_temp : Flag<["-"], "fixit-to-temporary">,
+ HelpText<"Apply fix-it changes to temporary files">;
+
+def foverride_record_layout_EQ : Joined<["-"], "foverride-record-layout=">,
+ HelpText<"Override record layouts with those in the given file">;
+def pch_through_header_EQ : Joined<["-"], "pch-through-header=">,
+ HelpText<"Stop PCH generation after including this file. When using a PCH, "
+ "skip tokens until after this file is included.">;
+def pch_through_hdrstop_create : Flag<["-"], "pch-through-hdrstop-create">,
+ HelpText<"When creating a PCH, stop PCH generation after #pragma hdrstop.">;
+def pch_through_hdrstop_use : Flag<["-"], "pch-through-hdrstop-use">,
+ HelpText<"When using a PCH, skip tokens until after a #pragma hdrstop.">;
+def fno_pch_timestamp : Flag<["-"], "fno-pch-timestamp">,
+ HelpText<"Disable inclusion of timestamp in precompiled headers">;
+def building_pch_with_obj : Flag<["-"], "building-pch-with-obj">,
+ HelpText<"This compilation is part of building a PCH with corresponding object file.">;
+
+def aligned_alloc_unavailable : Flag<["-"], "faligned-alloc-unavailable">,
+ HelpText<"Aligned allocation/deallocation functions are unavailable">;
+
+//===----------------------------------------------------------------------===//
+// Language Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
+
+def version : Flag<["-"], "version">,
+ HelpText<"Print the compiler version">;
+def main_file_name : Separate<["-"], "main-file-name">,
+ HelpText<"Main file name to use for debug info and source if missing">;
+def split_dwarf_output : Separate<["-"], "split-dwarf-output">,
+ HelpText<"File name to use for split dwarf debug info output">;
+
+}
+
+def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">,
+ HelpText<"Weakly link in the blocks runtime">;
+def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">,
+ HelpText<"Assume all functions with C linkage do not unwind">;
+def split_dwarf_file : Separate<["-"], "split-dwarf-file">,
+ HelpText<"Name of the split dwarf debug info file to encode in the object file">;
+def fno_wchar : Flag<["-"], "fno-wchar">,
+ HelpText<"Disable C++ builtin type wchar_t">;
+def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
+ MetaVarName<"<class name>">,
+ HelpText<"Specify the class to use for constant Objective-C string objects.">;
+def fobjc_arc_cxxlib_EQ : Joined<["-"], "fobjc-arc-cxxlib=">,
+ HelpText<"Objective-C++ Automatic Reference Counting standard library kind">, Values<"libc++,libstdc++,none">;
+def fobjc_runtime_has_weak : Flag<["-"], "fobjc-runtime-has-weak">,
+ HelpText<"The target Objective-C runtime supports ARC weak operations">;
+def fobjc_dispatch_method_EQ : Joined<["-"], "fobjc-dispatch-method=">,
+ HelpText<"Objective-C dispatch method to use">, Values<"legacy,non-legacy,mixed">;
+def disable_objc_default_synthesize_properties : Flag<["-"], "disable-objc-default-synthesize-properties">,
+ HelpText<"Disable the default synthesis of Objective-C properties">;
+def fencode_extended_block_signature : Flag<["-"], "fencode-extended-block-signature">,
+ HelpText<"Enable extended encoding of block type signature">;
+def function_alignment : Separate<["-"], "function-alignment">,
+ HelpText<"Default alignment for functions">;
+def pic_level : Separate<["-"], "pic-level">,
+ HelpText<"Value for __PIC__">;
+def pic_is_pie : Flag<["-"], "pic-is-pie">,
+ HelpText<"File is for a position independent executable">;
+def fno_validate_pch : Flag<["-"], "fno-validate-pch">,
+ HelpText<"Disable validation of precompiled headers">;
+def fallow_pch_with_errors : Flag<["-"], "fallow-pch-with-compiler-errors">,
+ HelpText<"Accept a PCH file that was created with compiler errors">;
+def dump_deserialized_pch_decls : Flag<["-"], "dump-deserialized-decls">,
+ HelpText<"Dump declarations that are deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl : Separate<["-"], "error-on-deserialized-decl">,
+ HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl_EQ : Joined<["-"], "error-on-deserialized-decl=">,
+ Alias<error_on_deserialized_pch_decl>;
+def static_define : Flag<["-"], "static-define">,
+ HelpText<"Should __STATIC__ be defined">;
+def stack_protector : Separate<["-"], "stack-protector">,
+ HelpText<"Enable stack protectors">;
+def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
+ HelpText<"Lower bound for a buffer to be considered for stack protection">;
+def fvisibility : Separate<["-"], "fvisibility">,
+ HelpText<"Default type and symbol visibility">;
+def ftype_visibility : Separate<["-"], "ftype-visibility">,
+ HelpText<"Default type visibility">;
+def fapply_global_visibility_to_externs : Flag<["-"], "fapply-global-visibility-to-externs">,
+ HelpText<"Apply global symbol visibility to external declarations without an explicit visibility">;
+def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
+ HelpText<"Maximum depth of recursive template instantiation">;
+def foperator_arrow_depth : Separate<["-"], "foperator-arrow-depth">,
+ HelpText<"Maximum number of 'operator->'s to call for a member access">;
+def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
+ HelpText<"Maximum depth of recursive constexpr function calls">;
+def fconstexpr_steps : Separate<["-"], "fconstexpr-steps">,
+ HelpText<"Maximum number of steps in constexpr function evaluation">;
+def fbracket_depth : Separate<["-"], "fbracket-depth">,
+ HelpText<"Maximum nesting level for parentheses, brackets, and braces">;
+def fconst_strings : Flag<["-"], "fconst-strings">,
+ HelpText<"Use a const qualified type for string literals in C and ObjC">;
+def fno_const_strings : Flag<["-"], "fno-const-strings">,
+ HelpText<"Don't use a const qualified type for string literals in C and ObjC">;
+def fno_bitfield_type_align : Flag<["-"], "fno-bitfield-type-align">,
+ HelpText<"Ignore bit-field types when aligning structures">;
+def ffake_address_space_map : Flag<["-"], "ffake-address-space-map">,
+ HelpText<"Use a fake address space map; OpenCL testing purposes only">;
+def faddress_space_map_mangling_EQ : Joined<["-"], "faddress-space-map-mangling=">, MetaVarName<"<yes|no|target>">,
+ HelpText<"Set the mode for address space map based mangling; OpenCL testing purposes only">;
+def funknown_anytype : Flag<["-"], "funknown-anytype">,
+ HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
+def fdebugger_support : Flag<["-"], "fdebugger-support">,
+ HelpText<"Enable special debugger support behavior">;
+def fdebugger_cast_result_to_id : Flag<["-"], "fdebugger-cast-result-to-id">,
+ HelpText<"Enable casting unknown expression results to id">;
+def fdebugger_objc_literal : Flag<["-"], "fdebugger-objc-literal">,
+ HelpText<"Enable special debugger support for Objective-C subscripting and literals">;
+def fdeprecated_macro : Flag<["-"], "fdeprecated-macro">,
+ HelpText<"Defines the __DEPRECATED macro">;
+def fno_deprecated_macro : Flag<["-"], "fno-deprecated-macro">,
+ HelpText<"Undefines the __DEPRECATED macro">;
+def fobjc_subscripting_legacy_runtime : Flag<["-"], "fobjc-subscripting-legacy-runtime">,
+ HelpText<"Allow Objective-C array and dictionary subscripting in legacy runtime">;
+def vtordisp_mode_EQ : Joined<["-"], "vtordisp-mode=">,
+ HelpText<"Control vtordisp placement on win32 targets">;
+def fnative_half_type: Flag<["-"], "fnative-half-type">,
+ HelpText<"Use the native half type for __fp16 instead of promoting to float">;
+def fnative_half_arguments_and_returns : Flag<["-"], "fnative-half-arguments-and-returns">,
+ HelpText<"Use the native __fp16 type for arguments and returns (and skip ABI-specific lowering)">;
+def fallow_half_arguments_and_returns : Flag<["-"], "fallow-half-arguments-and-returns">,
+ HelpText<"Allow function arguments and returns of type half">;
+def fdefault_calling_conv_EQ : Joined<["-"], "fdefault-calling-conv=">,
+ HelpText<"Set default calling convention">, Values<"cdecl,fastcall,stdcall,vectorcall,regcall">;
+def finclude_default_header : Flag<["-"], "finclude-default-header">,
+ HelpText<"Include default header file for OpenCL">;
+def fdeclare_opencl_builtins : Flag<["-"], "fdeclare-opencl-builtins">,
+ HelpText<"Add OpenCL builtin function declarations (experimental)">;
+def fpreserve_vec3_type : Flag<["-"], "fpreserve-vec3-type">,
+ HelpText<"Preserve 3-component vector type">;
+def fwchar_type_EQ : Joined<["-"], "fwchar-type=">,
+ HelpText<"Select underlying type for wchar_t">, Values<"char,short,int">;
+def fsigned_wchar : Flag<["-"], "fsigned-wchar">,
+ HelpText<"Use a signed type for wchar_t">;
+def fno_signed_wchar : Flag<["-"], "fno-signed-wchar">,
+ HelpText<"Use an unsigned type for wchar_t">;
+def fcompatibility_qualified_id_block_param_type_checking : Flag<["-"], "fcompatibility-qualified-id-block-type-checking">,
+ HelpText<"Allow using blocks with parameters of more specific type than "
+ "the type system guarantees when a parameter is qualified id">;
+
+// FIXME: Remove these entirely once functionality/tests have been excised.
+def fobjc_gc_only : Flag<["-"], "fobjc-gc-only">, Group<f_Group>,
+ HelpText<"Use GC exclusively for Objective-C related memory management">;
+def fobjc_gc : Flag<["-"], "fobjc-gc">, Group<f_Group>,
+ HelpText<"Enable Objective-C garbage collection">;
+
+//===----------------------------------------------------------------------===//
+// Header Search Options
+//===----------------------------------------------------------------------===//
+
+def nostdsysteminc : Flag<["-"], "nostdsysteminc">,
+ HelpText<"Disable standard system #include directories">;
+def fdisable_module_hash : Flag<["-"], "fdisable-module-hash">,
+ HelpText<"Disable the module hash">;
+def fmodules_hash_content : Flag<["-"], "fmodules-hash-content">,
+ HelpText<"Enable hashing the content of a module file">;
+def fmodules_strict_context_hash : Flag<["-"], "fmodules-strict-context-hash">,
+ HelpText<"Enable hashing of all compiler options that could impact the "
+ "semantics of a module in an implicit build">,
+ MarshallingInfoFlag<"HeaderSearchOpts->ModulesStrictContextHash", "false">;
+def c_isystem : JoinedOrSeparate<["-"], "c-isystem">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to the C SYSTEM include search path">;
+def objc_isystem : JoinedOrSeparate<["-"], "objc-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the ObjC SYSTEM include search path">;
+def objcxx_isystem : JoinedOrSeparate<["-"], "objcxx-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
+def internal_isystem : JoinedOrSeparate<["-"], "internal-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the internal system include search path; these "
+ "are assumed to not be user-provided and are used to model system "
+ "and standard headers' paths.">;
+def internal_externc_isystem : JoinedOrSeparate<["-"], "internal-externc-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the internal system include search path with "
+ "implicit extern \"C\" semantics; these are assumed to not be "
+ "user-provided and are used to model system and standard headers' "
+ "paths.">;
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Options
+//===----------------------------------------------------------------------===//
+
+def chain_include : Separate<["-"], "chain-include">, MetaVarName<"<file>">,
+ HelpText<"Include and chain a header file after turning it into PCH">;
+def preamble_bytes_EQ : Joined<["-"], "preamble-bytes=">,
+ HelpText<"Assume that the precompiled header is a precompiled preamble "
+ "covering the first N bytes of the main file">;
+def detailed_preprocessing_record : Flag<["-"], "detailed-preprocessing-record">,
+ HelpText<"include a detailed record of preprocessing actions">;
+def setup_static_analyzer : Flag<["-"], "setup-static-analyzer">,
+ HelpText<"Set up preprocessor for static analyzer (done automatically when static analyzer is run).">;
+def disable_pragma_debug_crash : Flag<["-"], "disable-pragma-debug-crash">,
+ HelpText<"Disable any #pragma clang __debug that can lead to crashing behavior. This is meant for testing.">;
+
+//===----------------------------------------------------------------------===//
+// OpenCL Options
+//===----------------------------------------------------------------------===//
+
+def cl_ext_EQ : CommaJoined<["-"], "cl-ext=">,
+ HelpText<"OpenCL only. Enable or disable OpenCL extensions. The argument is a comma-separated sequence of one or more extension names, each prefixed by '+' or '-'.">;
+
+//===----------------------------------------------------------------------===//
+// CUDA Options
+//===----------------------------------------------------------------------===//
+
+def fcuda_is_device : Flag<["-"], "fcuda-is-device">,
+ HelpText<"Generate code for CUDA device">;
+def fcuda_include_gpubinary : Separate<["-"], "fcuda-include-gpubinary">,
+ HelpText<"Incorporate CUDA device-side binary into host object file.">;
+def fcuda_allow_variadic_functions : Flag<["-"], "fcuda-allow-variadic-functions">,
+ HelpText<"Allow variadic functions in CUDA device code.">;
+def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr">,
+ HelpText<"Don't treat unattributed constexpr functions as __host__ __device__.">;
+
+//===----------------------------------------------------------------------===//
+// OpenMP Options
+//===----------------------------------------------------------------------===//
+
+def fopenmp_is_device : Flag<["-"], "fopenmp-is-device">,
+ HelpText<"Generate code only for an OpenMP target device.">;
+def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
+ HelpText<"Path to the IR file produced by the frontend for the host.">;
+
+//===----------------------------------------------------------------------===//
+// SYCL Options
+//===----------------------------------------------------------------------===//
+
+def fsycl_is_device : Flag<["-"], "fsycl-is-device">,
+ HelpText<"Generate code for SYCL device.">;
+
+} // let Flags = [CC1Option]
+
+//===----------------------------------------------------------------------===//
+// cc1as-only Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1AsOption, NoDriverOption] in {
+
+// Language Options
+def n : Flag<["-"], "n">,
+ HelpText<"Don't automatically start assembly file with a text section">;
+
+// Frontend Options
+def filetype : Separate<["-"], "filetype">,
+ HelpText<"Specify the output file type ('asm', 'null', or 'obj')">;
+
+// Transliterate Options
+def output_asm_variant : Separate<["-"], "output-asm-variant">,
+ HelpText<"Select the asm variant index to use for output">;
+def show_encoding : Flag<["-"], "show-encoding">,
+ HelpText<"Show instruction encoding information in transliterate mode">;
+def show_inst : Flag<["-"], "show-inst">,
+ HelpText<"Show internal instruction representation in transliterate mode">;
+
+// Assemble Options
+def dwarf_debug_producer : Separate<["-"], "dwarf-debug-producer">,
+ HelpText<"The string to embed in the Dwarf debug AT_producer record.">;
+
+def defsym : Separate<["-"], "defsym">,
+ HelpText<"Define a value for a symbol">;
+
+} // let Flags = [CC1AsOption]
+
+//===----------------------------------------------------------------------===//
+// clang-cl Options
+//===----------------------------------------------------------------------===//
+
+def cl_Group : OptionGroup<"<clang-cl options>">, Flags<[CLOption]>,
+ HelpText<"CL.EXE COMPATIBILITY OPTIONS">;
+
+def cl_compile_Group : OptionGroup<"<clang-cl compile-only options>">,
+ Group<cl_Group>;
+
+def cl_ignored_Group : OptionGroup<"<clang-cl ignored options>">,
+ Group<cl_Group>;
+
+class CLFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLCompileFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_compile_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLIgnoredFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_ignored_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLCompileJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_compile_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLIgnoredJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_ignored_Group>, Flags<[CLOption, DriverOption, HelpHidden]>;
+
+class CLJoinedOrSeparate<string name> : Option<["/", "-"], name,
+ KIND_JOINED_OR_SEPARATE>, Group<cl_Group>, Flags<[CLOption, DriverOption]>;
+
+class CLCompileJoinedOrSeparate<string name> : Option<["/", "-"], name,
+ KIND_JOINED_OR_SEPARATE>, Group<cl_compile_Group>,
+ Flags<[CLOption, DriverOption]>;
+
+class CLRemainingArgsJoined<string name> : Option<["/", "-"], name,
+ KIND_REMAINING_ARGS_JOINED>, Group<cl_Group>, Flags<[CLOption, DriverOption]>;
+
+// Aliases:
+// (We don't put any of these in cl_compile_Group as the options they alias are
+// already in the right group.)
+
+def _SLASH_Brepro : CLFlag<"Brepro">,
+ HelpText<"Do not write current time into COFF output (breaks link.exe /incremental)">,
+ Alias<mno_incremental_linker_compatible>;
+def _SLASH_Brepro_ : CLFlag<"Brepro-">,
+ HelpText<"Write current time into COFF output (default)">,
+ Alias<mincremental_linker_compatible>;
+def _SLASH_C : CLFlag<"C">,
+ HelpText<"Do not discard comments when preprocessing">, Alias<C>;
+def _SLASH_c : CLFlag<"c">, HelpText<"Compile only">, Alias<c>;
+def _SLASH_d1PP : CLFlag<"d1PP">,
+ HelpText<"Retain macro definitions in /E mode">, Alias<dD>;
+def _SLASH_d1reportAllClassLayout : CLFlag<"d1reportAllClassLayout">,
+ HelpText<"Dump record layout information">,
+ Alias<Xclang>, AliasArgs<["-fdump-record-layouts"]>;
+def _SLASH_diagnostics_caret : CLFlag<"diagnostics:caret">,
+ HelpText<"Enable caret and column diagnostics (default)">;
+def _SLASH_diagnostics_column : CLFlag<"diagnostics:column">,
+ HelpText<"Disable caret diagnostics but keep column info">;
+def _SLASH_diagnostics_classic : CLFlag<"diagnostics:classic">,
+ HelpText<"Disable column and caret diagnostics">;
+def _SLASH_D : CLJoinedOrSeparate<"D">, HelpText<"Define macro">,
+ MetaVarName<"<macro[=value]>">, Alias<D>;
+def _SLASH_E : CLFlag<"E">, HelpText<"Preprocess to stdout">, Alias<E>;
+def _SLASH_fp_except : CLFlag<"fp:except">, HelpText<"">, Alias<ftrapping_math>;
+def _SLASH_fp_except_ : CLFlag<"fp:except-">,
+ HelpText<"">, Alias<fno_trapping_math>;
+def _SLASH_fp_fast : CLFlag<"fp:fast">, HelpText<"">, Alias<ffast_math>;
+def _SLASH_fp_precise : CLFlag<"fp:precise">,
+ HelpText<"">, Alias<fno_fast_math>;
+def _SLASH_fp_strict : CLFlag<"fp:strict">, HelpText<"">, Alias<fno_fast_math>;
+def _SLASH_GA : CLFlag<"GA">, Alias<ftlsmodel_EQ>, AliasArgs<["local-exec"]>,
+ HelpText<"Assume thread-local variables are defined in the executable">;
+def _SLASH_GR : CLFlag<"GR">, HelpText<"Emit RTTI data (default)">;
+def _SLASH_GR_ : CLFlag<"GR-">, HelpText<"Do not emit RTTI data">;
+def _SLASH_GF : CLIgnoredFlag<"GF">,
+ HelpText<"Enable string pooling (default)">;
+def _SLASH_GF_ : CLFlag<"GF-">, HelpText<"Disable string pooling">,
+ Alias<fwritable_strings>;
+def _SLASH_GS : CLFlag<"GS">,
+ HelpText<"Enable buffer security check (default)">;
+def _SLASH_GS_ : CLFlag<"GS-">, HelpText<"Disable buffer security check">;
+def : CLFlag<"Gs">, HelpText<"Use stack probes (default)">,
+ Alias<mstack_probe_size>, AliasArgs<["4096"]>;
+def _SLASH_Gs : CLJoined<"Gs">,
+ HelpText<"Set stack probe size (default 4096)">, Alias<mstack_probe_size>;
+def _SLASH_Gy : CLFlag<"Gy">, HelpText<"Put each function in its own section">,
+ Alias<ffunction_sections>;
+def _SLASH_Gy_ : CLFlag<"Gy-">,
+ HelpText<"Do not put each function in its own section (default)">,
+ Alias<fno_function_sections>;
+def _SLASH_Gw : CLFlag<"Gw">, HelpText<"Put each data item in its own section">,
+ Alias<fdata_sections>;
+def _SLASH_Gw_ : CLFlag<"Gw-">,
+ HelpText<"Do not put each data item in its own section (default)">,
+ Alias<fno_data_sections>;
+def _SLASH_help : CLFlag<"help">, Alias<help>,
+ HelpText<"Display available options">;
+def _SLASH_HELP : CLFlag<"HELP">, Alias<help>;
+def _SLASH_I : CLJoinedOrSeparate<"I">,
+ HelpText<"Add directory to include search path">, MetaVarName<"<dir>">,
+ Alias<I>;
+def _SLASH_J : CLFlag<"J">, HelpText<"Make char type unsigned">,
+ Alias<funsigned_char>;
+
+// The _SLASH_O option handles all the /O flags, but we also provide separate
+// aliased options to provide separate help messages.
+def _SLASH_O : CLJoined<"O">,
+ HelpText<"Set multiple /O flags at once; e.g. '/O2y-' for '/O2 /Oy-'">,
+ MetaVarName<"<flags>">;
+def : CLFlag<"O1">, Alias<_SLASH_O>, AliasArgs<["1"]>,
+ HelpText<"Optimize for size (like /Og /Os /Oy /Ob2 /GF /Gy)">;
+def : CLFlag<"O2">, Alias<_SLASH_O>, AliasArgs<["2"]>,
+ HelpText<"Optimize for speed (like /Og /Oi /Ot /Oy /Ob2 /GF /Gy)">;
+def : CLFlag<"Ob0">, Alias<_SLASH_O>, AliasArgs<["b0"]>,
+ HelpText<"Disable function inlining">;
+def : CLFlag<"Ob1">, Alias<_SLASH_O>, AliasArgs<["b1"]>,
+ HelpText<"Only inline functions explicitly or implicitly marked inline">;
+def : CLFlag<"Ob2">, Alias<_SLASH_O>, AliasArgs<["b2"]>,
+ HelpText<"Inline functions as deemed beneficial by the compiler">;
+def : CLFlag<"Od">, Alias<_SLASH_O>, AliasArgs<["d"]>,
+ HelpText<"Disable optimization">;
+def : CLFlag<"Og">, Alias<_SLASH_O>, AliasArgs<["g"]>,
+ HelpText<"No effect">;
+def : CLFlag<"Oi">, Alias<_SLASH_O>, AliasArgs<["i"]>,
+ HelpText<"Enable use of builtin functions">;
+def : CLFlag<"Oi-">, Alias<_SLASH_O>, AliasArgs<["i-"]>,
+ HelpText<"Disable use of builtin functions">;
+def : CLFlag<"Os">, Alias<_SLASH_O>, AliasArgs<["s"]>,
+ HelpText<"Optimize for size">;
+def : CLFlag<"Ot">, Alias<_SLASH_O>, AliasArgs<["t"]>,
+ HelpText<"Optimize for speed">;
+def : CLFlag<"Ox">, Alias<_SLASH_O>, AliasArgs<["x"]>,
+ HelpText<"Deprecated (like /Og /Oi /Ot /Oy /Ob2); use /O2">;
+def : CLFlag<"Oy">, Alias<_SLASH_O>, AliasArgs<["y"]>,
+ HelpText<"Enable frame pointer omission (x86 only)">;
+def : CLFlag<"Oy-">, Alias<_SLASH_O>, AliasArgs<["y-"]>,
+ HelpText<"Disable frame pointer omission (x86 only, default)">;
+
+def _SLASH_QUESTION : CLFlag<"?">, Alias<help>,
+ HelpText<"Display available options">;
+def _SLASH_Qvec : CLFlag<"Qvec">,
+ HelpText<"Enable the loop vectorization passes">, Alias<fvectorize>;
+def _SLASH_Qvec_ : CLFlag<"Qvec-">,
+ HelpText<"Disable the loop vectorization passes">, Alias<fno_vectorize>;
+def _SLASH_showIncludes : CLFlag<"showIncludes">,
+ HelpText<"Print info about included files to stderr">;
+def _SLASH_showIncludes_user : CLFlag<"showIncludes:user">,
+ HelpText<"Like /showIncludes but omit system headers">;
+def _SLASH_showFilenames : CLFlag<"showFilenames">,
+ HelpText<"Print the name of each compiled file">;
+def _SLASH_showFilenames_ : CLFlag<"showFilenames-">,
+ HelpText<"Do not print the name of each compiled file (default)">;
+def _SLASH_source_charset : CLCompileJoined<"source-charset:">,
+ HelpText<"Set source encoding, supports only UTF-8">,
+ Alias<finput_charset_EQ>;
+def _SLASH_execution_charset : CLCompileJoined<"execution-charset:">,
+ HelpText<"Set runtime encoding, supports only UTF-8">,
+ Alias<fexec_charset_EQ>;
+def _SLASH_std : CLCompileJoined<"std:">,
+ HelpText<"Set C++ version (c++14,c++17,c++latest)">;
+def _SLASH_U : CLJoinedOrSeparate<"U">, HelpText<"Undefine macro">,
+ MetaVarName<"<macro>">, Alias<U>;
+def _SLASH_validate_charset : CLFlag<"validate-charset">,
+ Alias<W_Joined>, AliasArgs<["invalid-source-encoding"]>;
+def _SLASH_validate_charset_ : CLFlag<"validate-charset-">,
+ Alias<W_Joined>, AliasArgs<["no-invalid-source-encoding"]>;
+def _SLASH_W0 : CLFlag<"W0">, HelpText<"Disable all warnings">, Alias<w>;
+def _SLASH_W1 : CLFlag<"W1">, HelpText<"Enable -Wall">, Alias<Wall>;
+def _SLASH_W2 : CLFlag<"W2">, HelpText<"Enable -Wall">, Alias<Wall>;
+def _SLASH_W3 : CLFlag<"W3">, HelpText<"Enable -Wall">, Alias<Wall>;
+def _SLASH_W4 : CLFlag<"W4">, HelpText<"Enable -Wall and -Wextra">, Alias<WCL4>;
+def _SLASH_Wall : CLFlag<"Wall">, HelpText<"Enable -Weverything">,
+ Alias<W_Joined>, AliasArgs<["everything"]>;
+def _SLASH_WX : CLFlag<"WX">, HelpText<"Treat warnings as errors">,
+ Alias<W_Joined>, AliasArgs<["error"]>;
+def _SLASH_WX_ : CLFlag<"WX-">,
+ HelpText<"Do not treat warnings as errors (default)">,
+ Alias<W_Joined>, AliasArgs<["no-error"]>;
+def _SLASH_w_flag : CLFlag<"w">, HelpText<"Disable all warnings">, Alias<w>;
+def _SLASH_wd4005 : CLFlag<"wd4005">, Alias<W_Joined>,
+ AliasArgs<["no-macro-redefined"]>;
+def _SLASH_wd4018 : CLFlag<"wd4018">, Alias<W_Joined>,
+ AliasArgs<["no-sign-compare"]>;
+def _SLASH_wd4100 : CLFlag<"wd4100">, Alias<W_Joined>,
+ AliasArgs<["no-unused-parameter"]>;
+def _SLASH_wd4910 : CLFlag<"wd4910">, Alias<W_Joined>,
+ AliasArgs<["no-dllexport-explicit-instantiation-decl"]>;
+def _SLASH_wd4996 : CLFlag<"wd4996">, Alias<W_Joined>,
+ AliasArgs<["no-deprecated-declarations"]>;
+def _SLASH_vd : CLJoined<"vd">, HelpText<"Control vtordisp placement">,
+ Alias<vtordisp_mode_EQ>;
+def _SLASH_X : CLFlag<"X">,
+ HelpText<"Do not add %INCLUDE% to include search path">, Alias<nostdlibinc>;
+def _SLASH_Zc_sizedDealloc : CLFlag<"Zc:sizedDealloc">,
+ HelpText<"Enable C++14 sized global deallocation functions">,
+ Alias<fsized_deallocation>;
+def _SLASH_Zc_sizedDealloc_ : CLFlag<"Zc:sizedDealloc-">,
+ HelpText<"Disable C++14 sized global deallocation functions">,
+ Alias<fno_sized_deallocation>;
+def _SLASH_Zc_alignedNew : CLFlag<"Zc:alignedNew">,
+ HelpText<"Enable C++17 aligned allocation functions">,
+ Alias<faligned_allocation>;
+def _SLASH_Zc_alignedNew_ : CLFlag<"Zc:alignedNew-">,
+ HelpText<"Disable C++17 aligned allocation functions">,
+ Alias<fno_aligned_allocation>;
+def _SLASH_Zc_char8_t : CLFlag<"Zc:char8_t">,
+ HelpText<"Enable char8_t from C++2a">,
+ Alias<fchar8__t>;
+def _SLASH_Zc_char8_t_ : CLFlag<"Zc:char8_t-">,
+ HelpText<"Disable char8_t from c++2a">,
+ Alias<fno_char8__t>;
+def _SLASH_Zc_strictStrings : CLFlag<"Zc:strictStrings">,
+ HelpText<"Treat string literals as const">, Alias<W_Joined>,
+ AliasArgs<["error=c++11-compat-deprecated-writable-strings"]>;
+def _SLASH_Zc_threadSafeInit : CLFlag<"Zc:threadSafeInit">,
+ HelpText<"Enable thread-safe initialization of static variables">,
+ Alias<fthreadsafe_statics>;
+def _SLASH_Zc_threadSafeInit_ : CLFlag<"Zc:threadSafeInit-">,
+ HelpText<"Disable thread-safe initialization of static variables">,
+ Alias<fno_threadsafe_statics>;
+def _SLASH_Zc_trigraphs : CLFlag<"Zc:trigraphs">,
+ HelpText<"Enable trigraphs">, Alias<ftrigraphs>;
+def _SLASH_Zc_trigraphs_off : CLFlag<"Zc:trigraphs-">,
+ HelpText<"Disable trigraphs (default)">, Alias<fno_trigraphs>;
+def _SLASH_Zc_twoPhase : CLFlag<"Zc:twoPhase">,
+ HelpText<"Enable two-phase name lookup in templates">,
+ Alias<fno_delayed_template_parsing>;
+def _SLASH_Zc_twoPhase_ : CLFlag<"Zc:twoPhase-">,
+ HelpText<"Disable two-phase name lookup in templates (default)">,
+ Alias<fdelayed_template_parsing>;
+def _SLASH_Z7 : CLFlag<"Z7">,
+ HelpText<"Enable CodeView debug information in object files">;
+def _SLASH_Zd : CLFlag<"Zd">,
+ HelpText<"Emit debug line number tables only">;
+def _SLASH_Zi : CLFlag<"Zi">, Alias<_SLASH_Z7>,
+ HelpText<"Like /Z7">;
+def _SLASH_Zp : CLJoined<"Zp">,
+ HelpText<"Set default maximum struct packing alignment">,
+ Alias<fpack_struct_EQ>;
+def _SLASH_Zp_flag : CLFlag<"Zp">,
+ HelpText<"Set default maximum struct packing alignment to 1">,
+ Alias<fpack_struct_EQ>, AliasArgs<["1"]>;
+def _SLASH_Zs : CLFlag<"Zs">, HelpText<"Syntax-check only">,
+ Alias<fsyntax_only>;
+def _SLASH_openmp_ : CLFlag<"openmp-">,
+ HelpText<"Disable OpenMP support">, Alias<fno_openmp>;
+def _SLASH_openmp : CLFlag<"openmp">, HelpText<"Enable OpenMP support">,
+ Alias<fopenmp>;
+def _SLASH_openmp_experimental : CLFlag<"openmp:experimental">,
+ HelpText<"Enable OpenMP support with experimental SIMD support">,
+ Alias<fopenmp>;
+
+// Non-aliases:
+
+def _SLASH_arch : CLCompileJoined<"arch:">,
+ HelpText<"Set architecture for code generation">;
+
+def _SLASH_M_Group : OptionGroup<"</M group>">, Group<cl_compile_Group>;
+def _SLASH_volatile_Group : OptionGroup<"</volatile group>">,
+ Group<cl_compile_Group>;
+
+def _SLASH_EH : CLJoined<"EH">, HelpText<"Set exception handling model">;
+def _SLASH_EP : CLFlag<"EP">,
+ HelpText<"Disable linemarker output and preprocess to stdout">;
+def _SLASH_FA : CLFlag<"FA">,
+ HelpText<"Output assembly code file during compilation">;
+def _SLASH_Fa : CLJoined<"Fa">,
+ HelpText<"Set assembly output file name (with /FA)">,
+ MetaVarName<"<file or dir/>">;
+def _SLASH_fallback : CLCompileFlag<"fallback">,
+ HelpText<"Fall back to cl.exe if clang-cl fails to compile">;
+def _SLASH_FI : CLJoinedOrSeparate<"FI">,
+ HelpText<"Include file before parsing">, Alias<include_>;
+def _SLASH_Fe : CLJoined<"Fe">,
+ HelpText<"Set output executable file name">,
+ MetaVarName<"<file or dir/>">;
+def _SLASH_Fi : CLCompileJoined<"Fi">,
+ HelpText<"Set preprocess output file name (with /P)">,
+ MetaVarName<"<file>">;
+def _SLASH_Fo : CLCompileJoined<"Fo">,
+ HelpText<"Set output object file (with /c)">,
+ MetaVarName<"<file or dir/>">;
+def _SLASH_guard : CLJoined<"guard:">,
+ HelpText<"Enable Control Flow Guard with /guard:cf, or only the table with /guard:cf,nochecks">;
+def _SLASH_GX : CLFlag<"GX">,
+ HelpText<"Deprecated; use /EHsc">;
+def _SLASH_GX_ : CLFlag<"GX-">,
+ HelpText<"Deprecated (like not passing /EH)">;
+def _SLASH_imsvc : CLJoinedOrSeparate<"imsvc">,
+ HelpText<"Add <dir> to system include search path, as if in %INCLUDE%">,
+ MetaVarName<"<dir>">;
+def _SLASH_LD : CLFlag<"LD">, HelpText<"Create DLL">;
+def _SLASH_LDd : CLFlag<"LDd">, HelpText<"Create debug DLL">;
+def _SLASH_link : CLRemainingArgsJoined<"link">,
+ HelpText<"Forward options to the linker">, MetaVarName<"<options>">;
+def _SLASH_MD : Option<["/", "-"], "MD", KIND_FLAG>, Group<_SLASH_M_Group>,
+ Flags<[CLOption, DriverOption]>, HelpText<"Use DLL run-time">;
+def _SLASH_MDd : Option<["/", "-"], "MDd", KIND_FLAG>, Group<_SLASH_M_Group>,
+ Flags<[CLOption, DriverOption]>, HelpText<"Use DLL debug run-time">;
+def _SLASH_MT : Option<["/", "-"], "MT", KIND_FLAG>, Group<_SLASH_M_Group>,
+ Flags<[CLOption, DriverOption]>, HelpText<"Use static run-time">;
+def _SLASH_MTd : Option<["/", "-"], "MTd", KIND_FLAG>, Group<_SLASH_M_Group>,
+ Flags<[CLOption, DriverOption]>, HelpText<"Use static debug run-time">;
+def _SLASH_o : CLJoinedOrSeparate<"o">,
+ HelpText<"Deprecated (set output file name); use /Fe or /Fe">,
+ MetaVarName<"<file or dir/>">;
+def _SLASH_P : CLFlag<"P">, HelpText<"Preprocess to file">;
+def _SLASH_Tc : CLCompileJoinedOrSeparate<"Tc">,
+ HelpText<"Treat <file> as C source file">, MetaVarName<"<file>">;
+def _SLASH_TC : CLCompileFlag<"TC">, HelpText<"Treat all source files as C">;
+def _SLASH_Tp : CLCompileJoinedOrSeparate<"Tp">,
+ HelpText<"Treat <file> as C++ source file">, MetaVarName<"<file>">;
+def _SLASH_TP : CLCompileFlag<"TP">, HelpText<"Treat all source files as C++">;
+def _SLASH_volatile_iso : Option<["/", "-"], "volatile:iso", KIND_FLAG>,
+ Group<_SLASH_volatile_Group>, Flags<[CLOption, DriverOption]>,
+ HelpText<"Volatile loads and stores have standard semantics">;
+def _SLASH_vmb : CLFlag<"vmb">,
+ HelpText<"Use a best-case representation method for member pointers">;
+def _SLASH_vmg : CLFlag<"vmg">,
+ HelpText<"Use a most-general representation for member pointers">;
+def _SLASH_vms : CLFlag<"vms">,
+ HelpText<"Set the default most-general representation to single inheritance">;
+def _SLASH_vmm : CLFlag<"vmm">,
+ HelpText<"Set the default most-general representation to "
+ "multiple inheritance">;
+def _SLASH_vmv : CLFlag<"vmv">,
+ HelpText<"Set the default most-general representation to "
+ "virtual inheritance">;
+def _SLASH_volatile_ms : Option<["/", "-"], "volatile:ms", KIND_FLAG>,
+ Group<_SLASH_volatile_Group>, Flags<[CLOption, DriverOption]>,
+ HelpText<"Volatile loads and stores have acquire and release semantics">;
+def _SLASH_clang : CLJoined<"clang:">,
+ HelpText<"Pass <arg> to the clang driver">, MetaVarName<"<arg>">;
+def _SLASH_Zl : CLFlag<"Zl">,
+ HelpText<"Do not let object file auto-link default libraries">;
+
+def _SLASH_Yc : CLJoined<"Yc">,
+ HelpText<"Generate a pch file for all code up to and including <filename>">,
+ MetaVarName<"<filename>">;
+def _SLASH_Yu : CLJoined<"Yu">,
+ HelpText<"Load a pch file and use it instead of all code up to "
+ "and including <filename>">,
+ MetaVarName<"<filename>">;
+def _SLASH_Y_ : CLFlag<"Y-">,
+ HelpText<"Disable precompiled headers, overrides /Yc and /Yu">;
+def _SLASH_Zc_dllexportInlines : CLFlag<"Zc:dllexportInlines">,
+ HelpText<"dllexport/dllimport inline member functions of dllexport/import classes (default)">;
+def _SLASH_Zc_dllexportInlines_ : CLFlag<"Zc:dllexportInlines-">,
+ HelpText<"Do not dllexport/dllimport inline member functions of dllexport/import classes">;
+def _SLASH_Fp : CLJoined<"Fp">,
+ HelpText<"Set pch file name (with /Yc and /Yu)">, MetaVarName<"<file>">;
+
+def _SLASH_Gd : CLFlag<"Gd">,
+ HelpText<"Set __cdecl as a default calling convention">;
+def _SLASH_Gr : CLFlag<"Gr">,
+ HelpText<"Set __fastcall as a default calling convention">;
+def _SLASH_Gz : CLFlag<"Gz">,
+ HelpText<"Set __stdcall as a default calling convention">;
+def _SLASH_Gv : CLFlag<"Gv">,
+ HelpText<"Set __vectorcall as a default calling convention">;
+def _SLASH_Gregcall : CLFlag<"Gregcall">,
+ HelpText<"Set __regcall as a default calling convention">;
+
+// Ignored:
+
+def _SLASH_analyze_ : CLIgnoredFlag<"analyze-">;
+def _SLASH_bigobj : CLIgnoredFlag<"bigobj">;
+def _SLASH_cgthreads : CLIgnoredJoined<"cgthreads">;
+def _SLASH_d2FastFail : CLIgnoredFlag<"d2FastFail">;
+def _SLASH_d2Zi_PLUS : CLIgnoredFlag<"d2Zi+">;
+def _SLASH_errorReport : CLIgnoredJoined<"errorReport">;
+def _SLASH_FC : CLIgnoredFlag<"FC">;
+def _SLASH_Fd : CLIgnoredJoined<"Fd">;
+def _SLASH_FS : CLIgnoredFlag<"FS">;
+def _SLASH_JMC : CLIgnoredFlag<"JMC">;
+def _SLASH_kernel_ : CLIgnoredFlag<"kernel-">;
+def _SLASH_nologo : CLIgnoredFlag<"nologo">;
+def _SLASH_permissive_ : CLIgnoredFlag<"permissive-">;
+def _SLASH_RTC : CLIgnoredJoined<"RTC">;
+def _SLASH_sdl : CLIgnoredFlag<"sdl">;
+def _SLASH_sdl_ : CLIgnoredFlag<"sdl-">;
+def _SLASH_utf8 : CLIgnoredFlag<"utf-8">,
+ HelpText<"Set source and runtime encoding to UTF-8 (default)">;
+def _SLASH_w : CLIgnoredJoined<"w">;
+def _SLASH_Zc___cplusplus : CLIgnoredFlag<"Zc:__cplusplus">;
+def _SLASH_Zc_auto : CLIgnoredFlag<"Zc:auto">;
+def _SLASH_Zc_forScope : CLIgnoredFlag<"Zc:forScope">;
+def _SLASH_Zc_inline : CLIgnoredFlag<"Zc:inline">;
+def _SLASH_Zc_rvalueCast : CLIgnoredFlag<"Zc:rvalueCast">;
+def _SLASH_Zc_ternary : CLIgnoredFlag<"Zc:ternary">;
+def _SLASH_Zc_wchar_t : CLIgnoredFlag<"Zc:wchar_t">;
+def _SLASH_ZH_MD5 : CLIgnoredFlag<"ZH:MD5">;
+def _SLASH_ZH_SHA1 : CLIgnoredFlag<"ZH:SHA1">;
+def _SLASH_ZH_SHA_256 : CLIgnoredFlag<"ZH:SHA_256">;
+def _SLASH_Zm : CLIgnoredJoined<"Zm">;
+def _SLASH_Zo : CLIgnoredFlag<"Zo">;
+def _SLASH_Zo_ : CLIgnoredFlag<"Zo-">;
+
+
+// Unsupported:
+
+def _SLASH_await : CLFlag<"await">;
+def _SLASH_constexpr : CLJoined<"constexpr:">;
+def _SLASH_AI : CLJoinedOrSeparate<"AI">;
+def _SLASH_Bt : CLFlag<"Bt">;
+def _SLASH_Bt_plus : CLFlag<"Bt+">;
+def _SLASH_clr : CLJoined<"clr">;
+def _SLASH_d2 : CLJoined<"d2">;
+def _SLASH_doc : CLJoined<"doc">;
+def _SLASH_FA_joined : CLJoined<"FA">;
+def _SLASH_favor : CLJoined<"favor">;
+def _SLASH_F : CLJoinedOrSeparate<"F">;
+def _SLASH_Fm : CLJoined<"Fm">;
+def _SLASH_Fr : CLJoined<"Fr">;
+def _SLASH_FR : CLJoined<"FR">;
+def _SLASH_FU : CLJoinedOrSeparate<"FU">;
+def _SLASH_Fx : CLFlag<"Fx">;
+def _SLASH_G1 : CLFlag<"G1">;
+def _SLASH_G2 : CLFlag<"G2">;
+def _SLASH_Ge : CLFlag<"Ge">;
+def _SLASH_Gh : CLFlag<"Gh">;
+def _SLASH_GH : CLFlag<"GH">;
+def _SLASH_GL : CLFlag<"GL">;
+def _SLASH_GL_ : CLFlag<"GL-">;
+def _SLASH_Gm : CLFlag<"Gm">;
+def _SLASH_Gm_ : CLFlag<"Gm-">;
+def _SLASH_GT : CLFlag<"GT">;
+def _SLASH_GZ : CLFlag<"GZ">;
+def _SLASH_H : CLFlag<"H">;
+def _SLASH_homeparams : CLFlag<"homeparams">;
+def _SLASH_hotpatch : CLFlag<"hotpatch">;
+def _SLASH_kernel : CLFlag<"kernel">;
+def _SLASH_LN : CLFlag<"LN">;
+def _SLASH_MP : CLJoined<"MP">;
+def _SLASH_Qfast_transcendentals : CLFlag<"Qfast_transcendentals">;
+def _SLASH_QIfist : CLFlag<"QIfist">;
+def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">;
+def _SLASH_Qimprecise_fwaits : CLFlag<"Qimprecise_fwaits">;
+def _SLASH_Qpar : CLFlag<"Qpar">;
+def _SLASH_Qpar_report : CLJoined<"Qpar-report">;
+def _SLASH_Qsafe_fp_loads : CLFlag<"Qsafe_fp_loads">;
+def _SLASH_Qspectre : CLFlag<"Qspectre">;
+def _SLASH_Qspectre_load : CLFlag<"Qspectre-load">;
+def _SLASH_Qspectre_load_cf : CLFlag<"Qspectre-load-cf">;
+def _SLASH_Qvec_report : CLJoined<"Qvec-report">;
+def _SLASH_u : CLFlag<"u">;
+def _SLASH_V : CLFlag<"V">;
+def _SLASH_WL : CLFlag<"WL">;
+def _SLASH_Wp64 : CLFlag<"Wp64">;
+def _SLASH_Yd : CLFlag<"Yd">;
+def _SLASH_Yl : CLJoined<"Yl">;
+def _SLASH_Za : CLFlag<"Za">;
+def _SLASH_Zc : CLJoined<"Zc:">;
+def _SLASH_Ze : CLFlag<"Ze">;
+def _SLASH_Zg : CLFlag<"Zg">;
+def _SLASH_ZI : CLFlag<"ZI">;
+def _SLASH_ZW : CLJoined<"ZW">;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Phases.h b/contrib/llvm-project/clang/include/clang/Driver/Phases.h
index 63931c00c890..ce914dd70514 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Phases.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Phases.h
@@ -22,10 +22,11 @@ namespace phases {
Assemble,
Link,
IfsMerge,
+ LastPhase = IfsMerge,
};
enum {
- MaxNumberOfPhases = Link + 1
+ MaxNumberOfPhases = LastPhase + 1
};
const char *getPhaseName(ID Id);
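The new LastPhase alias is what keeps MaxNumberOfPhases in sync once a phase such as IfsMerge is appended to the end of the enum. A minimal stand-alone sketch of the same pattern (the enumerators before Assemble are abbreviated/assumed here, not copied from Phases.h):

// Sketch of the "last enumerator" pattern: the count is derived from a
// LastPhase alias, so adding a new phase cannot silently desynchronize it.
#include <cstdio>

namespace phases_sketch {
enum ID { Preprocess, Precompile, Compile, Backend, Assemble, Link, IfsMerge,
          LastPhase = IfsMerge };
enum { MaxNumberOfPhases = LastPhase + 1 };
} // namespace phases_sketch

int main() {
  // Iterate over every phase value without hard-coding the final enumerator.
  for (int P = 0; P < phases_sketch::MaxNumberOfPhases; ++P)
    std::printf("phase %d\n", P);
}

Deriving the count this way means a newly added phase only has to update LastPhase, not every place that sizes tables by the number of phases.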
diff --git a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
index 0aebf8cb225d..934dab808e82 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
@@ -27,6 +27,8 @@ class SanitizerArgs {
std::vector<std::string> UserBlacklistFiles;
std::vector<std::string> SystemBlacklistFiles;
+ std::vector<std::string> CoverageAllowlistFiles;
+ std::vector<std::string> CoverageBlocklistFiles;
int CoverageFeatures = 0;
int MsanTrackOrigins = 0;
bool MsanUseAfterDtor = true;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Tool.h b/contrib/llvm-project/clang/include/clang/Driver/Tool.h
index 8d0491606978..cc0a09fb2747 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Tool.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Tool.h
@@ -10,7 +10,6 @@
#define LLVM_CLANG_DRIVER_TOOL_H
#include "clang/Basic/LLVM.h"
-#include "llvm/Support/Program.h"
namespace llvm {
namespace opt {
@@ -31,24 +30,6 @@ namespace driver {
/// Tool - Information on a specific compilation tool.
class Tool {
-public:
- // Documents the level of support for response files in this tool.
- // Response files are necessary if the command line gets too large,
- // requiring the arguments to be transferred to a file.
- enum ResponseFileSupport {
- // Provides full support for response files, which means we can transfer
- // all tool input arguments to a file. E.g.: clang, gcc, binutils and MSVC
- // tools.
- RF_Full,
- // Input file names can live in a file, but flags can't. E.g.: ld64 (Mac
- // OS X linker).
- RF_FileList,
- // Does not support response files: all arguments must be passed via
- // command line.
- RF_None
- };
-
-private:
/// The tool name (for debugging).
const char *Name;
@@ -58,20 +39,8 @@ private:
/// The tool chain this tool is a part of.
const ToolChain &TheToolChain;
- /// The level of support for response files seen in this tool
- const ResponseFileSupport ResponseSupport;
-
- /// The encoding to use when writing response files for this tool on Windows
- const llvm::sys::WindowsEncodingMethod ResponseEncoding;
-
- /// The flag used to pass a response file via command line to this tool
- const char *const ResponseFlag;
-
public:
- Tool(const char *Name, const char *ShortName, const ToolChain &TC,
- ResponseFileSupport ResponseSupport = RF_None,
- llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
- const char *ResponseFlag = "@");
+ Tool(const char *Name, const char *ShortName, const ToolChain &TC);
public:
virtual ~Tool();
@@ -87,29 +56,6 @@ public:
virtual bool hasIntegratedCPP() const = 0;
virtual bool isLinkJob() const { return false; }
virtual bool isDsymutilJob() const { return false; }
- /// Returns the level of support for response files of this tool,
- /// whether it accepts arguments to be passed via a file on disk.
- ResponseFileSupport getResponseFilesSupport() const {
- return ResponseSupport;
- }
- /// Returns which encoding the response file should use. This is only
- /// relevant on Windows platforms where there are different encodings being
- /// accepted for different tools. On UNIX, UTF8 is universal.
- ///
- /// Windows use cases: - GCC and Binutils on mingw only accept ANSI response
- /// files encoded with the system current code page.
- /// - MSVC's CL.exe and LINK.exe accept UTF16 on Windows.
- /// - Clang accepts both UTF8 and UTF16.
- ///
- /// FIXME: When GNU tools learn how to parse UTF16 on Windows, we should
- /// always use UTF16 for Windows, which is the Windows official encoding for
- /// international characters.
- llvm::sys::WindowsEncodingMethod getResponseFileEncoding() const {
- return ResponseEncoding;
- }
- /// Returns which prefix to use when passing the name of a response
- /// file as a parameter to this tool.
- const char *getResponseFileFlag() const { return ResponseFlag; }
/// Does this tool have "good" standardized diagnostics, or should the
/// driver add an additional "command failed" diagnostic on failures.
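The removed ResponseFileSupport block documented when a tool may receive its arguments through an on-disk "@file" instead of the command line (RF_Full for all arguments, RF_FileList for input file names only, RF_None for neither). A generic illustration of that mechanism, written as a stand-alone sketch rather than the driver's actual implementation:

// When a command line would get too long, the arguments are written to a
// file and the tool is invoked with a single "@<file>" argument instead.
#include <fstream>
#include <string>
#include <vector>

std::string makeResponseFile(const std::vector<std::string> &Args,
                             const std::string &Path) {
  std::ofstream RSP(Path);
  for (const std::string &A : Args)
    RSP << A << '\n';          // one argument per line
  return "@" + Path;           // what is actually passed on the command line
}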
diff --git a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
index 26d8d43dd2fc..7495e08fe6e6 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
@@ -16,7 +16,9 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Types.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
@@ -137,6 +139,7 @@ private:
mutable std::unique_ptr<Tool> Flang;
mutable std::unique_ptr<Tool> Assemble;
mutable std::unique_ptr<Tool> Link;
+ mutable std::unique_ptr<Tool> StaticLibTool;
mutable std::unique_ptr<Tool> IfsMerge;
mutable std::unique_ptr<Tool> OffloadBundler;
mutable std::unique_ptr<Tool> OffloadWrapper;
@@ -145,6 +148,7 @@ private:
Tool *getFlang() const;
Tool *getAssemble() const;
Tool *getLink() const;
+ Tool *getStaticLibTool() const;
Tool *getIfsMerge() const;
Tool *getClangAs() const;
Tool *getOffloadBundler() const;
@@ -172,6 +176,7 @@ protected:
virtual Tool *buildAssembler() const;
virtual Tool *buildLinker() const;
+ virtual Tool *buildStaticLibTool() const;
virtual Tool *getTool(Action::ActionClass AC) const;
/// \name Utilities for implementing subclasses.
@@ -293,6 +298,22 @@ public:
const llvm::opt::DerivedArgList &Args, bool SameTripleAsHost,
SmallVectorImpl<llvm::opt::Arg *> &AllocatedArgs) const;
+ /// Append the argument following \p A to \p DAL assuming \p A is an Xarch
+ /// argument. If \p AllocatedArgs is a null pointer, synthesized arguments are
+ /// added to \p DAL; otherwise they are appended to \p AllocatedArgs.
+ virtual void TranslateXarchArgs(
+ const llvm::opt::DerivedArgList &Args, llvm::opt::Arg *&A,
+ llvm::opt::DerivedArgList *DAL,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs = nullptr) const;
+
+ /// Translate -Xarch_ arguments. If there are no such arguments, return
+ /// a null pointer, otherwise return a DerivedArgList containing the
+ /// translated arguments.
+ virtual llvm::opt::DerivedArgList *
+ TranslateXarchArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs) const;
+
/// Choose a tool to use to handle the action \p JA.
///
/// This can be overridden when a particular ToolChain needs to use
@@ -308,6 +329,9 @@ public:
/// the linker suffix or name.
std::string GetLinkerPath() const;
+ /// Returns the linker path for emitting a static library.
+ std::string GetStaticLibToolPath() const;
+
/// Dispatch to the specific toolchain for verbose printing.
///
/// This is used when handling the verbose option to print detailed,
@@ -395,6 +419,11 @@ public:
getCompilerRTArgString(const llvm::opt::ArgList &Args, StringRef Component,
FileType Type = ToolChain::FT_Static) const;
+ std::string getCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type = ToolChain::FT_Static,
+ bool AddArch = true) const;
+
// Returns target specific runtime path if it exists.
virtual Optional<std::string> getRuntimePath() const;
@@ -513,6 +542,10 @@ public:
/// FIXME: this really belongs on some sort of DeploymentTarget abstraction
virtual bool hasBlocksRuntime() const { return true; }
+ /// Return the sysroot, possibly searching for a default sysroot using
+ /// target-specific logic.
+ virtual std::string computeSysRoot() const;
+
/// Add the clang cc1 arguments for system include paths.
///
/// This routine is responsible for adding the necessary cc1 arguments to
@@ -571,12 +604,19 @@ public:
virtual void AddCCKextLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ /// If a runtime library exists that sets global flags for unsafe floating
+ /// point math, return true.
+ ///
+ /// This checks for presence of the -Ofast, -ffast-math or -funsafe-math flags.
+ virtual bool isFastMathRuntimeAvailable(
+ const llvm::opt::ArgList &Args, std::string &Path) const;
+
/// AddFastMathRuntimeIfAvailable - If a runtime library exists that sets
/// global flags for unsafe floating point math, add it and return true.
///
/// This checks for presence of the -Ofast, -ffast-math or -funsafe-math flags.
- virtual bool AddFastMathRuntimeIfAvailable(
- const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const;
+ bool addFastMathRuntimeIfAvailable(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const;
/// addProfileRTLibs - When -fprofile-instr-profile is specified, try to pass
/// a suitable profile runtime library to the linker.
@@ -587,6 +627,10 @@ public:
virtual void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
+ /// Add arguments to use system-specific HIP includes.
+ virtual void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
/// Add arguments to use MCU GCC toolchain includes.
virtual void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
@@ -606,6 +650,15 @@ public:
/// Returns true when it's possible to split LTO unit to use whole
/// program devirtualization and CFI sanitizers.
virtual bool canSplitThinLTOUnit() const { return true; }
+
+ /// Returns the output denormal handling type in the default floating point
+ /// environment for the given \p FPType, if specified. Otherwise, returns the
+ /// default assumed mode for any floating point type.
+ virtual llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const {
+ return llvm::DenormalMode::getIEEE();
+ }
};
/// Set a ToolChain's effective triple. Reset it when the registration object
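The new TranslateXarchArgs hooks formalize how "-Xarch_<arch> <flag>" pairs are routed only to the compilation bound to that architecture. A simplified, hypothetical sketch of the filtering idea (the real hooks work on llvm::opt::DerivedArgList, not raw strings):

// Keep ordinary arguments; for "-Xarch_<arch> <flag>" pairs, forward <flag>
// only when <arch> matches the architecture this job is bound to.
#include <string>
#include <vector>

std::vector<std::string>
translateXarch(const std::vector<std::string> &Args,
               const std::string &BoundArch) {
  const std::string Prefix = "-Xarch_";
  std::vector<std::string> Out;
  for (size_t I = 0; I < Args.size(); ++I) {
    if (Args[I].compare(0, Prefix.size(), Prefix) == 0 && I + 1 < Args.size()) {
      if (Args[I].substr(Prefix.size()) == BoundArch)
        Out.push_back(Args[I + 1]); // forward the wrapped flag to this arch
      ++I;                          // consume the wrapped flag either way
    } else {
      Out.push_back(Args[I]);
    }
  }
  return Out;
}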
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Types.h b/contrib/llvm-project/clang/include/clang/Driver/Types.h
index c7c38fa52593..97bf5fd672ab 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Types.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Types.h
@@ -45,9 +45,6 @@ namespace types {
/// temp file of this type, or null if unspecified.
const char *getTypeTempSuffix(ID Id, bool CLMode = false);
- /// onlyAssembleType - Should this type only be assembled.
- bool onlyAssembleType(ID Id);
-
/// onlyPrecompileType - Should this type only be precompiled.
bool onlyPrecompileType(ID Id);
@@ -101,13 +98,12 @@ namespace types {
ID lookupTypeForTypeSpecifier(const char *Name);
/// getCompilationPhases - Get the list of compilation phases ('Phases') to be
- /// done for type 'Id'.
- void getCompilationPhases(
- ID Id,
- llvm::SmallVectorImpl<phases::ID> &Phases);
- void getCompilationPhases(const clang::driver::Driver &Driver,
- llvm::opt::DerivedArgList &DAL, ID Id,
- llvm::SmallVectorImpl<phases::ID> &Phases);
+ /// done for type 'Id', up to and including LastPhase.
+ llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+ getCompilationPhases(ID Id, phases::ID LastPhase = phases::LastPhase);
+ llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+ getCompilationPhases(const clang::driver::Driver &Driver,
+ llvm::opt::DerivedArgList &DAL, ID Id);
/// lookupCXXTypeForCType - Lookup CXX input type that corresponds to given
/// C type (used for clang++ emulation of g++ behaviour)
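With the reworked signatures, callers receive the phase list by value and can cap it at a phase of interest instead of filling a caller-provided vector. A hedged usage sketch (the include paths and TY_CXX are assumptions of this sketch, not shown in the diff):

// Print the phases needed to take a C++ input up to and including the
// Compile phase, without assembling or linking.
#include "clang/Driver/Phases.h"
#include "clang/Driver/Types.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang::driver;

void printPhasesUpToCompile() {
  auto Phases = types::getCompilationPhases(types::TY_CXX, phases::Compile);
  for (phases::ID P : Phases)
    llvm::outs() << phases::getPhaseName(P) << "\n";
}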
diff --git a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
index fa2583f4b966..2f055e5c6d7d 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
@@ -30,6 +30,8 @@ class XRayArgs {
bool XRayAlwaysEmitCustomEvents = false;
bool XRayAlwaysEmitTypedEvents = false;
bool XRayRT = true;
+ bool XRayIgnoreLoops = false;
+ bool XRayFunctionIndex;
public:
/// Parses the XRay arguments from an argument list.
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index add2937f3b43..3549ec9eee0e 100644
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
@@ -35,7 +35,12 @@ class DiagnosticConsumer;
namespace format {
-enum class ParseError { Success = 0, Error, Unsuitable };
+enum class ParseError {
+ Success = 0,
+ Error,
+ Unsuitable,
+ BinPackTrailingCommaConflict
+};
class ParseErrorCategory final : public std::error_category {
public:
const char *name() const noexcept override;
@@ -103,6 +108,17 @@ struct FormatStyle {
/// \endcode
bool AlignConsecutiveAssignments;
+ /// If ``true``, aligns consecutive bitfield members.
+ ///
+ /// This will align the bitfield separators of consecutive lines. This
+ /// will result in formatting like
+ /// \code
+ /// int aaaa : 1;
+ /// int b : 12;
+ /// int ccc : 8;
+ /// \endcode
+ bool AlignConsecutiveBitFields;
+
/// If ``true``, aligns consecutive declarations.
///
/// This will align the declaration names of consecutive lines. This
@@ -148,16 +164,43 @@ struct FormatStyle {
/// Options for aligning backslashes in escaped newlines.
EscapedNewlineAlignmentStyle AlignEscapedNewlines;
+ /// Different styles for aligning operands.
+ enum OperandAlignmentStyle {
+ /// Do not align operands of binary and ternary expressions.
+ /// The wrapped lines are indented ``ContinuationIndentWidth`` spaces from
+ /// the start of the line.
+ OAS_DontAlign,
+ /// Horizontally align operands of binary and ternary expressions.
+ ///
+ /// Specifically, this aligns operands of a single expression that needs
+ /// to be split over multiple lines, e.g.:
+ /// \code
+ /// int aaa = bbbbbbbbbbbbbbb +
+ /// ccccccccccccccc;
+ /// \endcode
+ ///
+ /// When ``BreakBeforeBinaryOperators`` is set, the wrapped operator is
+ /// aligned with the operand on the first line.
+ /// \code
+ /// int aaa = bbbbbbbbbbbbbbb
+ /// + ccccccccccccccc;
+ /// \endcode
+ OAS_Align,
+ /// Horizontally align operands of binary and ternary expressions.
+ ///
+ /// This is similar to ``OAS_Align``, except when
+ /// ``BreakBeforeBinaryOperators`` is set, the operator is un-indented so
+ /// that the wrapped operand is aligned with the operand on the first line.
+ /// \code
+ /// int aaa = bbbbbbbbbbbbbbb
+ /// + ccccccccccccccc;
+ /// \endcode
+ OAS_AlignAfterOperator,
+ };
+
/// If ``true``, horizontally align operands of binary and ternary
/// expressions.
- ///
- /// Specifically, this aligns operands of a single expression that needs to be
- /// split over multiple lines, e.g.:
- /// \code
- /// int aaa = bbbbbbbbbbbbbbb +
- /// ccccccccccccccc;
- /// \endcode
- bool AlignOperands;
+ OperandAlignmentStyle AlignOperands;
/// If ``true``, aligns trailing comments.
/// \code
@@ -216,6 +259,20 @@ struct FormatStyle {
/// \endcode
bool AllowAllParametersOfDeclarationOnNextLine;
+ /// Allow short enums on a single line.
+ /// \code
+ /// true:
+ /// enum { A, B } myEnum;
+ ///
+ /// false:
+ /// enum
+ /// {
+ /// A,
+ /// B
+ /// } myEnum;
+ /// \endcode
+ bool AllowShortEnumsOnASingleLine;
+
/// Different styles for merging short blocks containing at most one
/// statement.
enum ShortBlockStyle {
@@ -544,6 +601,35 @@ struct FormatStyle {
/// \endcode
bool BinPackArguments;
+ /// The style of inserting trailing commas into container literals.
+ enum TrailingCommaStyle {
+ /// Do not insert trailing commas.
+ TCS_None,
+ /// Insert trailing commas in container literals that were wrapped over
+ /// multiple lines. Note that this is conceptually incompatible with
+ /// bin-packing, because the trailing comma is used as an indicator
+ /// that a container should be formatted one-per-line (i.e. not bin-packed).
+ /// So inserting a trailing comma counteracts bin-packing.
+ TCS_Wrapped,
+ };
+
+ /// If set to ``TCS_Wrapped``, insert trailing commas in container
+ /// literals (arrays and objects) that wrap across multiple lines.
+ /// It is currently only available for JavaScript and is disabled by
+ /// default (``TCS_None``).
+ /// ``InsertTrailingCommas`` cannot be used together with ``BinPackArguments``,
+ /// as inserting the comma disables bin-packing.
+ /// \code
+ /// TCS_Wrapped:
+ /// const someArray = [
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// // ^ inserted
+ /// ]
+ /// \endcode
+ TrailingCommaStyle InsertTrailingCommas;
+
/// If ``false``, a function declaration's or function definition's
/// parameters will either all be on the same line or will have one line each.
/// \code
@@ -942,7 +1028,7 @@ struct FormatStyle {
/// int foo();
/// }
/// \endcode
- bool AfterExternBlock;
+ bool AfterExternBlock; // Partially superseded by IndentExternBlock
/// Wrap before ``catch``.
/// \code
/// true:
@@ -973,6 +1059,37 @@ struct FormatStyle {
/// }
/// \endcode
bool BeforeElse;
+ /// Wrap lambda block.
+ /// \code
+ /// true:
+ /// connect(
+ /// []()
+ /// {
+ /// foo();
+ /// bar();
+ /// });
+ ///
+ /// false:
+ /// connect([]() {
+ /// foo();
+ /// bar();
+ /// });
+ /// \endcode
+ bool BeforeLambdaBody;
+ /// Wrap before ``while``.
+ /// \code
+ /// true:
+ /// do {
+ /// foo();
+ /// }
+ /// while (1);
+ ///
+ /// false:
+ /// do {
+ /// foo();
+ /// } while (1);
+ /// \endcode
+ bool BeforeWhile;
/// Indent the wrapped braces themselves.
bool IndentBraces;
/// If ``false``, empty function body can be put on a single line.
@@ -1308,13 +1425,25 @@ struct FormatStyle {
/// For example: TESTSUITE
std::vector<std::string> NamespaceMacros;
+ /// A vector of macros which are whitespace-sensitive and shouldn't be
+ /// touched.
+ ///
+ /// These are expected to be macros of the form:
+ /// \code
+ /// STRINGIZE(...)
+ /// \endcode
+ ///
+ /// For example: STRINGIZE
+ std::vector<std::string> WhitespaceSensitiveMacros;
+
tooling::IncludeStyle IncludeStyle;
/// Indent case labels one level from the switch statement.
///
/// When ``false``, use the same indentation level as for the switch
/// statement. Switch statement body is always indented one level more than
- /// case labels.
+ /// case labels (except the first block following the case label, which
+ /// itself indents the code - unless IndentCaseBlocks is enabled).
/// \code
/// false: true:
/// switch (fool) { vs. switch (fool) {
@@ -1327,6 +1456,28 @@ struct FormatStyle {
/// \endcode
bool IndentCaseLabels;
+ /// Indent case label blocks one level from the case label.
+ ///
+ /// When ``false``, the block following the case label uses the same
+ /// indentation level as for the case label, treating the case label the same
+ /// as an if-statement.
+ /// When ``true``, the block gets indented as a scope block.
+ /// \code
+ /// false: true:
+ /// switch (fool) { vs. switch (fool) {
+ /// case 1: { case 1:
+ /// bar(); {
+ /// } break; bar();
+ /// default: { }
+ /// plop(); break;
+ /// } default:
+ /// } {
+ /// plop();
+ /// }
+ /// }
+ /// \endcode
+ bool IndentCaseBlocks;
+
/// Indent goto labels.
///
/// When ``false``, goto labels are flushed left.
@@ -1377,6 +1528,45 @@ struct FormatStyle {
/// The preprocessor directive indenting style to use.
PPDirectiveIndentStyle IndentPPDirectives;
+ /// Indents extern blocks
+ enum IndentExternBlockStyle {
+ /// Backwards compatible with AfterExternBlock's indenting.
+ /// \code
+ /// IndentExternBlock: AfterExternBlock
+ /// BraceWrapping.AfterExternBlock: true
+ /// extern "C"
+ /// {
+ /// void foo();
+ /// }
+ /// \endcode
+ ///
+ /// \code
+ /// IndentExternBlock: AfterExternBlock
+ /// BraceWrapping.AfterExternBlock: false
+ /// extern "C" {
+ /// void foo();
+ /// }
+ /// \endcode
+ IEBS_AfterExternBlock,
+ /// Does not indent extern blocks.
+ /// \code
+ /// extern "C" {
+ /// void foo();
+ /// }
+ /// \endcode
+ IEBS_NoIndent,
+ /// Indents extern blocks.
+ /// \code
+ /// extern "C" {
+ /// void foo();
+ /// }
+ /// \endcode
+ IEBS_Indent,
+ };
+
+ /// The indentation style to use for extern blocks.
+ IndentExternBlockStyle IndentExternBlock;
+
/// The number of columns to use for indentation.
/// \code
/// IndentWidth: 3
@@ -1646,6 +1836,29 @@ struct FormatStyle {
/// ``@property (readonly)`` instead of ``@property(readonly)``.
bool ObjCSpaceAfterProperty;
+ /// Break parameter lists onto multiple lines when there are nested block
+ /// parameters in a function call.
+ /// \code
+ /// false:
+ /// - (void)_aMethod
+ /// {
+ /// [self.test1 t:self w:self callback:^(typeof(self) self, NSNumber
+ /// *u, NSNumber *v) {
+ /// u = c;
+ /// }]
+ /// }
+ /// true:
+ /// - (void)_aMethod
+ /// {
+ /// [self.test1 t:self
+ /// w:self
+ /// callback:^(typeof(self) self, NSNumber *u, NSNumber *v) {
+ /// u = c;
+ /// }]
+ /// }
+ /// \endcode
+ bool ObjCBreakBeforeNestedBlockParam;
+
/// Add a space in front of an Objective-C protocol list, i.e. use
/// ``Foo <Protocol>`` instead of ``Foo<Protocol>``.
bool ObjCSpaceBeforeProtocolList;
@@ -1872,6 +2085,17 @@ struct FormatStyle {
/// }
/// \endcode
SBPO_ControlStatements,
+ /// Same as ``SBPO_ControlStatements`` except this option doesn't apply to
+ /// ForEach macros. This is useful in projects where ForEach macros are
+ /// treated as function calls instead of control statements.
+ /// \code
+ /// void f() {
+ /// Q_FOREACH(...) {
+ /// f();
+ /// }
+ /// }
+ /// \endcode
+ SBPO_ControlStatementsExceptForEachMacros,
/// Put a space before opening parentheses only if the parentheses are not
/// empty i.e. '()'
/// \code
@@ -2047,8 +2271,12 @@ struct FormatStyle {
UT_Never,
/// Use tabs only for indentation.
UT_ForIndentation,
- /// Use tabs only for line continuation and indentation.
+ /// Fill all leading whitespace with tabs, and use spaces for alignment that
+ /// appears within a line (e.g. consecutive assignments and declarations).
UT_ForContinuationAndIndentation,
+ /// Use tabs for line continuation and indentation, and spaces for
+ /// alignment.
+ UT_AlignWithSpaces,
/// Use tabs whenever we need to fill whitespace that spans at least from
/// one tab stop to the next one.
UT_Always
@@ -2065,6 +2293,7 @@ struct FormatStyle {
return AccessModifierOffset == R.AccessModifierOffset &&
AlignAfterOpenBracket == R.AlignAfterOpenBracket &&
AlignConsecutiveAssignments == R.AlignConsecutiveAssignments &&
+ AlignConsecutiveBitFields == R.AlignConsecutiveBitFields &&
AlignConsecutiveDeclarations == R.AlignConsecutiveDeclarations &&
AlignEscapedNewlines == R.AlignEscapedNewlines &&
AlignOperands == R.AlignOperands &&
@@ -2074,6 +2303,7 @@ struct FormatStyle {
R.AllowAllConstructorInitializersOnNextLine &&
AllowAllParametersOfDeclarationOnNextLine ==
R.AllowAllParametersOfDeclarationOnNextLine &&
+ AllowShortEnumsOnASingleLine == R.AllowShortEnumsOnASingleLine &&
AllowShortBlocksOnASingleLine == R.AllowShortBlocksOnASingleLine &&
AllowShortCaseLabelsOnASingleLine ==
R.AllowShortCaseLabelsOnASingleLine &&
@@ -2119,8 +2349,10 @@ struct FormatStyle {
IncludeStyle.IncludeIsMainSourceRegex ==
R.IncludeStyle.IncludeIsMainSourceRegex &&
IndentCaseLabels == R.IndentCaseLabels &&
+ IndentCaseBlocks == R.IndentCaseBlocks &&
IndentGotoLabels == R.IndentGotoLabels &&
IndentPPDirectives == R.IndentPPDirectives &&
+ IndentExternBlock == R.IndentExternBlock &&
IndentWidth == R.IndentWidth && Language == R.Language &&
IndentWrappedFunctionNames == R.IndentWrappedFunctionNames &&
JavaImportGroups == R.JavaImportGroups &&
@@ -2135,6 +2367,8 @@ struct FormatStyle {
NamespaceMacros == R.NamespaceMacros &&
ObjCBinPackProtocolList == R.ObjCBinPackProtocolList &&
ObjCBlockIndentWidth == R.ObjCBlockIndentWidth &&
+ ObjCBreakBeforeNestedBlockParam ==
+ R.ObjCBreakBeforeNestedBlockParam &&
ObjCSpaceAfterProperty == R.ObjCSpaceAfterProperty &&
ObjCSpaceBeforeProtocolList == R.ObjCSpaceBeforeProtocolList &&
PenaltyBreakAssignment == R.PenaltyBreakAssignment &&
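The new FormatStyle fields added above (the AlignOperands enum, AllowShortEnumsOnASingleLine, InsertTrailingCommas, IndentCaseBlocks, IndentExternBlock, and friends) can also be set programmatically. A minimal sketch, assuming the pre-existing libFormat entry points getLLVMStyle() and reformat(), which are not part of this diff:

#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"

// Configure a style exercising the options added in this change, then ask
// libFormat for the replacements needed to reformat a code snippet.
clang::tooling::Replacements formatSnippet(llvm::StringRef Code) {
  using clang::format::FormatStyle;
  FormatStyle Style = clang::format::getLLVMStyle();
  Style.AlignOperands = FormatStyle::OAS_AlignAfterOperator;
  Style.AllowShortEnumsOnASingleLine = false;
  Style.IndentCaseBlocks = true;
  Style.IndentExternBlock = FormatStyle::IEBS_Indent;
  // InsertTrailingCommas only takes effect for JavaScript input.
  return clang::format::reformat(
      Style, Code, {clang::tooling::Range(0, Code.size())});
}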
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
index af8c4a517dcd..98cfc7cadc0d 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
@@ -39,7 +39,7 @@ std::unique_ptr<ASTConsumer> CreateASTPrinter(std::unique_ptr<raw_ostream> OS,
std::unique_ptr<ASTConsumer>
CreateASTDumper(std::unique_ptr<raw_ostream> OS, StringRef FilterString,
bool DumpDecls, bool Deserialize, bool DumpLookups,
- ASTDumpOutputFormat Format);
+ bool DumpDeclTypes, ASTDumpOutputFormat Format);
// AST Decl node lister: prints qualified names of all filterable AST Decl
// nodes.
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
index a36655150d4e..50ab86ebad97 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
@@ -172,7 +172,7 @@ private:
/// Sorted (by file offset) vector of pairs of file offset/Decl.
using LocDeclsTy = SmallVector<std::pair<unsigned, Decl *>, 64>;
- using FileDeclsTy = llvm::DenseMap<FileID, LocDeclsTy *>;
+ using FileDeclsTy = llvm::DenseMap<FileID, std::unique_ptr<LocDeclsTy>>;
/// Map from FileID to the file-level declarations that it contains.
/// The files and decls are only local (and non-preamble) ones.
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h b/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
index e95d100f6a76..0827433462e1 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
@@ -38,7 +38,7 @@ public:
// If both tail splits were valid integers, return success.
if (!ColSplit.second.getAsInteger(10, PSL.Column) &&
!LineSplit.second.getAsInteger(10, PSL.Line)) {
- PSL.FileName = LineSplit.first;
+ PSL.FileName = std::string(LineSplit.first);
// On the command-line, stdin may be specified via "-". Inside the
// compiler, stdin is called "<stdin>".
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
index e501dde465cc..cb935becaef1 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
@@ -128,7 +128,7 @@ class CompilerInstance : public ModuleLoader {
/// The set of top-level modules that has already been built on the
/// fly as part of this overall compilation action.
- std::map<std::string, std::string> BuiltModules;
+ std::map<std::string, std::string, std::less<>> BuiltModules;
/// Should we delete the BuiltModules when we're done?
bool DeleteBuiltModules = true;
@@ -390,9 +390,7 @@ public:
/// @name Virtual File System
/// {
- llvm::vfs::FileSystem &getVirtualFileSystem() const {
- return getFileManager().getVirtualFileSystem();
- }
+ llvm::vfs::FileSystem &getVirtualFileSystem() const;
/// }
/// @name File Manager
@@ -515,7 +513,7 @@ public:
/// {
IntrusiveRefCntPtr<ASTReader> getASTReader() const;
- void setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader);
+ void setASTReader(IntrusiveRefCntPtr<ASTReader> Reader);
std::shared_ptr<ModuleDependencyCollector> getModuleDepCollector() const;
void setModuleDepCollector(
@@ -766,10 +764,7 @@ public:
static bool InitializeSourceManager(const FrontendInputFile &Input,
DiagnosticsEngine &Diags,
FileManager &FileMgr,
- SourceManager &SourceMgr,
- HeaderSearch *HS,
- DependencyOutputOptions &DepOpts,
- const FrontendOptions &Opts);
+ SourceManager &SourceMgr);
/// }
@@ -781,7 +776,6 @@ public:
return std::move(OutputStream);
}
- // Create module manager.
void createASTReader();
bool loadModuleFile(StringRef FileName);
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
index f3253d5b40e3..c723fc084c85 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
@@ -59,8 +59,7 @@ class TargetOptions;
/// report the error(s).
bool ParseDiagnosticArgs(DiagnosticOptions &Opts, llvm::opt::ArgList &Args,
DiagnosticsEngine *Diags = nullptr,
- bool DefaultDiagColor = true,
- bool DefaultShowOpt = true);
+ bool DefaultDiagColor = true);
class CompilerInvocationBase {
public:
@@ -154,9 +153,12 @@ public:
/// one of the valid-to-access (albeit arbitrary) states.
///
/// \param [out] Res - The resulting invocation.
+ /// \param [in] CommandLineArgs - Array of argument strings, this must not
+ /// contain "-cc1".
static bool CreateFromArgs(CompilerInvocation &Res,
ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags);
+ DiagnosticsEngine &Diags,
+ const char *Argv0 = nullptr);
/// Get the directory where the compiler headers
/// reside, relative to the compiler binary (found by the passed in
@@ -184,6 +186,18 @@ public:
/// identifying the conditions under which the module was built.
std::string getModuleHash() const;
+ using StringAllocator = llvm::function_ref<const char *(const llvm::Twine &)>;
+ /// Generate a cc1-compatible command line arguments from this instance.
+ ///
+ /// \param [out] Args - The generated arguments. Note that the caller is
+ /// responsible for inserting the path to the clang executable and "-cc1" if
+ /// desired.
+ /// \param SA - A function that given a Twine can allocate storage for a given
+ /// command line argument and return a pointer to the newly allocated string.
+ /// The returned pointer is what gets appended to Args.
+ void generateCC1CommandLine(llvm::SmallVectorImpl<const char *> &Args,
+ StringAllocator SA) const;
+
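A hypothetical caller of generateCC1CommandLine, sketched under the assumption that llvm::StringSaver supplies the StringAllocator; none of this wrapper is part of the patch:

#include "clang/Frontend/CompilerInvocation.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"

// Round-trip a CompilerInvocation back into its -cc1 argument strings.
void dumpCC1Args(const clang::CompilerInvocation &Invocation) {
  llvm::BumpPtrAllocator Alloc;
  llvm::StringSaver Saver(Alloc); // owns the storage behind the char pointers
  llvm::SmallVector<const char *, 64> Args;
  Invocation.generateCC1CommandLine(
      Args, [&](const llvm::Twine &Arg) { return Saver.save(Arg).data(); });
  // As documented above, the caller prepends the clang executable path and
  // "-cc1" if a complete command line is needed.
  for (const char *A : Args)
    llvm::errs() << A << ' ';
  llvm::errs() << '\n';
}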
/// @}
/// @name Option Subgroups
/// @{
@@ -222,6 +236,16 @@ public:
}
/// @}
+
+private:
+ /// Parse options for flags that expose marshalling information in their
+ /// table-gen definition
+ ///
+ /// \param Args - The argument list containing the arguments to parse
+ /// \param Diags - The DiagnosticsEngine associated with CreateFromArgs
+ /// \returns - True if parsing was successful, false otherwise
+ bool parseSimpleArgs(const llvm::opt::ArgList &Args,
+ DiagnosticsEngine &Diags);
};
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
index e994e24cf5af..c9f9f080c141 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
@@ -312,6 +312,7 @@ protected:
bool BeginSourceFileAction(CompilerInstance &CI) override;
void ExecuteAction() override;
void EndSourceFileAction() override;
+ bool shouldEraseOutputFiles() override;
public:
/// Construct a WrapperFrontendAction from an existing action, taking
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
index 89ac20075fa4..9ca2bfda2138 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
@@ -119,17 +119,13 @@ protected:
bool hasASTFileSupport() const override { return false; }
};
-class GenerateInterfaceStubAction : public ASTFrontendAction {
-protected:
- TranslationUnitKind getTranslationUnitKind() override { return TU_Module; }
-
- bool hasASTFileSupport() const override { return false; }
-};
-
-class GenerateInterfaceIfsExpV1Action : public GenerateInterfaceStubAction {
+class GenerateInterfaceStubsAction : public ASTFrontendAction {
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
+
+ TranslationUnitKind getTranslationUnitKind() override { return TU_Module; }
+ bool hasASTFileSupport() const override { return false; }
};
class GenerateModuleFromModuleMapAction : public GenerateModuleAction {
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
index 09969b596d63..b2be33032c08 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
@@ -90,7 +90,7 @@ enum ActionKind {
GeneratePCH,
/// Generate Interface Stub Files.
- GenerateInterfaceIfsExpV1,
+ GenerateInterfaceStubs,
/// Only execute frontend initialization.
InitOnly,
@@ -285,6 +285,9 @@ public:
/// Whether we include lookup table dumps in AST dumps.
unsigned ASTDumpLookups : 1;
+ /// Whether we include declaration type dumps in AST dumps.
+ unsigned ASTDumpDeclTypes : 1;
+
/// Whether we are performing an implicit module build.
unsigned BuildingImplicitModule : 1;
@@ -297,6 +300,9 @@ public:
/// Should a temporary file be used during compilation.
unsigned UseTemporary : 1;
+ /// When using -emit-module, treat the modulemap as a system module.
+ unsigned IsSystemModule : 1;
+
CodeCompleteOptions CodeCompleteOpts;
/// Specifies the output format of the AST.
@@ -426,9 +432,15 @@ public:
/// (in the format produced by -fdump-record-layouts).
std::string OverrideRecordLayoutsFile;
- /// Auxiliary triple for CUDA compilation.
+ /// Auxiliary triple for CUDA/HIP compilation.
std::string AuxTriple;
+ /// Auxiliary target CPU for CUDA/HIP compilation.
+ Optional<std::string> AuxTargetCPU;
+
+ /// Auxiliary target features for CUDA/HIP compilation.
+ Optional<std::vector<std::string>> AuxTargetFeatures;
+
/// Filename to write statistics to.
std::string StatsFile;
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/LogDiagnosticPrinter.h b/contrib/llvm-project/clang/include/clang/Frontend/LogDiagnosticPrinter.h
index 4816275cdc60..ec22a8b6cc5f 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/LogDiagnosticPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/LogDiagnosticPrinter.h
@@ -66,7 +66,7 @@ public:
std::unique_ptr<raw_ostream> StreamOwner);
void setDwarfDebugFlags(StringRef Value) {
- DwarfDebugFlags = Value;
+ DwarfDebugFlags = std::string(Value);
}
void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) override {
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
index 5ae77735576c..0f7e9d895a00 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
@@ -16,6 +16,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MD5.h"
#include <cstddef>
@@ -33,12 +34,13 @@ class FileSystem;
namespace clang {
class CompilerInstance;
class CompilerInvocation;
+class Decl;
class DeclGroupRef;
class PCHContainerOperations;
/// Runs lexer to compute suggested preamble bounds.
PreambleBounds ComputePreambleBounds(const LangOptions &LangOpts,
- llvm::MemoryBuffer *Buffer,
+ const llvm::MemoryBuffer *Buffer,
unsigned MaxLines);
class PreambleCallbacks;
@@ -94,6 +96,11 @@ public:
/// be used for logging and debugging purposes only.
std::size_t getSize() const;
+ /// Returned string is not null-terminated.
+ llvm::StringRef getContents() const {
+ return {PreambleBytes.data(), PreambleBytes.size()};
+ }
+
/// Check whether PrecompiledPreamble can be reused for the new contents(\p
/// MainFileBuffer) of the main file.
bool CanReuse(const CompilerInvocation &Invocation,
@@ -122,7 +129,8 @@ public:
private:
PrecompiledPreamble(PCHStorage Storage, std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
- llvm::StringMap<PreambleFileHash> FilesInPreamble);
+ llvm::StringMap<PreambleFileHash> FilesInPreamble,
+ llvm::StringSet<> MissingFiles);
/// A temp file that would be deleted on destructor call. If destructor is not
/// called for any reason, the file will be deleted at static objects'
@@ -243,6 +251,15 @@ private:
/// If any of the files have changed from one compile to the next,
/// the preamble must be thrown away.
llvm::StringMap<PreambleFileHash> FilesInPreamble;
+ /// Files that were not found during preamble building. If any of these now
+ /// exist then the preamble should not be reused.
+ ///
+ /// Storing *all* the missing files that could invalidate the preamble would
+ /// make it too expensive to revalidate (when the include path has many
+ /// entries, each #include will miss half of them on average).
+ /// Instead, we track only files that could have satisfied an #include that
+ /// was ultimately not found.
+ llvm::StringSet<> MissingFiles;
/// The contents of the file that was used to precompile the preamble. Only
/// contains first PreambleBounds::Size bytes. Used to compare if the relevant
/// part of the file has not changed, so that preamble can be reused.
@@ -277,6 +294,10 @@ public:
virtual std::unique_ptr<PPCallbacks> createPPCallbacks();
/// The returned CommentHandler will be added to the preprocessor if not null.
virtual CommentHandler *getCommentHandler();
+ /// Determines which function bodies are parsed; by default, everything is skipped.
+ /// Only used if FrontendOpts::SkipFunctionBodies is true.
+ /// See ASTConsumer::shouldSkipFunctionBody.
+ virtual bool shouldSkipFunctionBody(Decl *D) { return true; }
};
enum class BuildPreambleError {
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
index 965a14410832..a97cd138d159 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_FRONTEND_VERIFYDIAGNOSTICCONSUMER_H
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
@@ -189,11 +190,10 @@ public:
///
class Directive {
public:
- static std::unique_ptr<Directive> create(bool RegexKind,
- SourceLocation DirectiveLoc,
- SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text,
- unsigned Min, unsigned Max);
+ static std::unique_ptr<Directive>
+ create(bool RegexKind, SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc, bool MatchAnyFileAndLine,
+ bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max);
public:
/// Constant representing n or more matches.
@@ -204,6 +204,7 @@ public:
const std::string Text;
unsigned Min, Max;
bool MatchAnyLine;
+ bool MatchAnyFileAndLine; // `MatchAnyFileAndLine` implies `MatchAnyLine`.
Directive(const Directive &) = delete;
Directive &operator=(const Directive &) = delete;
@@ -218,9 +219,11 @@ public:
protected:
Directive(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max)
- : DirectiveLoc(DirectiveLoc), DiagnosticLoc(DiagnosticLoc),
- Text(Text), Min(Min), Max(Max), MatchAnyLine(MatchAnyLine) {
+ bool MatchAnyFileAndLine, bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max)
+ : DirectiveLoc(DirectiveLoc), DiagnosticLoc(DiagnosticLoc), Text(Text),
+ Min(Min), Max(Max), MatchAnyLine(MatchAnyLine || MatchAnyFileAndLine),
+ MatchAnyFileAndLine(MatchAnyFileAndLine) {
assert(!DirectiveLoc.isInvalid() && "DirectiveLoc is invalid!");
assert((!DiagnosticLoc.isInvalid() || MatchAnyLine) &&
"DiagnosticLoc is invalid!");
diff --git a/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h b/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
index 2e1e6005d68a..de98b8147e8a 100644
--- a/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
+++ b/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
@@ -54,6 +54,9 @@ enum class SymbolKind : uint8_t {
Parameter,
Using,
+ TemplateTypeParm,
+ TemplateTemplateParm,
+ NonTypeTemplateParm,
};
enum class SymbolLanguage : uint8_t {
diff --git a/contrib/llvm-project/clang/include/clang/Index/IndexingAction.h b/contrib/llvm-project/clang/include/clang/Index/IndexingAction.h
index 9ed2a018f161..4baa2d5e7260 100644
--- a/contrib/llvm-project/clang/include/clang/Index/IndexingAction.h
+++ b/contrib/llvm-project/clang/include/clang/Index/IndexingAction.h
@@ -30,22 +30,21 @@ namespace serialization {
}
namespace index {
- class IndexDataConsumer;
+class IndexDataConsumer;
/// Creates an ASTConsumer that indexes all symbols (macros and AST decls).
+std::unique_ptr<ASTConsumer>
+createIndexingASTConsumer(std::shared_ptr<IndexDataConsumer> DataConsumer,
+ const IndexingOptions &Opts,
+ std::shared_ptr<Preprocessor> PP);
+
std::unique_ptr<ASTConsumer> createIndexingASTConsumer(
std::shared_ptr<IndexDataConsumer> DataConsumer,
const IndexingOptions &Opts, std::shared_ptr<Preprocessor> PP,
+ // Prefer to set Opts.ShouldTraverseDecl and use the above overload.
+ // This version is only needed if used to *track* function body parsing.
std::function<bool(const Decl *)> ShouldSkipFunctionBody);
-inline std::unique_ptr<ASTConsumer> createIndexingASTConsumer(
- std::shared_ptr<IndexDataConsumer> DataConsumer,
- const IndexingOptions &Opts, std::shared_ptr<Preprocessor> PP) {
- return createIndexingASTConsumer(
- std::move(DataConsumer), Opts, std::move(PP),
- /*ShouldSkipFunctionBody=*/[](const Decl *) { return false; });
-}
-
/// Creates a frontend action that indexes all symbols (macros and AST decls).
std::unique_ptr<FrontendAction>
createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
diff --git a/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h b/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
index bbfd6e4a72c6..9f5c03d1b3b9 100644
--- a/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
@@ -14,6 +14,7 @@
#include <string>
namespace clang {
+class Decl;
namespace index {
struct IndexingOptions {
@@ -34,6 +35,12 @@ struct IndexingOptions {
// Has no effect if IndexFunctionLocals is false.
bool IndexParametersInDeclarations = false;
bool IndexTemplateParameters = false;
+
+ // If set, skip indexing inside some declarations for performance.
+ // This prevents traversal, so skipping a struct means its declaration and
+ // members won't be indexed, but references elsewhere to that struct will be.
+ // Currently this is only checked for top-level declarations.
+ std::function<bool(const Decl *)> ShouldTraverseDecl;
};
} // namespace index
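Taken together with the createIndexingASTConsumer split above, the new ShouldTraverseDecl hook replaces the old ShouldSkipFunctionBody parameter for most callers. A sketch of hypothetical client code (the isImplicit() filter is only an illustrative policy, not something prescribed by the patch):

#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DeclBase.h"
#include "clang/Index/IndexingAction.h"
#include "clang/Index/IndexingOptions.h"
#include <memory>

std::unique_ptr<clang::ASTConsumer>
makeIndexer(std::shared_ptr<clang::index::IndexDataConsumer> DataConsumer,
            std::shared_ptr<clang::Preprocessor> PP) {
  clang::index::IndexingOptions Opts;
  // Do not traverse into implicit declarations; references to them from
  // elsewhere are still indexed, as noted in the comment above.
  Opts.ShouldTraverseDecl = [](const clang::Decl *D) {
    return !D->isImplicit();
  };
  return clang::index::createIndexingASTConsumer(std::move(DataConsumer), Opts,
                                                 std::move(PP));
}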
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h b/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
index d526319a68c6..da2ae9fce1aa 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
@@ -14,13 +14,12 @@
#define LLVM_CLANG_LEX_DIRECTORYLOOKUP_H
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/ModuleMap.h"
namespace clang {
class HeaderMap;
-class DirectoryEntry;
-class FileEntry;
class HeaderSearch;
class Module;
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
index 0d20dafe2cb1..28c57dbe3b8e 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
@@ -302,7 +302,7 @@ public:
void AddIncludeAlias(StringRef Source, StringRef Dest) {
if (!IncludeAliases)
IncludeAliases.reset(new IncludeAliasMap);
- (*IncludeAliases)[Source] = Dest;
+ (*IncludeAliases)[Source] = std::string(Dest);
}
/// Maps one header file name to a different header
@@ -321,7 +321,7 @@ public:
/// Set the path to the module cache.
void setModuleCachePath(StringRef CachePath) {
- ModuleCachePath = CachePath;
+ ModuleCachePath = std::string(CachePath);
}
/// Retrieve the path to the module cache.
@@ -476,6 +476,13 @@ public:
/// This routine does not consider the effect of \#import
bool isFileMultipleIncludeGuarded(const FileEntry *File);
+ /// Determine whether the given file is known to have ever been \#imported
+ /// (or if it has been \#included and we've encountered a \#pragma once).
+ bool hasFileBeenImported(const FileEntry *File) {
+ const HeaderFileInfo *FI = getExistingFileInfo(File);
+ return FI && FI->isImport;
+ }
+
/// This method returns a HeaderMap for the specified
/// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
const HeaderMap *CreateHeaderMap(const FileEntry *FE);
@@ -559,6 +566,12 @@ public:
ModuleMap::KnownHeader findModuleForHeader(const FileEntry *File,
bool AllowTextual = false) const;
+ /// Retrieve all the modules corresponding to the given file.
+ ///
+ /// \ref findModuleForHeader should typically be used instead of this.
+ ArrayRef<ModuleMap::KnownHeader>
+ findAllModulesForHeader(const FileEntry *File) const;
+
/// Read the contents of the given module map file.
///
/// \param File The module map file.
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
index 5c19a41986b5..3af49e175395 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
@@ -115,7 +115,7 @@ public:
std::string ModuleUserBuildPath;
/// The mapping of module names to prebuilt module files.
- std::map<std::string, std::string> PrebuiltModuleFiles;
+ std::map<std::string, std::string, std::less<>> PrebuiltModuleFiles;
/// The directories used to load prebuilt module files.
std::vector<std::string> PrebuiltModulePaths;
@@ -239,11 +239,11 @@ public:
}
void AddVFSOverlayFile(StringRef Name) {
- VFSOverlayFiles.push_back(Name);
+ VFSOverlayFiles.push_back(std::string(Name));
}
void AddPrebuiltModulePath(StringRef Name) {
- PrebuiltModulePaths.push_back(Name);
+ PrebuiltModulePaths.push_back(std::string(Name));
}
};
diff --git a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
index b9d64c24a00b..0c4f0fe277b7 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
@@ -40,7 +40,9 @@ void expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input);
/// of a ppnumber, classifying it as either integer, floating, or erroneous,
/// determines the radix of the value and can convert it to a useful value.
class NumericLiteralParser {
- Preprocessor &PP; // needed for diagnostics
+ const SourceManager &SM;
+ const LangOptions &LangOpts;
+ DiagnosticsEngine &Diags;
const char *const ThisTokBegin;
const char *const ThisTokEnd;
@@ -54,9 +56,9 @@ class NumericLiteralParser {
SmallString<32> UDSuffixBuf;
public:
- NumericLiteralParser(StringRef TokSpelling,
- SourceLocation TokLoc,
- Preprocessor &PP);
+ NumericLiteralParser(StringRef TokSpelling, SourceLocation TokLoc,
+ const SourceManager &SM, const LangOptions &LangOpts,
+ const TargetInfo &Target, DiagnosticsEngine &Diags);
bool hadError : 1;
bool isUnsigned : 1;
bool isLong : 1; // This is *not* set for long long.
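The constructor above now takes the individual dependencies rather than a whole Preprocessor. A hypothetical call site, sketched under the assumption that a Preprocessor is still available to supply them (not code from this patch):

#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"

// Returns true if Tok spells a plain integer literal (no floating-point or
// fixed-point form) and no error was diagnosed while parsing it.
bool isPlainIntegerLiteral(clang::Preprocessor &PP, const clang::Token &Tok,
                           llvm::StringRef TokSpelling) {
  clang::NumericLiteralParser Literal(
      TokSpelling, Tok.getLocation(), PP.getSourceManager(), PP.getLangOpts(),
      PP.getTargetInfo(), PP.getDiagnostics());
  return !Literal.hadError && Literal.isIntegerLiteral();
}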
@@ -71,7 +73,9 @@ public:
bool isFract : 1; // 1.0hr/r/lr/uhr/ur/ulr
bool isAccum : 1; // 1.0hk/k/lk/uhk/uk/ulk
- bool isFixedPointLiteral() const { return saw_fixed_point_suffix; }
+ bool isFixedPointLiteral() const {
+ return (saw_period || saw_exponent) && saw_fixed_point_suffix;
+ }
bool isIntegerLiteral() const {
return !saw_period && !saw_exponent && !isFixedPointLiteral();
diff --git a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
index 1e6b28d4aa3d..5b164039080b 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
@@ -413,13 +413,17 @@ public:
/// Is this a compiler builtin header?
static bool isBuiltinHeader(StringRef FileName);
+ bool isBuiltinHeader(const FileEntry *File);
/// Add a module map callback.
void addModuleMapCallbacks(std::unique_ptr<ModuleMapCallbacks> Callback) {
Callbacks.push_back(std::move(Callback));
}
- /// Retrieve the module that owns the given header file, if any.
+ /// Retrieve the module that owns the given header file, if any. Note that
+ /// this does not implicitly load module maps, except for builtin headers,
+ /// and does not consult the external source. (Those checks are the
+ /// responsibility of \ref HeaderSearch.)
///
/// \param File The header file that is likely to be included.
///
@@ -433,13 +437,19 @@ public:
KnownHeader findModuleForHeader(const FileEntry *File,
bool AllowTextual = false);
- /// Retrieve all the modules that contain the given header file. This
- /// may not include umbrella modules, nor information from external sources,
- /// if they have not yet been inferred / loaded.
+ /// Retrieve all the modules that contain the given header file. Note that
+ /// this does not implicitly load module maps, except for builtin headers,
+ /// and does not consult the external source. (Those checks are the
+ /// responsibility of \ref HeaderSearch.)
///
/// Typically, \ref findModuleForHeader should be used instead, as it picks
/// the preferred module for the header.
- ArrayRef<KnownHeader> findAllModulesForHeader(const FileEntry *File) const;
+ ArrayRef<KnownHeader> findAllModulesForHeader(const FileEntry *File);
+
+ /// Like \ref findAllModulesForHeader, but do not attempt to infer module
+ /// ownership from umbrella headers if we've not already done so.
+ ArrayRef<KnownHeader>
+ findResolvedModulesForHeader(const FileEntry *File) const;
/// Resolve all lazy header directives for the specified file.
///
@@ -605,9 +615,7 @@ public:
return &I->second;
}
- void addAdditionalModuleMapFile(const Module *M, const FileEntry *ModuleMap) {
- AdditionalModMaps[M].insert(ModuleMap);
- }
+ void addAdditionalModuleMapFile(const Module *M, const FileEntry *ModuleMap);
/// Resolve all of the unresolved exports in the given module.
///
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
index 1edcb567de66..de5e8eb2ca22 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
@@ -308,7 +308,7 @@ public:
/// read.
virtual void HasInclude(SourceLocation Loc, StringRef FileName, bool IsAngled,
Optional<FileEntryRef> File,
- SrcMgr::CharacteristicKind FileType) {}
+ SrcMgr::CharacteristicKind FileType);
/// Hook called when a source range is skipped.
/// \param Range The SourceRange that was skipped. The range begins at the
@@ -374,7 +374,6 @@ public:
/// Simple wrapper class for chaining callbacks.
class PPChainedCallbacks : public PPCallbacks {
- virtual void anchor();
std::unique_ptr<PPCallbacks> First, Second;
public:
@@ -382,6 +381,8 @@ public:
std::unique_ptr<PPCallbacks> _Second)
: First(std::move(_First)), Second(std::move(_Second)) {}
+ ~PPChainedCallbacks() override;
+
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override {
@@ -490,10 +491,7 @@ public:
void HasInclude(SourceLocation Loc, StringRef FileName, bool IsAngled,
Optional<FileEntryRef> File,
- SrcMgr::CharacteristicKind FileType) override {
- First->HasInclude(Loc, FileName, IsAngled, File, FileType);
- Second->HasInclude(Loc, FileName, IsAngled, File, FileType);
- }
+ SrcMgr::CharacteristicKind FileType) override;
void PragmaOpenCLExtension(SourceLocation NameLoc, const IdentifierInfo *Name,
SourceLocation StateLoc, unsigned State) override {
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
index e9434269c19c..cf8cca5414ea 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
@@ -96,11 +96,10 @@ public:
class PragmaNamespace : public PragmaHandler {
/// Handlers - This is a map of the handlers in this namespace with their name
/// as key.
- llvm::StringMap<PragmaHandler *> Handlers;
+ llvm::StringMap<std::unique_ptr<PragmaHandler>> Handlers;
public:
explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
- ~PragmaNamespace() override;
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null name if it
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index 9716196b95c2..5cd017fa925f 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -416,6 +416,14 @@ class Preprocessor {
/// of phase 4 of translation or for some other situation.
unsigned LexLevel = 0;
+ /// The number of (LexLevel 0) preprocessor tokens.
+ unsigned TokenCount = 0;
+
+ /// The maximum number of (LexLevel 0) tokens before issuing a -Wmax-tokens
+ /// warning, or zero for unlimited.
+ unsigned MaxTokens = 0;
+ SourceLocation MaxTokensOverrideLoc;
+
public:
struct PreambleSkipInfo {
SourceLocation HashTokenLoc;
@@ -1010,6 +1018,19 @@ public:
}
/// \}
+ /// Get the number of tokens processed so far.
+ unsigned getTokenCount() const { return TokenCount; }
+
+ /// Get the max number of tokens before issuing a -Wmax-tokens warning.
+ unsigned getMaxTokens() const { return MaxTokens; }
+
+ void overrideMaxTokens(unsigned Value, SourceLocation Loc) {
+ MaxTokens = Value;
+ MaxTokensOverrideLoc = Loc;
+ }
+
+ SourceLocation getMaxTokensOverrideLoc() const { return MaxTokensOverrideLoc; }
+
/// Register a function that would be called on each token in the final
/// expanded token stream.
/// This also reports annotation tokens produced by the parser.
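A small sketch of how the token-count bookkeeping added above might be queried; the helper itself is invented for illustration, only the three accessors come from this patch:

#include "clang/Lex/Preprocessor.h"

// Returns true when the LexLevel-0 token count exceeds the configured budget;
// a budget of zero means "unlimited".
bool exceededTokenBudget(const clang::Preprocessor &PP) {
  unsigned Max = PP.getMaxTokens();
  if (Max == 0 || PP.getTokenCount() <= Max)
    return false;
  // A real diagnostic would typically point at the overriding pragma, if any.
  clang::SourceLocation PragmaLoc = PP.getMaxTokensOverrideLoc();
  (void)PragmaLoc;
  return true;
}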
@@ -1163,7 +1184,7 @@ public:
///
/// These predefines are automatically injected when parsing the main file.
void setPredefines(const char *P) { Predefines = P; }
- void setPredefines(StringRef P) { Predefines = P; }
+ void setPredefines(StringRef P) { Predefines = std::string(P); }
/// Return information about the specified preprocessor
/// identifier token.
@@ -1541,6 +1562,12 @@ public:
void EnterAnnotationToken(SourceRange Range, tok::TokenKind Kind,
void *AnnotationVal);
+ /// Determine whether it's possible for a future call to Lex to produce an
+ /// annotation token created by a previous call to EnterAnnotationToken.
+ bool mightHavePendingAnnotationTokens() {
+ return CurLexerKind != CLK_Lexer;
+ }
+
/// Update the current token to represent the provided
/// identifier, in order to cache an action performed by typo correction.
void TypoCorrectToken(const Token &Tok) {
@@ -2203,21 +2230,23 @@ private:
ModuleBegin,
ModuleImport,
SkippedModuleImport,
+ Failure,
} Kind;
Module *ModuleForHeader = nullptr;
ImportAction(ActionKind AK, Module *Mod = nullptr)
: Kind(AK), ModuleForHeader(Mod) {
- assert((AK == None || Mod) && "no module for module action");
+ assert((AK == None || Mod || AK == Failure) &&
+ "no module for module action");
}
};
Optional<FileEntryRef> LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef Filename,
+ const DirectoryLookup *&CurDir, StringRef &Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
bool &IsMapped, const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile, StringRef LookupFilename,
+ const FileEntry *LookupFromFile, StringRef &LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled);
@@ -2249,20 +2278,22 @@ public:
/// into a module, or is outside any module, returns nullptr.
Module *getModuleForLocation(SourceLocation Loc);
- /// We want to produce a diagnostic at location IncLoc concerning a
- /// missing module import.
- ///
- /// \param IncLoc The location at which the missing import was detected.
- /// \param M The desired module.
- /// \param MLoc A location within the desired module at which some desired
- /// effect occurred (eg, where a desired entity was declared).
- ///
- /// \return A file that can be #included to import a module containing MLoc.
- /// Null if no such file could be determined or if a #include is not
- /// appropriate.
- const FileEntry *getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
- Module *M,
- SourceLocation MLoc);
+ /// We want to produce a diagnostic at location IncLoc concerning an
+ /// unreachable effect at location MLoc (eg, where a desired entity was
+ /// declared or defined). Determine whether the right way to make MLoc
+ /// reachable is by #include, and if so, what header should be included.
+ ///
+ /// This is not necessarily fast, and might load unexpected module maps, so
+ /// should only be called by code that intends to produce an error.
+ ///
+ /// \param IncLoc The location at which the missing effect was detected.
+ /// \param MLoc A location within an unimported module at which the desired
+ /// effect occurred.
+ /// \return A file that can be #included to provide the desired effect. Null
+ /// if no such file could be determined or if a #include is not
+ /// appropriate (eg, if a module should be imported instead).
+ const FileEntry *getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
+ SourceLocation MLoc);
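A hypothetical caller of the renamed API above (a sketch; the wrapper and its name are invented for illustration):

#include "clang/Basic/FileManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/Optional.h"

// Suggest a header that would make the entity at MLoc reachable from IncLoc,
// or None if a #include is not the right fix (e.g. a module import is).
llvm::Optional<std::string>
suggestHeaderFor(clang::Preprocessor &PP, clang::SourceLocation IncLoc,
                 clang::SourceLocation MLoc) {
  if (const clang::FileEntry *FE =
          PP.getHeaderToIncludeForDiagnostics(IncLoc, MLoc))
    return std::string(FE->getName());
  return llvm::None;
}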
bool isRecordingPreamble() const {
return PreambleConditionalStack.isRecording();
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
index 8b2146059f85..c551f87e0d7b 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
@@ -195,15 +195,19 @@ public:
public:
PreprocessorOptions() : PrecompiledPreambleBytes(0, false) {}
- void addMacroDef(StringRef Name) { Macros.emplace_back(Name, false); }
- void addMacroUndef(StringRef Name) { Macros.emplace_back(Name, true); }
+ void addMacroDef(StringRef Name) {
+ Macros.emplace_back(std::string(Name), false);
+ }
+ void addMacroUndef(StringRef Name) {
+ Macros.emplace_back(std::string(Name), true);
+ }
void addRemappedFile(StringRef From, StringRef To) {
- RemappedFiles.emplace_back(From, To);
+ RemappedFiles.emplace_back(std::string(From), std::string(To));
}
void addRemappedFile(StringRef From, llvm::MemoryBuffer *To) {
- RemappedFileBuffers.emplace_back(From, To);
+ RemappedFileBuffers.emplace_back(std::string(From), To);
}
void clearRemappedFiles() {
diff --git a/contrib/llvm-project/clang/include/clang/Parse/Parser.h b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
index 41f46861d089..e809d87b59a0 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
@@ -13,7 +13,6 @@
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
-#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
@@ -24,6 +23,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -49,6 +49,10 @@ namespace clang {
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
+ struct OMPTraitProperty;
+ struct OMPTraitSelector;
+ struct OMPTraitSet;
+ class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
@@ -178,6 +182,7 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
+ std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
@@ -201,6 +206,8 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
+ std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
+ std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
@@ -234,6 +241,9 @@ class Parser : public CodeCompletionHandler {
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
+ /// Current kind of OpenMP clause
+ OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
+
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
@@ -270,6 +280,22 @@ class Parser : public CodeCompletionHandler {
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
+ void MaybeDestroyTemplateIds() {
+ if (!TemplateIds.empty() &&
+ (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
+ DestroyTemplateIds();
+ }
+ void DestroyTemplateIds();
+
+ /// RAII object to destroy TemplateIdAnnotations where possible, from a
+ /// likely-good position during parsing.
+ struct DestroyTemplateIdAnnotationsRAIIObj {
+ Parser &Self;
+
+ DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
+ ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
+ };
+
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
@@ -719,6 +745,10 @@ private:
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
+ /// Handle the annotation token produced for
+ /// #pragma float_control
+ void HandlePragmaFloatControl();
+
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
@@ -761,13 +791,17 @@ public:
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
- static ParsedType getTypeAnnotation(const Token &Tok) {
+ static TypeResult getTypeAnnotation(const Token &Tok) {
+ if (!Tok.getAnnotationValue())
+ return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
- static void setTypeAnnotation(Token &Tok, ParsedType T) {
- Tok.setAnnotationValue(T.getAsOpaquePtr());
+ static void setTypeAnnotation(Token &Tok, TypeResult T) {
+ assert((T.isInvalid() || T.get()) &&
+ "produced a valid-but-null type annotation?");
+ Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
@@ -1057,12 +1091,40 @@ public:
}
};
+ /// Introduces zero or more scopes for parsing. The scopes will all be exited
+ /// when the object is destroyed.
+ class MultiParseScope {
+ Parser &Self;
+ unsigned NumScopes = 0;
+
+ MultiParseScope(const MultiParseScope&) = delete;
+
+ public:
+ MultiParseScope(Parser &Self) : Self(Self) {}
+ void Enter(unsigned ScopeFlags) {
+ Self.EnterScope(ScopeFlags);
+ ++NumScopes;
+ }
+ void Exit() {
+ while (NumScopes) {
+ Self.ExitScope();
+ --NumScopes;
+ }
+ }
+ ~MultiParseScope() {
+ Exit();
+ }
+ };
+
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
+ /// Re-enter the template scopes for a declaration that might be a template.
+ unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
+
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
@@ -1111,7 +1173,8 @@ public:
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
- /// stop at a ';' character.
+ /// stop at a ';' character. Balances (), [], and {} delimiter tokens while
+ /// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
@@ -1246,13 +1309,7 @@ private:
Decl *D;
CachedTokens Toks;
- /// Whether this member function had an associated template
- /// scope. When true, D is a template declaration.
- /// otherwise, it is a member function declaration.
- bool TemplateScope;
-
- explicit LexedMethod(Parser* P, Decl *MD)
- : Self(P), D(MD), TemplateScope(false) {}
+ explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
@@ -1282,8 +1339,7 @@ private:
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
- : Self(P), Method(M), TemplateScope(false),
- ExceptionSpecTokens(nullptr) {}
+ : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
@@ -1292,11 +1348,6 @@ private:
/// Method - The method declaration.
Decl *Method;
- /// Whether this member function had an associated template
- /// scope. When true, D is a template declaration.
- /// otherwise, it is a member function declaration.
- bool TemplateScope;
-
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
@@ -1341,18 +1392,13 @@ private:
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
- : TopLevelClass(TopLevelClass), TemplateScope(false),
- IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
+ : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
+ TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
- /// Whether this class had an associated template
- /// scope. When true, TagOrTemplate is a template declaration;
- /// otherwise, it is a tag declaration.
- bool TemplateScope : 1;
-
/// Whether this class is an __interface.
bool IsInterface : 1;
@@ -1451,11 +1497,14 @@ private:
SourceRange getSourceRange() const LLVM_READONLY;
};
+ // In ParseCXXInlineMethods.cpp.
+ struct ReenterTemplateScopeRAII;
+ struct ReenterClassScopeRAII;
+
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
- static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
@@ -1699,6 +1748,8 @@ public:
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
+ ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
+
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
@@ -1749,6 +1800,7 @@ private:
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
+ ExprResult ParseUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
@@ -1791,8 +1843,6 @@ private:
SourceLocation LParenLoc,
SourceLocation RParenLoc);
- ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
-
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
@@ -1811,7 +1861,9 @@ private:
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
- bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
+ bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
@@ -1950,7 +2002,8 @@ private:
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
- ExprResult ParseInitializerWithPotentialDesignator();
+ ExprResult ParseInitializerWithPotentialDesignator(
+ llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
@@ -2018,8 +2071,9 @@ private:
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
- SourceLocation Loc,
- Sema::ConditionKind CK);
+ SourceLocation Loc, Sema::ConditionKind CK,
+ SourceLocation *LParenLoc = nullptr,
+ SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
@@ -2146,6 +2200,68 @@ private:
llvm_unreachable("Missing DeclSpecContext case");
}
+ /// Whether a defining-type-specifier is permitted in a given context.
+ enum class AllowDefiningTypeSpec {
+ /// The grammar doesn't allow a defining-type-specifier here, and we must
+ /// not parse one (eg, because a '{' could mean something else).
+ No,
+ /// The grammar doesn't allow a defining-type-specifier here, but we permit
+ /// one for error recovery purposes. Sema will reject.
+ NoButErrorRecovery,
+ /// The grammar allows a defining-type-specifier here, even though it's
+ /// always invalid. Sema will reject.
+ YesButInvalid,
+ /// The grammar allows a defining-type-specifier here, and one can be valid.
+ Yes
+ };
+
+ /// Is this a context in which we are parsing defining-type-specifiers (and
+ /// so permit class and enum definitions in addition to non-defining class and
+ /// enum elaborated-type-specifiers)?
+ static AllowDefiningTypeSpec
+ isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
+ switch (DSC) {
+ case DeclSpecContext::DSC_normal:
+ case DeclSpecContext::DSC_class:
+ case DeclSpecContext::DSC_top_level:
+ case DeclSpecContext::DSC_alias_declaration:
+ case DeclSpecContext::DSC_objc_method_result:
+ return AllowDefiningTypeSpec::Yes;
+
+ case DeclSpecContext::DSC_condition:
+ case DeclSpecContext::DSC_template_param:
+ return AllowDefiningTypeSpec::YesButInvalid;
+
+ case DeclSpecContext::DSC_template_type_arg:
+ case DeclSpecContext::DSC_type_specifier:
+ return AllowDefiningTypeSpec::NoButErrorRecovery;
+
+ case DeclSpecContext::DSC_trailing:
+ return AllowDefiningTypeSpec::No;
+ }
+ llvm_unreachable("Missing DeclSpecContext case");
+ }
+
+ /// Is this a context in which an opaque-enum-declaration can appear?
+ static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
+ switch (DSC) {
+ case DeclSpecContext::DSC_normal:
+ case DeclSpecContext::DSC_class:
+ case DeclSpecContext::DSC_top_level:
+ return true;
+
+ case DeclSpecContext::DSC_alias_declaration:
+ case DeclSpecContext::DSC_objc_method_result:
+ case DeclSpecContext::DSC_condition:
+ case DeclSpecContext::DSC_template_param:
+ case DeclSpecContext::DSC_template_type_arg:
+ case DeclSpecContext::DSC_type_specifier:
+ case DeclSpecContext::DSC_trailing:
+ return false;
+ }
+ llvm_unreachable("Missing DeclSpecContext case");
+ }
+
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
@@ -2236,7 +2352,7 @@ private:
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
- Decl *TagDecl);
+ RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
@@ -2373,17 +2489,14 @@ private:
True, False, Ambiguous, Error
};
- /// Based only on the given token kind, determine whether we know that
- /// we're at the start of an expression or a type-specifier-seq (which may
- /// be an expression, in C++).
+ /// Determine whether we could have an enum-base.
///
- /// This routine does not attempt to resolve any of the trick cases, e.g.,
- /// those involving lookup of identifiers.
+ /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
+ /// only consider this to be an enum-base if the next token is a '{'.
///
- /// \returns \c TPR_true if this token starts an expression, \c TPR_false if
- /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
- /// tell.
- TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
+ /// \return \c false if this cannot possibly be an enum base; \c true
+ /// otherwise.
+ bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
@@ -2438,6 +2551,10 @@ private:
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
+ /// Try to skip a possibly empty sequence of 'attribute-specifier's without
+ /// full validation of the syntactic structure of attributes.
+ bool TrySkipAttributes();
+
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
@@ -2564,13 +2681,15 @@ private:
D.takeAttributes(attrs, endLoc);
}
}
- void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
+ bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
+ return true;
}
+ return false;
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
@@ -2687,6 +2806,7 @@ private:
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
+ ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
@@ -2900,11 +3020,12 @@ private:
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
- ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
@@ -2917,20 +3038,69 @@ private:
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
- /// Parses OpenMP context selectors and calls \p Callback for each
- /// successfully parsed context selector.
- bool
- parseOpenMPContextSelectors(SourceLocation Loc,
- SmallVectorImpl<Sema::OMPCtxSelectorData> &Data);
+
+ /// Parse a property kind into \p TIProperty for the selector set \p Set and
+ /// selector \p Selector.
+ void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
+ llvm::omp::TraitSet Set,
+ llvm::omp::TraitSelector Selector,
+ llvm::StringMap<SourceLocation> &Seen);
+
+ /// Parse a selector kind into \p TISelector for the selector set \p Set.
+ void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen);
+
+ /// Parse a selector set kind into \p TISet.
+ void parseOMPTraitSetKind(OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &Seen);
+
+ /// Parses an OpenMP context property.
+ void parseOMPContextProperty(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen);
+
+ /// Parses an OpenMP context selector.
+ void parseOMPContextSelector(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &SeenSelectors);
+
+ /// Parses an OpenMP context selector set.
+ void parseOMPContextSelectorSet(OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &SeenSets);
+
+ /// Parses OpenMP context selectors.
+ bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
+
+ /// Parse a `match` clause for a '#pragma omp declare variant'. Return true
+ /// if there was an error.
+ bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
+
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
+
+ /// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
+ /// it is not the current token.
+ void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
+
+ /// Check \p FoundKind against \p ExpectedKind; if they do not match, issue an
+ /// error that the "end" matching the "begin" directive of kind \p BeginKind was
+ /// not found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
+ /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
+ void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
+ OpenMPDirectiveKind ExpectedKind,
+ OpenMPDirectiveKind FoundKind,
+ SourceLocation MatchingLoc,
+ SourceLocation FoundLoc,
+ bool SkipUntilOpenMPEnd);
+
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
@@ -2949,6 +3119,10 @@ private:
DeclarationName &Name,
AccessSpecifier AS = AS_none);
+ /// Tries to parse cast part of OpenMP array shaping operation:
+ /// '[' expression ']' { '[' expression ']' } ')'.
+ bool tryParseOpenMPArrayShapingCastPart();
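  // A sketch of the OpenMP 5.0 array-shaping form this looks ahead for, e.g. as
  // allowed in a depend clause:
  //   #pragma omp task depend(in: ([rows][cols])ptr)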
+
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
@@ -2993,11 +3167,13 @@ private:
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
+ /// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
- OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
+ OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
@@ -3015,6 +3191,16 @@ private:
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
+ /// Parses and creates OpenMP 5.0 iterators expression:
+ /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
+ /// <range-specification> }+ ')'
+ ExprResult ParseOpenMPIteratorsExpr();
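  // For example (sketch), an iterator modifier in a depend clause:
  //   #pragma omp task depend(iterator(i = 0:n), in: a[i])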
+
+ /// Parses allocators and traits in the context of the uses_allocator clause.
+ /// Expected format:
+ /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
+ OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
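  // For example (sketch, with illustrative allocator/traits names):
  //   #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))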
+
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
@@ -3024,32 +3210,31 @@ public:
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
- Expr *TailExpr = nullptr;
+ Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
- SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
+ SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
- SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
+ SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
- SourceLocation DepLinMapLastLoc;
+ SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
- bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
- bool AllowDestructorName,
- bool AllowConstructorName,
+ bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
+ bool ObjectHadErrors, bool EnteringContext,
+ bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
- ParsedType ObjectType,
- SourceLocation *TemplateKWLoc,
- UnqualifiedId &Result);
+ SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
+
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
@@ -3074,7 +3259,7 @@ private:
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
- bool ParseTemplateParameters(unsigned Depth,
+ bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
@@ -3098,7 +3283,8 @@ private:
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
- bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
+ bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
+ SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
@@ -3165,6 +3351,27 @@ private:
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
+
+ class GNUAsmQualifiers {
+ unsigned Qualifiers = AQ_unspecified;
+
+ public:
+ enum AQ {
+ AQ_unspecified = 0,
+ AQ_volatile = 1,
+ AQ_inline = 2,
+ AQ_goto = 4,
+ };
+ static const char *getQualifierName(AQ Qualifier);
+ bool setAsmQualifier(AQ Qualifier);
+ inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
+ inline bool isInline() const { return Qualifiers & AQ_inline; };
+ inline bool isGoto() const { return Qualifiers & AQ_goto; }
+ };
+ bool isGCCAsmStatement(const Token &TokAfterAsm) const;
+ bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
+ GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
+ bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
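// A minimal sketch of the GNU asm statements the new Parser::GNUAsmQualifiers
// helper classifies (each qualifier may appear at most once, in any order):
void gnu_asm_qualifier_sketch() {
  asm volatile("");            // AQ_volatile
  asm inline("");              // AQ_inline
  asm goto("" : : : : done);   // AQ_goto, with a label operand
done:;
}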
diff --git a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
index fb092c050783..bc1754614ad9 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
@@ -294,9 +294,9 @@ namespace clang {
bool OldVal;
public:
- ParsingOpenMPDirectiveRAII(Parser &P)
+ ParsingOpenMPDirectiveRAII(Parser &P, bool Value = true)
: P(P), OldVal(P.OpenMPDirectiveParsing) {
- P.OpenMPDirectiveParsing = true;
+ P.OpenMPDirectiveParsing = Value;
}
/// This can be used to restore the state early, before the dtor
@@ -459,26 +459,6 @@ namespace clang {
}
void skipToEnd();
};
-
- /// RAIIObject to destroy the contents of a SmallVector of
- /// TemplateIdAnnotation pointers and clear the vector.
- class DestroyTemplateIdAnnotationsRAIIObj {
- SmallVectorImpl<TemplateIdAnnotation *> &Container;
-
- public:
- DestroyTemplateIdAnnotationsRAIIObj(
- SmallVectorImpl<TemplateIdAnnotation *> &Container)
- : Container(Container) {}
-
- ~DestroyTemplateIdAnnotationsRAIIObj() {
- for (SmallVectorImpl<TemplateIdAnnotation *>::iterator I =
- Container.begin(),
- E = Container.end();
- I != E; ++I)
- (*I)->Destroy();
- Container.clear();
- }
- };
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
index 1559b51ea77f..8db03babfb1e 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
@@ -23,6 +23,7 @@
#define LLVM_CLANG_SEMA_DECLSPEC_H
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjCCommon.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/Lambda.h"
@@ -186,14 +187,14 @@ public:
SourceLocation getLastQualifierNameLoc() const;
/// No scope specifier.
- bool isEmpty() const { return !Range.isValid(); }
+ bool isEmpty() const { return Range.isInvalid() && getScopeRep() == nullptr; }
/// A scope specifier is present, but may be valid or invalid.
bool isNotEmpty() const { return !isEmpty(); }
/// An error occurred during parsing of the scope specifier.
- bool isInvalid() const { return isNotEmpty() && getScopeRep() == nullptr; }
+ bool isInvalid() const { return Range.isValid() && getScopeRep() == nullptr; }
/// A scope specifier is present, and it refers to a real scope.
- bool isValid() const { return isNotEmpty() && getScopeRep() != nullptr; }
+ bool isValid() const { return getScopeRep() != nullptr; }
/// Indicate that this nested-name-specifier is invalid.
void SetInvalid(SourceRange R) {
@@ -278,7 +279,9 @@ public:
static const TST TST_char32 = clang::TST_char32;
static const TST TST_int = clang::TST_int;
static const TST TST_int128 = clang::TST_int128;
+ static const TST TST_extint = clang::TST_extint;
static const TST TST_half = clang::TST_half;
+ static const TST TST_BFloat16 = clang::TST_BFloat16;
static const TST TST_float = clang::TST_float;
static const TST TST_double = clang::TST_double;
static const TST TST_float16 = clang::TST_Float16;
@@ -413,7 +416,7 @@ private:
T == TST_underlyingType || T == TST_atomic);
}
static bool isExprRep(TST T) {
- return (T == TST_typeofExpr || T == TST_decltype);
+ return (T == TST_typeofExpr || T == TST_decltype || T == TST_extint);
}
static bool isTemplateIdRep(TST T) {
return (T == TST_auto || T == TST_decltype_auto);
@@ -668,6 +671,13 @@ public:
unsigned &DiagID, ParsedType Rep,
const PrintingPolicy &Policy);
bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, TypeResult Rep,
+ const PrintingPolicy &Policy) {
+ if (Rep.isInvalid())
+ return SetTypeSpecError();
+ return SetTypeSpecType(T, Loc, PrevSpec, DiagID, Rep.get(), Policy);
+ }
+ bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID, Decl *Rep, bool Owned,
const PrintingPolicy &Policy);
bool SetTypeSpecType(TST T, SourceLocation TagKwLoc,
@@ -697,6 +707,9 @@ public:
bool SetTypePipe(bool isPipe, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy);
+ bool SetExtIntType(SourceLocation KWLoc, Expr *BitWidth,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy);
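  // A minimal sketch of the extension carried by TST_extint / SetExtIntType,
  // Clang's arbitrary-precision integer types:
  //   _ExtInt(13) x;            // signed 13-bit integer
  //   unsigned _ExtInt(256) y;  // unsigned 256-bit integer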
bool SetTypeSpecSat(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID);
bool SetTypeSpecError();
@@ -830,31 +843,10 @@ public:
DQ_CSNullability = 0x40
};
- /// PropertyAttributeKind - list of property attributes.
- /// Keep this list in sync with LLVM's Dwarf.h ApplePropertyAttributes.
- enum ObjCPropertyAttributeKind {
- DQ_PR_noattr = 0x0,
- DQ_PR_readonly = 0x01,
- DQ_PR_getter = 0x02,
- DQ_PR_assign = 0x04,
- DQ_PR_readwrite = 0x08,
- DQ_PR_retain = 0x10,
- DQ_PR_copy = 0x20,
- DQ_PR_nonatomic = 0x40,
- DQ_PR_setter = 0x80,
- DQ_PR_atomic = 0x100,
- DQ_PR_weak = 0x200,
- DQ_PR_strong = 0x400,
- DQ_PR_unsafe_unretained = 0x800,
- DQ_PR_nullability = 0x1000,
- DQ_PR_null_resettable = 0x2000,
- DQ_PR_class = 0x4000,
- DQ_PR_direct = 0x8000,
- };
-
ObjCDeclSpec()
- : objcDeclQualifier(DQ_None), PropertyAttributes(DQ_PR_noattr),
- Nullability(0), GetterName(nullptr), SetterName(nullptr) { }
+ : objcDeclQualifier(DQ_None),
+ PropertyAttributes(ObjCPropertyAttribute::kind_noattr), Nullability(0),
+ GetterName(nullptr), SetterName(nullptr) {}
ObjCDeclQualifier getObjCDeclQualifier() const {
return (ObjCDeclQualifier)objcDeclQualifier;
@@ -866,32 +858,35 @@ public:
objcDeclQualifier = (ObjCDeclQualifier) (objcDeclQualifier & ~DQVal);
}
- ObjCPropertyAttributeKind getPropertyAttributes() const {
- return ObjCPropertyAttributeKind(PropertyAttributes);
+ ObjCPropertyAttribute::Kind getPropertyAttributes() const {
+ return ObjCPropertyAttribute::Kind(PropertyAttributes);
}
- void setPropertyAttributes(ObjCPropertyAttributeKind PRVal) {
+ void setPropertyAttributes(ObjCPropertyAttribute::Kind PRVal) {
PropertyAttributes =
- (ObjCPropertyAttributeKind)(PropertyAttributes | PRVal);
+ (ObjCPropertyAttribute::Kind)(PropertyAttributes | PRVal);
}
NullabilityKind getNullability() const {
- assert(((getObjCDeclQualifier() & DQ_CSNullability) ||
- (getPropertyAttributes() & DQ_PR_nullability)) &&
- "Objective-C declspec doesn't have nullability");
+ assert(
+ ((getObjCDeclQualifier() & DQ_CSNullability) ||
+ (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) &&
+ "Objective-C declspec doesn't have nullability");
return static_cast<NullabilityKind>(Nullability);
}
SourceLocation getNullabilityLoc() const {
- assert(((getObjCDeclQualifier() & DQ_CSNullability) ||
- (getPropertyAttributes() & DQ_PR_nullability)) &&
- "Objective-C declspec doesn't have nullability");
+ assert(
+ ((getObjCDeclQualifier() & DQ_CSNullability) ||
+ (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) &&
+ "Objective-C declspec doesn't have nullability");
return NullabilityLoc;
}
void setNullability(SourceLocation loc, NullabilityKind kind) {
- assert(((getObjCDeclQualifier() & DQ_CSNullability) ||
- (getPropertyAttributes() & DQ_PR_nullability)) &&
- "Set the nullability declspec or property attribute first");
+ assert(
+ ((getObjCDeclQualifier() & DQ_CSNullability) ||
+ (getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)) &&
+ "Set the nullability declspec or property attribute first");
Nullability = static_cast<unsigned>(kind);
NullabilityLoc = loc;
}
@@ -918,8 +913,8 @@ private:
// (space saving is negligible).
unsigned objcDeclQualifier : 7;
- // NOTE: VC++ treats enums as signed, avoid using ObjCPropertyAttributeKind
- unsigned PropertyAttributes : 16;
+ // NOTE: VC++ treats enums as signed, avoid using ObjCPropertyAttribute::Kind
+ unsigned PropertyAttributes : NumObjCPropertyAttrsBits;
unsigned Nullability : 2;
@@ -1518,6 +1513,8 @@ struct DeclaratorChunk {
struct MemberPointerTypeInfo {
/// The type qualifiers: const/volatile/restrict/__unaligned/_Atomic.
unsigned TypeQuals : 5;
+ /// Location of the '*' token.
+ unsigned StarLoc;
// CXXScopeSpec has a constructor, so it can't be a direct member.
// So we need some pointer-aligned storage and a bit of trickery.
alignas(CXXScopeSpec) char ScopeMem[sizeof(CXXScopeSpec)];
@@ -1660,11 +1657,13 @@ struct DeclaratorChunk {
static DeclaratorChunk getMemberPointer(const CXXScopeSpec &SS,
unsigned TypeQuals,
- SourceLocation Loc) {
+ SourceLocation StarLoc,
+ SourceLocation EndLoc) {
DeclaratorChunk I;
I.Kind = MemberPointer;
I.Loc = SS.getBeginLoc();
- I.EndLoc = Loc;
+ I.EndLoc = EndLoc;
+ I.Mem.StarLoc = StarLoc.getRawEncoding();
I.Mem.TypeQuals = TypeQuals;
new (I.Mem.ScopeMem) CXXScopeSpec(SS);
return I;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
index c79ca0e71df5..2854b4893484 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
@@ -193,6 +193,15 @@ public:
llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
&LPTMap) {}
+ /// Read the set of decls to be checked for deferred diags.
+ ///
+ /// The external source should append its own potentially emitted function
+ /// and variable decls which may cause deferred diags. Note that this routine
+ /// may be invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) {}
+
/// \copydoc Sema::CorrectTypo
/// \note LookupKind must correspond to a valid Sema::LookupNameKind
///
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
index f726f3836307..ca9e0a198cb9 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
@@ -689,6 +689,9 @@ public:
return Context >= IC_StaticCast;
}
+ /// Determine whether this initialization is a static cast.
+ bool isStaticCast() const { return Context == IC_StaticCast; }
+
/// Determine whether this initialization is a C-style cast.
bool isCStyleOrFunctionalCast() const {
return Context >= IC_CStyleCast;
@@ -999,6 +1002,9 @@ public:
/// Non-const lvalue reference binding to a vector element.
FK_NonConstLValueReferenceBindingToVectorElement,
+ /// Non-const lvalue reference binding to a matrix element.
+ FK_NonConstLValueReferenceBindingToMatrixElement,
+
/// Non-const lvalue reference binding to an lvalue of unrelated
/// type.
FK_NonConstLValueReferenceBindingToUnrelated,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
index 0466d06d753b..c6edc2df5b9f 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
@@ -348,7 +348,7 @@ public:
/// program.
static bool isVisible(Sema &SemaRef, NamedDecl *D) {
// If this declaration is not hidden, it's visible.
- if (!D->isHidden())
+ if (D->isUnconditionallyVisible())
return true;
// During template instantiation, we can refer to hidden declarations, if
diff --git a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
index dcbac9f0ba10..e94dd5d46871 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
@@ -332,6 +332,15 @@ public:
llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
&LPTMap) override;
+ /// Read the set of decls to be checked for deferred diags.
+ ///
+ /// The external source should append its own potentially emitted function
+ /// and variable decls which may cause deferred diags. Note that this routine
+ /// may be invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ void ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) override;
+
/// \copydoc ExternalSemaSource::CorrectTypo
/// \note Returns the first nonempty correction.
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Overload.h b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
index 1394c6236965..5023525aa41b 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
@@ -677,6 +677,24 @@ class Sema;
StdInitializerListElement = V;
}
+ /// Form an "implicit" conversion sequence from nullptr_t to bool, for a
+ /// direct-initialization of a bool object from nullptr_t.
+ static ImplicitConversionSequence getNullptrToBool(QualType SourceType,
+ QualType DestType,
+ bool NeedLValToRVal) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(SourceType);
+ if (NeedLValToRVal)
+ ICS.Standard.First = ICK_Lvalue_To_Rvalue;
+ ICS.Standard.setToType(0, SourceType);
+ ICS.Standard.Second = ICK_Boolean_Conversion;
+ ICS.Standard.setToType(1, DestType);
+ ICS.Standard.setToType(2, DestType);
+ return ICS;
+ }
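    // For example (sketch), this models the conversion performed by
    //   bool b(nullptr);   // direct-initialization of bool from nullptr_t
    // which the standard only permits for direct-initialization.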
+
// The result of a comparison between implicit conversion
// sequences. Use Sema::CompareImplicitConversionSequences to
// actually perform the comparison.
@@ -850,6 +868,8 @@ class Sema;
return static_cast<OverloadCandidateRewriteKind>(RewriteKind);
}
+ bool isReversed() const { return getRewriteKind() & CRK_Reversed; }
+
/// hasAmbiguousConversion - Returns whether this overload
/// candidate requires an ambiguous conversion or not.
bool hasAmbiguousConversion() const {
@@ -888,7 +908,7 @@ class Sema;
private:
friend class OverloadCandidateSet;
OverloadCandidate()
- : IsADLCandidate(CallExpr::NotADL), RewriteKind(CRK_None) {}
+ : IsSurrogate(false), IsADLCandidate(CallExpr::NotADL), RewriteKind(CRK_None) {}
};
/// OverloadCandidateSet - A set of overload candidates, used in C++
@@ -963,6 +983,14 @@ class Sema;
return CRK;
}
+ /// Determines whether this operator could be implemented by a function
+ /// with reversed parameter order.
+ bool isReversible() {
+ return AllowRewrittenCandidates && OriginalOperator &&
+ (getRewrittenOverloadedOperator(OriginalOperator) != OO_None ||
+ shouldAddReversed(OriginalOperator));
+ }
+
/// Determine whether we should consider looking for and adding reversed
/// candidates for operator Op.
bool shouldAddReversed(OverloadedOperatorKind Op);
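// isReversed()/isReversible() cover the C++20 rewritten candidates, where an
// operator== (or operator<=>) may also be considered with its parameter order
// reversed; a minimal sketch:
struct Cmp { bool operator==(int) const; };
bool same(int i, Cmp c) {
  return i == c;   // overload resolution also considers the reversed c == i
}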
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Ownership.h b/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
index f395282c0c52..7c7b1d35c9fd 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
@@ -116,7 +116,7 @@ namespace llvm {
template <class T>
struct PointerLikeTypeTraits<clang::OpaquePtr<T>> {
- enum { NumLowBitsAvailable = 0 };
+ static constexpr int NumLowBitsAvailable = 0;
static inline void *getAsVoidPointer(clang::OpaquePtr<T> P) {
// FIXME: Doesn't work? return P.getAs< void >();
@@ -278,6 +278,7 @@ namespace clang {
inline ExprResult ExprError() { return ExprResult(true); }
inline StmtResult StmtError() { return StmtResult(true); }
+ inline TypeResult TypeError() { return TypeResult(true); }
inline ExprResult ExprError(const DiagnosticBuilder&) { return ExprError(); }
inline StmtResult StmtError(const DiagnosticBuilder&) { return StmtError(); }
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
index d9d8585970d9..21e030fe5134 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
@@ -18,12 +18,12 @@
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Ownership.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Registry.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <cstddef>
@@ -37,6 +37,85 @@ class Decl;
class Expr;
class IdentifierInfo;
class LangOptions;
+class ParsedAttr;
+class Sema;
+class TargetInfo;
+
+struct ParsedAttrInfo {
+ /// Corresponds to the Kind enum.
+ unsigned AttrKind : 16;
+ /// The number of required arguments of this attribute.
+ unsigned NumArgs : 4;
+ /// The number of optional arguments of this attribute.
+ unsigned OptArgs : 4;
+ /// True if the parsing does not match the semantic content.
+ unsigned HasCustomParsing : 1;
+ /// True if this attribute is only available for certain targets.
+ unsigned IsTargetSpecific : 1;
+ /// True if this attribute applies to types.
+ unsigned IsType : 1;
+ /// True if this attribute applies to statements.
+ unsigned IsStmt : 1;
+ /// True if this attribute has any spellings that are known to gcc.
+ unsigned IsKnownToGCC : 1;
+ /// True if this attribute is supported by #pragma clang attribute.
+ unsigned IsSupportedByPragmaAttribute : 1;
+ /// The syntaxes supported by this attribute and how they're spelled.
+ struct Spelling {
+ AttributeCommonInfo::Syntax Syntax;
+ const char *NormalizedFullName;
+ };
+ ArrayRef<Spelling> Spellings;
+
+ ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind =
+ AttributeCommonInfo::NoSemaHandlerAttribute)
+ : AttrKind(AttrKind), NumArgs(0), OptArgs(0), HasCustomParsing(0),
+ IsTargetSpecific(0), IsType(0), IsStmt(0), IsKnownToGCC(0),
+ IsSupportedByPragmaAttribute(0) {}
+
+ virtual ~ParsedAttrInfo() = default;
+
+ /// Check if this attribute appertains to D, and issue a diagnostic if not.
+ virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
+ const Decl *D) const {
+ return true;
+ }
+ /// Check if this attribute is allowed by the language we are compiling, and
+ /// issue a diagnostic if not.
+ virtual bool diagLangOpts(Sema &S, const ParsedAttr &Attr) const {
+ return true;
+ }
+ /// Check if this attribute is allowed when compiling for the given target.
+ virtual bool existsInTarget(const TargetInfo &Target) const {
+ return true;
+ }
+ /// Convert the spelling index of Attr to a semantic spelling enum value.
+ virtual unsigned
+ spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const {
+ return UINT_MAX;
+ }
+ /// Populate Rules with the match rules of this attribute.
+ virtual void getPragmaAttributeMatchRules(
+ llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
+ const LangOptions &LangOpts) const {
+ }
+ enum AttrHandling {
+ NotHandled,
+ AttributeApplied,
+ AttributeNotApplied
+ };
+ /// If this ParsedAttrInfo knows how to handle this ParsedAttr applied to this
+ /// Decl then do so and return either AttributeApplied if it was applied or
+ /// AttributeNotApplied if it wasn't. Otherwise return NotHandled.
+ virtual AttrHandling handleDeclAttribute(Sema &S, Decl *D,
+ const ParsedAttr &Attr) const {
+ return NotHandled;
+ }
+
+ static const ParsedAttrInfo &get(const AttributeCommonInfo &A);
+};
+
+typedef llvm::Registry<ParsedAttrInfo> ParsedAttrInfoRegistry;
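// A minimal sketch of the plugin hook enabled by ParsedAttrInfoRegistry, loosely
// modeled on Clang's attribute-plugin example (all names illustrative):
//
//   struct ExampleAttrInfo : public ParsedAttrInfo {
//     ExampleAttrInfo() {
//       static constexpr Spelling S[] = {{ParsedAttr::AS_GNU, "example"},
//                                        {ParsedAttr::AS_CXX11, "plugin::example"}};
//       Spellings = S;
//     }
//     AttrHandling handleDeclAttribute(Sema &S, Decl *D,
//                                      const ParsedAttr &Attr) const override {
//       // Apply the attribute's effect to D here, or return NotHandled to let
//       // the built-in handling run.
//       return AttributeApplied;
//     }
//   };
//   static ParsedAttrInfoRegistry::Add<ExampleAttrInfo>
//       X("example", "example attribute plugin");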
/// Represents information about a change in availability for
/// an entity, which is part of the encoding of the 'availability'
@@ -181,6 +260,8 @@ private:
const Expr *MessageExpr;
+ const ParsedAttrInfo &Info;
+
ArgsUnion *getArgsBuffer() { return getTrailingObjects<ArgsUnion>(); }
ArgsUnion const *getArgsBuffer() const {
return getTrailingObjects<ArgsUnion>();
@@ -207,7 +288,8 @@ private:
EllipsisLoc(ellipsisLoc), NumArgs(numArgs), Invalid(false),
UsedAsTypeAttr(false), IsAvailability(false),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
- HasProcessingCache(false), IsPragmaClangAttribute(false) {
+ HasProcessingCache(false), IsPragmaClangAttribute(false),
+ Info(ParsedAttrInfo::get(*this)) {
if (numArgs)
memcpy(getArgsBuffer(), args, numArgs * sizeof(ArgsUnion));
}
@@ -225,7 +307,8 @@ private:
NumArgs(1), Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
HasProcessingCache(false), IsPragmaClangAttribute(false),
- UnavailableLoc(unavailable), MessageExpr(messageExpr) {
+ UnavailableLoc(unavailable), MessageExpr(messageExpr),
+ Info(ParsedAttrInfo::get(*this)) {
ArgsUnion PVal(Parm);
memcpy(getArgsBuffer(), &PVal, sizeof(ArgsUnion));
new (getAvailabilityData()) detail::AvailabilityData(
@@ -242,7 +325,7 @@ private:
NumArgs(3), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
- IsPragmaClangAttribute(false) {
+ IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
ArgsUnion *Args = getArgsBuffer();
Args[0] = Parm1;
Args[1] = Parm2;
@@ -259,7 +342,7 @@ private:
NumArgs(1), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(true), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
- IsPragmaClangAttribute(false) {
+ IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
ArgsUnion PVal(ArgKind);
memcpy(getArgsBuffer(), &PVal, sizeof(ArgsUnion));
detail::TypeTagForDatatypeData &ExtraData = getTypeTagForDatatypeDataSlot();
@@ -277,7 +360,7 @@ private:
NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
HasParsedType(true), HasProcessingCache(false),
- IsPragmaClangAttribute(false) {
+ IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
new (&getTypeBuffer()) ParsedType(typeArg);
}
@@ -291,7 +374,7 @@ private:
NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(true),
HasParsedType(false), HasProcessingCache(false),
- IsPragmaClangAttribute(false) {
+ IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
new (&getPropertyDataBuffer()) detail::PropertyData(getterId, setterId);
}
@@ -534,7 +617,10 @@ public:
}
}
- AttributeCommonInfo::Kind getKind() const { return getParsedKind(); }
+ AttributeCommonInfo::Kind getKind() const {
+ return AttributeCommonInfo::Kind(Info.AttrKind);
+ }
+ const ParsedAttrInfo &getInfo() const { return Info; }
};
class AttributePool;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
index 82d00494b0d6..f0245b93c7eb 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
@@ -169,7 +169,9 @@ namespace clang {
/// template-name.
ParsedTemplateTy Template;
- /// The kind of template that Template refers to.
+ /// The kind of template that Template refers to. If this is
+ /// TNK_Non_template, an error was encountered and diagnosed
+ /// when parsing or looking up the template name.
TemplateNameKind Kind;
/// The location of the '<' before the template argument
@@ -183,6 +185,10 @@ namespace clang {
/// NumArgs - The number of template arguments.
unsigned NumArgs;
+ /// Whether an error was encountered in the template arguments.
+ /// If so, NumArgs and the trailing arguments are best-effort.
+ bool ArgsInvalid;
+
/// Retrieves a pointer to the template arguments
ParsedTemplateArgument *getTemplateArgs() {
return getTrailingObjects<ParsedTemplateArgument>();
@@ -195,13 +201,13 @@ namespace clang {
IdentifierInfo *Name, OverloadedOperatorKind OperatorKind,
ParsedTemplateTy OpaqueTemplateName, TemplateNameKind TemplateKind,
SourceLocation LAngleLoc, SourceLocation RAngleLoc,
- ArrayRef<ParsedTemplateArgument> TemplateArgs,
+ ArrayRef<ParsedTemplateArgument> TemplateArgs, bool ArgsInvalid,
SmallVectorImpl<TemplateIdAnnotation *> &CleanupList) {
TemplateIdAnnotation *TemplateId = new (llvm::safe_malloc(
totalSizeToAlloc<ParsedTemplateArgument>(TemplateArgs.size())))
TemplateIdAnnotation(TemplateKWLoc, TemplateNameLoc, Name,
OperatorKind, OpaqueTemplateName, TemplateKind,
- LAngleLoc, RAngleLoc, TemplateArgs);
+ LAngleLoc, RAngleLoc, TemplateArgs, ArgsInvalid);
CleanupList.push_back(TemplateId);
return TemplateId;
}
@@ -213,6 +219,20 @@ namespace clang {
this->~TemplateIdAnnotation();
free(this);
}
+
+ /// Determine whether this might be a type template.
+ bool mightBeType() const {
+ return Kind == TNK_Non_template ||
+ Kind == TNK_Type_template ||
+ Kind == TNK_Dependent_template_name ||
+ Kind == TNK_Undeclared_template;
+ }
+
+ bool hasInvalidName() const { return Kind == TNK_Non_template; }
+ bool hasInvalidArgs() const { return ArgsInvalid; }
+
+ bool isInvalid() const { return hasInvalidName() || hasInvalidArgs(); }
+
private:
TemplateIdAnnotation(const TemplateIdAnnotation &) = delete;
@@ -222,11 +242,12 @@ namespace clang {
ParsedTemplateTy OpaqueTemplateName,
TemplateNameKind TemplateKind,
SourceLocation LAngleLoc, SourceLocation RAngleLoc,
- ArrayRef<ParsedTemplateArgument> TemplateArgs) noexcept
+ ArrayRef<ParsedTemplateArgument> TemplateArgs,
+ bool ArgsInvalid) noexcept
: TemplateKWLoc(TemplateKWLoc), TemplateNameLoc(TemplateNameLoc),
Name(Name), Operator(OperatorKind), Template(OpaqueTemplateName),
Kind(TemplateKind), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
- NumArgs(TemplateArgs.size()) {
+ NumArgs(TemplateArgs.size()), ArgsInvalid(ArgsInvalid) {
std::uninitialized_copy(TemplateArgs.begin(), TemplateArgs.end(),
getTemplateArgs());
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Scope.h b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
index 6133425a42a6..b7260f15fe1b 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
@@ -320,15 +320,28 @@ public:
/// isDeclScope - Return true if this is the scope that the specified decl is
/// declared in.
- bool isDeclScope(Decl *D) {
- return DeclsInScope.count(D) != 0;
+ bool isDeclScope(const Decl *D) const { return DeclsInScope.count(D) != 0; }
+
+ /// Get the entity corresponding to this scope.
+ DeclContext *getEntity() const {
+ return isTemplateParamScope() ? nullptr : Entity;
}
- DeclContext *getEntity() const { return Entity; }
- void setEntity(DeclContext *E) { Entity = E; }
+ /// Get the DeclContext in which to continue unqualified lookup after a
+ /// lookup in this scope.
+ DeclContext *getLookupEntity() const { return Entity; }
- bool hasErrorOccurred() const { return ErrorTrap.hasErrorOccurred(); }
+ void setEntity(DeclContext *E) {
+ assert(!isTemplateParamScope() &&
+ "entity associated with template param scope");
+ Entity = E;
+ }
+ void setLookupEntity(DeclContext *E) { Entity = E; }
+ /// Determine whether any unrecoverable errors have occurred within this
+ /// scope. Note that this may return false even if the scope contains invalid
+ /// declarations or statements, if the errors for those invalid constructs
+ /// were suppressed because some prior invalid construct was referenced.
bool hasUnrecoverableErrorOccurred() const {
return ErrorTrap.hasUnrecoverableErrorOccurred();
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
index 3c4847a2932c..f0f9cb9e40ae 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
@@ -174,9 +174,11 @@ public:
/// First SEH '__try' statement in the current function.
SourceLocation FirstSEHTryLoc;
+private:
/// Used to determine if errors occurred in this function or block.
DiagnosticErrorTrap ErrorTrap;
+public:
/// A SwitchStmt, along with a flag indicating if its list of case statements
/// is incomplete (because we dropped an invalid one while parsing).
using SwitchInfo = llvm::PointerIntPair<SwitchStmt*, 1, bool>;
@@ -375,6 +377,17 @@ public:
virtual ~FunctionScopeInfo();
+ /// Determine whether an unrecoverable error has occurred within this
+ /// function. Note that this may return false even if the function body is
+ /// invalid, because the errors may be suppressed if they're caused by prior
+ /// invalid declarations.
+ ///
+ /// FIXME: Migrate the caller of this to use containsErrors() instead once
+ /// it's ready.
+ bool hasUnrecoverableErrorOccurred() const {
+ return ErrorTrap.hasUnrecoverableErrorOccurred();
+ }
+
/// Record that a weak object was accessed.
///
/// Part of the implementation of -Wrepeated-use-of-weak.
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index 842e49602274..6f7ad8076718 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -15,15 +15,17 @@
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
@@ -35,6 +37,7 @@
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
@@ -55,6 +58,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
@@ -373,12 +377,22 @@ class Sema final {
ArrayRef<QualType> Args);
public:
+ /// The maximum alignment, same as in llvm::Value. We duplicate it here
+ /// because we cannot use the llvm constant directly in clang, and keeping a
+ /// single copy here avoids duplicating the value throughout clang code.
+ /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
+ ///
+ /// This is the greatest alignment value supported by load, store, and alloca
+ /// instructions, and global values.
+ static const unsigned MaxAlignmentExponent = 29;
+ static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
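  // For example (sketch): 1u << 29 is 536870912, so alignment requests above
  // that (e.g. via alignas or __attribute__((aligned))) can be rejected by Sema.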
+
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
- FPOptions FPFeatures;
+ FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
@@ -476,10 +490,41 @@ public:
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
- void Act(SourceLocation PragmaLocation,
- PragmaMsStackAction Action,
- llvm::StringRef StackSlotLabel,
- ValueType Value);
+
+ void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
+ llvm::StringRef StackSlotLabel, ValueType Value) {
+ if (Action == PSK_Reset) {
+ CurrentValue = DefaultValue;
+ CurrentPragmaLocation = PragmaLocation;
+ return;
+ }
+ if (Action & PSK_Push)
+ Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
+ PragmaLocation);
+ else if (Action & PSK_Pop) {
+ if (!StackSlotLabel.empty()) {
+ // If we've got a label, try to find it and jump there.
+ auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
+ return x.StackSlotLabel == StackSlotLabel;
+ });
+ // If we found the label, pop from there.
+ if (I != Stack.rend()) {
+ CurrentValue = I->Value;
+ CurrentPragmaLocation = I->PragmaLocation;
+ Stack.erase(std::prev(I.base()), Stack.end());
+ }
+ } else if (!Stack.empty()) {
+ // We do not have a label, just pop the last entry.
+ CurrentValue = Stack.back().Value;
+ CurrentPragmaLocation = Stack.back().PragmaLocation;
+ Stack.pop_back();
+ }
+ }
+ if (Action & PSK_Set) {
+ CurrentValue = Value;
+ CurrentPragmaLocation = PragmaLocation;
+ }
+ }
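  // For example (sketch), the MS-style pragma stack this drives:
  //   #pragma pack(push, mylabel, 1)  // PSK_Push | PSK_Set with a labelled slot
  //   #pragma pack(pop, mylabel)      // pops back to the slot labelled 'mylabel'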
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
@@ -541,6 +586,18 @@ public:
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
+ // This stack tracks the current state of Sema.CurFPFeatures.
+ PragmaStack<unsigned> FpPragmaStack;
+ FPOptionsOverride CurFPFeatureOverrides() {
+ FPOptionsOverride result;
+ if (!FpPragmaStack.hasValue()) {
+ result = FPOptionsOverride();
+ } else {
+ result = FPOptionsOverride(FpPragmaStack.CurrentValue);
+ }
+ return result;
+ }
+
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
@@ -602,16 +659,16 @@ public:
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
- /// cleanup that are created by the current full expression. The
- /// element type here is ExprWithCleanups::Object.
- SmallVector<BlockDecl*, 8> ExprCleanupObjects;
+ /// cleanup that are created by the current full expression.
+ SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
- using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
+ using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
+ llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
@@ -854,6 +911,11 @@ public:
}
};
+ /// Whether the AST is currently being rebuilt to correct immediate
+ /// invocations. Immediate invocation candidates and references to consteval
+ /// functions aren't tracked when this is set.
+ bool RebuildingImmediateInvocation = false;
+
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
@@ -1065,6 +1127,8 @@ public:
PotentiallyEvaluatedIfUsed
};
+ using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
+
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
@@ -1111,6 +1175,13 @@ public:
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
+ /// Set of candidates for starting an immediate invocation.
+ llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
+
+ /// Set of DeclRefExprs referencing a consteval function when used in a
+ /// context not already known to be immediately invoked.
+ llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
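    // For example (sketch), given
    //   consteval int sq(int n) { return n * n; }
    //   int k = sq(4);
    // the call sq(4) is recorded as an immediate invocation candidate and is
    // folded to a constant once the enclosing context is known not to be an
    // immediate function context itself.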
+
+ /// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
@@ -1323,16 +1394,23 @@ public:
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
- /// Records and restores the FP_CONTRACT state on entry/exit of compound
+ /// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
- class FPContractStateRAII {
+ class FPFeaturesStateRAII {
public:
- FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
- ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
+ FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
+ OldOverrides = S.FpPragmaStack.CurrentValue;
+ }
+ ~FPFeaturesStateRAII() {
+ S.CurFPFeatures = OldFPFeaturesState;
+ S.FpPragmaStack.CurrentValue = OldOverrides;
+ }
+ unsigned getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
+ unsigned OldOverrides;
};
void addImplicitTypedef(StringRef Name, QualType T);
@@ -1351,7 +1429,7 @@ public:
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
- FPOptions &getFPOptions() { return FPFeatures; }
+ FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
@@ -1467,6 +1545,15 @@ public:
void emitAndClearUnusedLocalTypedefWarnings();
+ private:
+ /// Function or variable declarations to be checked for whether the deferred
+ /// diagnostics should be emitted.
+ SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
+
+ public:
+ // Emit all deferred diagnostics.
+ void emitDeferredDiags();
+
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
@@ -1588,6 +1675,9 @@ public:
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
+ QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
+ SourceLocation AttrLoc);
+
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
@@ -1642,6 +1732,7 @@ public:
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
+ QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
@@ -1653,6 +1744,10 @@ public:
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
+ /// Determine whether the callee of a particular function call can throw.
+ /// E, D and Loc are all optional.
+ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
+ SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
@@ -1714,6 +1809,7 @@ public:
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
+ protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
@@ -1738,6 +1834,37 @@ public:
}
};
+ /// A derivative of BoundTypeDiagnoser for which the diagnostic's type
+ /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
+ /// For example, a diagnostic with no other parameters would generally have
+ /// the form "...%select{incomplete|sizeless}0 type %1...".
+ template <typename... Ts>
+ class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
+ public:
+ SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
+ : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
+
+ void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
+ const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
+ this->emit(DB, std::index_sequence_for<Ts...>());
+ DB << T->isSizelessType() << T;
+ }
+ };
+
+ enum class CompleteTypeKind {
+ /// Apply the normal rules for complete types. In particular,
+ /// treat all sizeless types as incomplete.
+ Normal,
+
+ /// Relax the normal rules for complete types so that they include
+ /// sizeless built-in types.
+ AcceptSizeless,
+
+ // FIXME: Eventually we should flip the default to Normal and opt in
+ // to AcceptSizeless rather than opt out of it.
+ Default = AcceptSizeless
+ };
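  // For example (sketch), with the AArch64 SVE ACLE builtin types:
  //   svint32_t v;           // sizeless: complete only under AcceptSizeless
  //   sizeof(svint32_t)      // rejected through the CompleteTypeKind::Normal paths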
+
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
@@ -1751,7 +1878,7 @@ private:
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
- TypeDiagnoser *Diagnoser);
+ CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
@@ -1787,7 +1914,7 @@ public:
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
- return !D->isHidden() || isVisibleSlow(D);
+ return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
@@ -1842,13 +1969,22 @@ public:
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
- bool isCompleteType(SourceLocation Loc, QualType T) {
- return !RequireCompleteTypeImpl(Loc, T, nullptr);
+ bool isCompleteType(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind = CompleteTypeKind::Default) {
+ return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
- TypeDiagnoser &Diagnoser);
+ CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind, unsigned DiagID);
+
bool RequireCompleteType(SourceLocation Loc, QualType T,
- unsigned DiagID);
+ TypeDiagnoser &Diagnoser) {
+ return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
+ }
+ bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
+ return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
+ }
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
@@ -1857,14 +1993,29 @@ public:
return RequireCompleteType(Loc, T, Diagnoser);
}
+ template <typename... Ts>
+ bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
+ const Ts &... Args) {
+ SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
+ return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
+ }
+
void completeExprArrayBound(Expr *E);
- bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
+ bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
+ TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
- return RequireCompleteExprType(E, Diagnoser);
+ return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
+ }
+
+ template <typename... Ts>
+ bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
+ const Ts &... Args) {
+ SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
+ return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
@@ -2282,11 +2433,13 @@ public:
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
- void ActOnParamUnparsedDefaultArgument(Decl *param,
- SourceLocation EqualLoc,
+ void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
- bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
+ ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
+ Expr *DefaultArg,
+ SourceLocation EqualLoc);
+ void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
@@ -2760,8 +2913,6 @@ public:
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
- DeclContext *getContainingDC(DeclContext *DC);
-
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
@@ -2771,6 +2922,11 @@ public:
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
+ /// Enter a template parameter scope, after it's been associated with a particular
+ /// DeclContext. Causes lookup within the scope to chain through enclosing contexts
+ /// in the correct order.
+ void EnterTemplatedContext(Scope *S, DeclContext *DC);
+
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
@@ -2869,7 +3025,7 @@ public:
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
- StringRef Uuid);
+ StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
@@ -2900,6 +3056,10 @@ public:
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
+ WebAssemblyImportNameAttr *mergeImportNameAttr(
+ Decl *D, const WebAssemblyImportNameAttr &AL);
+ WebAssemblyImportModuleAttr *mergeImportModuleAttr(
+ Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
@@ -2953,10 +3113,19 @@ public:
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
+ enum class AllowedExplicit {
+ /// Allow no explicit functions to be used.
+ None,
+ /// Allow explicit conversion functions but not explicit constructors.
+ Conversions,
+ /// Allow both explicit conversion functions and explicit constructors.
+ All
+ };
+
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
@@ -3263,7 +3432,8 @@ public:
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
- EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
+ EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
+ ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
@@ -3473,6 +3643,9 @@ public:
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
+ /// Look up a name following ~ in a destructor name. This is an ordinary
+ /// lookup, but prefers tags to typedefs.
+ LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
@@ -3583,7 +3756,7 @@ private:
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
- TypoRecoveryCallback TRC);
+ TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
@@ -3674,7 +3847,8 @@ public:
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
- FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
+ FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
+ bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
@@ -3727,32 +3901,28 @@ public:
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
+ /// \param RecoverUncorrectedTypos If true, when typo correction fails, it
+ /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
+ ///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
- ExprResult
- CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
- llvm::function_ref<ExprResult(Expr *)> Filter =
- [](Expr *E) -> ExprResult { return E; });
-
- ExprResult
- CorrectDelayedTyposInExpr(Expr *E,
- llvm::function_ref<ExprResult(Expr *)> Filter) {
- return CorrectDelayedTyposInExpr(E, nullptr, Filter);
- }
-
- ExprResult
- CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
- llvm::function_ref<ExprResult(Expr *)> Filter =
- [](Expr *E) -> ExprResult { return E; }) {
- return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
- }
-
- ExprResult
- CorrectDelayedTyposInExpr(ExprResult ER,
- llvm::function_ref<ExprResult(Expr *)> Filter) {
- return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
+ ExprResult CorrectDelayedTyposInExpr(
+ Expr *E, VarDecl *InitDecl = nullptr,
+ bool RecoverUncorrectedTypos = false,
+ llvm::function_ref<ExprResult(Expr *)> Filter =
+ [](Expr *E) -> ExprResult { return E; });
+
+ ExprResult CorrectDelayedTyposInExpr(
+ ExprResult ER, VarDecl *InitDecl = nullptr,
+ bool RecoverUncorrectedTypos = false,
+ llvm::function_ref<ExprResult(Expr *)> Filter =
+ [](Expr *E) -> ExprResult { return E; }) {
+ return ER.isInvalid()
+ ? ER
+ : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
+ RecoverUncorrectedTypos, Filter);
}
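A hypothetical call showing the new RecoverUncorrectedTypos flag together with a filter (a sketch only; S and E are assumed to be a Sema reference and an Expr* already in scope inside clang):

ExprResult Fixed = S.CorrectDelayedTyposInExpr(
    E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/true,
    [](Expr *Candidate) -> ExprResult {
      // Reject value-dependent rebuilds so another correction combination
      // is tried; uncorrectable typos are degraded to RecoveryExprs.
      if (Candidate->isValueDependent())
        return ExprError();
      return Candidate;
    });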
void diagnoseTypo(const TypoCorrection &Correction,
@@ -3779,6 +3949,11 @@ public:
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
+ /// Attempts to produce a RecoveryExpr after some AST node cannot be created.
+ ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
+ ArrayRef<Expr *> SubExprs,
+ QualType T = QualType());
+
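A hypothetical use of the recovery-expression hook (a sketch; S, Res, Begin, End, LHS and RHS are assumed names): when building a node fails, keep its sub-expressions in the AST so tooling still sees them.

if (Res.isInvalid())
  Res = S.CreateRecoveryExpr(Begin, End, {LHS, RHS});  // result type left unknown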
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
@@ -3787,6 +3962,8 @@ public:
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
+ void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
+ FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
@@ -4200,7 +4377,8 @@ public:
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
- StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
+ StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
+ ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
@@ -4441,6 +4619,8 @@ public:
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
+ void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
+
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
@@ -4577,6 +4757,10 @@ public:
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
+ /// Try to convert an expression \p E to type \p Ty. Returns the result of the
+ /// conversion.
+ ExprResult tryConvertExprToType(Expr *E, QualType Ty);
+
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
@@ -4703,6 +4887,15 @@ public:
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
+ ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
+ ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
+ ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen, ParsedType Ty);
+ ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen, Expr *E);
+
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
@@ -4773,9 +4966,36 @@ public:
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
+
+ ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
+ Expr *ColumnIdx,
+ SourceLocation RBLoc);
+
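For orientation, the kind of source the matrix subscript hook services under Clang's matrix extension (a sketch; requires -fenable-matrix, and the typedef name is an assumption):

typedef float m4x4_t __attribute__((matrix_type(4, 4)));

float get(m4x4_t M) {
  return M[1][2];  // row 1, column 2; built via CreateBuiltinMatrixSubscriptExpr
}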
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
- Expr *LowerBound, SourceLocation ColonLoc,
- Expr *Length, SourceLocation RBLoc);
+ Expr *LowerBound,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
+ SourceLocation RBLoc);
+ ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> Brackets);
+
+ /// Data structure for iterator expression.
+ struct OMPIteratorData {
+ IdentifierInfo *DeclIdent = nullptr;
+ SourceLocation DeclIdentLoc;
+ ParsedType Type;
+ OMPIteratorExpr::IteratorRange Range;
+ SourceLocation AssignLoc;
+ SourceLocation ColonLoc;
+ SourceLocation SecColonLoc;
+ };
+
+ ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
+ SourceLocation LLoc, SourceLocation RLoc,
+ ArrayRef<OMPIteratorData> Data);
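Roughly how the OMPIteratorData fields line up with the OpenMP 5.0 iterator syntax, as I read the struct above (a sketch of the mapping, not clang code):

// Example source handed to ActOnOMPIteratorExpr:
//   #pragma omp task depend(iterator(int i = 0:n:2), in: a[i])
// Rough field mapping for the single iterator-specifier:
//   Type                    -> 'int' (optional iterator type)
//   DeclIdent / DeclIdentLoc -> 'i'
//   AssignLoc               -> '='
//   Range                   -> the 0 / n / 2 begin:end:step expressions
//   ColonLoc / SecColonLoc  -> the first and (optional) second ':'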
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
@@ -5530,6 +5750,10 @@ public:
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
+ /// Wrap the expression in a ConstantExpr if it is a potential immediate
+ /// invocation.
+ ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
+
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
@@ -5557,7 +5781,8 @@ public:
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
- /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+ /// ActOnCXXNamedCast - Parse
+ /// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
@@ -6241,15 +6466,10 @@ public:
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
- bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
+ bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
- /// Check whether the given type-dependent expression will be the name of a
- /// function or another callable function-like entity (e.g. a function
- // template or overload set) for any substitution.
- bool IsDependentFunctionNameExpr(Expr *E);
-
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
@@ -6522,6 +6742,22 @@ public:
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
+ /// Mark destructors of virtual bases of this class referenced. In the Itanium
+ /// C++ ABI, this is done when emitting a destructor for any non-abstract
+ /// class. In the Microsoft C++ ABI, this is done any time a class's
+ /// destructor is referenced.
+ void MarkVirtualBaseDestructorsReferenced(
+ SourceLocation Location, CXXRecordDecl *ClassDecl,
+ llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
+
+ /// Do semantic checks to allow the complete destructor variant to be emitted
+ /// when the destructor is defined in another translation unit. In the Itanium
+ /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
+ /// can be emitted in separate TUs. To emit the complete variant, run a subset
+ /// of the checks performed when emitting a regular destructor.
+ void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Dtor);
+
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
@@ -6607,7 +6843,8 @@ public:
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
- unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
+ unsigned ActOnReenterTemplateScope(Decl *Template,
+ llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
@@ -6700,7 +6937,7 @@ public:
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
- unsigned AmbigiousBaseConvID,
+ unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
@@ -6728,7 +6965,7 @@ public:
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
- void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
+ void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
@@ -6872,6 +7109,27 @@ public:
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
+ enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
+ /// Whether and why a template name is required in this lookup.
+ class RequiredTemplateKind {
+ public:
+ /// Template name is required if TemplateKWLoc is valid.
+ RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
+ : TemplateKW(TemplateKWLoc) {}
+ /// Template name is unconditionally required.
+ RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
+
+ SourceLocation getTemplateKeywordLoc() const {
+ return TemplateKW.getValueOr(SourceLocation());
+ }
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+ bool isRequired() const { return TemplateKW != SourceLocation(); }
+ explicit operator bool() const { return isRequired(); }
+
+ private:
+ llvm::Optional<SourceLocation> TemplateKW;
+ };
+
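Two hypothetical ways a caller can now express the requirement, plus the default (a sketch; TemplateKWLoc is an assumed SourceLocation):

Sema::RequiredTemplateKind AfterKeyword(TemplateKWLoc);           // required because 'template' was seen
Sema::RequiredTemplateKind Always(Sema::TemplateNameIsRequired);  // required unconditionally
Sema::RequiredTemplateKind NotRequired;                           // default: no keyword, not required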
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
@@ -6881,12 +7139,11 @@ public:
/// functions (but no function templates).
FoundFunctions,
};
- bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
- QualType ObjectType, bool EnteringContext,
- bool &MemberOfUnknownSpecialization,
- SourceLocation TemplateKWLoc = SourceLocation(),
- AssumedTemplateKind *ATK = nullptr,
- bool Disambiguation = false);
+ bool LookupTemplateName(
+ LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
+ bool EnteringContext, bool &MemberOfUnknownSpecialization,
+ RequiredTemplateKind RequiredTemplate = SourceLocation(),
+ AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
@@ -7099,7 +7356,7 @@ public:
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
- TemplateNameKind ActOnDependentTemplateName(
+ TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
@@ -7953,12 +8210,10 @@ public:
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
- FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
- FunctionTemplateDecl *FT2,
- SourceLocation Loc,
- TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1,
- unsigned NumCallArguments2);
+ FunctionTemplateDecl *getMoreSpecializedTemplate(
+ FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
+ unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
@@ -8100,6 +8355,12 @@ public:
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
+ /// We are initializing a structured binding.
+ InitializingStructuredBinding,
+
+ /// We are marking a class as __dllexport.
+ MarkingClassDllexported,
+
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
@@ -8602,9 +8863,17 @@ public:
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
- assert(S.PendingInstantiations.empty() &&
- "PendingInstantiations should be empty before it is discarded.");
- S.PendingInstantiations.swap(SavedPendingInstantiations);
+ if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
+ assert(S.PendingInstantiations.empty() &&
+ "PendingInstantiations should be empty before it is discarded.");
+ S.PendingInstantiations.swap(SavedPendingInstantiations);
+ } else {
+ // Template instantiations in the PCH may be delayed until the TU.
+ S.PendingInstantiations.swap(SavedPendingInstantiations);
+ S.PendingInstantiations.insert(S.PendingInstantiations.end(),
+ SavedPendingInstantiations.begin(),
+ SavedPendingInstantiations.end());
+ }
}
private:
@@ -8833,6 +9102,8 @@ public:
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
+ bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
+ ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
@@ -9275,8 +9546,8 @@ public:
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
- bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
- bool Diagnose = true);
+ bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
+ bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
@@ -9386,6 +9657,18 @@ public:
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
+ /// Are precise floating point semantics currently enabled?
+ bool isPreciseFPEnabled() {
+ return !CurFPFeatures.getAllowFPReassociate() &&
+ !CurFPFeatures.getNoSignedZero() &&
+ !CurFPFeatures.getAllowReciprocal() &&
+ !CurFPFeatures.getAllowApproxFunc();
+ }
+
+ /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
+ void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
+ PragmaFloatControlKind Value);
+
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
@@ -9422,11 +9705,21 @@ public:
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
- void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
+ void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
+
+ /// Called on well formed
+ /// \#pragma clang fp reassociate
+ void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
- void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
+ void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
+
+ /// Called to set rounding mode for floating point operations.
+ void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
+
+ /// Called to set exception behavior for floating point operations.
+ void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
@@ -9564,6 +9857,9 @@ public:
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
+ /// Check that the expression co_await promise.final_suspend() shall not be
+ /// potentially-throwing.
+ bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
@@ -9594,7 +9890,7 @@ public:
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
- CurrOpenCLExtension = Ext;
+ CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
@@ -9662,22 +9958,6 @@ private:
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
- /// Check whether we're allowed to call Callee from the current function.
- void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckForDelayedContext = true);
-
- /// Check whether we're allowed to call Callee from the current function.
- void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckCaller = true);
-
- /// Check if the expression is allowed to be used in expressions for the
- /// OpenMP devices.
- void checkOpenMPDeviceExpr(const Expr *E);
-
- /// Finishes analysis of the deferred functions calls that may be declared as
- /// host/nohost during device/host compilation.
- void finalizeOpenMPDelayedAnalysis();
-
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
@@ -9693,17 +9973,54 @@ private:
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
- /// Marks all the functions that might be required for the currently active
- /// OpenMP context.
- void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
- FunctionDecl *Func,
- bool MightBeOdrUse);
+ /// Helper to keep information about the current `omp begin/end declare
+ /// variant` nesting.
+ struct OMPDeclareVariantScope {
+ /// The associated OpenMP context selector.
+ OMPTraitInfo *TI;
+
+ /// The associated OpenMP context selector mangling.
+ std::string NameSuffix;
+
+ OMPDeclareVariantScope(OMPTraitInfo &TI);
+ };
+
+ /// The current `omp begin/end declare variant` scopes.
+ SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
+
+ /// The declarator \p D defines a function in the scope \p S which is nested
+ /// in an `omp begin/end declare variant` scope. In this method we create a
+ /// declaration for \p D and rename \p D according to the OpenMP context
+ /// selector of the surrounding scope.
+ FunctionDecl *
+ ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
+ Declarator &D);
+
+ /// Register \p FD as specialization of \p BaseFD in the current `omp
+ /// begin/end declare variant` scope.
+ void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+ FunctionDecl *FD, FunctionDecl *BaseFD);
public:
- /// Struct to store the context selectors info for declare variant directive.
- using OMPCtxStringType = SmallString<8>;
- using OMPCtxSelectorData =
- OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
+
+  /// Can we exit an `omp begin/end declare variant` scope at the moment.
+ bool isInOpenMPDeclareVariantScope() {
+ return !OMPDeclareVariantScopes.empty();
+ }
+
+ /// Given the potential call expression \p Call, determine if there is a
+ /// specialization via the OpenMP declare variant mechanism available. If
+ /// there is, return the specialized call expression, otherwise return the
+ /// original \p Call.
+ ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
+ SourceLocation LParenLoc, MultiExprArg ArgExprs,
+ SourceLocation RParenLoc, Expr *ExecConfig);
+
+ /// Handle a `omp begin declare variant`.
+ void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
+
+ /// Handle a `omp end declare variant`.
+ void ActOnOpenMPEndDeclareVariant();
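The directive pair these two callbacks service, as I understand the OpenMP 5.x feature (a sketch; the context selector is just an example):

int foo();                 // base function

#pragma omp begin declare variant match(device = {kind(gpu)})
int foo() { return 1; }    // handled by the *InOpenMPDeclareVariantScope hooks
                           // above and mangled with the scope's NameSuffix
#pragma omp end declare variant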
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
@@ -9745,7 +10062,8 @@ public:
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
- bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
+ OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
+ unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
@@ -9755,7 +10073,15 @@ public:
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
- bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
+ bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const;
+
+ /// Check if the specified global variable must be captured by outer capture
+ /// regions.
+ /// \param Level Relative level of nested OpenMP construct for that
+ /// the check is performed.
+ bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
@@ -9862,6 +10188,11 @@ public:
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
+ /// Finishes analysis of the deferred functions calls that may be declared as
+ /// host/nohost during device/host compilation.
+ void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
@@ -9979,6 +10310,14 @@ public:
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp depobj'.
+ StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp scan'.
+ StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
@@ -10158,7 +10497,8 @@ public:
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
- OpenMPLinearClauseKind LinKind, QualType Type);
+ OpenMPLinearClauseKind LinKind, QualType Type,
+ bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
@@ -10174,10 +10514,12 @@ public:
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
+ /// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
- Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
- DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
+ Optional<std::pair<FunctionDecl *, Expr *>>
+ checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
+ OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
@@ -10185,11 +10527,9 @@ public:
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
- /// \param Data Set of context-specific data for the specified context
- /// selector.
+ /// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
- SourceRange SR,
- ArrayRef<OMPCtxSelectorData> Data);
+ OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
@@ -10248,6 +10588,10 @@ public:
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'detach' clause.
+ OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
@@ -10256,7 +10600,7 @@ public:
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
- OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
+ OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -10267,6 +10611,18 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'order' clause.
+ OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'update' clause.
+ OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
@@ -10306,6 +10662,21 @@ public:
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'acq_rel' clause.
+ OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'acquire' clause.
+ OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'release' clause.
+ OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'relaxed' clause.
+ OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'destroy' clause.
+ OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
@@ -10337,13 +10708,23 @@ public:
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
+ OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation DepLinMapLastLoc);
+ SourceLocation ExtraModifierLoc);
+ /// Called on well-formed 'inclusive' clause.
+ OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'exclusive' clause.
+ OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
@@ -10371,9 +10752,10 @@ public:
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
- CXXScopeSpec &ReductionIdScopeSpec,
+ ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
@@ -10418,15 +10800,21 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'depobj' pseudo clause.
+ OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
- ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc);
+ ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
- OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
@@ -10475,6 +10863,9 @@ public:
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
+ /// Called on well-formed 'use_device_addr' clause.
+ OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
@@ -10484,6 +10875,27 @@ public:
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Data for list of allocators.
+ struct UsesAllocatorsData {
+ /// Allocator.
+ Expr *Allocator = nullptr;
+ /// Allocator traits.
+ Expr *AllocatorTraits = nullptr;
+ /// Locations of '(' and ')' symbols.
+ SourceLocation LParenLoc, RParenLoc;
+ };
+ /// Called on well-formed 'uses_allocators' clause.
+ OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<UsesAllocatorsData> Data);
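Rough mapping from the clause syntax to UsesAllocatorsData, as I read the struct above (a sketch):

// #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
//   entry 0: Allocator = omp_default_mem_alloc, AllocatorTraits = nullptr
//   entry 1: Allocator = my_alloc, AllocatorTraits = my_traits,
//            LParenLoc/RParenLoc = the parentheses around the traits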
+ /// Called on well-formed 'affinity' clause.
+ OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators);
+
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
@@ -10540,9 +10952,8 @@ public:
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
- // the operand. This is DefaultFunctionArrayLvalueConversion,
- // except that it assumes the operand isn't of function or array
- // type.
+ // the operand. This function is a no-op if the operand has a function type
+ // or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
@@ -10650,6 +11061,11 @@ public:
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
+ /// IncompatibleFunctionPointer - The assignment is between two function
+ /// pointers types that are not compatible, but we accept them as an
+ /// extension.
+ IncompatibleFunctionPointer,
+
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
@@ -10879,6 +11295,13 @@ public:
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
+ /// Type checking for matrix binary operators.
+ QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign);
+ QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign);
+
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
@@ -11192,18 +11615,6 @@ public:
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
- /// A partial call graph maintained during CUDA/OpenMP device code compilation
- /// to support deferred diagnostics.
- ///
- /// Functions are only added here if, at the time they're considered, they are
- /// not known-emitted. As soon as we discover that a function is
- /// known-emitted, we remove it and everything it transitively calls from this
- /// set and add those functions to DeviceKnownEmittedFns.
- llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
- /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
- SourceLocation>>
- DeviceCallGraph;
-
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
@@ -11278,14 +11689,6 @@ public:
llvm::Optional<unsigned> PartialDiagId;
};
- /// Indicate that this function (and thus everything it transtively calls)
- /// will be codegen'ed, and emit any deferred diagnostics on this function and
- /// its (transitive) callees.
- void markKnownEmitted(
- Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
- SourceLocation OrigLoc,
- const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
-
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
@@ -11344,6 +11747,10 @@ public:
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
+ /// Check if the expression is allowed to be used in expressions for the
+ /// offloading devices.
+ void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
+
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
@@ -11366,6 +11773,8 @@ public:
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
+ static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
+
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
@@ -11404,6 +11813,10 @@ public:
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
+ /// May add implicit CUDAConstantAttr attribute to VD, depending on VD
+ /// and current compilation settings.
+ void MaybeAddCUDAConstantAttr(VarDecl *VD);
+
public:
/// Check whether we're allowed to call Callee from the current context.
///
@@ -11421,12 +11834,13 @@ public:
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
+ void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
+
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
- /// CUDA lambdas declared inside __device__ or __global__ functions inherit
- /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
- /// functions become __host__ __device__ themselves.
+  /// By default, CUDA lambdas are host device functions unless they have an
+  /// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
@@ -11567,7 +11981,13 @@ public:
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
- void CodeCompleteAfterIf(Scope *S);
+ /// Trigger code completion for a record of \p BaseType. \p InitExprs are
+ /// expressions in the initializer list seen so far and \p D is the current
+ /// Designation being parsed.
+ void CodeCompleteDesignator(const QualType BaseType,
+ llvm::ArrayRef<Expr *> InitExprs,
+ const Designation &D);
+ void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
@@ -11583,6 +12003,7 @@ public:
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
+ void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
@@ -11696,27 +12117,50 @@ private:
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
+
+ bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
- bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
-
- bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
+ bool WantCDE);
+ bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+
+ bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
+ ArrayRef<int> ArgNums);
+ bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum);
+ bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
+ bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums);
+ bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
@@ -11752,12 +12196,23 @@ private:
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
- bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
- bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
+ bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
+ bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
+
+ // Matrix builtin handling.
+ ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
+ ExprResult CallResult);
+ ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
+ ExprResult CallResult);
+ ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
+ ExprResult CallResult);
+
public:
enum FormatStringType {
FST_Scanf,
@@ -12062,6 +12517,40 @@ public:
ConstructorDestructor,
BuiltinFunction
};
+ /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
+ /// context is "used as device code".
+ ///
+ /// - If CurLexicalContext is a kernel function or it is known that the
+ /// function will be emitted for the device, emits the diagnostics
+ /// immediately.
+ /// - If CurLexicalContext is a function and we are compiling
+ /// for the device, but we don't know that this function will be codegen'ed
+  ///   for device yet, creates a diagnostic which is emitted if and when we
+ /// realize that the function will be codegen'ed.
+ ///
+ /// Example usage:
+ ///
+ /// Diagnose __float128 type usage only from SYCL device code if the current
+ /// target doesn't support it
+ /// if (!S.Context.getTargetInfo().hasFloat128Type() &&
+ /// S.getLangOpts().SYCLIsDevice)
+ /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
+ DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
+
+ /// Check whether we're allowed to call Callee from the current context.
+ ///
+  /// - If the call is never allowed in a semantically-correct program,
+  ///   emits an error and returns false.
+ ///
+ /// - If the call is allowed in semantically-correct programs, but only if
+ /// it's never codegen'ed, creates a deferred diagnostic to be emitted if
+ /// and when the caller is codegen'ed, and returns true.
+ ///
+ /// - Otherwise, returns true without emitting any diagnostics.
+ ///
+ /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
+ /// codegen'ed yet.
+ bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Template.h b/contrib/llvm-project/clang/include/clang/Sema/Template.h
index 47d6143bdc9a..91d175fdd050 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Template.h
@@ -42,6 +42,17 @@ class TypedefNameDecl;
class TypeSourceInfo;
class VarDecl;
+/// The kind of template substitution being performed.
+enum class TemplateSubstitutionKind : char {
+ /// We are substituting template parameters for template arguments in order
+ /// to form a template specialization.
+ Specialization,
+ /// We are substituting template parameters for (typically) other template
+ /// parameters in order to rewrite a declaration as a different declaration
+ /// (for example, when forming a deduction guide from a constructor).
+ Rewrite,
+};
+
/// Data structure that captures multiple levels of template argument
/// lists for use in template instantiation.
///
@@ -73,6 +84,9 @@ class VarDecl;
/// being substituted.
unsigned NumRetainedOuterLevels = 0;
+ /// The kind of substitution described by this argument list.
+ TemplateSubstitutionKind Kind = TemplateSubstitutionKind::Specialization;
+
public:
/// Construct an empty set of template argument lists.
MultiLevelTemplateArgumentList() = default;
@@ -83,6 +97,18 @@ class VarDecl;
addOuterTemplateArguments(&TemplateArgs);
}
+ void setKind(TemplateSubstitutionKind K) { Kind = K; }
+
+ /// Determine the kind of template substitution being performed.
+ TemplateSubstitutionKind getKind() const { return Kind; }
+
+ /// Determine whether we are rewriting template parameters rather than
+ /// substituting for them. If so, we should not leave references to the
+ /// original template parameters behind.
+ bool isRewrite() const {
+ return Kind == TemplateSubstitutionKind::Rewrite;
+ }
+
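A hypothetical use of the new kind flag (a sketch; DeducedArgs is an assumed const TemplateArgumentList*):

MultiLevelTemplateArgumentList Args;
Args.addOuterTemplateArguments(DeducedArgs);      // as elsewhere in this class
Args.setKind(TemplateSubstitutionKind::Rewrite);  // e.g. forming a deduction guide
if (Args.isRewrite()) {
  // substitution must not leave references to the original parameters behind
}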
/// Determine the number of levels in this template argument
/// list.
unsigned getNumLevels() const {
@@ -95,6 +121,10 @@ class VarDecl;
return TemplateArgumentLists.size();
}
+ unsigned getNumRetainedOuterLevels() const {
+ return NumRetainedOuterLevels;
+ }
+
/// Determine how many of the \p OldDepth outermost template parameter
/// lists would be removed by substituting these arguments.
unsigned getNewDepth(unsigned OldDepth) const {
@@ -159,6 +189,9 @@ class VarDecl;
void addOuterRetainedLevel() {
++NumRetainedOuterLevels;
}
+ void addOuterRetainedLevels(unsigned Num) {
+ NumRetainedOuterLevels += Num;
+ }
/// Retrieve the innermost template argument list.
const ArgList &getInnermost() const {
@@ -422,6 +455,9 @@ class VarDecl;
NamedDecl *
getPartiallySubstitutedPack(const TemplateArgument **ExplicitArgs = nullptr,
unsigned *NumExplicitArgs = nullptr) const;
+
+ /// Determine whether D is a pack expansion created in this scope.
+ bool isLocalPackExpansion(const Decl *D);
};
class TemplateDeclInstantiator
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
index 44a12c875da7..c6f9f1d1a08f 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
@@ -30,84 +30,84 @@
namespace clang {
namespace serialization {
- /// AST file major version number supported by this version of
- /// Clang.
- ///
- /// Whenever the AST file format changes in a way that makes it
- /// incompatible with previous versions (such that a reader
- /// designed for the previous version could not support reading
- /// the new version), this number should be increased.
- ///
- /// Version 4 of AST files also requires that the version control branch and
- /// revision match exactly, since there is no backward compatibility of
- /// AST files at this time.
- const unsigned VERSION_MAJOR = 8;
-
- /// AST file minor version number supported by this version of
- /// Clang.
- ///
- /// Whenever the AST format changes in a way that is still
- /// compatible with previous versions (such that a reader designed
- /// for the previous version could still support reading the new
- /// version by ignoring new kinds of subblocks), this number
- /// should be increased.
- const unsigned VERSION_MINOR = 0;
-
- /// An ID number that refers to an identifier in an AST file.
- ///
- /// The ID numbers of identifiers are consecutive (in order of discovery)
- /// and start at 1. 0 is reserved for NULL.
- using IdentifierID = uint32_t;
-
- /// An ID number that refers to a declaration in an AST file.
- ///
- /// The ID numbers of declarations are consecutive (in order of
- /// discovery), with values below NUM_PREDEF_DECL_IDS being reserved.
- /// At the start of a chain of precompiled headers, declaration ID 1 is
- /// used for the translation unit declaration.
- using DeclID = uint32_t;
-
- // FIXME: Turn these into classes so we can have some type safety when
- // we go from local ID to global and vice-versa.
- using LocalDeclID = DeclID;
- using GlobalDeclID = DeclID;
-
- /// An ID number that refers to a type in an AST file.
- ///
- /// The ID of a type is partitioned into two parts: the lower
- /// three bits are used to store the const/volatile/restrict
- /// qualifiers (as with QualType) and the upper bits provide a
- /// type index. The type index values are partitioned into two
- /// sets. The values below NUM_PREDEF_TYPE_IDs are predefined type
- /// IDs (based on the PREDEF_TYPE_*_ID constants), with 0 as a
- /// placeholder for "no type". Values from NUM_PREDEF_TYPE_IDs are
- /// other types that have serialized representations.
- using TypeID = uint32_t;
-
- /// A type index; the type ID with the qualifier bits removed.
- class TypeIdx {
- uint32_t Idx = 0;
-
- public:
- TypeIdx() = default;
- explicit TypeIdx(uint32_t index) : Idx(index) {}
-
- uint32_t getIndex() const { return Idx; }
-
- TypeID asTypeID(unsigned FastQuals) const {
- if (Idx == uint32_t(-1))
- return TypeID(-1);
-
- return (Idx << Qualifiers::FastWidth) | FastQuals;
- }
-
- static TypeIdx fromTypeID(TypeID ID) {
- if (ID == TypeID(-1))
- return TypeIdx(-1);
-
- return TypeIdx(ID >> Qualifiers::FastWidth);
- }
- };
+/// AST file major version number supported by this version of
+/// Clang.
+///
+/// Whenever the AST file format changes in a way that makes it
+/// incompatible with previous versions (such that a reader
+/// designed for the previous version could not support reading
+/// the new version), this number should be increased.
+///
+/// Version 4 of AST files also requires that the version control branch and
+/// revision match exactly, since there is no backward compatibility of
+/// AST files at this time.
+const unsigned VERSION_MAJOR = 11;
+
+/// AST file minor version number supported by this version of
+/// Clang.
+///
+/// Whenever the AST format changes in a way that is still
+/// compatible with previous versions (such that a reader designed
+/// for the previous version could still support reading the new
+/// version by ignoring new kinds of subblocks), this number
+/// should be increased.
+const unsigned VERSION_MINOR = 0;
+
+/// An ID number that refers to an identifier in an AST file.
+///
+/// The ID numbers of identifiers are consecutive (in order of discovery)
+/// and start at 1. 0 is reserved for NULL.
+using IdentifierID = uint32_t;
+
+/// An ID number that refers to a declaration in an AST file.
+///
+/// The ID numbers of declarations are consecutive (in order of
+/// discovery), with values below NUM_PREDEF_DECL_IDS being reserved.
+/// At the start of a chain of precompiled headers, declaration ID 1 is
+/// used for the translation unit declaration.
+using DeclID = uint32_t;
+
+// FIXME: Turn these into classes so we can have some type safety when
+// we go from local ID to global and vice-versa.
+using LocalDeclID = DeclID;
+using GlobalDeclID = DeclID;
+
+/// An ID number that refers to a type in an AST file.
+///
+/// The ID of a type is partitioned into two parts: the lower
+/// three bits are used to store the const/volatile/restrict
+/// qualifiers (as with QualType) and the upper bits provide a
+/// type index. The type index values are partitioned into two
+/// sets. The values below NUM_PREDEF_TYPE_IDs are predefined type
+/// IDs (based on the PREDEF_TYPE_*_ID constants), with 0 as a
+/// placeholder for "no type". Values from NUM_PREDEF_TYPE_IDs are
+/// other types that have serialized representations.
+using TypeID = uint32_t;
+
+/// A type index; the type ID with the qualifier bits removed.
+class TypeIdx {
+ uint32_t Idx = 0;
+
+public:
+ TypeIdx() = default;
+ explicit TypeIdx(uint32_t index) : Idx(index) {}
+
+ uint32_t getIndex() const { return Idx; }
+
+ TypeID asTypeID(unsigned FastQuals) const {
+ if (Idx == uint32_t(-1))
+ return TypeID(-1);
+
+ return (Idx << Qualifiers::FastWidth) | FastQuals;
+ }
+
+ static TypeIdx fromTypeID(TypeID ID) {
+ if (ID == TypeID(-1))
+ return TypeIdx(-1);
+
+ return TypeIdx(ID >> Qualifiers::FastWidth);
+ }
+};
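A standalone round trip of the TypeID layout described above (low three bits carry the fast cvr qualifiers, the rest is the type index); plain C++, not clang code:

#include <cassert>
#include <cstdint>

constexpr unsigned FastWidth = 3;  // stands in for Qualifiers::FastWidth

uint32_t asTypeID(uint32_t Idx, uint32_t FastQuals) {
  return (Idx << FastWidth) | FastQuals;
}
uint32_t indexOf(uint32_t ID) { return ID >> FastWidth; }

int main() {
  uint32_t ID = asTypeID(/*Idx=*/42, /*const qualifier bit=*/1);
  assert(indexOf(ID) == 42 && (ID & 0x7) == 1);
  return 0;
}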
/// A structure for putting "fast"-unqualified QualTypes into a
/// DenseMap. This uses the standard pointer hash function.
@@ -181,7 +181,7 @@ namespace serialization {
/// Raw source location of end of range.
unsigned End;
- /// Offset in the AST file.
+ /// Offset in the AST file relative to ModuleFile::MacroOffsetsBase.
uint32_t BitOffset;
PPEntityOffset(SourceRange R, uint32_t BitOffset)
@@ -216,17 +216,43 @@ namespace serialization {
}
};
- /// Source range/offset of a preprocessed entity.
+  /// Offset in the AST file. The 64-bit bit offset is split into low/high
+  /// 32-bit parts to keep the structure alignment 32-bit (this matters
+  /// because blobs in the bitstream are 32-bit aligned). This structure is
+  /// serialized "as is" to the AST file.
+ struct UnderalignedInt64 {
+ uint32_t BitOffsetLow = 0;
+ uint32_t BitOffsetHigh = 0;
+
+ UnderalignedInt64() = default;
+ UnderalignedInt64(uint64_t BitOffset) { setBitOffset(BitOffset); }
+
+ void setBitOffset(uint64_t Offset) {
+ BitOffsetLow = Offset;
+ BitOffsetHigh = Offset >> 32;
+ }
+
+ uint64_t getBitOffset() const {
+ return BitOffsetLow | (uint64_t(BitOffsetHigh) << 32);
+ }
+ };
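A standalone check of the low/high split (a sketch mirroring UnderalignedInt64): two 32-bit fields keep 32-bit alignment yet round-trip a 64-bit bit offset.

#include <cassert>
#include <cstdint>

struct Split64 {               // same shape as UnderalignedInt64 above
  uint32_t Lo = 0, Hi = 0;
  void set(uint64_t V) { Lo = uint32_t(V); Hi = uint32_t(V >> 32); }
  uint64_t get() const { return Lo | (uint64_t(Hi) << 32); }
};

int main() {
  static_assert(alignof(Split64) == 4, "stays 32-bit aligned");
  Split64 S;
  S.set(0x123456789ABCULL);
  assert(S.get() == 0x123456789ABCULL);
  return 0;
}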
+
+ /// Source location and bit offset of a declaration.
struct DeclOffset {
/// Raw source location.
unsigned Loc = 0;
- /// Offset in the AST file.
- uint32_t BitOffset = 0;
+    /// Offset relative to the start of the DECLTYPES_BLOCK block. Keep the
+    /// structure alignment 32-bit and avoid a padding gap, because an
+    /// undefined value in the padding would affect the AST hash.
+ UnderalignedInt64 BitOffset;
DeclOffset() = default;
- DeclOffset(SourceLocation Loc, uint32_t BitOffset)
- : Loc(Loc.getRawEncoding()), BitOffset(BitOffset) {}
+ DeclOffset(SourceLocation Loc, uint64_t BitOffset,
+ uint64_t DeclTypesBlockStartOffset) {
+ setLocation(Loc);
+ setBitOffset(BitOffset, DeclTypesBlockStartOffset);
+ }
void setLocation(SourceLocation L) {
Loc = L.getRawEncoding();
@@ -235,6 +261,15 @@ namespace serialization {
SourceLocation getLocation() const {
return SourceLocation::getFromRawEncoding(Loc);
}
+
+ void setBitOffset(uint64_t Offset,
+ const uint64_t DeclTypesBlockStartOffset) {
+ BitOffset.setBitOffset(Offset - DeclTypesBlockStartOffset);
+ }
+
+ uint64_t getBitOffset(const uint64_t DeclTypesBlockStartOffset) const {
+ return BitOffset.getBitOffset() + DeclTypesBlockStartOffset;
+ }
};
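The rebasing arithmetic in setBitOffset/getBitOffset, spelled out as a tiny standalone sketch with hypothetical numbers:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t BlockStart = 1000;  // hypothetical DECLTYPES_BLOCK start
  const uint64_t Absolute = 1760;    // hypothetical absolute bit offset
  uint64_t Stored = Absolute - BlockStart;   // what setBitOffset records
  assert(Stored + BlockStart == Absolute);   // what getBitOffset returns
  return 0;
}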
/// The number of predefined preprocessed entity IDs.
@@ -362,6 +397,9 @@ namespace serialization {
/// Record code for the signature that identifiers this AST file.
SIGNATURE = 1,
+ /// Record code for the content hash of the AST block.
+ AST_BLOCK_HASH,
+
/// Record code for the diagnostic options table.
DIAGNOSTIC_OPTIONS,
@@ -650,7 +688,13 @@ namespace serialization {
PP_CONDITIONAL_STACK = 62,
/// A table of skipped ranges within the preprocessing record.
- PPD_SKIPPED_RANGES = 63
+ PPD_SKIPPED_RANGES = 63,
+
+ /// Record code for the Decls to be checked for deferred diags.
+ DECLS_TO_CHECK_FOR_DEFERRED_DIAGS = 64,
+
+ /// Record code for \#pragma float_control options.
+ FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
};
/// Record types used within a source manager block.
@@ -1013,6 +1057,18 @@ namespace serialization {
/// \brief The '_Sat unsigned long _Fract' type
PREDEF_TYPE_SAT_ULONG_FRACT_ID = 69,
+ /// The placeholder type for OpenMP array shaping operation.
+ PREDEF_TYPE_OMP_ARRAY_SHAPING = 70,
+
+ /// The placeholder type for OpenMP iterator expression.
+ PREDEF_TYPE_OMP_ITERATOR = 71,
+
+ /// A placeholder type for incomplete matrix index operations.
+ PREDEF_TYPE_INCOMPLETE_MATRIX_IDX = 72,
+
+ /// \brief The '__bf16' type
+ PREDEF_TYPE_BFLOAT16_ID = 73,
+
/// OpenCL image types with auto numeration
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
PREDEF_TYPE_##Id##_ID,
@@ -1125,27 +1181,30 @@ namespace serialization {
/// The internal '__builtin_ms_va_list' typedef.
PREDEF_DECL_BUILTIN_MS_VA_LIST_ID = 11,
+ /// The predeclared '_GUID' struct.
+ PREDEF_DECL_BUILTIN_MS_GUID_ID = 12,
+
/// The extern "C" context.
- PREDEF_DECL_EXTERN_C_CONTEXT_ID = 12,
+ PREDEF_DECL_EXTERN_C_CONTEXT_ID = 13,
/// The internal '__make_integer_seq' template.
- PREDEF_DECL_MAKE_INTEGER_SEQ_ID = 13,
+ PREDEF_DECL_MAKE_INTEGER_SEQ_ID = 14,
/// The internal '__NSConstantString' typedef.
- PREDEF_DECL_CF_CONSTANT_STRING_ID = 14,
+ PREDEF_DECL_CF_CONSTANT_STRING_ID = 15,
/// The internal '__NSConstantString' tag type.
- PREDEF_DECL_CF_CONSTANT_STRING_TAG_ID = 15,
+ PREDEF_DECL_CF_CONSTANT_STRING_TAG_ID = 16,
/// The internal '__type_pack_element' template.
- PREDEF_DECL_TYPE_PACK_ELEMENT_ID = 16,
+ PREDEF_DECL_TYPE_PACK_ELEMENT_ID = 17,
};
/// The number of declaration IDs that are predefined.
///
/// For more information about predefined declarations, see the
/// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
- const unsigned int NUM_PREDEF_DECL_IDS = 17;
+ const unsigned int NUM_PREDEF_DECL_IDS = 18;
/// Record of updates for a declaration that was modified after
/// being deserialized. This can occur within DECLTYPES_BLOCK_ID.
@@ -1219,6 +1278,9 @@ namespace serialization {
/// A MSPropertyDecl record.
DECL_MS_PROPERTY,
+ /// A MSGuidDecl record.
+ DECL_MS_GUID,
+
/// A VarDecl record.
DECL_VAR,
@@ -1547,6 +1609,9 @@ namespace serialization {
/// An ArraySubscriptExpr record.
EXPR_ARRAY_SUBSCRIPT,
+ /// A MatrixSubscriptExpr record.
+ EXPR_MATRIX_SUBSCRIPT,
+
/// A CallExpr record.
EXPR_CALL,
@@ -1631,6 +1696,9 @@ namespace serialization {
/// An AtomicExpr record.
EXPR_ATOMIC,
+ /// A RecoveryExpr record.
+ EXPR_RECOVERY,
+
// Objective-C
/// An ObjCStringLiteral record.
@@ -1738,9 +1806,15 @@ namespace serialization {
/// A CXXConstCastExpr record.
EXPR_CXX_CONST_CAST,
+ /// A CXXAddrspaceCastExpr record.
+ EXPR_CXX_ADDRSPACE_CAST,
+
/// A CXXFunctionalCastExpr record.
EXPR_CXX_FUNCTIONAL_CAST,
+ /// A BuiltinBitCastExpr record.
+ EXPR_BUILTIN_BIT_CAST,
+
/// A UserDefinedLiteral record.
EXPR_USER_DEFINED_LITERAL,
@@ -1825,6 +1899,8 @@ namespace serialization {
STMT_OMP_BARRIER_DIRECTIVE,
STMT_OMP_TASKWAIT_DIRECTIVE,
STMT_OMP_FLUSH_DIRECTIVE,
+ STMT_OMP_DEPOBJ_DIRECTIVE,
+ STMT_OMP_SCAN_DIRECTIVE,
STMT_OMP_ORDERED_DIRECTIVE,
STMT_OMP_ATOMIC_DIRECTIVE,
STMT_OMP_TARGET_DIRECTIVE,
@@ -1860,6 +1936,8 @@ namespace serialization {
STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
EXPR_OMP_ARRAY_SECTION,
+ EXPR_OMP_ARRAY_SHAPING,
+ EXPR_OMP_ITERATOR,
// ARC
EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr
@@ -1871,6 +1949,9 @@ namespace serialization {
EXPR_COAWAIT,
EXPR_COYIELD,
EXPR_DEPENDENT_COAWAIT,
+
+ // FixedPointLiteral
+ EXPR_FIXEDPOINT_LITERAL,
};
/// The kinds of designators that can occur in a
@@ -1899,6 +1980,9 @@ namespace serialization {
CTOR_INITIALIZER_INDIRECT_MEMBER
};
+ /// Kinds of cleanup objects owned by ExprWithCleanups.
+ enum CleanupObjectKind { COK_Block, COK_CompoundLiteral };
+
/// Describes the redeclarations of a declaration.
struct LocalRedeclarationsInfo {
// The ID of the first declaration
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
index e74bf00e0872..a80366f0ee04 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
@@ -723,9 +723,10 @@ private:
struct PendingMacroInfo {
ModuleFile *M;
- uint64_t MacroDirectivesOffset;
+ /// Offset relative to ModuleFile::MacroOffsetsBase.
+ uint32_t MacroDirectivesOffset;
- PendingMacroInfo(ModuleFile *M, uint64_t MacroDirectivesOffset)
+ PendingMacroInfo(ModuleFile *M, uint32_t MacroDirectivesOffset)
: M(M), MacroDirectivesOffset(MacroDirectivesOffset) {}
};
@@ -856,6 +857,18 @@ private:
int PragmaMSPointersToMembersState = -1;
SourceLocation PointersToMembersPragmaLocation;
+ /// The pragma float_control state.
+ Optional<unsigned> FpPragmaCurrentValue;
+ SourceLocation FpPragmaCurrentLocation;
+ struct FpPragmaStackEntry {
+ unsigned Value;
+ SourceLocation Location;
+ SourceLocation PushLocation;
+ StringRef SlotLabel;
+ };
+ llvm::SmallVector<FpPragmaStackEntry, 2> FpPragmaStack;
+ llvm::SmallVector<std::string, 2> FpPragmaStrings;
+
/// The pragma pack state.
Optional<unsigned> PragmaPackCurrentValue;
SourceLocation PragmaPackCurrentLocation;
@@ -890,6 +903,12 @@ private:
// A list of late parsed template function data.
SmallVector<uint64_t, 1> LateParsedTemplates;
+ /// The IDs of all decls to be checked for deferred diags.
+ ///
+ /// Sema tracks these to emit deferred diags.
+ SmallVector<uint64_t, 4> DeclsToCheckForDeferredDiags;
+
+
public:
struct ImportedSubmodule {
serialization::SubmoduleID ID;
@@ -1348,7 +1367,7 @@ private:
unsigned PreviousGeneration = 0);
RecordLocation getLocalBitOffset(uint64_t GlobalOffset);
- uint64_t getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset);
+ uint64_t getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset);
/// Returns the first preprocessed entity ID that begins or ends after
/// \arg Loc.
@@ -1871,7 +1890,8 @@ public:
/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
/// specified cursor. Read the abbreviations that are at the top of the block
/// and then leave the cursor pointing into the block.
- static bool ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, unsigned BlockID);
+ static bool ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, unsigned BlockID,
+ uint64_t *StartOfBlockOffset = nullptr);
/// Finds all the visible declarations with a given name.
/// The current implementation of this method just loads the entire
@@ -1983,6 +2003,9 @@ public:
void ReadUnusedLocalTypedefNameCandidates(
llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) override;
+ void ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) override;
+
void ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) override;
@@ -2196,7 +2219,7 @@ public:
/// \param MacroDirectivesOffset Offset of the serialized macro directive
/// history.
void addPendingMacro(IdentifierInfo *II, ModuleFile *M,
- uint64_t MacroDirectivesOffset);
+ uint32_t MacroDirectivesOffset);
/// Read the set of macros defined by this external macro source.
void ReadDefinedMacros() override;
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
index f6dc8b2b7ae2..7248e6fa6c21 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_SERIALIZATION_ASTRECORDREADER_H
#define LLVM_CLANG_SERIALIZATION_ASTRECORDREADER_H
+#include "clang/AST/ASTContext.h"
#include "clang/AST/AbstractBasicReader.h"
#include "clang/Lex/Token.h"
#include "clang/Serialization/ASTReader.h"
@@ -22,6 +23,7 @@
#include "llvm/ADT/APSInt.h"
namespace clang {
+class OMPTraitInfo;
/// An object for streaming information from a record.
class ASTRecordReader
@@ -117,7 +119,7 @@ public:
//readExceptionSpecInfo(SmallVectorImpl<QualType> &ExceptionStorage);
/// Get the global offset corresponding to a local offset.
- uint64_t getGlobalBitOffset(uint32_t LocalOffset) {
+ uint64_t getGlobalBitOffset(uint64_t LocalOffset) {
return Reader->getGlobalBitOffset(*F, LocalOffset);
}
@@ -258,6 +260,9 @@ public:
return Reader->ReadCXXTemporary(*F, Record, Idx);
}
+ /// Read an OMPTraitInfo object, advancing Idx.
+ OMPTraitInfo *readOMPTraitInfo();
+
/// Read an OpenMP clause, advancing Idx.
OMPClause *readOMPClause();
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
index 43af68628ecc..491207c9de90 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
@@ -266,6 +266,9 @@ public:
void AddCXXDefinitionData(const CXXRecordDecl *D);
+ /// Write an OMPTraitInfo object.
+ void writeOMPTraitInfo(const OMPTraitInfo *TI);
+
void writeOMPClause(OMPClause *C);
/// Emit a string.
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
index c0a943adf2c7..7a6664af65d8 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
@@ -27,6 +27,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -61,6 +62,7 @@ class CXXRecordDecl;
class CXXTemporary;
class FileEntry;
class FPOptions;
+class FPOptionsOverride;
class FunctionDecl;
class HeaderSearch;
class HeaderSearchOptions;
@@ -137,6 +139,12 @@ private:
/// The module we're currently writing, if any.
Module *WritingModule = nullptr;
+ /// The offset of the first bit inside the AST_BLOCK.
+ uint64_t ASTBlockStartOffset = 0;
+
+ /// The range representing all the AST_BLOCK.
+ std::pair<uint64_t, uint64_t> ASTBlockRange;
+
/// The base directory for any relative paths we emit.
std::string BaseDirectory;
@@ -206,6 +214,10 @@ private:
/// the declaration's ID.
std::vector<serialization::DeclOffset> DeclOffsets;
+ /// The offset of the DECLTYPES_BLOCK. The offsets in DeclOffsets
+ /// are relative to this value.
+ uint64_t DeclTypesBlockStartOffset = 0;
+
/// Sorted (by file offset) vector of pairs of file offset/DeclID.
using LocDeclIDsTy =
SmallVector<std::pair<unsigned, serialization::DeclID>, 64>;
@@ -216,7 +228,8 @@ private:
/// indicates the index that this particular vector has in the global one.
unsigned FirstDeclIndex;
};
- using FileDeclIDsTy = llvm::DenseMap<FileID, DeclIDInFileInfo *>;
+ using FileDeclIDsTy =
+ llvm::DenseMap<FileID, std::unique_ptr<DeclIDInFileInfo>>;
/// Map from file SLocEntries to info about the file-level declarations
/// that it contains.
@@ -243,7 +256,7 @@ private:
/// Offset of each type in the bitstream, indexed by
/// the type's ID.
- std::vector<uint32_t> TypeOffsets;
+ std::vector<serialization::UnderalignedInt64> TypeOffsets;
/// The first ID number we can use for our own identifiers.
serialization::IdentID FirstIdentID = serialization::NUM_PREDEF_IDENT_IDS;
@@ -277,7 +290,8 @@ private:
/// The macro infos to emit.
std::vector<MacroInfoToEmitData> MacroInfosToEmit;
- llvm::DenseMap<const IdentifierInfo *, uint64_t> IdentMacroDirectivesOffsetMap;
+ llvm::DenseMap<const IdentifierInfo *, uint32_t>
+ IdentMacroDirectivesOffsetMap;
/// @name FlushStmt Caches
/// @{
@@ -439,7 +453,7 @@ private:
/// A list of the module file extension writers.
std::vector<std::unique_ptr<ModuleFileExtensionWriter>>
- ModuleFileExtensionWriters;
+ ModuleFileExtensionWriters;
/// Retrieve or create a submodule ID for this module.
unsigned getSubmoduleID(Module *Mod);
@@ -456,7 +470,8 @@ private:
ASTContext &Context);
/// Calculate hash of the pcm content.
- static ASTFileSignature createSignature(StringRef Bytes);
+ static std::pair<ASTFileSignature, ASTFileSignature>
+ createSignature(StringRef AllBytes, StringRef ASTBlockBytes);
void WriteInputFiles(SourceManager &SourceMgr, HeaderSearchOptions &HSOpts,
bool Modules);
@@ -464,7 +479,8 @@ private:
const Preprocessor &PP);
void WritePreprocessor(const Preprocessor &PP, bool IsModule);
void WriteHeaderSearch(const HeaderSearch &HS);
- void WritePreprocessorDetail(PreprocessingRecord &PPRec);
+ void WritePreprocessorDetail(PreprocessingRecord &PPRec,
+ uint64_t MacroOffsetsBase);
void WriteSubmodules(Module *WritingModule);
void WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
@@ -491,7 +507,7 @@ private:
bool IsModule);
void WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord);
void WriteDeclContextVisibleUpdate(const DeclContext *DC);
- void WriteFPPragmaOptions(const FPOptions &Opts);
+ void WriteFPPragmaOptions(const FPOptionsOverride &Opts);
void WriteOpenCLExtensions(Sema &SemaRef);
void WriteOpenCLExtensionTypes(Sema &SemaRef);
void WriteOpenCLExtensionDecls(Sema &SemaRef);
@@ -502,6 +518,7 @@ private:
void WriteMSStructPragmaOptions(Sema &SemaRef);
void WriteMSPointersToMembersPragmaOptions(Sema &SemaRef);
void WritePackPragmaOptions(Sema &SemaRef);
+ void WriteFloatControlPragmaOptions(Sema &SemaRef);
void WriteModuleFileExtension(Sema &SemaRef,
ModuleFileExtensionWriter &Writer);
@@ -588,7 +605,7 @@ public:
/// Determine the ID of an already-emitted macro.
serialization::MacroID getMacroID(MacroInfo *MI);
- uint64_t getMacroDirectivesOffset(const IdentifierInfo *Name);
+ uint32_t getMacroDirectivesOffset(const IdentifierInfo *Name);
/// Emit a reference to a type.
void AddTypeRef(QualType T, RecordDataImpl &Record);
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
index 8f3eb0220637..cec29da69372 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_SERIALIZATION_MODULEFILE_H
#define LLVM_CLANG_SERIALIZATION_MODULEFILE_H
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Serialization/ASTBitCodes.h"
@@ -34,8 +35,6 @@
namespace clang {
-class FileEntry;
-
namespace serialization {
/// Specifies the kind of module that has been loaded.
@@ -169,6 +168,10 @@ public:
/// and modification time to identify this particular file.
ASTFileSignature Signature;
+ /// The signature of the AST block of the module file; this can be used to
+ /// unique module files based on their AST contents.
+ ASTFileSignature ASTBlockHash;
+
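The AST-block hash recorded here pairs with the writer-side change further below, where createSignature now returns two signatures: one over the whole module file and one over just the AST block's bytes, so module files can be deduplicated by AST content. A hedged sketch of that two-hash shape, using a toy 64-bit FNV-1a hash and hypothetical names (fnv1a, computeSignatures) instead of the real ASTFileSignature machinery:

// Illustrative only: one hash over the whole file, one over the AST block's
// byte range, computed with a toy 64-bit FNV-1a (the real code produces a
// pair of ASTFileSignature values, not uint64_t).
#include <cstdint>
#include <string>
#include <utility>

uint64_t fnv1a(const std::string &Bytes) {
  uint64_t H = 1469598103934665603ull; // FNV offset basis
  for (unsigned char C : Bytes) {
    H ^= C;
    H *= 1099511628211ull; // FNV prime
  }
  return H;
}

// Mirrors the shape of createSignature(AllBytes, ASTBlockBytes).
std::pair<uint64_t, uint64_t> computeSignatures(const std::string &AllBytes,
                                                size_t ASTBlockBegin,
                                                size_t ASTBlockEnd) {
  uint64_t WholeFile = fnv1a(AllBytes);
  uint64_t ASTBlock =
      fnv1a(AllBytes.substr(ASTBlockBegin, ASTBlockEnd - ASTBlockBegin));
  return {WholeFile, ASTBlock};
}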
/// Whether this module has been directly imported by the
/// user.
bool DirectlyImported = false;
@@ -186,6 +189,9 @@ public:
/// The global bit offset (or base) of this module
uint64_t GlobalBitOffset = 0;
+ /// The bit offset of the AST block of this module.
+ uint64_t ASTBlockStartOffset = 0;
+
/// The serialized bitstream data for this file.
StringRef Data;
@@ -243,6 +249,9 @@ public:
/// Cursor used to read source location entries.
llvm::BitstreamCursor SLocEntryCursor;
+ /// The bit offset to the start of the SOURCE_MANAGER_BLOCK.
+ uint64_t SourceManagerBlockStartOffset = 0;
+
/// The number of source location entries in this AST file.
unsigned LocalNumSLocEntries = 0;
@@ -252,6 +261,10 @@ public:
/// The base offset in the source manager's view of this module.
unsigned SLocEntryBaseOffset = 0;
+ /// Base file offset for the offsets in SLocEntryOffsets. Real file offset
+ /// for the entry is SLocEntryOffsetsBase + SLocEntryOffsets[i].
+ uint64_t SLocEntryOffsetsBase = 0;
+
/// Offsets for all of the source location entries in the
/// AST file.
const uint32_t *SLocEntryOffsets = nullptr;
@@ -303,6 +316,10 @@ public:
/// The number of macros in this AST file.
unsigned LocalNumMacros = 0;
+ /// Base file offset for the offsets in MacroOffsets. Real file offset for
+ /// the entry is MacroOffsetsBase + MacroOffsets[i].
+ uint64_t MacroOffsetsBase = 0;
+
/// Offsets of macros in the preprocessor block.
///
/// This array is indexed by the macro ID (-1), and provides
@@ -402,11 +419,14 @@ public:
// === Declarations ===
- /// DeclsCursor - This is a cursor to the start of the DECLS_BLOCK block. It
- /// has read all the abbreviations at the start of the block and is ready to
- /// jump around with these in context.
+ /// DeclsCursor - This is a cursor to the start of the DECLTYPES_BLOCK block.
+ /// It has read all the abbreviations at the start of the block and is ready
+ /// to jump around with these in context.
llvm::BitstreamCursor DeclsCursor;
+ /// The offset to the start of the DECLTYPES_BLOCK block.
+ uint64_t DeclsBlockStartOffset = 0;
+
/// The number of declarations in this AST file.
unsigned LocalNumDecls = 0;
@@ -451,7 +471,7 @@ public:
/// Offset of each type within the bitstream, indexed by the
/// type ID, or the representation of a Type*.
- const uint32_t *TypeOffsets = nullptr;
+ const UnderalignedInt64 *TypeOffsets = nullptr;
/// Base type ID for types local to this module as represented in
/// the global type ID space.
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def b/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
index 38c73ccb7daf..e92e05810648 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
+++ b/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
@@ -58,5 +58,9 @@ TYPE_BIT_CODE(DependentSizedExtVector, DEPENDENT_SIZED_EXT_VECTOR, 46)
TYPE_BIT_CODE(DependentAddressSpace, DEPENDENT_ADDRESS_SPACE, 47)
TYPE_BIT_CODE(DependentVector, DEPENDENT_SIZED_VECTOR, 48)
TYPE_BIT_CODE(MacroQualified, MACRO_QUALIFIED, 49)
+TYPE_BIT_CODE(ExtInt, EXT_INT, 50)
+TYPE_BIT_CODE(DependentExtInt, DEPENDENT_EXT_INT, 51)
+TYPE_BIT_CODE(ConstantMatrix, CONSTANT_MATRIX, 52)
+TYPE_BIT_CODE(DependentSizedMatrix, DEPENDENT_SIZE_MATRIX, 53)
#undef TYPE_BIT_CODE
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
index c7732333d9ba..e2be957821b9 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
@@ -28,7 +28,7 @@ class CheckerRegistry;
#define GET_CHECKERS
#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
void register##CLASS(CheckerManager &mgr); \
- bool shouldRegister##CLASS(const LangOptions &LO);
+ bool shouldRegister##CLASS(const CheckerManager &mgr);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
#undef GET_CHECKERS
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
index 6625d79559f5..98d26aaa637d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
@@ -112,6 +112,8 @@ class Checker<string name = ""> {
list<CmdLineOption> CheckerOptions;
// This field is optional.
list<Checker> Dependencies;
+ // This field is optional.
+ list<Checker> WeakDependencies;
bits<2> Documentation;
Package ParentPackage;
bit Hidden = 0;
@@ -122,8 +124,13 @@ class CheckerOptions<list<CmdLineOption> opts> {
list<CmdLineOption> CheckerOptions = opts;
}
-/// Describes dependencies in between checkers. For example, InnerPointerChecker
-/// relies on information MallocBase gathers.
+/// Describes (strong) dependencies in between checkers. This is important for
+/// modeling checkers, for example, MallocBase depends on the proper modeling of
+/// string operations, so it depends on CStringBase. A checker may only be
+/// enabled if none of its dependencies (transitively) is disabled. Dependencies
+/// are always registered before the dependent checker, and their checker
+/// callbacks are also evaluated earlier.
+/// A checker may only depend on a purely modeling checker (one that emits no
+/// diagnostics).
/// Example:
/// def InnerPointerChecker : Checker<"InnerPointer">,
/// HelpText<"Check for inner pointers of C++ containers used after "
@@ -133,6 +140,24 @@ class Dependencies<list<Checker> Deps = []> {
list<Checker> Dependencies = Deps;
}
+/// Describes preferred registration and evaluation order in between checkers.
+/// Unlike strong dependencies, this expresses dependencies in between
+/// diagnostics, and *not* modeling. In the case of an unsatisfied (disabled)
+/// weak dependency, the dependent checker might still be registered. If the
+/// weak dependency is satisfied, it'll be registered, and its checker
+/// callbacks will be evaluated before the dependent checker. This can be used
+/// to ensure that a more specific warning would be displayed in place of a
+/// generic one, should multiple checkers detect the same bug. For example,
+/// non-null parameter bugs are detected by NonNullParamChecker due to the
+/// nonnull attribute, and StdLibraryFunctionsChecker as it models standard
+/// functions, and the former is the more specific one. While freeing a
+/// dangling pointer is a bug, if it is also a double free, we would like to
+/// recognize it as such first and foremost. This works best for fatal error
+/// node generation, otherwise both warnings may be present and in any order.
+class WeakDependencies<list<Checker> Deps = []> {
+ list<Checker> WeakDependencies = Deps;
+}
+
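A rough C++ sketch of the registration behaviour the two TableGen classes above describe, under the stated semantics only (CheckerInfo, shouldRegister, and appendRegistrationOrder are hypothetical names, not the actual CheckerRegistry code): a disabled strong dependency disables the dependent checker, while weak dependencies merely get ordered first when they happen to be enabled.

// Hypothetical sketch, not the real CheckerRegistry. Strong dependencies gate
// whether a checker may be enabled at all; weak dependencies only influence
// ordering when they are themselves enabled.
#include <string>
#include <vector>

struct CheckerInfo {
  std::string Name;
  bool Enabled = false;
  std::vector<const CheckerInfo *> Strong; // Dependencies<...>
  std::vector<const CheckerInfo *> Weak;   // WeakDependencies<...>
};

bool shouldRegister(const CheckerInfo &C) {
  if (!C.Enabled)
    return false;
  for (const CheckerInfo *Dep : C.Strong)
    if (!shouldRegister(*Dep)) // a disabled strong dependency disables us too
      return false;
  return true; // a disabled weak dependency does not
}

// Dependencies that will be registered are emitted before the dependent
// checker, so their callbacks run first (deduplication omitted for brevity).
void appendRegistrationOrder(const CheckerInfo &C,
                             std::vector<std::string> &Order) {
  if (!shouldRegister(C))
    return;
  for (const CheckerInfo *Dep : C.Strong)
    appendRegistrationOrder(*Dep, Order);
  for (const CheckerInfo *Dep : C.Weak)
    appendRegistrationOrder(*Dep, Order);
  Order.push_back(C.Name);
}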
/// Marks a checker or a package hidden. Hidden entries are meant for developers
/// only, and aren't exposed to end users.
class Hidden { bit Hidden = 1; }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
index d235273cda41..cbd925400328 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -71,6 +71,9 @@ def InsecureAPI : Package<"insecureAPI">, ParentPackage<Security>;
def SecurityAlpha : Package<"security">, ParentPackage<Alpha>;
def Taint : Package<"taint">, ParentPackage<SecurityAlpha>;
+def CERT : Package<"cert">, ParentPackage<SecurityAlpha>;
+def POS : Package<"pos">, ParentPackage<CERT>;
+
def Unix : Package<"unix">;
def UnixAlpha : Package<"unix">, ParentPackage<Alpha>;
def CString : Package<"cstring">, ParentPackage<Unix>;
@@ -99,6 +102,8 @@ def LLVMAlpha : Package<"llvm">, ParentPackage<Alpha>;
// any diagnostics. These checkers are always turned on; this package is
// intended for API modeling that is not controlled by the target triple.
def APIModeling : Package<"apiModeling">, Hidden;
+def APIModelingAlpha : Package<"apiModeling">, ParentPackage<Alpha>, Hidden;
+
def GoogleAPIModeling : Package<"google">, ParentPackage<APIModeling>, Hidden;
def LLVMAPIModeling : Package<"llvm">, ParentPackage<APIModeling>, Hidden;
@@ -109,6 +114,10 @@ def CloneDetectionAlpha : Package<"clone">, ParentPackage<Alpha>;
def NonDeterminismAlpha : Package<"nondeterminism">, ParentPackage<Alpha>;
def Fuchsia : Package<"fuchsia">;
+def FuchsiaAlpha : Package<"fuchsia">, ParentPackage<Alpha>;
+
+def WebKit : Package<"webkit">;
+def WebKitAlpha : Package<"webkit">, ParentPackage<Alpha>;
//===----------------------------------------------------------------------===//
// Core Checkers.
@@ -116,14 +125,71 @@ def Fuchsia : Package<"fuchsia">;
let ParentPackage = Core in {
-def DereferenceChecker : Checker<"NullDereference">,
- HelpText<"Check for dereferences of null pointers">,
- Documentation<HasDocumentation>;
+def CallAndMessageModeling : Checker<"CallAndMessageModeling">,
+ HelpText<"Responsible for essential modeling and assumptions after a "
+ "function/method call. For instance, if we can't reason about the "
+ "nullability of the implicit this parameter after a method call, "
+ "this checker conservatively assumes it to be non-null">,
+ Documentation<HasDocumentation>,
+ Hidden;
def CallAndMessageChecker : Checker<"CallAndMessage">,
HelpText<"Check for logical errors for function calls and Objective-C "
"message expressions (e.g., uninitialized arguments, null function "
"pointers)">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "FunctionPointer",
+ "Check whether a called function pointer is null or "
+ "undefined",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "ParameterCount",
+ "Check whether a function was called with the appropriate "
+ "number of arguments",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "CXXThisMethodCall",
+ "Check whether the implicit this parameter is null or "
+ "undefined upon a method call",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "CXXDeallocationArg",
+ "Check whether the argument of operator delete is undefined",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "ArgInitializedness",
+ "Check whether any of the pass-by-value parameters is "
+ "undefined",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "ArgPointeeInitializedness",
+ "Check whether the pointee of a pass-by-reference or "
+ "pass-by-pointer is undefined",
+ "false",
+ InAlpha>,
+ CmdLineOption<Boolean,
+ "NilReceiver",
+ "Check whether the reciever in the message expression is nil",
+ "true",
+ Released>,
+ CmdLineOption<Boolean,
+ "UndefReceiver",
+ "Check whether the reciever in the message expression is "
+ "undefined",
+ "true",
+ Released>,
+ ]>,
+ Documentation<HasDocumentation>,
+ Dependencies<[CallAndMessageModeling]>;
+
+def DereferenceChecker : Checker<"NullDereference">,
+ HelpText<"Check for dereferences of null pointers">,
Documentation<HasDocumentation>;
def NonNullParamChecker : Checker<"NonNullParamChecker">,
@@ -155,7 +221,8 @@ def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
def DynamicTypePropagation : Checker<"DynamicTypePropagation">,
HelpText<"Generate dynamic type information">,
- Documentation<NotDocumented>;
+ Documentation<NotDocumented>,
+ Hidden;
def NonnullGlobalConstantsChecker: Checker<"NonnilStringConstants">,
HelpText<"Assume that const string-like globals are non-null">,
@@ -205,13 +272,6 @@ def SizeofPointerChecker : Checker<"SizeofPtr">,
HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
Documentation<HasAlphaDocumentation>;
-def CallAndMessageUnInitRefArg : Checker<"CallAndMessageUnInitRefArg">,
- HelpText<"Check for logical errors for function calls and Objective-C "
- "message expressions (e.g., uninitialized arguments, null function "
- "pointers, and pointer to undefined variables)">,
- Dependencies<[CallAndMessageChecker]>,
- Documentation<HasAlphaDocumentation>;
-
def TestAfterDivZeroChecker : Checker<"TestAfterDivZero">,
HelpText<"Check for division by variable that is later compared against 0. "
"Either the comparison is useless or there is division by zero.">,
@@ -227,6 +287,16 @@ def StackAddrAsyncEscapeChecker : Checker<"StackAddressAsyncEscape">,
Dependencies<[StackAddrEscapeBase]>,
Documentation<HasAlphaDocumentation>;
+def PthreadLockBase : Checker<"PthreadLockBase">,
+ HelpText<"Helper registering multiple checks.">,
+ Documentation<NotDocumented>,
+ Hidden;
+
+def C11LockChecker : Checker<"C11Lock">,
+ HelpText<"Simple lock -> unlock checker">,
+ Dependencies<[PthreadLockBase]>,
+ Documentation<HasAlphaDocumentation>;
+
} // end "alpha.core"
//===----------------------------------------------------------------------===//
@@ -279,7 +349,24 @@ let ParentPackage = APIModeling in {
def StdCLibraryFunctionsChecker : Checker<"StdCLibraryFunctions">,
HelpText<"Improve modeling of the C standard library functions">,
- Documentation<NotDocumented>;
+ Dependencies<[CallAndMessageModeling]>,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "DisplayLoadedSummaries",
+ "If set to true, the checker displays the found summaries "
+ "for the given translation unit.",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "ModelPOSIX",
+ "If set to true, the checker models functions from the "
+ "POSIX standard.",
+ "false",
+ InAlpha>
+ ]>,
+ Documentation<NotDocumented>,
+ Hidden;
def TrustNonnullChecker : Checker<"TrustNonnull">,
HelpText<"Trust that returns from framework methods annotated with _Nonnull "
@@ -431,6 +518,7 @@ def ChrootChecker : Checker<"Chroot">,
def PthreadLockChecker : Checker<"PthreadLock">,
HelpText<"Simple lock -> unlock checker">,
+ Dependencies<[PthreadLockBase]>,
Documentation<HasAlphaDocumentation>;
def StreamChecker : Checker<"Stream">,
@@ -445,6 +533,14 @@ def BlockInCriticalSectionChecker : Checker<"BlockInCriticalSection">,
HelpText<"Check for calls to blocking functions inside a critical section">,
Documentation<HasAlphaDocumentation>;
+def StdCLibraryFunctionArgsChecker : Checker<"StdCLibraryFunctionArgs">,
+ HelpText<"Check constraints of arguments of C standard library functions, "
+ "such as whether the parameter of isalpha is in the range [0, 255] "
+ "or is EOF.">,
+ Dependencies<[StdCLibraryFunctionsChecker]>,
+ WeakDependencies<[NonNullParamChecker]>,
+ Documentation<NotDocumented>;
+
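As an illustration of the argument constraints this checker's help text mentions, the snippet below passes a plain char to isalpha; on targets where char is signed the value can be negative and thus fall outside the [0, 255] / EOF range the function requires. This is only a sketch of the bug pattern, not analyzer output:

// Sketch of the bug pattern only. isalpha() requires a value representable as
// unsigned char or EOF; a plain char may be signed and hold a negative value.
#include <cctype>

bool startsWithLetter(const char *S) {
  return std::isalpha(*S); // risky if *S is negative
  // Safer: return std::isalpha(static_cast<unsigned char>(*S)) != 0;
}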
} // end "alpha.unix"
//===----------------------------------------------------------------------===//
@@ -467,7 +563,13 @@ def NewDeleteChecker : Checker<"NewDelete">,
def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
- Dependencies<[NewDeleteChecker]>,
+ Dependencies<[DynamicMemoryModeling]>,
+ Documentation<HasDocumentation>;
+
+def PlacementNewChecker : Checker<"PlacementNew">,
+ HelpText<"Check if default placement new is provided with pointers to "
+ "sufficient storage capacity">,
+ Dependencies<[DynamicMemoryModeling]>,
Documentation<HasDocumentation>;
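For context, a minimal example of the defect PlacementNew is after: default placement new into a buffer that is too small for the constructed object. Illustrative only; the names are made up.

// Illustration of the defect: the buffer handed to default placement new is
// smaller than the object being constructed.
#include <cstdint>
#include <new>

struct Wide { std::uint64_t A, B; }; // needs 16 bytes

void build() {
  alignas(Wide) unsigned char Buf[sizeof(std::uint64_t)]; // only 8 bytes
  Wide *W = new (Buf) Wide{}; // insufficient storage for 'Wide'
  W->~Wide();
}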
def CXXSelfAssignmentChecker : Checker<"SelfAssignment">,
@@ -475,9 +577,17 @@ def CXXSelfAssignmentChecker : Checker<"SelfAssignment">,
Documentation<NotDocumented>,
Hidden;
-def SmartPtrModeling: Checker<"SmartPtr">,
+def SmartPtrModeling: Checker<"SmartPtrModeling">,
HelpText<"Model behavior of C++ smart pointers">,
Documentation<NotDocumented>,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "ModelSmartPtrDereference",
+ "Enable modeling for SmartPtr null dereferences",
+ "false",
+ InAlpha,
+ Hide>,
+ ]>,
Hidden;
def MoveChecker: Checker<"Move">,
@@ -585,6 +695,11 @@ def VirtualCallChecker : Checker<"VirtualCall">,
let ParentPackage = CplusplusAlpha in {
+def ContainerModeling : Checker<"ContainerModeling">,
+ HelpText<"Models C++ containers">,
+ Documentation<NotDocumented>,
+ Hidden;
+
def DeleteWithNonVirtualDtorChecker : Checker<"DeleteWithNonVirtualDtor">,
HelpText<"Reports destructions of polymorphic objects with a non-virtual "
"destructor in their base class">,
@@ -596,9 +711,23 @@ def EnumCastOutOfRangeChecker : Checker<"EnumCastOutOfRange">,
def IteratorModeling : Checker<"IteratorModeling">,
HelpText<"Models iterators of C++ containers">,
+ Dependencies<[ContainerModeling]>,
Documentation<NotDocumented>,
Hidden;
+def STLAlgorithmModeling : Checker<"STLAlgorithmModeling">,
+ HelpText<"Models the algorithm library of the C++ STL.">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "AggressiveStdFindModeling",
+ "Enables exploration of the failure branch in std::find-like "
+ "functions.",
+ "false",
+ Released>
+ ]>,
+ Dependencies<[ContainerModeling]>,
+ Documentation<NotDocumented>;
+
def InvalidatedIteratorChecker : Checker<"InvalidatedIterator">,
HelpText<"Check for use of invalidated iterators">,
Dependencies<[IteratorModeling]>,
@@ -615,11 +744,10 @@ def MismatchedIteratorChecker : Checker<"MismatchedIterator">,
Dependencies<[IteratorModeling]>,
Documentation<HasAlphaDocumentation>;
-def PlacementNewChecker : Checker<"PlacementNew">,
- HelpText<"Check if default placement new is provided with pointers to "
- "sufficient storage capacity">,
- Dependencies<[NewDeleteChecker]>,
- Documentation<HasDocumentation>;
+def SmartPtrChecker: Checker<"SmartPtr">,
+ HelpText<"Find the dereference of null SmrtPtr">,
+ Dependencies<[SmartPtrModeling]>,
+ Documentation<HasAlphaDocumentation>;
} // end: "alpha.cplusplus"
@@ -798,6 +926,15 @@ def FloatLoopCounter : Checker<"FloatLoopCounter">,
} // end "security"
+let ParentPackage = POS in {
+
+ def PutenvWithAuto : Checker<"34c">,
+ HelpText<"Finds calls to the 'putenv' function which pass a pointer to "
+ "an automatic variable as the argument.">,
+ Documentation<HasDocumentation>;
+
+} // end "alpha.cert.pos"
+
let ParentPackage = SecurityAlpha in {
def ArrayBoundChecker : Checker<"ArrayBound">,
@@ -978,15 +1115,6 @@ def RetainCountChecker : Checker<"RetainCount">,
HelpText<"Check for leaks and improper reference count management">,
CheckerOptions<[
CmdLineOption<Boolean,
- "CheckOSObject",
- "Find violations of retain-release rules applied to XNU "
- "OSObject instances. By default, the checker only checks "
- "retain-release rules for Objective-C NSObject instances "
- "and CoreFoundation objects.",
- "true",
- InAlpha,
- Hide>,
- CmdLineOption<Boolean,
"TrackNSCFStartParam",
"Check not only that the code follows retain-release rules "
"with respect to objects it allocates or borrows from "
@@ -1059,13 +1187,15 @@ def MissingInvalidationMethod : Checker<"MissingInvalidationMethod">,
def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
HelpText<"Check for direct assignments to instance variables">,
- Documentation<HasAlphaDocumentation>;
-
-def DirectIvarAssignmentForAnnotatedFunctions :
- Checker<"DirectIvarAssignmentForAnnotatedFunctions">,
- HelpText<"Check for direct assignments to instance variables in the methods "
- "annotated with objc_no_direct_instance_variable_assignment">,
- Dependencies<[DirectIvarAssignment]>,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "AnnotatedFunctions",
+ "Check for direct assignments to instance variables in the "
+ "methods annotated with "
+ "objc_no_direct_instance_variable_assignment",
+ "false",
+ InAlpha>
+ ]>,
Documentation<HasAlphaDocumentation>;
} // end "alpha.osx.cocoa"
@@ -1226,6 +1356,30 @@ def AnalysisOrderChecker : Checker<"AnalysisOrder">,
Released,
Hide>,
CmdLineOption<Boolean,
+ "PreStmtCXXDeleteExpr",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "PostStmtCXXDeleteExpr",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "PreStmtCXXConstructExpr",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "PostStmtCXXConstructExpr",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
"PreStmtOffsetOfExpr",
"",
"false",
@@ -1238,6 +1392,12 @@ def AnalysisOrderChecker : Checker<"AnalysisOrder">,
Released,
Hide>,
CmdLineOption<Boolean,
+ "EvalCall",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
"PreCall",
"",
"false",
@@ -1256,6 +1416,12 @@ def AnalysisOrderChecker : Checker<"AnalysisOrder">,
Released,
Hide>,
CmdLineOption<Boolean,
+ "EndAnalysis",
+ "",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
"NewAllocator",
"",
"false",
@@ -1350,6 +1516,16 @@ def TaintTesterChecker : Checker<"TaintTest">,
HelpText<"Mark tainted symbols as such.">,
Documentation<NotDocumented>;
+// This checker *technically* depends on StreamChecker, but we don't allow
+// dependency checkers to emit diagnostics, and a debug checker isn't worth
+// the chore needed to create a modeling portion on its own. Since this checker
+// is for development purposes only anyway, make sure that StreamChecker is
+// also enabled, at least for the time being.
+def StreamTesterChecker : Checker<"StreamTester">,
+ HelpText<"Add test functions to StreamChecker for test and debugging "
+ "purposes.">,
+ Documentation<NotDocumented>;
+
def ExprInspectionChecker : Checker<"ExprInspection">,
HelpText<"Check the analyzer's understanding of expressions">,
Documentation<NotDocumented>;
@@ -1362,9 +1538,20 @@ def ReportStmts : Checker<"ReportStmts">,
HelpText<"Emits a warning for every statement.">,
Documentation<NotDocumented>;
+def DebugContainerModeling : Checker<"DebugContainerModeling">,
+ HelpText<"Check the analyzer's understanding of C++ containers">,
+ Dependencies<[ContainerModeling]>,
+ Documentation<NotDocumented>;
+
def DebugIteratorModeling : Checker<"DebugIteratorModeling">,
HelpText<"Check the analyzer's understanding of C++ iterators">,
- Dependencies<[IteratorModeling]>,
+ Dependencies<[DebugContainerModeling, IteratorModeling]>,
+ Documentation<NotDocumented>;
+
+def StdCLibraryFunctionsTesterChecker : Checker<"StdCLibraryFunctionsTester">,
+ HelpText<"Add test functions to the summary map, so testing of individual "
+ "summary constituents becomes possible.">,
+ Dependencies<[StdCLibraryFunctionsChecker]>,
Documentation<NotDocumented>;
} // end "debug"
@@ -1442,5 +1629,37 @@ def FuchsiaHandleChecker : Checker<"HandleChecker">,
HelpText<"A Checker that detect leaks related to Fuchsia handles">,
Documentation<HasDocumentation>;
+}
+
+let ParentPackage = FuchsiaAlpha in {
+
+def FuchsiaLockChecker : Checker<"Lock">,
+ HelpText<"Check for the correct usage of locking APIs.">,
+ Dependencies<[PthreadLockBase]>,
+ Documentation<HasDocumentation>;
+
} // end fuchsia
+//===----------------------------------------------------------------------===//
+// WebKit checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = WebKit in {
+
+def RefCntblBaseVirtualDtorChecker : Checker<"RefCntblBaseVirtualDtor">,
+ HelpText<"Check for any ref-countable base class having virtual destructor.">,
+ Documentation<HasDocumentation>;
+
+def NoUncountedMemberChecker : Checker<"NoUncountedMemberChecker">,
+ HelpText<"Check for no uncounted member variables.">,
+ Documentation<HasDocumentation>;
+
+} // end webkit
+
+let ParentPackage = WebKitAlpha in {
+
+def UncountedCallArgsChecker : Checker<"UncountedCallArgsChecker">,
+ HelpText<"Check uncounted call arguments.">,
+ Documentation<HasDocumentation>;
+
+} // end alpha.webkit
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
index f7bd5b58aab5..0f33909daec0 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
@@ -18,6 +18,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
+#include "llvm/ADT/StringExtras.h"
namespace clang {
@@ -179,7 +180,7 @@ public:
return OS.str();
}
- std::string VisitVarRegion(const VarRegion *R) {
+ std::string VisitNonParamVarRegion(const NonParamVarRegion *R) {
const VarDecl *VD = R->getDecl();
std::string Name = VD->getQualifiedNameAsString();
if (isa<ParmVarDecl>(VD))
@@ -216,6 +217,39 @@ public:
"' inside " + Visit(R->getSuperRegion());
}
+ std::string VisitParamVarRegion(const ParamVarRegion *R) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+
+ const ParmVarDecl *PVD = R->getDecl();
+ std::string Name = PVD->getQualifiedNameAsString();
+ if (!Name.empty()) {
+ OS << "parameter '" << Name << "'";
+ return std::string(OS.str());
+ }
+
+ unsigned Index = R->getIndex() + 1;
+ OS << Index << llvm::getOrdinalSuffix(Index) << " parameter of ";
+ const Decl *Parent = R->getStackFrame()->getDecl();
+ if (const auto *FD = dyn_cast<FunctionDecl>(Parent))
+ OS << "function '" << FD->getQualifiedNameAsString() << "()'";
+ else if (const auto *CD = dyn_cast<CXXConstructorDecl>(Parent))
+ OS << "C++ constructor '" << CD->getQualifiedNameAsString() << "()'";
+ else if (const auto *MD = dyn_cast<ObjCMethodDecl>(Parent)) {
+ if (MD->isClassMethod())
+ OS << "Objective-C method '+" << MD->getQualifiedNameAsString() << "'";
+ else
+ OS << "Objective-C method '-" << MD->getQualifiedNameAsString() << "'";
+ } else if (isa<BlockDecl>(Parent)) {
+ if (cast<BlockDecl>(Parent)->isConversionFromLambda())
+ OS << "lambda";
+ else
+ OS << "block";
+ }
+
+ return std::string(OS.str());
+ }
+
std::string VisitSVal(SVal V) {
std::string Str;
llvm::raw_string_ostream OS(Str);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
index 377451576148..c4e5f5be6fd7 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
@@ -14,41 +14,80 @@
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN)
#endif
-ANALYSIS_STORE(RegionStore, "region", "Use region-based analyzer store", CreateRegionStoreManager)
+ANALYSIS_STORE(RegionStore, "region", "Use region-based analyzer store",
+ CreateRegionStoreManager)
#ifndef ANALYSIS_CONSTRAINTS
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN)
#endif
-ANALYSIS_CONSTRAINTS(RangeConstraints, "range", "Use constraint tracking of concrete value ranges", CreateRangeConstraintManager)
-ANALYSIS_CONSTRAINTS(Z3Constraints, "z3", "Use Z3 contraint solver", CreateZ3ConstraintManager)
+ANALYSIS_CONSTRAINTS(RangeConstraints, "range",
+ "Use constraint tracking of concrete value ranges",
+ CreateRangeConstraintManager)
+
+ANALYSIS_CONSTRAINTS(Z3Constraints, "z3", "Use Z3 constraint solver",
+ CreateZ3ConstraintManager)
#ifndef ANALYSIS_DIAGNOSTICS
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN)
#endif
-ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML", createHTMLDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(HTML_SINGLE_FILE, "html-single-file", "Output analysis results using HTML (not allowing for multi-file bugs)", createHTMLSingleFileDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists", createPlistDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(PLIST_MULTI_FILE, "plist-multi-file", "Output analysis results using Plists (allowing for multi-file bugs)", createPlistMultiFileDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html", "Output analysis results using HTML wrapped with Plists", createPlistHTMLDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(SARIF, "sarif", "Output analysis results in a SARIF file", createSarifDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results", createTextPathDiagnosticConsumer)
+ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML",
+ createHTMLDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(
+ HTML_SINGLE_FILE, "html-single-file",
+ "Output analysis results using HTML (not allowing for multi-file bugs)",
+ createHTMLSingleFileDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists",
+ createPlistDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(
+ PLIST_MULTI_FILE, "plist-multi-file",
+ "Output analysis results using Plists (allowing for multi-file bugs)",
+ createPlistMultiFileDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html",
+ "Output analysis results using HTML wrapped with Plists",
+ createPlistHTMLDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(SARIF, "sarif", "Output analysis results in a SARIF file",
+ createSarifDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results to stderr",
+ createTextPathDiagnosticConsumer)
+
+ANALYSIS_DIAGNOSTICS(TEXT_MINIMAL, "text-minimal",
+ "Emits minimal diagnostics to stderr, stating only the "
+ "warning message and the associated notes. Usually "
+ "used in addition to other analysis types",
+ createTextMinimalPathDiagnosticConsumer)
#ifndef ANALYSIS_PURGE
#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC)
#endif
-ANALYSIS_PURGE(PurgeStmt, "statement", "Purge symbols, bindings, and constraints before every statement")
-ANALYSIS_PURGE(PurgeBlock, "block", "Purge symbols, bindings, and constraints before every basic block")
-ANALYSIS_PURGE(PurgeNone, "none", "Do not purge symbols, bindings, or constraints")
+ANALYSIS_PURGE(
+ PurgeStmt, "statement",
+ "Purge symbols, bindings, and constraints before every statement")
+
+ANALYSIS_PURGE(
+ PurgeBlock, "block",
+ "Purge symbols, bindings, and constraints before every basic block")
+
+ANALYSIS_PURGE(PurgeNone, "none",
+ "Do not purge symbols, bindings, or constraints")
#ifndef ANALYSIS_INLINING_MODE
#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC)
#endif
-ANALYSIS_INLINING_MODE(All, "all", "Analyze all functions as top level")
-ANALYSIS_INLINING_MODE(NoRedundancy, "noredundancy", "Do not analyze a function which has been previously inlined")
+ANALYSIS_INLINING_MODE(All, "all", "Analyze all functions as top level")
+
+ANALYSIS_INLINING_MODE(
+ NoRedundancy, "noredundancy",
+ "Do not analyze a function which has been previously inlined")
#undef ANALYSIS_STORE
#undef ANALYSIS_CONSTRAINTS
@@ -56,4 +95,3 @@ ANALYSIS_INLINING_MODE(NoRedundancy, "noredundancy", "Do not analyze a function
#undef ANALYSIS_PURGE
#undef ANALYSIS_INLINING_MODE
#undef ANALYSIS_IPA
-
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
index 00febf688195..f0359d2dbb3c 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
@@ -306,10 +306,14 @@ ANALYZER_OPTION(bool, ShouldTrackConditionsDebug, "track-conditions-debug",
"Whether to place an event at each tracked condition.",
false)
-ANALYZER_OPTION(bool, ShouldEmitFixItHintsAsRemarks, "fixits-as-remarks",
- "Emit fix-it hints as remarks for testing purposes",
+ANALYZER_OPTION(bool, ShouldApplyFixIts, "apply-fixits",
+ "Apply the fix-it hints to the files",
false)
+ANALYZER_OPTION(bool, ShouldDisplayCheckerNameForText, "display-checker-name",
+ "Display the checker name for textual outputs",
+ true)
+
//===----------------------------------------------------------------------===//
// Unsigned analyzer options.
//===----------------------------------------------------------------------===//
@@ -317,10 +321,17 @@ ANALYZER_OPTION(bool, ShouldEmitFixItHintsAsRemarks, "fixits-as-remarks",
ANALYZER_OPTION(unsigned, CTUImportThreshold, "ctu-import-threshold",
"The maximal amount of translation units that is considered "
"for import when inlining functions during CTU analysis. "
- "Lowering this threshold can alleviate the memory burder of "
+ "Lowering this threshold can alleviate the memory burden of "
"analysis with many interdependent definitions located in "
- "various translation units.",
- 100u)
+ "various translation units. This is valid only for non C++ "
+ "source files.",
+ 24u)
+
+ANALYZER_OPTION(unsigned, CTUImportCppThreshold, "ctu-import-cpp-threshold",
+ "The maximal amount of translation units that is considered "
+ "for import when inlining functions during CTU analysis of C++ "
+ "source files.",
+ 8u)
ANALYZER_OPTION(
unsigned, AlwaysInlineSize, "ipa-always-inline-size",
@@ -374,10 +385,25 @@ ANALYZER_OPTION(StringRef, CTUDir, "ctu-dir",
"The directory containing the CTU related files.", "")
ANALYZER_OPTION(StringRef, CTUIndexName, "ctu-index-name",
- "the name of the file containing the CTU index of definitions.",
+ "The name of the file containing the CTU index of definitions. "
+ "The index file maps USR-names to identifiers. An identifier "
+ "can end with an '.ast' suffix, indicating the indentifier is "
+ "a path to a pch-dump. Otherwise the identifier is regarded as "
+ "path to a source file which is parsed on-demand. Relative "
+ "paths are prefixed with ctu-dir, absolute paths are used "
+ "unmodified during lookup.",
"externalDefMap.txt")
ANALYZER_OPTION(
+ StringRef, CTUInvocationList, "ctu-invocation-list",
+ "The path to the YAML format file containing a mapping from source file "
+ "paths to command-line invocations represented as a list of arguments. "
+ "This invocation is used produce the source-file's AST in case on-demand "
+ "loading is performed. Example file-content: "
+ "{/main.cpp: [clang++, /main.cpp], other.cpp: [clang++, /other.cpp]}",
+ "invocations.yaml")
+
+ANALYZER_OPTION(
StringRef, ModelPath, "model-path",
"The analyzer can inline an alternative implementation written in C at the "
"call site if the called function's body is not available. This is a path "
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
index ce16095e10c0..d2df24a6e21b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
@@ -292,9 +292,7 @@ public:
};
bool isUnknownAnalyzerConfig(StringRef Name) const {
-
- assert(std::is_sorted(AnalyzerConfigCmdFlags.begin(),
- AnalyzerConfigCmdFlags.end()));
+ assert(llvm::is_sorted(AnalyzerConfigCmdFlags));
return !std::binary_search(AnalyzerConfigCmdFlags.begin(),
AnalyzerConfigCmdFlags.end(), Name);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 69593e2b6c93..27bc0dda1f1c 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -17,12 +17,14 @@
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
@@ -134,7 +136,7 @@ protected:
SmallVector<FixItHint, 4> Fixits;
BugReport(Kind kind, const BugType &bt, StringRef desc)
- : K(kind), BT(bt), Description(desc) {}
+ : BugReport(kind, bt, "", desc) {}
BugReport(Kind K, const BugType &BT, StringRef ShortDescription,
StringRef Description)
@@ -368,16 +370,13 @@ protected:
public:
PathSensitiveBugReport(const BugType &bt, StringRef desc,
const ExplodedNode *errorNode)
- : BugReport(Kind::PathSensitive, bt, desc), ErrorNode(errorNode),
- ErrorNodeRange(getStmt() ? getStmt()->getSourceRange()
- : SourceRange()) {}
+ : PathSensitiveBugReport(bt, desc, desc, errorNode) {}
PathSensitiveBugReport(const BugType &bt, StringRef shortDesc, StringRef desc,
const ExplodedNode *errorNode)
- : BugReport(Kind::PathSensitive, bt, shortDesc, desc),
- ErrorNode(errorNode),
- ErrorNodeRange(getStmt() ? getStmt()->getSourceRange()
- : SourceRange()) {}
+ : PathSensitiveBugReport(bt, shortDesc, desc, errorNode,
+ /*LocationToUnique*/ {},
+ /*DeclToUnique*/ nullptr) {}
/// Create a PathSensitiveBugReport with a custom uniqueing location.
///
@@ -390,11 +389,13 @@ public:
const ExplodedNode *errorNode,
PathDiagnosticLocation LocationToUnique,
const Decl *DeclToUnique)
- : BugReport(Kind::PathSensitive, bt, desc), ErrorNode(errorNode),
- ErrorNodeRange(getStmt() ? getStmt()->getSourceRange() : SourceRange()),
- UniqueingLocation(LocationToUnique), UniqueingDecl(DeclToUnique) {
- assert(errorNode);
- }
+ : PathSensitiveBugReport(bt, desc, desc, errorNode, LocationToUnique,
+ DeclToUnique) {}
+
+ PathSensitiveBugReport(const BugType &bt, StringRef shortDesc, StringRef desc,
+ const ExplodedNode *errorNode,
+ PathDiagnosticLocation LocationToUnique,
+ const Decl *DeclToUnique);
static bool classof(const BugReport *R) {
return R->getKind() == Kind::PathSensitive;
@@ -566,6 +567,7 @@ public:
virtual ASTContext &getASTContext() = 0;
virtual SourceManager &getSourceManager() = 0;
virtual AnalyzerOptions &getAnalyzerOptions() = 0;
+ virtual Preprocessor &getPreprocessor() = 0;
};
/// BugReporter is a utility class for generating PathDiagnostics for analysis.
@@ -587,7 +589,7 @@ private:
std::vector<BugReportEquivClass *> EQClassesVector;
public:
- BugReporter(BugReporterData &d) : D(d) {}
+ BugReporter(BugReporterData &d);
virtual ~BugReporter();
/// Generate and flush diagnostics for all bug reports.
@@ -608,6 +610,8 @@ public:
const AnalyzerOptions &getAnalyzerOptions() { return D.getAnalyzerOptions(); }
+ Preprocessor &getPreprocessor() { return D.getPreprocessor(); }
+
/// Add the given report to the set of reports tracked by BugReporter.
///
/// The reports are usually generated by the checkers. Further, they are
@@ -628,7 +632,7 @@ public:
ArrayRef<FixItHint> Fixits = None);
private:
- llvm::StringMap<BugType *> StrBugTypes;
+ llvm::StringMap<std::unique_ptr<BugType>> StrBugTypes;
/// Returns a BugType that is associated with the given name and
/// category.
@@ -722,7 +726,8 @@ public:
class NoteTag : public ProgramPointTag {
public:
using Callback =
- std::function<std::string(BugReporterContext &, BugReport &)>;
+ std::function<std::string(BugReporterContext &,
+ PathSensitiveBugReport &)>;
private:
static int Kind;
@@ -739,7 +744,7 @@ public:
}
Optional<std::string> generateMessage(BugReporterContext &BRC,
- BugReport &R) const {
+ PathSensitiveBugReport &R) const {
std::string Msg = Cb(BRC, R);
if (Msg.empty())
return None;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
index de0ee5de81b5..365b1ff1bfe3 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
@@ -386,6 +386,8 @@ public:
void finalizeVisitor(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
PathSensitiveBugReport &BR) override;
+ void addConstraints(const ExplodedNode *N,
+ bool OverwriteConstraintsOnExistingSyms);
};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
index 237053df7e44..49ab25eca2dd 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -78,13 +78,16 @@ public:
const char *description)
: BugType(checker, name, categories::LogicError), desc(description) {}
+ BuiltinBug(class CheckerNameRef checker, const char *name)
+ : BugType(checker, name, categories::LogicError), desc(name) {}
+
BuiltinBug(const CheckerBase *checker, const char *name)
: BugType(checker, name, categories::LogicError), desc(name) {}
StringRef getDescription() const { return desc; }
};
-} // end ento namespace
+} // namespace ento
} // end clang namespace
#endif
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
index 22c1a7dd98cc..637b89fd9036 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
@@ -11,16 +11,16 @@
// Common strings used for the "category" of many static analyzer issues.
namespace clang {
- namespace ento {
- namespace categories {
- extern const char * const CoreFoundationObjectiveC;
- extern const char * const LogicError;
- extern const char * const MemoryRefCount;
- extern const char * const MemoryError;
- extern const char * const UnixAPI;
- extern const char * const CXXObjectLifecycle;
- }
- }
-}
+namespace ento {
+namespace categories {
+extern const char *const CoreFoundationObjectiveC;
+extern const char *const LogicError;
+extern const char *const MemoryRefCount;
+extern const char *const MemoryError;
+extern const char *const UnixAPI;
+extern const char *const CXXObjectLifecycle;
+extern const char *const SecurityError;
+} // namespace categories
+} // namespace ento
+} // namespace clang
#endif
-
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 0c7acdbc3a97..fdba49664615 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -285,9 +285,9 @@ public:
class NewAllocator {
template <typename CHECKER>
- static void _checkNewAllocator(void *checker, const CXXNewExpr *NE,
- SVal Target, CheckerContext &C) {
- ((const CHECKER *)checker)->checkNewAllocator(NE, Target, C);
+ static void _checkNewAllocator(void *checker, const CXXAllocatorCall &Call,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkNewAllocator(Call, C);
}
public:
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index 246ff8f90d35..d2f71baa56a4 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_STATICANALYZER_CORE_CHECKERMANAGER_H
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
@@ -36,15 +37,18 @@ class TranslationUnitDecl;
namespace ento {
class AnalysisManager;
+class CXXAllocatorCall;
class BugReporter;
class CallEvent;
class CheckerBase;
class CheckerContext;
class CheckerRegistry;
+struct CheckerRegistryData;
class ExplodedGraph;
class ExplodedNode;
class ExplodedNodeSet;
class ExprEngine;
+struct EvalCallOptions;
class MemRegion;
struct NodeBuilderContext;
class ObjCMethodCall;
@@ -121,14 +125,38 @@ enum class ObjCMessageVisitKind {
};
class CheckerManager {
- ASTContext &Context;
+ ASTContext *Context = nullptr;
const LangOptions LangOpts;
- AnalyzerOptions &AOptions;
+ const AnalyzerOptions &AOptions;
+ const Preprocessor *PP = nullptr;
CheckerNameRef CurrentCheckerName;
+ DiagnosticsEngine &Diags;
+ std::unique_ptr<CheckerRegistryData> RegistryData;
public:
- CheckerManager(ASTContext &Context, AnalyzerOptions &AOptions)
- : Context(Context), LangOpts(Context.getLangOpts()), AOptions(AOptions) {}
+ // These constructors are defined in the Frontend library, because
+  // CheckerRegistry, a crucial component of the initialization, is in there.
+ // CheckerRegistry cannot be moved to the Core library, because the checker
+ // registration functions are defined in the Checkers library, and the library
+ // dependencies look like this: Core -> Checkers -> Frontend.
+
+ CheckerManager(
+ ASTContext &Context, AnalyzerOptions &AOptions, const Preprocessor &PP,
+ ArrayRef<std::string> plugins,
+ ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns);
+
+ /// Constructs a CheckerManager that ignores all non TblGen-generated
+ /// checkers. Useful for unit testing, unless the checker infrastructure
+ /// itself is tested.
+ CheckerManager(ASTContext &Context, AnalyzerOptions &AOptions,
+ const Preprocessor &PP)
+ : CheckerManager(Context, AOptions, PP, {}, {}) {}
+
+ /// Constructs a CheckerManager without requiring an AST. No checker
+ /// registration will take place. Only useful when one needs to print the
+  /// help flags through CheckerRegistryData, and the AST is unavailable.
+ CheckerManager(AnalyzerOptions &AOptions, const LangOptions &LangOpts,
+ DiagnosticsEngine &Diags, ArrayRef<std::string> plugins);
~CheckerManager();
@@ -140,14 +168,25 @@ public:
void finishedCheckerRegistration();
const LangOptions &getLangOpts() const { return LangOpts; }
- AnalyzerOptions &getAnalyzerOptions() { return AOptions; }
- ASTContext &getASTContext() { return Context; }
+ const AnalyzerOptions &getAnalyzerOptions() const { return AOptions; }
+ const Preprocessor &getPreprocessor() const {
+ assert(PP);
+ return *PP;
+ }
+ const CheckerRegistryData &getCheckerRegistryData() const {
+ return *RegistryData;
+ }
+ DiagnosticsEngine &getDiagnostics() const { return Diags; }
+ ASTContext &getASTContext() const {
+ assert(Context);
+ return *Context;
+ }
/// Emits an error through a DiagnosticsEngine about an invalid user supplied
/// checker option value.
void reportInvalidCheckerOptionValue(const CheckerBase *C,
StringRef OptionName,
- StringRef ExpectedValueDesc);
+ StringRef ExpectedValueDesc) const;
using CheckerRef = CheckerBase *;
using CheckerTag = const void *;
@@ -327,11 +366,9 @@ public:
ExprEngine &Eng);
/// Run checkers between C++ operator new and constructor calls.
- void runCheckersForNewAllocator(const CXXNewExpr *NE, SVal Target,
- ExplodedNodeSet &Dst,
- ExplodedNode *Pred,
- ExprEngine &Eng,
- bool wasInlined = false);
+ void runCheckersForNewAllocator(const CXXAllocatorCall &Call,
+ ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ ExprEngine &Eng, bool wasInlined = false);
/// Run checkers for live symbols.
///
@@ -400,9 +437,9 @@ public:
/// Run checkers for evaluating a call.
///
/// Warning: Currently, the CallEvent MUST come from a CallExpr!
- void runCheckersForEvalCall(ExplodedNodeSet &Dst,
- const ExplodedNodeSet &Src,
- const CallEvent &CE, ExprEngine &Eng);
+ void runCheckersForEvalCall(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ const CallEvent &CE, ExprEngine &Eng,
+ const EvalCallOptions &CallOpts);
/// Run checkers for the entire Translation Unit.
void runCheckersOnEndOfTranslationUnit(const TranslationUnitDecl *TU,
@@ -472,7 +509,7 @@ public:
CheckerFn<void (const Stmt *, CheckerContext &)>;
using CheckNewAllocatorFunc =
- CheckerFn<void (const CXXNewExpr *, SVal, CheckerContext &)>;
+ CheckerFn<void(const CXXAllocatorCall &Call, CheckerContext &)>;
using CheckDeadSymbolsFunc =
CheckerFn<void (SymbolReaper &, CheckerContext &)>;
@@ -620,7 +657,7 @@ private:
/// Returns the checkers that have registered for callbacks of the
/// given \p Kind.
const std::vector<CheckObjCMessageFunc> &
- getObjCMessageCheckers(ObjCMessageVisitKind Kind);
+ getObjCMessageCheckers(ObjCMessageVisitKind Kind) const;
std::vector<CheckObjCMessageFunc> PreObjCMessageCheckers;
std::vector<CheckObjCMessageFunc> PostObjCMessageCheckers;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerRegistryData.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerRegistryData.h
new file mode 100644
index 000000000000..43248d8e6bb8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerRegistryData.h
@@ -0,0 +1,226 @@
+//===- CheckerRegistryData.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the data structures to which the TableGen file Checkers.td
+// maps to, as well as what was parsed from the specific invocation (whether
+// a checker/package is enabled, their options values, etc).
+//
+// The parsing of the invocation is done by CheckerRegistry, which is found in
+// the Frontend library. This allows the Core and Checkers libraries to utilize
+// this information, such as enforcing rules on checker dependency bug emission,
+// ensuring all checker options were queried, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRYDATA_H
+#define LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRYDATA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class AnalyzerOptions;
+
+namespace ento {
+
+class CheckerManager;
+
+/// Initialization functions perform any necessary setup for a checker.
+/// They should include a call to CheckerManager::registerChecker.
+using RegisterCheckerFn = void (*)(CheckerManager &);
+using ShouldRegisterFunction = bool (*)(const CheckerManager &);
+
+/// Specifies a command line option. It may either belong to a checker or a
+/// package.
+struct CmdLineOption {
+ StringRef OptionType;
+ StringRef OptionName;
+ StringRef DefaultValStr;
+ StringRef Description;
+ StringRef DevelopmentStatus;
+ bool IsHidden;
+
+ CmdLineOption(StringRef OptionType, StringRef OptionName,
+ StringRef DefaultValStr, StringRef Description,
+ StringRef DevelopmentStatus, bool IsHidden)
+ : OptionType(OptionType), OptionName(OptionName),
+ DefaultValStr(DefaultValStr), Description(Description),
+ DevelopmentStatus(DevelopmentStatus), IsHidden(IsHidden) {
+
+ assert((OptionType == "bool" || OptionType == "string" ||
+ OptionType == "int") &&
+ "Unknown command line option type!");
+
+ assert((OptionType != "bool" ||
+ (DefaultValStr == "true" || DefaultValStr == "false")) &&
+ "Invalid value for boolean command line option! Maybe incorrect "
+ "parameters to the addCheckerOption or addPackageOption method?");
+
+ int Tmp;
+ assert((OptionType != "int" || !DefaultValStr.getAsInteger(0, Tmp)) &&
+ "Invalid value for integer command line option! Maybe incorrect "
+ "parameters to the addCheckerOption or addPackageOption method?");
+ (void)Tmp;
+
+ assert((DevelopmentStatus == "alpha" || DevelopmentStatus == "beta" ||
+ DevelopmentStatus == "released") &&
+ "Invalid development status!");
+ }
+
+ LLVM_DUMP_METHOD void dump() const;
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &Out) const;
+};
+
+using CmdLineOptionList = llvm::SmallVector<CmdLineOption, 0>;
+
+struct CheckerInfo;
+
+using CheckerInfoList = std::vector<CheckerInfo>;
+using CheckerInfoListRange = llvm::iterator_range<CheckerInfoList::iterator>;
+using ConstCheckerInfoList = llvm::SmallVector<const CheckerInfo *, 0>;
+using CheckerInfoSet = llvm::SetVector<const CheckerInfo *>;
+
+/// Specifies a checker. Note that this isn't what we call a checker object,
+/// it merely contains everything required to create one.
+struct CheckerInfo {
+ enum class StateFromCmdLine {
+ // This checker wasn't explicitly enabled or disabled.
+ State_Unspecified,
+ // This checker was explicitly disabled.
+ State_Disabled,
+ // This checker was explicitly enabled.
+ State_Enabled
+ };
+
+ RegisterCheckerFn Initialize = nullptr;
+ ShouldRegisterFunction ShouldRegister = nullptr;
+ StringRef FullName;
+ StringRef Desc;
+ StringRef DocumentationUri;
+ CmdLineOptionList CmdLineOptions;
+ bool IsHidden = false;
+ StateFromCmdLine State = StateFromCmdLine::State_Unspecified;
+
+ ConstCheckerInfoList Dependencies;
+ ConstCheckerInfoList WeakDependencies;
+
+ bool isEnabled(const CheckerManager &mgr) const {
+ return State == StateFromCmdLine::State_Enabled && ShouldRegister(mgr);
+ }
+
+ bool isDisabled(const CheckerManager &mgr) const {
+ return State == StateFromCmdLine::State_Disabled || !ShouldRegister(mgr);
+ }
+
+ // Since each checker must have a different full name, we can identify
+ // CheckerInfo objects by them.
+ bool operator==(const CheckerInfo &Rhs) const {
+ return FullName == Rhs.FullName;
+ }
+
+ CheckerInfo(RegisterCheckerFn Fn, ShouldRegisterFunction sfn, StringRef Name,
+ StringRef Desc, StringRef DocsUri, bool IsHidden)
+ : Initialize(Fn), ShouldRegister(sfn), FullName(Name), Desc(Desc),
+ DocumentationUri(DocsUri), IsHidden(IsHidden) {}
+
+ // Used for lower_bound.
+ explicit CheckerInfo(StringRef FullName) : FullName(FullName) {}
+
+ LLVM_DUMP_METHOD void dump() const;
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &Out) const;
+};
+
+using StateFromCmdLine = CheckerInfo::StateFromCmdLine;
+
+/// Specifies a package. Each package option is implicitly an option for all
+/// checkers within the package.
+struct PackageInfo {
+ StringRef FullName;
+ CmdLineOptionList CmdLineOptions;
+
+  // Since each package must have a different full name, we can identify
+  // PackageInfo objects by them.
+ bool operator==(const PackageInfo &Rhs) const {
+ return FullName == Rhs.FullName;
+ }
+
+ explicit PackageInfo(StringRef FullName) : FullName(FullName) {}
+
+ LLVM_DUMP_METHOD void dump() const;
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &Out) const;
+};
+
+using PackageInfoList = llvm::SmallVector<PackageInfo, 0>;
+
+namespace checker_registry {
+
+template <class T> struct FullNameLT {
+ bool operator()(const T &Lhs, const T &Rhs) {
+ return Lhs.FullName < Rhs.FullName;
+ }
+};
+
+using PackageNameLT = FullNameLT<PackageInfo>;
+using CheckerNameLT = FullNameLT<CheckerInfo>;
+
+template <class CheckerOrPackageInfoList>
+std::conditional_t<std::is_const<CheckerOrPackageInfoList>::value,
+ typename CheckerOrPackageInfoList::const_iterator,
+ typename CheckerOrPackageInfoList::iterator>
+binaryFind(CheckerOrPackageInfoList &Collection, StringRef FullName) {
+
+ using CheckerOrPackage = typename CheckerOrPackageInfoList::value_type;
+ using CheckerOrPackageFullNameLT = FullNameLT<CheckerOrPackage>;
+
+ assert(llvm::is_sorted(Collection, CheckerOrPackageFullNameLT{}) &&
+ "In order to efficiently gather checkers/packages, this function "
+ "expects them to be already sorted!");
+
+ return llvm::lower_bound(Collection, CheckerOrPackage(FullName),
+ CheckerOrPackageFullNameLT{});
+}
+} // namespace checker_registry
+
+struct CheckerRegistryData {
+public:
+ CheckerInfoSet EnabledCheckers;
+
+ CheckerInfoList Checkers;
+ PackageInfoList Packages;
+ /// Used for counting how many checkers belong to a certain package in the
+ /// \c Checkers field. For convenience purposes.
+ llvm::StringMap<size_t> PackageSizes;
+
+ /// Contains all (FullName, CmdLineOption) pairs. Similarly to dependencies,
+ /// we only modify the actual CheckerInfo and PackageInfo objects once all
+ /// of them have been added.
+ llvm::SmallVector<std::pair<StringRef, CmdLineOption>, 0> PackageOptions;
+ llvm::SmallVector<std::pair<StringRef, CmdLineOption>, 0> CheckerOptions;
+
+ llvm::SmallVector<std::pair<StringRef, StringRef>, 0> Dependencies;
+ llvm::SmallVector<std::pair<StringRef, StringRef>, 0> WeakDependencies;
+
+ CheckerInfoListRange getMutableCheckersForCmdLineArg(StringRef CmdLineArg);
+
+ /// Prints the name and description of all checkers in this registry.
+ /// This output is not intended to be machine-parseable.
+ void printCheckerWithDescList(const AnalyzerOptions &AnOpts, raw_ostream &Out,
+ size_t MaxNameChars = 30) const;
+ void printEnabledCheckerList(raw_ostream &Out) const;
+ void printCheckerOptionList(const AnalyzerOptions &AnOpts,
+ raw_ostream &Out) const;
+};
+
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRYDATA_H
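The CmdLineOption constructor in the new header asserts that OptionType is one of "bool", "string" or "int", that a "bool" default is "true" or "false", that an "int" default parses as an integer, and that the development status is "alpha", "beta" or "released". A sketch of a construction that satisfies those invariants; the option itself is made up for illustration:

CmdLineOption DemoOpt(/*OptionType=*/"bool",
                      /*OptionName=*/"AggressiveMode",   // hypothetical option
                      /*DefaultValStr=*/"false",         // must be true/false for "bool"
                      /*Description=*/"Enable noisier, more speculative warnings.",
                      /*DevelopmentStatus=*/"alpha",
                      /*IsHidden=*/false);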
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
index d605a6a667f6..c76e9c0326af 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -16,6 +16,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
@@ -32,6 +33,7 @@ class AnalysisManager : public BugReporterData {
AnalysisDeclContextManager AnaCtxMgr;
ASTContext &Ctx;
+ Preprocessor &PP;
const LangOptions &LangOpts;
PathDiagnosticConsumers PathConsumers;
@@ -44,7 +46,7 @@ class AnalysisManager : public BugReporterData {
public:
AnalyzerOptions &options;
- AnalysisManager(ASTContext &ctx,
+ AnalysisManager(ASTContext &ctx, Preprocessor &PP,
const PathDiagnosticConsumers &Consumers,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
@@ -61,6 +63,8 @@ public:
return AnaCtxMgr;
}
+ Preprocessor &getPreprocessor() override { return PP; }
+
StoreManagerCreator getStoreManagerCreator() {
return CreateStoreMgr;
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index ac218bc070e9..a001c0dc7030 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -157,6 +157,10 @@ public:
const llvm::APSInt &Convert(QualType T, const llvm::APSInt &From) {
APSIntType TargetType = getAPSIntType(T);
+ return Convert(TargetType, From);
+ }
+
+ const llvm::APSInt &Convert(APSIntType TargetType, const llvm::APSInt &From) {
if (TargetType == APSIntType(From))
return From;
@@ -177,11 +181,19 @@ public:
}
const llvm::APSInt &getMaxValue(QualType T) {
- return getValue(getAPSIntType(T).getMaxValue());
+ return getMaxValue(getAPSIntType(T));
}
const llvm::APSInt &getMinValue(QualType T) {
- return getValue(getAPSIntType(T).getMinValue());
+ return getMinValue(getAPSIntType(T));
+ }
+
+ const llvm::APSInt &getMaxValue(APSIntType T) {
+ return getValue(T.getMaxValue());
+ }
+
+ const llvm::APSInt &getMinValue(APSIntType T) {
+ return getValue(T.getMinValue());
}
const llvm::APSInt &Add1(const llvm::APSInt &V) {
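The hunk above adds APSIntType-based overloads of Convert, getMaxValue and getMinValue alongside the existing QualType-based ones. A sketch of how a caller that already holds an APSIntType might use them, assuming BVF is a BasicValueFactory and From an llvm::APSInt obtained elsewhere:

APSIntType TargetTy(/*BitWidth=*/32, /*Unsigned=*/false);
const llvm::APSInt &Max = BVF.getMaxValue(TargetTy);
const llvm::APSInt &Min = BVF.getMinValue(TargetTy);
const llvm::APSInt &Adjusted = BVF.Convert(TargetTy, From);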
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index fc1cc9138826..a2a98c558a4b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -39,6 +39,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -63,9 +64,13 @@ enum CallEventKind {
CE_BEG_CXX_INSTANCE_CALLS = CE_CXXMember,
CE_END_CXX_INSTANCE_CALLS = CE_CXXDestructor,
CE_CXXConstructor,
+ CE_CXXInheritedConstructor,
+ CE_BEG_CXX_CONSTRUCTOR_CALLS = CE_CXXConstructor,
+ CE_END_CXX_CONSTRUCTOR_CALLS = CE_CXXInheritedConstructor,
CE_CXXAllocator,
+ CE_CXXDeallocator,
CE_BEG_FUNCTION_CALLS = CE_Function,
- CE_END_FUNCTION_CALLS = CE_CXXAllocator,
+ CE_END_FUNCTION_CALLS = CE_CXXDeallocator,
CE_Block,
CE_ObjCMessage
};
@@ -196,6 +201,7 @@ public:
/// Returns the kind of call this is.
virtual Kind getKind() const = 0;
+ virtual StringRef getKindAsString() const = 0;
/// Returns the declaration of the function or method that will be
/// called. May be null.
@@ -258,6 +264,13 @@ public:
/// calls.
bool isCalled(const CallDescription &CD) const;
+  /// Returns true if the CallEvent matches any of the CallDescriptions supplied
+  /// as parameters.
+ template <typename FirstCallDesc, typename... CallDescs>
+ bool isCalled(const FirstCallDesc &First, const CallDescs &... Rest) const {
+ return isCalled(First) || isCalled(Rest...);
+ }
+
/// Returns a source range for the entire call, suitable for
/// outputting in diagnostics.
virtual SourceRange getSourceRange() const {
@@ -389,9 +402,10 @@ public:
const StackFrameContext *getCalleeStackFrame(unsigned BlockCount) const;
/// Returns memory location for a parameter variable within the callee stack
- /// frame. May fail; returns null on failure.
- const VarRegion *getParameterLocation(unsigned Index,
- unsigned BlockCount) const;
+  /// frame. The behavior is undefined if the block count differs from the one
+  /// in effect when the call happens. May fail; returns null on failure.
+ const ParamVarRegion *getParameterLocation(unsigned Index,
+ unsigned BlockCount) const;
/// Returns true if on the current path, the argument was constructed by
/// calling a C++ constructor over it. This is an internal detail of the
@@ -421,6 +435,15 @@ public:
return CallArgumentIndex;
}
+ /// Returns the construction context of the call, if it is a C++ constructor
+ /// call or a call of a function returning a C++ class instance. Otherwise
+ /// return nullptr.
+ const ConstructionContext *getConstructionContext() const;
+
+ /// If the call returns a C++ record type then the region of its return value
+ /// can be retrieved from its construction context.
+ Optional<SVal> getReturnValueUnderConstruction() const;
+
// Iterator access to formal parameters and their types.
private:
struct GetTypeFn {
@@ -520,6 +543,9 @@ public:
}
Kind getKind() const override { return CE_Function; }
+ virtual StringRef getKindAsString() const override {
+ return "SimpleFunctionCall";
+ }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_Function;
@@ -528,7 +554,7 @@ public:
/// Represents a call to a block.
///
-/// Example: <tt>^{ /* ... */ }()</tt>
+/// Example: <tt>^{ statement-body }()</tt>
class BlockCall : public CallEvent {
friend class CallEventManager;
@@ -624,13 +650,12 @@ public:
void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const override;
- ArrayRef<ParmVarDecl*> parameters() const override;
+ ArrayRef<ParmVarDecl *> parameters() const override;
Kind getKind() const override { return CE_Block; }
+ virtual StringRef getKindAsString() const override { return "BlockCall"; }
- static bool classof(const CallEvent *CA) {
- return CA->getKind() == CE_Block;
- }
+ static bool classof(const CallEvent *CA) { return CA->getKind() == CE_Block; }
};
/// Represents a non-static C++ member function call, no matter how
@@ -702,6 +727,7 @@ public:
RuntimeDefinition getRuntimeDefinition() const override;
Kind getKind() const override { return CE_CXXMember; }
+ virtual StringRef getKindAsString() const override { return "CXXMemberCall"; }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_CXXMember;
@@ -741,6 +767,9 @@ public:
const Expr *getCXXThisExpr() const override;
Kind getKind() const override { return CE_CXXMemberOperator; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXMemberOperatorCall";
+ }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_CXXMemberOperator;
@@ -759,6 +788,10 @@ public:
// to implicit this-parameter on the declaration.
return CallArgumentIndex + 1;
}
+
+ OverloadedOperatorKind getOverloadedOperator() const {
+ return getOriginExpr()->getOperator();
+ }
};
/// Represents an implicit call to a C++ destructor.
@@ -805,16 +838,47 @@ public:
}
Kind getKind() const override { return CE_CXXDestructor; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXDestructorCall";
+ }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_CXXDestructor;
}
};
+/// Represents any constructor invocation. This includes regular constructors
+/// and inherited constructors.
+class AnyCXXConstructorCall : public AnyFunctionCall {
+protected:
+ AnyCXXConstructorCall(const Expr *E, const MemRegion *Target,
+ ProgramStateRef St, const LocationContext *LCtx)
+ : AnyFunctionCall(E, St, LCtx) {
+ assert(E && (isa<CXXConstructExpr>(E) || isa<CXXInheritedCtorInitExpr>(E)));
+ // Target may be null when the region is unknown.
+ Data = Target;
+ }
+
+ void getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const override;
+
+ void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const override;
+
+public:
+ /// Returns the value of the implicit 'this' object.
+ SVal getCXXThisVal() const;
+
+ static bool classof(const CallEvent *Call) {
+ return Call->getKind() >= CE_BEG_CXX_CONSTRUCTOR_CALLS &&
+ Call->getKind() <= CE_END_CXX_CONSTRUCTOR_CALLS;
+ }
+};
+
/// Represents a call to a C++ constructor.
///
/// Example: \c T(1)
-class CXXConstructorCall : public AnyFunctionCall {
+class CXXConstructorCall : public AnyCXXConstructorCall {
friend class CallEventManager;
protected:
@@ -827,17 +891,12 @@ protected:
/// \param LCtx The location context at this point in the program.
CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *Target,
ProgramStateRef St, const LocationContext *LCtx)
- : AnyFunctionCall(CE, St, LCtx) {
- Data = Target;
- }
+ : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
CXXConstructorCall(const CXXConstructorCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) CXXConstructorCall(*this); }
- void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const override;
-
public:
virtual const CXXConstructExpr *getOriginExpr() const {
return cast<CXXConstructExpr>(AnyFunctionCall::getOriginExpr());
@@ -853,19 +912,96 @@ public:
return getOriginExpr()->getArg(Index);
}
- /// Returns the value of the implicit 'this' object.
- SVal getCXXThisVal() const;
-
- void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
- BindingsTy &Bindings) const override;
-
Kind getKind() const override { return CE_CXXConstructor; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXConstructorCall";
+ }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_CXXConstructor;
}
};
+/// Represents a call to a C++ inherited constructor.
+///
+/// Example: \c class T : public S { using S::S; }; T(1);
+///
+// Note, it is difficult to model the parameters. This is one of the reasons
+// why we skip analysis of inheriting constructors as top-level functions.
+// CXXInheritedCtorInitExpr doesn't take arguments and doesn't model parameter
+// initialization because there is none: the arguments in the outer
+// CXXConstructExpr directly initialize the parameters of the base class
+// constructor, and no copies are made. (Making a copy of the parameter is
+// incorrect, at least if it's done in an observable way.) The derived class
+// constructor doesn't even exist in the formal model.
+/// E.g., in:
+///
+/// struct X { X *p = this; ~X() {} };
+/// struct A { A(X x) : b(x.p == &x) {} bool b; };
+/// struct B : A { using A::A; };
+/// B b = X{};
+///
+/// ... b.b is initialized to true.
+class CXXInheritedConstructorCall : public AnyCXXConstructorCall {
+ friend class CallEventManager;
+
+protected:
+ CXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *CE,
+ const MemRegion *Target, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
+
+ CXXInheritedConstructorCall(const CXXInheritedConstructorCall &Other) =
+ default;
+
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXInheritedConstructorCall(*this);
+ }
+
+public:
+ virtual const CXXInheritedCtorInitExpr *getOriginExpr() const {
+ return cast<CXXInheritedCtorInitExpr>(AnyFunctionCall::getOriginExpr());
+ }
+
+ const CXXConstructorDecl *getDecl() const override {
+ return getOriginExpr()->getConstructor();
+ }
+
+ /// Obtain the stack frame of the inheriting constructor. Argument expressions
+ /// can be found on the call site of that stack frame.
+ const StackFrameContext *getInheritingStackFrame() const;
+
+ /// Obtain the CXXConstructExpr for the sub-class that inherited the current
+ /// constructor (possibly indirectly). It's the statement that contains
+ /// argument expressions.
+ const CXXConstructExpr *getInheritingConstructor() const {
+ return cast<CXXConstructExpr>(getInheritingStackFrame()->getCallSite());
+ }
+
+ unsigned getNumArgs() const override {
+ return getInheritingConstructor()->getNumArgs();
+ }
+
+ const Expr *getArgExpr(unsigned Index) const override {
+ return getInheritingConstructor()->getArg(Index);
+ }
+
+ virtual SVal getArgSVal(unsigned Index) const override {
+ return getState()->getSVal(
+ getArgExpr(Index),
+ getInheritingStackFrame()->getParent()->getStackFrame());
+ }
+
+ Kind getKind() const override { return CE_CXXInheritedConstructor; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXInheritedConstructorCall";
+ }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXInheritedConstructor;
+ }
+};
+
/// Represents the memory allocation call in a C++ new-expression.
///
/// This is a call to "operator new".
@@ -889,6 +1025,12 @@ public:
return getOriginExpr()->getOperatorNew();
}
+ SVal getObjectUnderConstruction() const {
+ return ExprEngine::getObjectUnderConstruction(getState(), getOriginExpr(),
+ getLocationContext())
+ .getValue();
+ }
+
/// Number of non-placement arguments to the call. It is equal to 2 for
/// C++17 aligned operator new() calls that have alignment implicitly
/// passed as the second argument, and to 1 for other operator new() calls.
@@ -916,12 +1058,64 @@ public:
}
Kind getKind() const override { return CE_CXXAllocator; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXAllocatorCall";
+ }
static bool classof(const CallEvent *CE) {
return CE->getKind() == CE_CXXAllocator;
}
};
+/// Represents the memory deallocation call in a C++ delete-expression.
+///
+/// This is a call to "operator delete".
+// FIXME: CXXDeleteExpr isn't present for custom delete operators, or even for
+// some of those that are in the standard library, like the no-throw or align_val
+// versions.
+// Some pointers:
+// http://lists.llvm.org/pipermail/cfe-dev/2020-April/065080.html
+// clang/test/Analysis/cxx-dynamic-memory-analysis-order.cpp
+// clang/unittests/StaticAnalyzer/CallEventTest.cpp
+class CXXDeallocatorCall : public AnyFunctionCall {
+ friend class CallEventManager;
+
+protected:
+ CXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyFunctionCall(E, St, LCtx) {}
+ CXXDeallocatorCall(const CXXDeallocatorCall &Other) = default;
+
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXDeallocatorCall(*this);
+ }
+
+public:
+ virtual const CXXDeleteExpr *getOriginExpr() const {
+ return cast<CXXDeleteExpr>(AnyFunctionCall::getOriginExpr());
+ }
+
+ const FunctionDecl *getDecl() const override {
+ return getOriginExpr()->getOperatorDelete();
+ }
+
+ unsigned getNumArgs() const override { return getDecl()->getNumParams(); }
+
+ const Expr *getArgExpr(unsigned Index) const override {
+    // CXXDeleteExprs only have a single argument.
+ return getOriginExpr()->getArgument();
+ }
+
+ Kind getKind() const override { return CE_CXXDeallocator; }
+ virtual StringRef getKindAsString() const override {
+ return "CXXDeallocatorCall";
+ }
+
+ static bool classof(const CallEvent *CE) {
+ return CE->getKind() == CE_CXXDeallocator;
+ }
+};
+
/// Represents the ways an Objective-C message send can occur.
//
// Note to maintainers: OCM_Message should always be last, since it does not
@@ -992,9 +1186,6 @@ public:
/// Returns the value of the receiver at the time of this call.
SVal getReceiverSVal() const;
- /// Return the value of 'self' if available.
- SVal getSelfSVal() const;
-
/// Get the interface for the receiver.
///
/// This works whether this is an instance message or a class message.
@@ -1039,6 +1230,9 @@ public:
ArrayRef<ParmVarDecl*> parameters() const override;
Kind getKind() const override { return CE_ObjCMessage; }
+ virtual StringRef getKindAsString() const override {
+ return "ObjCMethodCall";
+ }
static bool classof(const CallEvent *CA) {
return CA->getKind() == CE_ObjCMessage;
@@ -1225,6 +1419,13 @@ public:
return create<CXXConstructorCall>(E, Target, State, LCtx);
}
+ CallEventRef<CXXInheritedConstructorCall>
+ getCXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *E,
+ const MemRegion *Target, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ return create<CXXInheritedConstructorCall>(E, Target, State, LCtx);
+ }
+
CallEventRef<CXXDestructorCall>
getCXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
const MemRegion *Target, bool IsBase,
@@ -1237,6 +1438,12 @@ public:
const LocationContext *LCtx) {
return create<CXXAllocatorCall>(E, State, LCtx);
}
+
+ CallEventRef<CXXDeallocatorCall>
+ getCXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ return create<CXXDeallocatorCall>(E, State, LCtx);
+ }
};
template <typename T>
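Among the CallEvent changes above, the variadic isCalled() overload and the getKindAsString() hook are the ones most likely to appear in checker code. A sketch, assuming it runs in a checkPreCall callback where Call is the CallEvent; the CallDescriptions are illustrative only:

CallDescription MallocFn{"malloc", 1}, CallocFn{"calloc", 2};
if (Call.isCalled(MallocFn, CallocFn)) {
  // getKindAsString() is convenient for debugging dumps.
  llvm::errs() << "matched a " << Call.getKindAsString() << " call\n";
}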
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index bd8760cf0ce0..8fd7f52585b8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -107,6 +107,8 @@ public:
return getBugReporter().getSourceManager();
}
+ Preprocessor &getPreprocessor() { return getBugReporter().getPreprocessor(); }
+
SValBuilder &getSValBuilder() {
return Eng.getSValBuilder();
}
@@ -173,8 +175,7 @@ public:
/// @param Pred The transition will be generated from the specified Pred node
/// to the newly generated node.
/// @param Tag The tag to uniquely identify the creation site.
- ExplodedNode *addTransition(ProgramStateRef State,
- ExplodedNode *Pred,
+ ExplodedNode *addTransition(ProgramStateRef State, ExplodedNode *Pred,
const ProgramPointTag *Tag = nullptr) {
return addTransitionImpl(State, false, Pred, Tag);
}
@@ -187,6 +188,14 @@ public:
return addTransitionImpl(State ? State : getState(), true, Pred, Tag);
}
+ /// Add a sink node to the current path of execution, halting analysis.
+ void addSink(ProgramStateRef State = nullptr,
+ const ProgramPointTag *Tag = nullptr) {
+ if (!State)
+ State = getState();
+ addTransition(State, generateSink(State, getPredecessor()));
+ }
+
/// Generate a transition to a node that will be used to report
/// an error. This node will be a sink. That is, it will stop exploration of
/// the given path.
@@ -256,10 +265,12 @@ public:
/// @param IsPrunable Whether the note is prunable. It allows BugReporter
/// to omit the note from the report if it would make the displayed
/// bug path significantly shorter.
- const NoteTag *getNoteTag(std::function<std::string(BugReport &)> &&Cb,
- bool IsPrunable = false) {
+ const NoteTag
+ *getNoteTag(std::function<std::string(PathSensitiveBugReport &)> &&Cb,
+ bool IsPrunable = false) {
return getNoteTag(
- [Cb](BugReporterContext &, BugReport &BR) { return Cb(BR); },
+ [Cb](BugReporterContext &,
+ PathSensitiveBugReport &BR) { return Cb(BR); },
IsPrunable);
}
@@ -272,7 +283,8 @@ public:
/// bug path significantly shorter.
const NoteTag *getNoteTag(std::function<std::string()> &&Cb,
bool IsPrunable = false) {
- return getNoteTag([Cb](BugReporterContext &, BugReport &) { return Cb(); },
+ return getNoteTag([Cb](BugReporterContext &,
+ PathSensitiveBugReport &) { return Cb(); },
IsPrunable);
}
@@ -284,7 +296,9 @@ public:
/// bug path significantly shorter.
const NoteTag *getNoteTag(StringRef Note, bool IsPrunable = false) {
return getNoteTag(
- [Note](BugReporterContext &, BugReport &) { return Note; }, IsPrunable);
+ [Note](BugReporterContext &,
+ PathSensitiveBugReport &) { return std::string(Note); },
+ IsPrunable);
}
/// Returns the word that should be used to refer to the declaration
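Two additions above are worth calling out: the note-tag factories now hand the lambda a PathSensitiveBugReport, and addSink() gives checkers a one-liner for abandoning the current path. A sketch, assuming it runs inside a checker callback with a CheckerContext C; the note text and the condition are placeholders:

const NoteTag *Note = C.getNoteTag(
    [](PathSensitiveBugReport &) { return std::string("resource acquired here"); },
    /*IsPrunable=*/true);
C.addTransition(C.getState(), Note);

if (PathLooksInfeasible)  // hypothetical condition
  C.addSink();            // halts analysis along this path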
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index b53c042a1ca1..f253c14cc487 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CHECKERHELPERS_H
#include "clang/AST/Stmt.h"
+#include "llvm/ADT/Optional.h"
#include <tuple>
namespace clang {
@@ -22,6 +23,7 @@ class Expr;
class VarDecl;
class QualType;
class AttributedType;
+class Preprocessor;
namespace ento {
@@ -62,8 +64,13 @@ enum class Nullability : char {
/// Get nullability annotation for a given type.
Nullability getNullabilityAnnotation(QualType Type);
-} // end GR namespace
+/// Try to parse the value of a defined preprocessor macro. We can only parse
+/// simple expressions that consist of an optional minus sign token and then a
+/// token for an integer. If we cannot parse the value then None is returned.
+llvm::Optional<int> tryExpandAsInteger(StringRef Macro, const Preprocessor &PP);
-} // end clang namespace
+} // namespace ento
+
+} // namespace clang
#endif
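tryExpandAsInteger pairs with the Preprocessor access that this series threads through CheckerContext. A sketch, assuming a CheckerContext C is available; the macro name is only an example:

const Preprocessor &PP = C.getPreprocessor();
if (llvm::Optional<int> Val = tryExpandAsInteger("EINVAL", PP)) {
  // *Val holds the integer the macro expands to; None means it did not
  // expand to a plain (optionally negated) integer literal.
}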
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
index f85c37379158..335536b6a310 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -32,7 +32,7 @@ namespace clang {
namespace ento {
class ProgramStateManager;
-class SubEngine;
+class ExprEngine;
class SymbolReaper;
class ConditionTruthVal {
@@ -96,11 +96,7 @@ public:
// If StTrue is infeasible, asserting the falseness of Cond is unnecessary
// because the existing constraints already establish this.
if (!StTrue) {
-#ifndef __OPTIMIZE__
- // This check is expensive and should be disabled even in Release+Asserts
- // builds.
- // FIXME: __OPTIMIZE__ is a GNU extension that Clang implements but MSVC
- // does not. Is there a good equivalent there?
+#ifdef EXPENSIVE_CHECKS
assert(assume(State, Cond, false) && "System is over constrained.");
#endif
return ProgramStatePair((ProgramStateRef)nullptr, State);
@@ -197,10 +193,11 @@ protected:
std::unique_ptr<ConstraintManager>
CreateRangeConstraintManager(ProgramStateManager &statemgr,
- SubEngine *subengine);
+ ExprEngine *exprengine);
std::unique_ptr<ConstraintManager>
-CreateZ3ConstraintManager(ProgramStateManager &statemgr, SubEngine *subengine);
+CreateZ3ConstraintManager(ProgramStateManager &statemgr,
+ ExprEngine *exprengine);
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 278193ef99ed..2aca2c99ef4f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -41,7 +41,7 @@ class LabelDecl;
namespace ento {
class FunctionSummariesTy;
-class SubEngine;
+class ExprEngine;
//===----------------------------------------------------------------------===//
/// CoreEngine - Implements the core logic of the graph-reachability
@@ -69,7 +69,7 @@ public:
std::vector<std::pair<const CFGBlock *, const ExplodedNode *>>;
private:
- SubEngine &SubEng;
+ ExprEngine &ExprEng;
/// G - The simulation graph. Each node is a (location,state) pair.
mutable ExplodedGraph G;
@@ -129,7 +129,7 @@ private:
public:
/// Construct a CoreEngine object to analyze the provided CFG.
- CoreEngine(SubEngine &subengine,
+ CoreEngine(ExprEngine &exprengine,
FunctionSummariesTy *FS,
AnalyzerOptions &Opts);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h
new file mode 100644
index 000000000000..398f9b6ac33a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h
@@ -0,0 +1,53 @@
+//===- DynamicSize.h - Dynamic size related APIs ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic size information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+
+namespace clang {
+namespace ento {
+
+/// Get the stored dynamic size for the region \p MR.
+DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
+ SValBuilder &SVB);
+
+/// Get the stored element count of the region \p MR.
+DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
+ const MemRegion *MR,
+ SValBuilder &SVB,
+ QualType ElementTy);
+
+/// Get the dynamic size for a symbolic value that represents a buffer. If
+/// the value is offset into the underlying buffer, that offset is taken into
+/// account. Returns an SVal that represents the size; this is Unknown if the
+/// engine cannot deduce the size.
+/// E.g.
+/// char buf[3];
+/// (buf); // size is 3
+/// (buf + 1); // size is 2
+/// (buf + 3); // size is 0
+/// (buf + 4); // size is -1
+///
+/// char *bufptr;
+/// (bufptr) // size is unknown
+SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV);
+
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
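A sketch of how the new dynamic-size queries compose, assuming a checker callback with a CheckerContext C, a MemRegion *MR, and an Expr *BufExpr obtained elsewhere:

ProgramStateRef State = C.getState();
SValBuilder &SVB = C.getSValBuilder();
DefinedOrUnknownSVal Size = getDynamicSize(State, MR, SVB);
DefinedOrUnknownSVal Count =
    getDynamicElementCount(State, MR, SVB, C.getASTContext().CharTy);
SVal Remaining = getDynamicSizeWithOffset(State, C.getSVal(BufExpr));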
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
index 356401d77561..2679339537e8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
@@ -36,6 +36,10 @@ DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR);
const DynamicTypeInfo *getRawDynamicTypeInfo(ProgramStateRef State,
const MemRegion *MR);
+/// Get dynamic type information stored in a class object represented by \p Sym.
+DynamicTypeInfo getClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym);
+
/// Get dynamic cast information from \p CastFromTy to \p CastToTy of \p MR.
const DynamicCastInfo *getDynamicCastInfo(ProgramStateRef State,
const MemRegion *MR,
@@ -50,6 +54,16 @@ ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR,
ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR,
QualType NewTy, bool CanBeSubClassed = true);
+/// Set constraint on a type contained in a class object; return the new state.
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym,
+ DynamicTypeInfo NewTy);
+
+/// Set constraint on a type contained in a class object; return the new state.
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym, QualType NewTy,
+ bool CanBeSubClassed = true);
+
/// Set dynamic type and cast information of the region; return the new state.
ProgramStateRef setDynamicTypeAndCastInfo(ProgramStateRef State,
const MemRegion *MR,
@@ -63,6 +77,10 @@ ProgramStateRef removeDeadTypes(ProgramStateRef State, SymbolReaper &SR);
 /// Removes the dead cast information from \p State.
ProgramStateRef removeDeadCasts(ProgramStateRef State, SymbolReaper &SR);
+/// Removes the dead class object type information from \p State.
+ProgramStateRef removeDeadClassObjectTypes(ProgramStateRef State,
+ SymbolReaper &SR);
+
void printDynamicTypeInfoJson(raw_ostream &Out, ProgramStateRef State,
const char *NL = "\n", unsigned int Space = 0,
bool IsDot = false);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
index 6262c4a1ce37..6d2b495dc0f5 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
@@ -33,6 +33,8 @@ public:
/// Returns the currently inferred upper bound on the runtime type.
QualType getType() const { return DynTy; }
+ operator bool() const { return isValid(); }
+
bool operator==(const DynamicTypeInfo &RHS) const {
return DynTy == RHS.DynTy && CanBeASubClass == RHS.CanBeASubClass;
}
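With the new boolean conversion, validity checks on DynamicTypeInfo read directly off the object. A sketch, assuming State and MR come from a checker callback:

DynamicTypeInfo DTI = getDynamicTypeInfo(State, MR);
if (DTI && !DTI.canBeASubClass()) {
  QualType ExactTy = DTI.getType();  // runtime type is known exactly here
  (void)ExactTy;
}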
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 6e676c512b89..cdfe986355c5 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -21,6 +21,7 @@
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
@@ -29,9 +30,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/ArrayRef.h"
#include <cassert>
@@ -42,6 +43,8 @@ namespace clang {
class AnalysisDeclContextManager;
class AnalyzerOptions;
class ASTContext;
+class CFGBlock;
+class CFGElement;
class ConstructionContext;
class CXXBindTemporaryExpr;
class CXXCatchStmt;
@@ -72,16 +75,58 @@ class CrossTranslationUnitContext;
namespace ento {
+class AnalysisManager;
class BasicValueFactory;
+class BlockCounter;
+class BranchNodeBuilder;
class CallEvent;
class CheckerManager;
class ConstraintManager;
class CXXTempObjectRegion;
+class EndOfFunctionNodeBuilder;
+class ExplodedNodeSet;
+class ExplodedNode;
+class IndirectGotoNodeBuilder;
class MemRegion;
+struct NodeBuilderContext;
+class NodeBuilderWithSinks;
+class ProgramState;
+class ProgramStateManager;
class RegionAndSymbolInvalidationTraits;
class SymbolManager;
+class SwitchNodeBuilder;
+
+/// Hints for figuring out whether a call should be inlined during evalCall().
+struct EvalCallOptions {
+ /// This call is a constructor or a destructor for which we do not currently
+ /// compute the this-region correctly.
+ bool IsCtorOrDtorWithImproperlyModeledTargetRegion = false;
+
+ /// This call is a constructor or a destructor for a single element within
+ /// an array, a part of array construction or destruction.
+ bool IsArrayCtorOrDtor = false;
+
+ /// This call is a constructor or a destructor of a temporary value.
+ bool IsTemporaryCtorOrDtor = false;
+
+ /// This call is a constructor for a temporary that is lifetime-extended
+ /// by binding it to a reference-type field within an aggregate,
+ /// for example 'A { const C &c; }; A a = { C() };'
+ bool IsTemporaryLifetimeExtendedViaAggregate = false;
+
+ /// This call is a pre-C++17 elidable constructor that we failed to elide
+ /// because we failed to compute the target region into which
+ /// this constructor would have been ultimately elided. Analysis that
+ /// we perform in this case is still correct but it behaves differently,
+ /// as if copy elision is disabled.
+ bool IsElidableCtorThatHasNotBeenElided = false;
+
+ EvalCallOptions() {}
+};
+
+class ExprEngine {
+ void anchor();
-class ExprEngine : public SubEngine {
public:
/// The modes of inlining, which override the default analysis-wide settings.
enum InliningModes {
@@ -92,27 +137,6 @@ public:
Inline_Minimal = 0x1
};
- /// Hints for figuring out of a call should be inlined during evalCall().
- struct EvalCallOptions {
- /// This call is a constructor or a destructor for which we do not currently
- /// compute the this-region correctly.
- bool IsCtorOrDtorWithImproperlyModeledTargetRegion = false;
-
- /// This call is a constructor or a destructor for a single element within
- /// an array, a part of array construction or destruction.
- bool IsArrayCtorOrDtor = false;
-
- /// This call is a constructor or a destructor of a temporary value.
- bool IsTemporaryCtorOrDtor = false;
-
- /// This call is a constructor for a temporary that is lifetime-extended
- /// by binding it to a reference-type field within an aggregate,
- /// for example 'A { const C &c; }; A a = { C() };'
- bool IsTemporaryLifetimeExtendedViaAggregate = false;
-
- EvalCallOptions() {}
- };
-
private:
cross_tu::CrossTranslationUnitContext &CTU;
@@ -161,7 +185,7 @@ public:
SetOfConstDecls *VisitedCalleesIn,
FunctionSummariesTy *FS, InliningModes HowToInlineIn);
- ~ExprEngine() override = default;
+ virtual ~ExprEngine() = default;
/// Returns true if there is still simulation state on the worklist.
bool ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
@@ -181,7 +205,7 @@ public:
/// getContext - Return the ASTContext associated with this analysis.
ASTContext &getContext() const { return AMgr.getASTContext(); }
- AnalysisManager &getAnalysisManager() override { return AMgr; }
+ AnalysisManager &getAnalysisManager() { return AMgr; }
AnalysisDeclContextManager &getAnalysisDeclContextManager() {
return AMgr.getAnalysisDeclContextManager();
@@ -196,7 +220,7 @@ public:
BugReporter &getBugReporter() { return BR; }
cross_tu::CrossTranslationUnitContext *
- getCrossTranslationUnitContext() override {
+ getCrossTranslationUnitContext() {
return &CTU;
}
@@ -232,7 +256,7 @@ public:
/// getInitialState - Return the initial state used for the root vertex
/// in the ExplodedGraph.
- ProgramStateRef getInitialState(const LocationContext *InitLoc) override;
+ ProgramStateRef getInitialState(const LocationContext *InitLoc);
ExplodedGraph &getGraph() { return G; }
const ExplodedGraph &getGraph() const { return G; }
@@ -270,7 +294,7 @@ public:
/// processCFGElement - Called by CoreEngine. Used to generate new successor
/// nodes by processing the 'effects' of a CFG element.
void processCFGElement(const CFGElement E, ExplodedNode *Pred,
- unsigned StmtIdx, NodeBuilderContext *Ctx) override;
+ unsigned StmtIdx, NodeBuilderContext *Ctx);
void ProcessStmt(const Stmt *S, ExplodedNode *Pred);
@@ -296,7 +320,7 @@ public:
/// Called by CoreEngine when processing the entrance of a CFGBlock.
void processCFGBlockEntrance(const BlockEdge &L,
NodeBuilderWithSinks &nodeBuilder,
- ExplodedNode *Pred) override;
+ ExplodedNode *Pred);
/// ProcessBranch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
@@ -305,7 +329,7 @@ public:
ExplodedNode *Pred,
ExplodedNodeSet &Dst,
const CFGBlock *DstT,
- const CFGBlock *DstF) override;
+ const CFGBlock *DstF);
/// Called by CoreEngine.
/// Used to generate successor nodes for temporary destructors depending
@@ -314,7 +338,7 @@ public:
NodeBuilderContext &BldCtx,
ExplodedNode *Pred, ExplodedNodeSet &Dst,
const CFGBlock *DstT,
- const CFGBlock *DstF) override;
+ const CFGBlock *DstF);
/// Called by CoreEngine. Used to processing branching behavior
/// at static initializers.
@@ -323,27 +347,27 @@ public:
ExplodedNode *Pred,
ExplodedNodeSet &Dst,
const CFGBlock *DstT,
- const CFGBlock *DstF) override;
+ const CFGBlock *DstF);
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
- void processIndirectGoto(IndirectGotoNodeBuilder& builder) override;
+ void processIndirectGoto(IndirectGotoNodeBuilder& builder);
/// ProcessSwitch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
- void processSwitch(SwitchNodeBuilder& builder) override;
+ void processSwitch(SwitchNodeBuilder& builder);
/// Called by CoreEngine. Used to notify checkers that processing a
   /// function has begun. Called for both inlined and top-level functions.
void processBeginOfFunction(NodeBuilderContext &BC,
ExplodedNode *Pred, ExplodedNodeSet &Dst,
- const BlockEdge &L) override;
+ const BlockEdge &L);
/// Called by CoreEngine. Used to notify checkers that processing a
   /// function has ended. Called for both inlined and top-level functions.
void processEndOfFunction(NodeBuilderContext& BC,
ExplodedNode *Pred,
- const ReturnStmt *RS = nullptr) override;
+ const ReturnStmt *RS = nullptr);
/// Remove dead bindings/symbols before exiting a function.
void removeDeadOnEndOfFunction(NodeBuilderContext& BC,
@@ -352,19 +376,19 @@ public:
/// Generate the entry node of the callee.
void processCallEnter(NodeBuilderContext& BC, CallEnter CE,
- ExplodedNode *Pred) override;
+ ExplodedNode *Pred);
/// Generate the sequence of nodes that simulate the call exit and the post
/// visit for CallExpr.
- void processCallExit(ExplodedNode *Pred) override;
+ void processCallExit(ExplodedNode *Pred);
/// Called by CoreEngine when the analysis worklist has terminated.
- void processEndWorklist() override;
+ void processEndWorklist();
/// evalAssume - Callback function invoked by the ConstraintManager when
/// making assumptions about state values.
ProgramStateRef processAssume(ProgramStateRef state, SVal cond,
- bool assumption) override;
+ bool assumption);
/// processRegionChanges - Called by ProgramStateManager whenever a change is made
/// to the store. Used to update checkers that track region values.
@@ -374,14 +398,21 @@ public:
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const LocationContext *LCtx,
- const CallEvent *Call) override;
+ const CallEvent *Call);
+
+ inline ProgramStateRef
+ processRegionChange(ProgramStateRef state,
+ const MemRegion* MR,
+ const LocationContext *LCtx) {
+ return processRegionChanges(state, nullptr, MR, MR, LCtx, nullptr);
+ }
/// printJson - Called by ProgramStateManager to print checker-specific data.
void printJson(raw_ostream &Out, ProgramStateRef State,
const LocationContext *LCtx, const char *NL,
- unsigned int Space, bool IsDot) const override;
+ unsigned int Space, bool IsDot) const;
- ProgramStateManager &getStateManager() override { return StateMgr; }
+ ProgramStateManager &getStateManager() { return StateMgr; }
StoreManager &getStoreManager() { return StateMgr.getStoreManager(); }
@@ -527,6 +558,9 @@ public:
void VisitCXXConstructExpr(const CXXConstructExpr *E, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
+ void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
void VisitCXXDestructor(QualType ObjectType, const MemRegion *Dest,
const Stmt *S, bool IsBaseDtor,
ExplodedNode *Pred, ExplodedNodeSet &Dst,
@@ -605,23 +639,11 @@ public:
const ConstructionContextItem &Item,
const LocationContext *LC);
-protected:
- /// evalBind - Handle the semantics of binding a value to a specific location.
- /// This method is used by evalStore, VisitDeclStmt, and others.
- void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
- SVal location, SVal Val, bool atDeclInit = false,
- const ProgramPoint *PP = nullptr);
-
/// Call PointerEscape callback when a value escapes as a result of bind.
ProgramStateRef processPointerEscapedOnBind(
ProgramStateRef State, ArrayRef<std::pair<SVal, SVal>> LocAndVals,
const LocationContext *LCtx, PointerEscapeKind Kind,
- const CallEvent *Call) override;
-
- ProgramStateRef
- processPointerEscapedOnBind(ProgramStateRef State,
- SVal Loc, SVal Val,
- const LocationContext *LCtx);
+ const CallEvent *Call);
/// Call PointerEscape callback when a value escapes as a result of
/// region invalidation.
@@ -631,7 +653,19 @@ protected:
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
const CallEvent *Call,
- RegionAndSymbolInvalidationTraits &ITraits) override;
+ RegionAndSymbolInvalidationTraits &ITraits);
+
+private:
+ /// evalBind - Handle the semantics of binding a value to a specific location.
+ /// This method is used by evalStore, VisitDeclStmt, and others.
+ void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
+ SVal location, SVal Val, bool atDeclInit = false,
+ const ProgramPoint *PP = nullptr);
+
+ ProgramStateRef
+ processPointerEscapedOnBind(ProgramStateRef State,
+ SVal Loc, SVal Val,
+ const LocationContext *LCtx);
/// A simple wrapper when you only need to notify checkers of pointer-escape
/// of some values.
@@ -683,6 +717,35 @@ public:
const CallEvent &Call,
const EvalCallOptions &CallOpts = {});
+  /// Find the location of the object that is being constructed by a given
+  /// constructor. This should ideally always succeed, but because the
+  /// implementation is not yet complete it sometimes reports failure through
+  /// its out-parameter CallOpts; in such cases a fake temporary region is
+  /// returned, which is better than nothing but does not represent the
+  /// actual behavior of the program.
+ SVal computeObjectUnderConstruction(
+ const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, EvalCallOptions &CallOpts);
+
+ /// Update the program state with all the path-sensitive information
+ /// that's necessary to perform construction of an object with a given
+ /// syntactic construction context. V and CallOpts have to be obtained from
+ /// computeObjectUnderConstruction() invoked with the same set of
+ /// the remaining arguments (E, State, LCtx, CC).
+ ProgramStateRef updateObjectsUnderConstruction(
+ SVal V, const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, const EvalCallOptions &CallOpts);
+
+ /// A convenient wrapper around computeObjectUnderConstruction
+ /// and updateObjectsUnderConstruction.
+ std::pair<ProgramStateRef, SVal> handleConstructionContext(
+ const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, EvalCallOptions &CallOpts) {
+ SVal V = computeObjectUnderConstruction(E, State, LCtx, CC, CallOpts);
+ return std::make_pair(
+ updateObjectsUnderConstruction(V, E, State, LCtx, CC, CallOpts), V);
+ }
+
private:
ProgramStateRef finishArgumentConstruction(ProgramStateRef State,
const CallEvent &Call);
@@ -801,15 +864,10 @@ private:
/// constructing into an existing region.
const CXXConstructExpr *findDirectConstructorForCurrentCFGElement();
- /// Update the program state with all the path-sensitive information
- /// that's necessary to perform construction of an object with a given
- /// syntactic construction context. If the construction context is unavailable
- /// or unusable for any reason, a dummy temporary region is returned, and the
- /// IsConstructorWithImproperlyModeledTargetRegion flag is set in \p CallOpts.
- /// Returns the updated program state and the new object's this-region.
- std::pair<ProgramStateRef, SVal> prepareForObjectConstruction(
- const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
- const ConstructionContext *CC, EvalCallOptions &CallOpts);
+ /// Common code that handles either a CXXConstructExpr or a
+ /// CXXInheritedCtorInitExpr.
+ void handleConstructor(const Expr *E, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
/// Store the location of a C++ object corresponding to a statement
/// until the statement is actually encountered. For example, if a DeclStmt
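The new construction API above is easiest to read from the caller's side. A minimal sketch of the intended call pattern, assuming an ExprEngine &Eng and values E, State, LCtx and CC are already in scope (all local names here are illustrative, not part of the patch):

  EvalCallOptions CallOpts;
  // Step 1: decide where the object lives; may set failure flags in CallOpts.
  SVal Target = Eng.computeObjectUnderConstruction(E, State, LCtx, CC, CallOpts);
  // Step 2: record that location in the state, passing the same E/State/LCtx/CC.
  State = Eng.updateObjectsUnderConstruction(Target, E, State, LCtx, CC, CallOpts);
  // Or both steps at once via the wrapper (std::tie is from <tuple>):
  std::tie(State, Target) = Eng.handleConstructionContext(E, State, LCtx, CC, CallOpts);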
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index 71cbbe28fc25..9f85347db5df 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -112,7 +112,7 @@ public:
virtual void Profile(llvm::FoldingSetNodeID& ID) const = 0;
- virtual MemRegionManager* getMemRegionManager() const = 0;
+ virtual MemRegionManager &getMemRegionManager() const = 0;
const MemSpaceRegion *getMemorySpace() const;
@@ -198,14 +198,13 @@ public:
/// for example, the set of global variables, the stack frame, etc.
class MemSpaceRegion : public MemRegion {
protected:
- MemRegionManager *Mgr;
+ MemRegionManager &Mgr;
- MemSpaceRegion(MemRegionManager *mgr, Kind k) : MemRegion(k), Mgr(mgr) {
+ MemSpaceRegion(MemRegionManager &mgr, Kind k) : MemRegion(k), Mgr(mgr) {
assert(classof(this));
- assert(mgr);
}
- MemRegionManager* getMemRegionManager() const override { return Mgr; }
+ MemRegionManager &getMemRegionManager() const override { return Mgr; }
public:
bool isBoundable() const override { return false; }
@@ -223,7 +222,7 @@ public:
class CodeSpaceRegion : public MemSpaceRegion {
friend class MemRegionManager;
- CodeSpaceRegion(MemRegionManager *mgr)
+ CodeSpaceRegion(MemRegionManager &mgr)
: MemSpaceRegion(mgr, CodeSpaceRegionKind) {}
public:
@@ -238,7 +237,7 @@ class GlobalsSpaceRegion : public MemSpaceRegion {
virtual void anchor();
protected:
- GlobalsSpaceRegion(MemRegionManager *mgr, Kind k) : MemSpaceRegion(mgr, k) {
+ GlobalsSpaceRegion(MemRegionManager &mgr, Kind k) : MemSpaceRegion(mgr, k) {
assert(classof(this));
}
@@ -259,7 +258,7 @@ class StaticGlobalSpaceRegion : public GlobalsSpaceRegion {
const CodeTextRegion *CR;
- StaticGlobalSpaceRegion(MemRegionManager *mgr, const CodeTextRegion *cr)
+ StaticGlobalSpaceRegion(MemRegionManager &mgr, const CodeTextRegion *cr)
: GlobalsSpaceRegion(mgr, StaticGlobalSpaceRegionKind), CR(cr) {
assert(cr);
}
@@ -286,7 +285,7 @@ class NonStaticGlobalSpaceRegion : public GlobalsSpaceRegion {
void anchor() override;
protected:
- NonStaticGlobalSpaceRegion(MemRegionManager *mgr, Kind k)
+ NonStaticGlobalSpaceRegion(MemRegionManager &mgr, Kind k)
: GlobalsSpaceRegion(mgr, k) {
assert(classof(this));
}
@@ -304,7 +303,7 @@ public:
class GlobalSystemSpaceRegion : public NonStaticGlobalSpaceRegion {
friend class MemRegionManager;
- GlobalSystemSpaceRegion(MemRegionManager *mgr)
+ GlobalSystemSpaceRegion(MemRegionManager &mgr)
: NonStaticGlobalSpaceRegion(mgr, GlobalSystemSpaceRegionKind) {}
public:
@@ -323,7 +322,7 @@ public:
class GlobalImmutableSpaceRegion : public NonStaticGlobalSpaceRegion {
friend class MemRegionManager;
- GlobalImmutableSpaceRegion(MemRegionManager *mgr)
+ GlobalImmutableSpaceRegion(MemRegionManager &mgr)
: NonStaticGlobalSpaceRegion(mgr, GlobalImmutableSpaceRegionKind) {}
public:
@@ -340,7 +339,7 @@ public:
class GlobalInternalSpaceRegion : public NonStaticGlobalSpaceRegion {
friend class MemRegionManager;
- GlobalInternalSpaceRegion(MemRegionManager *mgr)
+ GlobalInternalSpaceRegion(MemRegionManager &mgr)
: NonStaticGlobalSpaceRegion(mgr, GlobalInternalSpaceRegionKind) {}
public:
@@ -354,7 +353,7 @@ public:
class HeapSpaceRegion : public MemSpaceRegion {
friend class MemRegionManager;
- HeapSpaceRegion(MemRegionManager *mgr)
+ HeapSpaceRegion(MemRegionManager &mgr)
: MemSpaceRegion(mgr, HeapSpaceRegionKind) {}
public:
@@ -368,7 +367,7 @@ public:
class UnknownSpaceRegion : public MemSpaceRegion {
friend class MemRegionManager;
- UnknownSpaceRegion(MemRegionManager *mgr)
+ UnknownSpaceRegion(MemRegionManager &mgr)
: MemSpaceRegion(mgr, UnknownSpaceRegionKind) {}
public:
@@ -385,7 +384,7 @@ class StackSpaceRegion : public MemSpaceRegion {
const StackFrameContext *SFC;
protected:
- StackSpaceRegion(MemRegionManager *mgr, Kind k, const StackFrameContext *sfc)
+ StackSpaceRegion(MemRegionManager &mgr, Kind k, const StackFrameContext *sfc)
: MemSpaceRegion(mgr, k), SFC(sfc) {
assert(classof(this));
assert(sfc);
@@ -405,7 +404,7 @@ public:
class StackLocalsSpaceRegion : public StackSpaceRegion {
friend class MemRegionManager;
- StackLocalsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
+ StackLocalsSpaceRegion(MemRegionManager &mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackLocalsSpaceRegionKind, sfc) {}
public:
@@ -420,7 +419,7 @@ class StackArgumentsSpaceRegion : public StackSpaceRegion {
private:
friend class MemRegionManager;
- StackArgumentsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
+ StackArgumentsSpaceRegion(MemRegionManager &mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackArgumentsSpaceRegionKind, sfc) {}
public:
@@ -449,12 +448,7 @@ public:
return superRegion;
}
- /// getExtent - Returns the size of the region in bytes.
- virtual DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const {
- return UnknownVal();
- }
-
- MemRegionManager* getMemRegionManager() const override;
+ MemRegionManager &getMemRegionManager() const override;
bool isSubRegionOf(const MemRegion* R) const override;
@@ -491,8 +485,6 @@ public:
bool isBoundable() const override { return true; }
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const override;
-
void Profile(llvm::FoldingSetNodeID& ID) const override;
void dumpToStream(raw_ostream &os) const override;
@@ -552,8 +544,6 @@ public:
return T.getTypePtrOrNull() ? T.getDesugaredType(Context) : T;
}
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const override;
-
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
return k >= BEGIN_TYPED_VALUE_REGIONS && k <= END_TYPED_VALUE_REGIONS;
@@ -782,8 +772,6 @@ public:
bool isBoundable() const override { return true; }
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const override;
-
void Profile(llvm::FoldingSetNodeID& ID) const override;
static void ProfileRegion(llvm::FoldingSetNodeID& ID,
@@ -817,8 +805,6 @@ public:
QualType getValueType() const override { return Str->getType(); }
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const override;
-
bool isBoundable() const override { return false; }
void Profile(llvm::FoldingSetNodeID& ID) const override {
@@ -904,20 +890,12 @@ public:
class DeclRegion : public TypedValueRegion {
protected:
- const ValueDecl *D;
-
- DeclRegion(const ValueDecl *d, const MemRegion *sReg, Kind k)
- : TypedValueRegion(sReg, k), D(d) {
+ DeclRegion(const MemRegion *sReg, Kind k) : TypedValueRegion(sReg, k) {
assert(classof(this));
- assert(d && d->isCanonicalDecl());
}
- static void ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
- const MemRegion* superRegion, Kind k);
-
public:
- const ValueDecl *getDecl() const { return D; }
- void Profile(llvm::FoldingSetNodeID& ID) const override;
+ virtual const ValueDecl *getDecl() const = 0;
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
@@ -928,9 +906,9 @@ public:
class VarRegion : public DeclRegion {
friend class MemRegionManager;
- // Constructors and private methods.
- VarRegion(const VarDecl *vd, const MemRegion *sReg)
- : DeclRegion(vd, sReg, VarRegionKind) {
+protected:
+ // Constructors and protected methods.
+ VarRegion(const MemRegion *sReg, Kind k) : DeclRegion(sReg, k) {
// VarRegion appears in unknown space when it's a block variable as seen
// from a block using it, when this block is analyzed at top-level.
// Other block variables appear within block data regions,
@@ -939,17 +917,45 @@ class VarRegion : public DeclRegion {
isa<BlockDataRegion>(sReg) || isa<UnknownSpaceRegion>(sReg));
}
- static void ProfileRegion(llvm::FoldingSetNodeID& ID, const VarDecl *VD,
- const MemRegion *superRegion) {
- DeclRegion::ProfileRegion(ID, VD, superRegion, VarRegionKind);
+public:
+ const VarDecl *getDecl() const override = 0;
+
+ const StackFrameContext *getStackFrame() const;
+
+ QualType getValueType() const override {
+ // FIXME: We can cache this if needed.
+ return getDecl()->getType();
}
-public:
- void Profile(llvm::FoldingSetNodeID& ID) const override;
+ static bool classof(const MemRegion *R) {
+ unsigned k = R->getKind();
+ return k >= BEGIN_VAR_REGIONS && k <= END_VAR_REGIONS;
+ }
+};
- const VarDecl *getDecl() const { return cast<VarDecl>(D); }
+class NonParamVarRegion : public VarRegion {
+ friend class MemRegionManager;
- const StackFrameContext *getStackFrame() const;
+ const VarDecl *VD;
+
+ // Constructors and private methods.
+ NonParamVarRegion(const VarDecl *vd, const MemRegion *sReg)
+ : VarRegion(sReg, NonParamVarRegionKind), VD(vd) {
+ // VarRegion appears in unknown space when it's a block variable as seen
+ // from a block using it, when this block is analyzed at top-level.
+ // Other block variables appear within block data regions,
+ // which, unlike everything else on this list, are not memory spaces.
+ assert(isa<GlobalsSpaceRegion>(sReg) || isa<StackSpaceRegion>(sReg) ||
+ isa<BlockDataRegion>(sReg) || isa<UnknownSpaceRegion>(sReg));
+ }
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID, const VarDecl *VD,
+ const MemRegion *superRegion);
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const override;
+
+ const VarDecl *getDecl() const override { return VD; }
QualType getValueType() const override {
// FIXME: We can cache this if needed.
@@ -963,7 +969,50 @@ public:
void printPrettyAsExpr(raw_ostream &os) const override;
static bool classof(const MemRegion* R) {
- return R->getKind() == VarRegionKind;
+ return R->getKind() == NonParamVarRegionKind;
+ }
+};
+
+/// ParamVarRegion - Represents a region for parameters. Only parameters of
+/// the function in the current stack frame are represented as
+/// `ParamVarRegion`s. Parameters of top-level analyzed functions, as well as
+/// parameters captured by lambdas and blocks, are represented as `VarRegion`s.
+
+// FIXME: `ParamVarRegion` only supports parameters of functions, C++
+// constructors, blocks and Objective-C methods with existing `Decl`. Upon
+// implementing stack frame creations for functions without decl (functions
+// passed by unknown function pointer) methods of `ParamVarRegion` must be
+// updated.
+class ParamVarRegion : public VarRegion {
+ friend class MemRegionManager;
+
+ const Expr *OriginExpr;
+ unsigned Index;
+
+ ParamVarRegion(const Expr *OE, unsigned Idx, const MemRegion *SReg)
+ : VarRegion(SReg, ParamVarRegionKind), OriginExpr(OE), Index(Idx) {
+ assert(!cast<StackSpaceRegion>(SReg)->getStackFrame()->inTopFrame());
+ }
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID, const Expr *OE,
+ unsigned Idx, const MemRegion *SReg);
+
+public:
+ const Expr *getOriginExpr() const { return OriginExpr; }
+ unsigned getIndex() const { return Index; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const override;
+
+ void dumpToStream(raw_ostream &os) const override;
+
+ QualType getValueType() const override;
+ const ParmVarDecl *getDecl() const override;
+
+ bool canPrintPrettyAsExpr() const override;
+ void printPrettyAsExpr(raw_ostream &os) const override;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == ParamVarRegionKind;
}
};
@@ -1005,24 +1054,28 @@ private:
class FieldRegion : public DeclRegion {
friend class MemRegionManager;
- FieldRegion(const FieldDecl *fd, const SubRegion* sReg)
- : DeclRegion(fd, sReg, FieldRegionKind) {}
+ const FieldDecl *FD;
+
+ FieldRegion(const FieldDecl *fd, const SubRegion *sReg)
+ : DeclRegion(sReg, FieldRegionKind), FD(fd) {}
- static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FieldDecl *FD,
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID, const FieldDecl *FD,
const MemRegion* superRegion) {
- DeclRegion::ProfileRegion(ID, FD, superRegion, FieldRegionKind);
+ ID.AddInteger(static_cast<unsigned>(FieldRegionKind));
+ ID.AddPointer(FD);
+ ID.AddPointer(superRegion);
}
public:
- const FieldDecl *getDecl() const { return cast<FieldDecl>(D); }
+ const FieldDecl *getDecl() const override { return FD; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override;
QualType getValueType() const override {
// FIXME: We can cache this if needed.
return getDecl()->getType();
}
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const override;
-
void dumpToStream(raw_ostream &os) const override;
bool canPrintPretty() const override;
@@ -1038,13 +1091,18 @@ public:
class ObjCIvarRegion : public DeclRegion {
friend class MemRegionManager;
+ const ObjCIvarDecl *IVD;
+
ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg);
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const ObjCIvarDecl *ivd,
const MemRegion* superRegion);
public:
- const ObjCIvarDecl *getDecl() const;
+ const ObjCIvarDecl *getDecl() const override;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const override;
+
QualType getValueType() const override;
bool canPrintPrettyAsExpr() const override;
@@ -1242,8 +1300,9 @@ const RegionTy* MemRegion::castAs() const {
//===----------------------------------------------------------------------===//
class MemRegionManager {
- ASTContext &C;
+ ASTContext &Ctx;
llvm::BumpPtrAllocator& A;
+
llvm::FoldingSet<MemRegion> Regions;
GlobalInternalSpaceRegion *InternalGlobals = nullptr;
@@ -1262,13 +1321,18 @@ class MemRegionManager {
CodeSpaceRegion *code = nullptr;
public:
- MemRegionManager(ASTContext &c, llvm::BumpPtrAllocator &a) : C(c), A(a) {}
+ MemRegionManager(ASTContext &c, llvm::BumpPtrAllocator &a) : Ctx(c), A(a) {}
~MemRegionManager();
- ASTContext &getContext() { return C; }
+ ASTContext &getContext() { return Ctx; }
llvm::BumpPtrAllocator &getAllocator() { return A; }
+ /// \returns The static size in bytes of the region \p MR.
+ /// \note The region \p MR must be a 'SubRegion'.
+ DefinedOrUnknownSVal getStaticSize(const MemRegion *MR,
+ SValBuilder &SVB) const;
+
/// getStackLocalsRegion - Retrieve the memory region associated with the
/// specified stack frame.
const StackLocalsSpaceRegion *
@@ -1322,11 +1386,18 @@ public:
/// getVarRegion - Retrieve or create the memory region associated with
/// a specified VarDecl and LocationContext.
- const VarRegion* getVarRegion(const VarDecl *D, const LocationContext *LC);
+ const VarRegion *getVarRegion(const VarDecl *VD, const LocationContext *LC);
/// getVarRegion - Retrieve or create the memory region associated with
- /// a specified VarDecl and super region.
- const VarRegion *getVarRegion(const VarDecl *D, const MemRegion *superR);
+ /// a specified VarDecl and super region.
+ const NonParamVarRegion *getNonParamVarRegion(const VarDecl *VD,
+ const MemRegion *superR);
+
+ /// getParamVarRegion - Retrieve or create the memory region
+ /// associated with a specified CallExpr, Index and LocationContext.
+ const ParamVarRegion *getParamVarRegion(const Expr *OriginExpr,
+ unsigned Index,
+ const LocationContext *LC);
/// getElementRegion - Retrieve the memory region associated with the
/// associated element type, index, and super region.
@@ -1434,7 +1505,7 @@ private:
//===----------------------------------------------------------------------===//
inline ASTContext &MemRegion::getContext() const {
- return getMemRegionManager()->getContext();
+ return getMemRegionManager().getContext();
}
//===----------------------------------------------------------------------===//
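To illustrate the reworked region hierarchy and the replacement of getExtent() with a manager-side query, here is a hedged sketch; MRMgr (a MemRegionManager), SVB (an SValBuilder), CE (the call expression) and LCtx are assumed to be in scope and are not names from the patch:

  // Parameters of the current, non-top stack frame now get their own kind.
  const ParamVarRegion *PVR = MRMgr.getParamVarRegion(CE, /*Index=*/0, LCtx);
  const ParmVarDecl *PVD = PVR->getDecl();   // the callee's first parameter
  QualType T = PVR->getValueType();

  // Extents are now asked of the manager rather than of the region itself.
  DefinedOrUnknownSVal Size = MRMgr.getStaticSize(PVR, SVB);
  (void)PVD; (void)T; (void)Size;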
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index bdd12a3ffe33..9a34639e2707 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -39,7 +39,7 @@ class CallEvent;
class CallEventManager;
typedef std::unique_ptr<ConstraintManager>(*ConstraintManagerCreator)(
- ProgramStateManager &, SubEngine *);
+ ProgramStateManager &, ExprEngine *);
typedef std::unique_ptr<StoreManager>(*StoreManagerCreator)(
ProgramStateManager &);
@@ -298,6 +298,9 @@ public:
LLVM_NODISCARD ProgramStateRef enterStackFrame(
const CallEvent &Call, const StackFrameContext *CalleeCtx) const;
+ /// Return the value of 'self' if available in the given context.
+ SVal getSelfSVal(const LocationContext *LC) const;
+
/// Get the lvalue for a base class object reference.
Loc getLValue(const CXXBaseSpecifier &BaseSpec, const SubRegion *Super) const;
@@ -305,6 +308,10 @@ public:
Loc getLValue(const CXXRecordDecl *BaseClass, const SubRegion *Super,
bool IsVirtual) const;
+ /// Get the lvalue for a parameter.
+ Loc getLValue(const Expr *Call, unsigned Index,
+ const LocationContext *LC) const;
+
/// Get the lvalue for a variable reference.
Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
@@ -457,8 +464,8 @@ class ProgramStateManager {
friend class ProgramState;
friend void ProgramStateRelease(const ProgramState *state);
private:
- /// Eng - The SubEngine that owns this state manager.
- SubEngine *Eng; /* Can be null. */
+ /// Eng - The ExprEngine that owns this state manager.
+ ExprEngine *Eng; /* Can be null. */
EnvironmentManager EnvMgr;
std::unique_ptr<StoreManager> StoreMgr;
@@ -490,7 +497,7 @@ public:
StoreManagerCreator CreateStoreManager,
ConstraintManagerCreator CreateConstraintManager,
llvm::BumpPtrAllocator& alloc,
- SubEngine *subeng);
+ ExprEngine *expreng);
~ProgramStateManager();
@@ -531,7 +538,7 @@ public:
StoreManager &getStoreManager() { return *StoreMgr; }
ConstraintManager &getConstraintManager() { return *ConstraintMgr; }
- SubEngine &getOwningEngine() { return *Eng; }
+ ExprEngine &getOwningEngine() { return *Eng; }
ProgramStateRef
removeDeadBindingsFromEnvironmentAndStore(ProgramStateRef St,
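A short usage sketch for the two ProgramState helpers added above; State, CE and LCtx are assumed to be available, for example inside a checker callback, and the argument index is arbitrary:

  // Lvalue and value of the parameter bound to the call's second argument.
  Loc ParamLoc = State->getLValue(CE, /*Index=*/1, LCtx);
  SVal ParamVal = State->getSVal(ParamLoc);

  // Value of 'self' in an Objective-C method context, if available.
  SVal Self = State->getSelfSVal(LCtx);
  (void)ParamVal; (void)Self;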
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
index a9ca3451d8f3..a42eebd7d4e8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
@@ -30,6 +30,10 @@ public:
: std::pair<const llvm::APSInt *, const llvm::APSInt *>(&from, &to) {
assert(from <= to);
}
+
+ Range(const llvm::APSInt &point)
+ : std::pair<const llvm::APSInt *, const llvm::APSInt *>(&point, &point) {}
+
bool Includes(const llvm::APSInt &v) const {
return *first <= v && v <= *second;
}
@@ -89,6 +93,9 @@ public:
RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
: ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+ /// Construct a new RangeSet representing the given point as a range.
+ RangeSet(Factory &F, const llvm::APSInt &point) : RangeSet(F, point, point) {}
+
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
@@ -100,14 +107,17 @@ public:
return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : nullptr;
}
+ /// Get the minimal value covered by the ranges in the set.
+ const llvm::APSInt &getMinValue() const;
+ /// Get the maximal value covered by the ranges in the set.
+ const llvm::APSInt &getMaxValue() const;
+
private:
void IntersectInRange(BasicValueFactory &BV, Factory &F,
const llvm::APSInt &Lower, const llvm::APSInt &Upper,
PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
PrimRangeSet::iterator &e) const;
- const llvm::APSInt &getMinValue() const;
-
bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const;
public:
@@ -124,7 +134,6 @@ public:
}
};
-
class ConstraintRange {};
using ConstraintRangeTy = llvm::ImmutableMap<SymbolRef, RangeSet>;
@@ -137,8 +146,8 @@ struct ProgramStateTrait<ConstraintRange>
class RangedConstraintManager : public SimpleConstraintManager {
public:
- RangedConstraintManager(SubEngine *SE, SValBuilder &SB)
- : SimpleConstraintManager(SE, SB) {}
+ RangedConstraintManager(ExprEngine *EE, SValBuilder &SB)
+ : SimpleConstraintManager(EE, SB) {}
~RangedConstraintManager() override;
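The single-point constructors let a concrete value stand in wherever a range is expected. A sketch, assuming F is a RangeSet::Factory and BV a BasicValueFactory whose getValue(uint64_t, BitWidth, isUnsigned) overload owns the resulting APSInt (the pointers stored in a Range must outlive it):

  const llvm::APSInt &Five = BV.getValue(5, /*BitWidth=*/32, /*isUnsigned=*/false);
  Range PointRange(Five);        // represents exactly [5, 5]
  RangeSet PointSet(F, Five);    // the same point as a one-element set
  assert(PointSet.getMinValue() == Five && PointSet.getMaxValue() == Five);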
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
index 3c52c2bc7142..44ab31fc9f2e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
@@ -73,9 +73,13 @@ ABSTRACT_REGION(SubRegion, MemRegion)
ABSTRACT_REGION(DeclRegion, TypedValueRegion)
REGION(FieldRegion, DeclRegion)
REGION(ObjCIvarRegion, DeclRegion)
- REGION(VarRegion, DeclRegion)
- REGION_RANGE(DECL_REGIONS, FieldRegionKind,
- VarRegionKind)
+ ABSTRACT_REGION(VarRegion, DeclRegion)
+ REGION(NonParamVarRegion, VarRegion)
+ REGION(ParamVarRegion, VarRegion)
+ REGION_RANGE(VAR_REGIONS, NonParamVarRegionKind,
+ ParamVarRegionKind)
+ REGION_RANGE(DECL_REGIONS, FieldRegionKind,
+ ParamVarRegionKind)
REGION(ElementRegion, TypedValueRegion)
REGION(ObjCStringRegion, TypedValueRegion)
REGION(StringRegion, TypedValueRegion)
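Because VarRegion is now an abstract range rather than a leaf kind, existing isa<VarRegion> checks keep matching both new concrete kinds; a small dispatch sketch (inside clang::ento, with R being some const MemRegion * obtained elsewhere):

  if (const auto *VR = dyn_cast<VarRegion>(R)) {
    if (const auto *PVR = dyn_cast<ParamVarRegion>(VR))
      (void)PVR->getIndex();                        // parameter of the current frame
    else
      (void)cast<NonParamVarRegion>(VR)->getDecl(); // any other variable
  }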
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
index 1712501b13bd..6a0f5f10874e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SMTCONSTRAINTMANAGER_H
#include "clang/Basic/JsonSupport.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
@@ -30,8 +31,9 @@ class SMTConstraintManager : public clang::ento::SimpleConstraintManager {
mutable llvm::SMTSolverRef Solver = llvm::CreateZ3Solver();
public:
- SMTConstraintManager(clang::ento::SubEngine *SE, clang::ento::SValBuilder &SB)
- : SimpleConstraintManager(SE, SB) {}
+ SMTConstraintManager(clang::ento::ExprEngine *EE,
+ clang::ento::SValBuilder &SB)
+ : SimpleConstraintManager(EE, SB) {}
virtual ~SMTConstraintManager() = default;
//===------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
index 6bf5e94afdbb..87e927f5b480 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
@@ -21,12 +21,12 @@ namespace clang {
namespace ento {
class SimpleConstraintManager : public ConstraintManager {
- SubEngine *SU;
+ ExprEngine *EE;
SValBuilder &SVB;
public:
- SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB)
- : SU(subengine), SVB(SB) {}
+ SimpleConstraintManager(ExprEngine *exprengine, SValBuilder &SB)
+ : EE(exprengine), SVB(SB) {}
~SimpleConstraintManager() override;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index cbff29953944..c3b590e4784e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -148,14 +148,6 @@ public:
virtual SVal getLValueElement(QualType elementType, NonLoc offset, SVal Base);
- // FIXME: This should soon be eliminated altogether; clients should deal with
- // region extents directly.
- virtual DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
- const MemRegion *region,
- QualType EleTy) {
- return UnknownVal();
- }
-
/// ArrayToPointer - Used by ExprEngine::VistCast to handle implicit
/// conversions between arrays and pointers.
virtual SVal ArrayToPointer(Loc Array, QualType ElementTy) = 0;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
deleted file mode 100644
index a7f3c28d4373..000000000000
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
+++ /dev/null
@@ -1,178 +0,0 @@
-//== SubEngine.h - Interface of the subengine of CoreEngine --------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interface of a subengine of the CoreEngine.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SUBENGINE_H
-#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SUBENGINE_H
-
-#include "clang/Analysis/ProgramPoint.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-
-namespace clang {
-
-class CFGBlock;
-class CFGElement;
-class LocationContext;
-class Stmt;
-
-namespace cross_tu {
-class CrossTranslationUnitContext;
-}
-
-namespace ento {
-
-struct NodeBuilderContext;
-class AnalysisManager;
-class ExplodedNodeSet;
-class ExplodedNode;
-class ProgramState;
-class ProgramStateManager;
-class BlockCounter;
-class BranchNodeBuilder;
-class IndirectGotoNodeBuilder;
-class SwitchNodeBuilder;
-class EndOfFunctionNodeBuilder;
-class NodeBuilderWithSinks;
-class MemRegion;
-
-class SubEngine {
- virtual void anchor();
-public:
- virtual ~SubEngine() {}
-
- virtual ProgramStateRef getInitialState(const LocationContext *InitLoc) = 0;
-
- virtual AnalysisManager &getAnalysisManager() = 0;
-
- virtual cross_tu::CrossTranslationUnitContext *
- getCrossTranslationUnitContext() = 0;
-
- virtual ProgramStateManager &getStateManager() = 0;
-
- /// Called by CoreEngine. Used to generate new successor
- /// nodes by processing the 'effects' of a block-level statement.
- virtual void processCFGElement(const CFGElement E, ExplodedNode* Pred,
- unsigned StmtIdx, NodeBuilderContext *Ctx)=0;
-
- /// Called by CoreEngine when it starts processing a CFGBlock. The
- /// SubEngine is expected to populate dstNodes with new nodes representing
- /// updated analysis state, or generate no nodes at all if it doesn't.
- virtual void processCFGBlockEntrance(const BlockEdge &L,
- NodeBuilderWithSinks &nodeBuilder,
- ExplodedNode *Pred) = 0;
-
- /// Called by CoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a branch condition.
- virtual void processBranch(const Stmt *Condition,
- NodeBuilderContext& BuilderCtx,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst,
- const CFGBlock *DstT,
- const CFGBlock *DstF) = 0;
-
- /// Called by CoreEngine.
- /// Used to generate successor nodes for temporary destructors depending
- /// on whether the corresponding constructor was visited.
- virtual void processCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
- NodeBuilderContext &BldCtx,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst,
- const CFGBlock *DstT,
- const CFGBlock *DstF) = 0;
-
- /// Called by CoreEngine. Used to processing branching behavior
- /// at static initializers.
- virtual void processStaticInitializer(const DeclStmt *DS,
- NodeBuilderContext& BuilderCtx,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst,
- const CFGBlock *DstT,
- const CFGBlock *DstF) = 0;
-
- /// Called by CoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a computed goto jump.
- virtual void processIndirectGoto(IndirectGotoNodeBuilder& builder) = 0;
-
- /// Called by CoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a switch statement.
- virtual void processSwitch(SwitchNodeBuilder& builder) = 0;
-
- /// Called by CoreEngine. Used to notify checkers that processing a
- /// function has begun. Called for both inlined and and top-level functions.
- virtual void processBeginOfFunction(NodeBuilderContext &BC,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst,
- const BlockEdge &L) = 0;
-
- /// Called by CoreEngine. Used to notify checkers that processing a
- /// function has ended. Called for both inlined and and top-level functions.
- virtual void processEndOfFunction(NodeBuilderContext& BC,
- ExplodedNode *Pred,
- const ReturnStmt *RS = nullptr) = 0;
-
- // Generate the entry node of the callee.
- virtual void processCallEnter(NodeBuilderContext& BC, CallEnter CE,
- ExplodedNode *Pred) = 0;
-
- // Generate the first post callsite node.
- virtual void processCallExit(ExplodedNode *Pred) = 0;
-
- /// Called by ConstraintManager. Used to call checker-specific
- /// logic for handling assumptions on symbolic values.
- virtual ProgramStateRef processAssume(ProgramStateRef state,
- SVal cond, bool assumption) = 0;
-
- /// processRegionChanges - Called by ProgramStateManager whenever a change is
- /// made to the store. Used to update checkers that track region values.
- virtual ProgramStateRef
- processRegionChanges(ProgramStateRef state,
- const InvalidatedSymbols *invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext *LCtx,
- const CallEvent *Call) = 0;
-
-
- inline ProgramStateRef
- processRegionChange(ProgramStateRef state,
- const MemRegion* MR,
- const LocationContext *LCtx) {
- return processRegionChanges(state, nullptr, MR, MR, LCtx, nullptr);
- }
-
- virtual ProgramStateRef processPointerEscapedOnBind(
- ProgramStateRef State, ArrayRef<std::pair<SVal, SVal>> LocAndVals,
- const LocationContext *LCtx, PointerEscapeKind Kind,
- const CallEvent *Call) = 0;
-
- virtual ProgramStateRef
- notifyCheckersOfPointerEscape(ProgramStateRef State,
- const InvalidatedSymbols *Invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- const CallEvent *Call,
- RegionAndSymbolInvalidationTraits &HTraits) = 0;
-
- /// printJson - Called by ProgramStateManager to print checker-specific data.
- virtual void printJson(raw_ostream &Out, ProgramStateRef State,
- const LocationContext *LCtx, const char *NL,
- unsigned int Space, bool IsDot) const = 0;
-
- /// Called by CoreEngine when the analysis worklist is either empty or the
- // maximum number of analysis steps have been reached.
- virtual void processEndWorklist() = 0;
-};
-
-} // end GR namespace
-
-} // end clang namespace
-
-#endif
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index d212e23da6fc..390ced8c29f8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -326,140 +326,88 @@ public:
Kind k = SE->getKind();
return k >= BEGIN_BINARYSYMEXPRS && k <= END_BINARYSYMEXPRS;
}
-};
-
-/// Represents a symbolic expression like 'x' + 3.
-class SymIntExpr : public BinarySymExpr {
- const SymExpr *LHS;
- const llvm::APSInt& RHS;
-public:
- SymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
- const llvm::APSInt &rhs, QualType t)
- : BinarySymExpr(SymIntExprKind, op, t), LHS(lhs), RHS(rhs) {
- assert(lhs);
+protected:
+ static unsigned computeOperandComplexity(const SymExpr *Value) {
+ return Value->computeComplexity();
}
-
- void dumpToStream(raw_ostream &os) const override;
-
- const SymExpr *getLHS() const { return LHS; }
- const llvm::APSInt &getRHS() const { return RHS; }
-
- unsigned computeComplexity() const override {
- if (Complexity == 0)
- Complexity = 1 + LHS->computeComplexity();
- return Complexity;
+ static unsigned computeOperandComplexity(const llvm::APSInt &Value) {
+ return 1;
}
- static void Profile(llvm::FoldingSetNodeID& ID, const SymExpr *lhs,
- BinaryOperator::Opcode op, const llvm::APSInt& rhs,
- QualType t) {
- ID.AddInteger((unsigned) SymIntExprKind);
- ID.AddPointer(lhs);
- ID.AddInteger(op);
- ID.AddPointer(&rhs);
- ID.Add(t);
+ static const llvm::APSInt *getPointer(const llvm::APSInt &Value) {
+ return &Value;
}
+ static const SymExpr *getPointer(const SymExpr *Value) { return Value; }
- void Profile(llvm::FoldingSetNodeID& ID) override {
- Profile(ID, LHS, getOpcode(), RHS, getType());
- }
-
- // Implement isa<T> support.
- static bool classof(const SymExpr *SE) {
- return SE->getKind() == SymIntExprKind;
- }
+ static void dumpToStreamImpl(raw_ostream &os, const SymExpr *Value);
+ static void dumpToStreamImpl(raw_ostream &os, const llvm::APSInt &Value);
+ static void dumpToStreamImpl(raw_ostream &os, BinaryOperator::Opcode op);
};
-/// Represents a symbolic expression like 3 - 'x'.
-class IntSymExpr : public BinarySymExpr {
- const llvm::APSInt& LHS;
- const SymExpr *RHS;
+/// Template implementation for all binary symbolic expressions
+template <class LHSTYPE, class RHSTYPE, SymExpr::Kind ClassKind>
+class BinarySymExprImpl : public BinarySymExpr {
+ LHSTYPE LHS;
+ RHSTYPE RHS;
public:
- IntSymExpr(const llvm::APSInt &lhs, BinaryOperator::Opcode op,
- const SymExpr *rhs, QualType t)
- : BinarySymExpr(IntSymExprKind, op, t), LHS(lhs), RHS(rhs) {
- assert(rhs);
+ BinarySymExprImpl(LHSTYPE lhs, BinaryOperator::Opcode op, RHSTYPE rhs,
+ QualType t)
+ : BinarySymExpr(ClassKind, op, t), LHS(lhs), RHS(rhs) {
+ assert(getPointer(lhs));
+ assert(getPointer(rhs));
}
- void dumpToStream(raw_ostream &os) const override;
+ void dumpToStream(raw_ostream &os) const override {
+ dumpToStreamImpl(os, LHS);
+ dumpToStreamImpl(os, getOpcode());
+ dumpToStreamImpl(os, RHS);
+ }
- const SymExpr *getRHS() const { return RHS; }
- const llvm::APSInt &getLHS() const { return LHS; }
+ LHSTYPE getLHS() const { return LHS; }
+ RHSTYPE getRHS() const { return RHS; }
unsigned computeComplexity() const override {
if (Complexity == 0)
- Complexity = 1 + RHS->computeComplexity();
+ Complexity =
+ computeOperandComplexity(RHS) + computeOperandComplexity(LHS);
return Complexity;
}
- static void Profile(llvm::FoldingSetNodeID& ID, const llvm::APSInt& lhs,
- BinaryOperator::Opcode op, const SymExpr *rhs,
- QualType t) {
- ID.AddInteger((unsigned) IntSymExprKind);
- ID.AddPointer(&lhs);
+ static void Profile(llvm::FoldingSetNodeID &ID, LHSTYPE lhs,
+ BinaryOperator::Opcode op, RHSTYPE rhs, QualType t) {
+ ID.AddInteger((unsigned)ClassKind);
+ ID.AddPointer(getPointer(lhs));
ID.AddInteger(op);
- ID.AddPointer(rhs);
+ ID.AddPointer(getPointer(rhs));
ID.Add(t);
}
- void Profile(llvm::FoldingSetNodeID& ID) override {
+ void Profile(llvm::FoldingSetNodeID &ID) override {
Profile(ID, LHS, getOpcode(), RHS, getType());
}
// Implement isa<T> support.
- static bool classof(const SymExpr *SE) {
- return SE->getKind() == IntSymExprKind;
- }
+ static bool classof(const SymExpr *SE) { return SE->getKind() == ClassKind; }
};
-/// Represents a symbolic expression like 'x' + 'y'.
-class SymSymExpr : public BinarySymExpr {
- const SymExpr *LHS;
- const SymExpr *RHS;
-
-public:
- SymSymExpr(const SymExpr *lhs, BinaryOperator::Opcode op, const SymExpr *rhs,
- QualType t)
- : BinarySymExpr(SymSymExprKind, op, t), LHS(lhs), RHS(rhs) {
- assert(lhs);
- assert(rhs);
- }
-
- const SymExpr *getLHS() const { return LHS; }
- const SymExpr *getRHS() const { return RHS; }
-
- void dumpToStream(raw_ostream &os) const override;
-
- unsigned computeComplexity() const override {
- if (Complexity == 0)
- Complexity = RHS->computeComplexity() + LHS->computeComplexity();
- return Complexity;
- }
-
- static void Profile(llvm::FoldingSetNodeID& ID, const SymExpr *lhs,
- BinaryOperator::Opcode op, const SymExpr *rhs, QualType t) {
- ID.AddInteger((unsigned) SymSymExprKind);
- ID.AddPointer(lhs);
- ID.AddInteger(op);
- ID.AddPointer(rhs);
- ID.Add(t);
- }
+/// Represents a symbolic expression like 'x' + 3.
+using SymIntExpr = BinarySymExprImpl<const SymExpr *, const llvm::APSInt &,
+ SymExpr::Kind::SymIntExprKind>;
- void Profile(llvm::FoldingSetNodeID& ID) override {
- Profile(ID, LHS, getOpcode(), RHS, getType());
- }
+/// Represents a symbolic expression like 3 - 'x'.
+using IntSymExpr = BinarySymExprImpl<const llvm::APSInt &, const SymExpr *,
+ SymExpr::Kind::IntSymExprKind>;
- // Implement isa<T> support.
- static bool classof(const SymExpr *SE) {
- return SE->getKind() == SymSymExprKind;
- }
-};
+/// Represents a symbolic expression like 'x' + 'y'.
+using SymSymExpr = BinarySymExprImpl<const SymExpr *, const SymExpr *,
+ SymExpr::Kind::SymSymExprKind>;
class SymbolManager {
using DataSetTy = llvm::FoldingSet<SymExpr>;
- using SymbolDependTy = llvm::DenseMap<SymbolRef, SymbolRefSmallVectorTy *>;
+ using SymbolDependTy =
+ llvm::DenseMap<SymbolRef, std::unique_ptr<SymbolRefSmallVectorTy>>;
DataSetTy DataSet;
@@ -476,7 +424,6 @@ public:
SymbolManager(ASTContext &ctx, BasicValueFactory &bv,
llvm::BumpPtrAllocator& bpalloc)
: SymbolDependencies(16), BPAlloc(bpalloc), BV(bv), Ctx(ctx) {}
- ~SymbolManager();
static bool canSymbolicate(QualType T);
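Since SymIntExpr, IntSymExpr and SymSymExpr are now instantiations of one template, code that pattern-matches on them is unchanged; a brief sketch, with Sym being some const SymExpr * in scope:

  if (const auto *SIE = dyn_cast<SymIntExpr>(Sym)) {
    const SymExpr *L = SIE->getLHS();        // symbolic operand
    const llvm::APSInt &R = SIE->getRHS();   // constant operand
    // Complexity is computed uniformly now: 1 for the constant side plus the
    // complexity of the symbolic side.
    unsigned C = SIE->computeComplexity();
    (void)L; (void)R; (void)C;
  }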
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
index 2d24e6a9586b..bcc29a60ad70 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -55,7 +55,7 @@ public:
std::unique_ptr<AnalysisASTConsumer>
CreateAnalysisConsumer(CompilerInstance &CI);
-} // end GR namespace
+} // namespace ento
} // end clang namespace
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h
new file mode 100644
index 000000000000..a30c241e1350
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h
@@ -0,0 +1,30 @@
+//===-- AnalyzerHelpFlags.h - Query functions for --help flags --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_FRONTEND_ANALYZERHELPFLAGS_H
+#define LLVM_CLANG_STATICANALYZER_FRONTEND_ANALYZERHELPFLAGS_H
+
+namespace llvm {
+class raw_ostream;
+} // namespace llvm
+
+namespace clang {
+
+class CompilerInstance;
+
+namespace ento {
+
+void printCheckerHelp(llvm::raw_ostream &OS, CompilerInstance &CI);
+void printEnabledCheckerList(llvm::raw_ostream &OS, CompilerInstance &CI);
+void printAnalyzerConfigList(llvm::raw_ostream &OS);
+void printCheckerConfigList(llvm::raw_ostream &OS, CompilerInstance &CI);
+
+} // namespace ento
+} // namespace clang
+
+#endif
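A hedged sketch of how a frontend might drive these printers; the wrapper function is hypothetical, and the ShowCheckerHelp/ShowEnabledCheckerList flags are assumed to be the existing AnalyzerOptions fields:

  #include "clang/Frontend/CompilerInstance.h"
  #include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
  #include "llvm/Support/raw_ostream.h"

  // Hypothetical helper: emit whichever listings were requested on the
  // command line and report whether anything was printed.
  static bool printAnalyzerHelp(clang::CompilerInstance &CI) {
    const auto &Opts = *CI.getAnalyzerOpts();
    bool Printed = false;
    if (Opts.ShowCheckerHelp) {
      clang::ento::printCheckerHelp(llvm::outs(), CI);
      Printed = true;
    }
    if (Opts.ShowEnabledCheckerList) {
      clang::ento::printEnabledCheckerList(llvm::outs(), CI);
      Printed = true;
    }
    return Printed;
  }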
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
deleted file mode 100644
index 52a534499002..000000000000
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- CheckerRegistration.h - Checker Registration Function ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_STATICANALYZER_FRONTEND_CHECKERREGISTRATION_H
-#define LLVM_CLANG_STATICANALYZER_FRONTEND_CHECKERREGISTRATION_H
-
-#include "clang/AST/ASTContext.h"
-#include "clang/Basic/LLVM.h"
-#include <functional>
-#include <memory>
-#include <string>
-
-namespace clang {
- class AnalyzerOptions;
- class LangOptions;
- class DiagnosticsEngine;
-
-namespace ento {
- class CheckerManager;
- class CheckerRegistry;
-
- std::unique_ptr<CheckerManager> createCheckerManager(
- ASTContext &context,
- AnalyzerOptions &opts,
- ArrayRef<std::string> plugins,
- ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns,
- DiagnosticsEngine &diags);
-
-} // end ento namespace
-
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistry.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistry.h
index bc258160ada4..43dbfb158515 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistry.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistry.h
@@ -5,16 +5,22 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+//
+// Contains the logic for parsing the TableGen file Checkers.td and for parsing
+// the specific invocation of the analyzer (which checkers/packages are
+// enabled, the values of their options, etc). This lives in the frontend
+// library because the checker registration functions are called from here yet
+// are defined in the dependent library libStaticAnalyzerCheckers, while the
+// actual data structure that holds the parsed information is in the Core
+// library.
+//
+//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRY_H
-#define LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRY_H
+#ifndef LLVM_CLANG_STATICANALYZER_FRONTEND_CHECKERREGISTRY_H
+#define LLVM_CLANG_STATICANALYZER_FRONTEND_CHECKERREGISTRY_H
#include "clang/Basic/LLVM.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "llvm/ADT/StringMap.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
#include "llvm/ADT/StringRef.h"
-#include <cstddef>
-#include <vector>
// FIXME: move this information to an HTML file in docs/.
// At the very least, a checker plugin is a dynamic library that exports
@@ -69,10 +75,11 @@ namespace clang {
class AnalyzerOptions;
class DiagnosticsEngine;
-class LangOptions;
namespace ento {
+class CheckerManager;
+
/// Manages a set of available checkers for running a static analysis.
/// The checkers are organized into packages by full name, where including
/// a package will recursively include all subpackages and checkers within it.
@@ -81,161 +88,60 @@ namespace ento {
/// "core.builtin", or the full name "core.builtin.NoReturnFunctionChecker".
class CheckerRegistry {
public:
- CheckerRegistry(ArrayRef<std::string> plugins, DiagnosticsEngine &diags,
- AnalyzerOptions &AnOpts, const LangOptions &LangOpts,
+ CheckerRegistry(CheckerRegistryData &Data, ArrayRef<std::string> Plugins,
+ DiagnosticsEngine &Diags, AnalyzerOptions &AnOpts,
ArrayRef<std::function<void(CheckerRegistry &)>>
- checkerRegistrationFns = {});
-
- /// Initialization functions perform any necessary setup for a checker.
- /// They should include a call to CheckerManager::registerChecker.
- using InitializationFunction = void (*)(CheckerManager &);
- using ShouldRegisterFunction = bool (*)(const LangOptions &);
-
- /// Specifies a command line option. It may either belong to a checker or a
- /// package.
- struct CmdLineOption {
- StringRef OptionType;
- StringRef OptionName;
- StringRef DefaultValStr;
- StringRef Description;
- StringRef DevelopmentStatus;
- bool IsHidden;
-
- CmdLineOption(StringRef OptionType, StringRef OptionName,
- StringRef DefaultValStr, StringRef Description,
- StringRef DevelopmentStatus, bool IsHidden)
- : OptionType(OptionType), OptionName(OptionName),
- DefaultValStr(DefaultValStr), Description(Description),
- DevelopmentStatus(DevelopmentStatus), IsHidden(IsHidden) {
-
- assert((OptionType == "bool" || OptionType == "string" ||
- OptionType == "int") &&
- "Unknown command line option type!");
-
- assert((OptionType != "bool" ||
- (DefaultValStr == "true" || DefaultValStr == "false")) &&
- "Invalid value for boolean command line option! Maybe incorrect "
- "parameters to the addCheckerOption or addPackageOption method?");
-
- int Tmp;
- assert((OptionType != "int" || !DefaultValStr.getAsInteger(0, Tmp)) &&
- "Invalid value for integer command line option! Maybe incorrect "
- "parameters to the addCheckerOption or addPackageOption method?");
- (void)Tmp;
-
- assert((DevelopmentStatus == "alpha" || DevelopmentStatus == "beta" ||
- DevelopmentStatus == "released") &&
- "Invalid development status!");
- }
- };
-
- using CmdLineOptionList = llvm::SmallVector<CmdLineOption, 0>;
-
- struct CheckerInfo;
-
- using CheckerInfoList = std::vector<CheckerInfo>;
- using CheckerInfoListRange = llvm::iterator_range<CheckerInfoList::iterator>;
- using ConstCheckerInfoList = llvm::SmallVector<const CheckerInfo *, 0>;
- using CheckerInfoSet = llvm::SetVector<const CheckerInfo *>;
-
- /// Specifies a checker. Note that this isn't what we call a checker object,
- /// it merely contains everything required to create one.
- struct CheckerInfo {
- enum class StateFromCmdLine {
- // This checker wasn't explicitly enabled or disabled.
- State_Unspecified,
- // This checker was explicitly disabled.
- State_Disabled,
- // This checker was explicitly enabled.
- State_Enabled
- };
-
- InitializationFunction Initialize = nullptr;
- ShouldRegisterFunction ShouldRegister = nullptr;
- StringRef FullName;
- StringRef Desc;
- StringRef DocumentationUri;
- CmdLineOptionList CmdLineOptions;
- bool IsHidden = false;
- StateFromCmdLine State = StateFromCmdLine::State_Unspecified;
-
- ConstCheckerInfoList Dependencies;
+ CheckerRegistrationFns = {});
- bool isEnabled(const LangOptions &LO) const {
- return State == StateFromCmdLine::State_Enabled && ShouldRegister(LO);
- }
+ /// Collects all enabled checkers in the field EnabledCheckers. It preserves
+ /// the order of insertion, as dependencies have to be enabled before the
+ /// checkers that depend on them.
+ void initializeRegistry(const CheckerManager &Mgr);
- bool isDisabled(const LangOptions &LO) const {
- return State == StateFromCmdLine::State_Disabled && ShouldRegister(LO);
- }
-
- // Since each checker must have a different full name, we can identify
- // CheckerInfo objects by them.
- bool operator==(const CheckerInfo &Rhs) const {
- return FullName == Rhs.FullName;
- }
-
- CheckerInfo(InitializationFunction Fn, ShouldRegisterFunction sfn,
- StringRef Name, StringRef Desc, StringRef DocsUri,
- bool IsHidden)
- : Initialize(Fn), ShouldRegister(sfn), FullName(Name), Desc(Desc),
- DocumentationUri(DocsUri), IsHidden(IsHidden) {}
-
- // Used for lower_bound.
- explicit CheckerInfo(StringRef FullName) : FullName(FullName) {}
- };
-
- using StateFromCmdLine = CheckerInfo::StateFromCmdLine;
-
- /// Specifies a package. Each package option is implicitly an option for all
- /// checkers within the package.
- struct PackageInfo {
- StringRef FullName;
- CmdLineOptionList CmdLineOptions;
-
- // Since each package must have a different full name, we can identify
- // CheckerInfo objects by them.
- bool operator==(const PackageInfo &Rhs) const {
- return FullName == Rhs.FullName;
- }
-
- explicit PackageInfo(StringRef FullName) : FullName(FullName) {}
- };
-
- using PackageInfoList = llvm::SmallVector<PackageInfo, 0>;
private:
- template <typename T> static void initializeManager(CheckerManager &mgr) {
- mgr.registerChecker<T>();
+ /// Default initialization function for checkers -- since CheckerManager
+ /// includes this header, we need to make it a template parameter, and since
+ /// the checker must be a template parameter as well, we can't put this in the
+ /// cpp file.
+ template <typename MGR, typename T> static void initializeManager(MGR &mgr) {
+ mgr.template registerChecker<T>();
}
- template <typename T> static bool returnTrue(const LangOptions &LO) {
+ template <typename T> static bool returnTrue(const CheckerManager &mgr) {
return true;
}
public:
/// Adds a checker to the registry. Use this non-templated overload when your
/// checker requires custom initialization.
- void addChecker(InitializationFunction Fn, ShouldRegisterFunction sfn,
+ void addChecker(RegisterCheckerFn Fn, ShouldRegisterFunction sfn,
StringRef FullName, StringRef Desc, StringRef DocsUri,
bool IsHidden);
/// Adds a checker to the registry. Use this templated overload when your
/// checker does not require any custom initialization.
+ /// This function isn't really needed and probably causes more headaches than
+ /// the tiny convenience that it provides, but external plugins might use it,
+ /// and there isn't a strong incentive to remove it.
template <class T>
void addChecker(StringRef FullName, StringRef Desc, StringRef DocsUri,
bool IsHidden = false) {
// Avoid MSVC's Compiler Error C2276:
// http://msdn.microsoft.com/en-us/library/850cstw1(v=VS.80).aspx
- addChecker(&CheckerRegistry::initializeManager<T>,
+ addChecker(&CheckerRegistry::initializeManager<CheckerManager, T>,
&CheckerRegistry::returnTrue<T>, FullName, Desc, DocsUri,
IsHidden);
}
- /// Makes the checker with the full name \p fullName depends on the checker
+ /// Makes the checker with the full name \p fullName depend on the checker
/// called \p dependency.
void addDependency(StringRef FullName, StringRef Dependency);
+ /// Makes the checker with the full name \p fullName weakly depend on the
+ /// checker called \p dependency.
+ void addWeakDependency(StringRef FullName, StringRef Dependency);
+
/// Registers an option to a given checker. A checker option will always have
/// the following format:
/// CheckerFullName:OptionName=Value
@@ -265,7 +171,7 @@ public:
void addPackageOption(StringRef OptionType, StringRef PackageFullName,
StringRef OptionName, StringRef DefaultValStr,
StringRef Description, StringRef DevelopmentStatus,
- bool IsHidden = false);
+ bool IsHidden = false);
// FIXME: This *really* should be added to the frontend flag descriptions.
/// Initializes a CheckerManager by calling the initialization functions for
@@ -277,49 +183,17 @@ public:
/// Check if every option corresponds to a specific checker or package.
void validateCheckerOptions() const;
- /// Prints the name and description of all checkers in this registry.
- /// This output is not intended to be machine-parseable.
- void printCheckerWithDescList(raw_ostream &Out,
- size_t MaxNameChars = 30) const;
- void printEnabledCheckerList(raw_ostream &Out) const;
- void printCheckerOptionList(raw_ostream &Out) const;
-
private:
- /// Collect all enabled checkers. The returned container preserves the order
- /// of insertion, as dependencies have to be enabled before the checkers that
- /// depend on them.
- CheckerInfoSet getEnabledCheckers() const;
-
- /// Return an iterator range of mutable CheckerInfos \p CmdLineArg applies to.
- /// For example, it'll return the checkers for the core package, if
- /// \p CmdLineArg is "core".
- CheckerInfoListRange getMutableCheckersForCmdLineArg(StringRef CmdLineArg);
-
- CheckerInfoList Checkers;
- PackageInfoList Packages;
- /// Used for couting how many checkers belong to a certain package in the
- /// \c Checkers field. For convenience purposes.
- llvm::StringMap<size_t> PackageSizes;
-
- /// Contains all (Dependendent checker, Dependency) pairs. We need this, as
- /// we'll resolve dependencies after all checkers were added first.
- llvm::SmallVector<std::pair<StringRef, StringRef>, 0> Dependencies;
- void resolveDependencies();
-
- /// Contains all (FullName, CmdLineOption) pairs. Similarly to dependencies,
- /// we only modify the actual CheckerInfo and PackageInfo objects once all
- /// of them have been added.
- llvm::SmallVector<std::pair<StringRef, CmdLineOption>, 0> PackageOptions;
- llvm::SmallVector<std::pair<StringRef, CmdLineOption>, 0> CheckerOptions;
-
+ template <bool IsWeak> void resolveDependencies();
void resolveCheckerAndPackageOptions();
+ CheckerRegistryData &Data;
+
DiagnosticsEngine &Diags;
AnalyzerOptions &AnOpts;
- const LangOptions &LangOpts;
};
} // namespace ento
} // namespace clang
-#endif // LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRY_H
+#endif // LLVM_CLANG_STATICANALYZER_FRONTEND_CHECKERREGISTRY_H
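For illustration, this is roughly what a plugin-style registration function looks like against the updated API; MyChecker, the package names and the option are made-up examples, and only the registry methods come from this header:

  // Hypothetical plugin entry point; MyChecker is assumed to be defined
  // elsewhere as a Checker<...> subclass.
  extern "C" void clang_registerCheckers(clang::ento::CheckerRegistry &Registry) {
    Registry.addChecker<MyChecker>("example.MyChecker",
                                   "Demonstrates the registration API",
                                   /*DocsUri=*/"");
    // Dependencies are enabled (and registered) before their dependents.
    Registry.addDependency("example.MyChecker", "core.builtin.NoReturnFunctionChecker");
    // Weak dependencies, added in this change, only influence ordering.
    Registry.addWeakDependency("example.MyChecker", "unix.Malloc");
    Registry.addCheckerOption("bool", "example.MyChecker", "Aggressive", "false",
                              "Enable the noisier checks", "alpha");
  }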
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
index 878b65a1b143..2b12330e4f2d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
@@ -20,6 +20,8 @@ class AnalyzerOptions;
namespace ento {
+class CheckerManager;
+
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
@@ -51,23 +53,7 @@ private:
llvm::StringMap<Stmt *> &Bodies;
};
-void printCheckerHelp(raw_ostream &OS,
- ArrayRef<std::string> plugins,
- AnalyzerOptions &opts,
- DiagnosticsEngine &diags,
- const LangOptions &LangOpts);
-void printEnabledCheckerList(raw_ostream &OS, ArrayRef<std::string> plugins,
- AnalyzerOptions &opts,
- DiagnosticsEngine &diags,
- const LangOptions &LangOpts);
-void printAnalyzerConfigList(raw_ostream &OS);
-void printCheckerConfigList(raw_ostream &OS, ArrayRef<std::string> plugins,
- AnalyzerOptions &opts,
- DiagnosticsEngine &diags,
- const LangOptions &LangOpts);
-
-} // end GR namespace
-
+} // namespace ento
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
new file mode 100644
index 000000000000..95979a2bfb80
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
@@ -0,0 +1,41 @@
+//===--- CommandLineArgs.h ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines language options for Clang unittests.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TESTING_COMMANDLINEARGS_H
+#define LLVM_CLANG_TESTING_COMMANDLINEARGS_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+enum TestLanguage {
+ Lang_C89,
+ Lang_C99,
+ Lang_CXX03,
+ Lang_CXX11,
+ Lang_CXX14,
+ Lang_CXX17,
+ Lang_CXX20,
+ Lang_OpenCL,
+ Lang_OBJCXX
+};
+
+std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang);
+
+StringRef getFilenameForTesting(TestLanguage Lang);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h b/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h
new file mode 100644
index 000000000000..eefa36dc2ebb
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h
@@ -0,0 +1,85 @@
+//===--- TestClangConfig.h ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TESTING_TESTCLANGCONFIG_H
+#define LLVM_CLANG_TESTING_TESTCLANGCONFIG_H
+
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// A Clang configuration for end-to-end tests that can be converted to
+/// command line arguments for the driver.
+///
+/// The configuration is represented as typed, named values, making it easier
+/// and safer to work with compared to an array of string command line flags.
+struct TestClangConfig {
+ TestLanguage Language;
+
+ /// The argument of the `-target` command line flag.
+ std::string Target;
+
+ bool isC() const { return Language == Lang_C89 || Language == Lang_C99; }
+
+ bool isC99OrLater() const { return Language == Lang_C99; }
+
+ bool isCXX() const {
+ return Language == Lang_CXX03 || Language == Lang_CXX11 ||
+ Language == Lang_CXX14 || Language == Lang_CXX17 ||
+ Language == Lang_CXX20;
+ }
+
+ bool isCXX11OrLater() const {
+ return Language == Lang_CXX11 || Language == Lang_CXX14 ||
+ Language == Lang_CXX17 || Language == Lang_CXX20;
+ }
+
+ bool isCXX14OrLater() const {
+ return Language == Lang_CXX14 || Language == Lang_CXX17 ||
+ Language == Lang_CXX20;
+ }
+
+ bool isCXX17OrLater() const {
+ return Language == Lang_CXX17 || Language == Lang_CXX20;
+ }
+
+ bool supportsCXXDynamicExceptionSpecification() const {
+ return Language == Lang_CXX03 || Language == Lang_CXX11 ||
+ Language == Lang_CXX14;
+ }
+
+ bool hasDelayedTemplateParsing() const {
+ return Target == "x86_64-pc-win32-msvc";
+ }
+
+ std::vector<std::string> getCommandLineArgs() const {
+ std::vector<std::string> Result = getCommandLineArgsForTesting(Language);
+ Result.push_back("-target");
+ Result.push_back(Target);
+ return Result;
+ }
+
+ std::string toString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ OS << "{ Language=" << Language << ", Target=" << Target << " }";
+ return OS.str();
+ }
+
+ friend std::ostream &operator<<(std::ostream &OS,
+ const TestClangConfig &ClangConfig) {
+ return OS << ClangConfig.toString();
+ }
+};
+
+} // end namespace clang
+
+#endif
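
A hedged sketch of converting a TestClangConfig into driver arguments; the target triple below is an example value, not something the header prescribes.

// Sketch: build driver arguments from a typed test configuration. The triple
// is an example; hasDelayedTemplateParsing() is true only for the MSVC triple
// named in the header.
#include "clang/Testing/TestClangConfig.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

static void printConfigArgs() {
  clang::TestClangConfig Config;
  Config.Language = clang::Lang_CXX14;
  Config.Target = "x86_64-unknown-linux-gnu"; // example triple
  llvm::outs() << Config.toString() << "\n";
  for (const std::string &Arg : Config.getCommandLineArgs())
    llvm::outs() << Arg << ' ';
  llvm::outs() << '\n';
}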
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
index c1cc124e1e9f..c772ad84c139 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
@@ -37,11 +37,11 @@ enum ChangeKind {
struct Node {
NodeId Parent, LeftMostDescendant, RightMostDescendant;
int Depth, Height, Shift = 0;
- ast_type_traits::DynTypedNode ASTNode;
+ DynTypedNode ASTNode;
SmallVector<NodeId, 4> Children;
ChangeKind Change = None;
- ast_type_traits::ASTNodeKind getType() const;
+ ASTNodeKind getType() const;
StringRef getTypeLabel() const;
bool isLeaf() const { return Children.empty(); }
llvm::Optional<StringRef> getIdentifier() const;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
index 0c15b30cc69c..1e784ef43ac1 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diff {
-using DynTypedNode = ast_type_traits::DynTypedNode;
+using DynTypedNode = DynTypedNode;
class SyntaxTree;
class SyntaxTreeImpl;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h b/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
index 1e618b5ba2f0..43f2792457e7 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
@@ -56,7 +56,7 @@ public:
ToolResults *getToolResults() override { return Results.get(); }
void mapVirtualFile(StringRef FilePath, StringRef Content) override {
- OverlayFiles[FilePath] = Content;
+ OverlayFiles[FilePath] = std::string(Content);
}
private:
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Core/Diagnostic.h b/contrib/llvm-project/clang/include/clang/Tooling/Core/Diagnostic.h
index 4e0feba6d7dc..123874f9ccf7 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Core/Diagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Core/Diagnostic.h
@@ -47,6 +47,17 @@ struct DiagnosticMessage {
llvm::StringMap<Replacements> Fix;
};
+/// Represents a range within a specific source file.
+struct FileByteRange {
+ FileByteRange() = default;
+
+ FileByteRange(const SourceManager &Sources, CharSourceRange Range);
+
+ std::string FilePath;
+ unsigned FileOffset;
+ unsigned Length;
+};
+
/// Represents the diagnostic with the level of severity and possible
/// fixes to be applied.
struct Diagnostic {
@@ -62,7 +73,8 @@ struct Diagnostic {
Diagnostic(llvm::StringRef DiagnosticName, const DiagnosticMessage &Message,
const SmallVector<DiagnosticMessage, 1> &Notes, Level DiagLevel,
- llvm::StringRef BuildDirectory);
+ llvm::StringRef BuildDirectory,
+ const SmallVector<FileByteRange, 1> &Ranges);
/// Name identifying the Diagnostic.
std::string DiagnosticName;
@@ -84,6 +96,10 @@ struct Diagnostic {
///
/// Note: it is empty in unittest.
std::string BuildDirectory;
+
+ /// Extra source ranges associated with the diagnostic (in addition to the
+ /// location of the Message above).
+ SmallVector<FileByteRange, 1> Ranges;
};
/// Collection of Diagnostics generated from a single translation unit.
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index a0c1900f7ed9..1c106ed4b765 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -11,13 +11,69 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "clang/Tooling/JSONCompilationDatabase.h"
+#include "llvm/ADT/StringSet.h"
#include <string>
namespace clang{
namespace tooling{
namespace dependencies{
+/// The full dependencies and module graph for a specific input.
+struct FullDependencies {
+ /// The name of the C++20 module this translation unit exports. This may
+ /// include `:` for C++20 module partitions.
+ ///
+ /// If the translation unit is not a module then this will be empty.
+ std::string ExportedModuleName;
+
+ /// The context hash represents the set of compiler options that may make one
+ /// version of a module incompatible with another. This includes things like
+ /// language mode, predefined macros, header search paths, etc...
+ ///
+ /// Modules with the same name but a different \c ContextHash should be
+ /// treated as separate modules for the purpose of a build.
+ std::string ContextHash;
+
+ /// A collection of absolute paths to files that this translation unit
+ /// directly depends on, not including transitive dependencies.
+ std::vector<std::string> FileDeps;
+
+ /// A list of modules this translation unit directly depends on, not including
+ /// transitive dependencies.
+ ///
+ /// This may include modules with a different context hash when it can be
+ /// determined that the differences are benign for this compilation.
+ std::vector<ClangModuleDep> ClangModuleDeps;
+
+ /// A partial additional set of command line arguments that can be used to
+ /// build this translation unit.
+ ///
+ /// Call \c getFullAdditionalCommandLine() to get a command line suitable for
+ /// appending to the original command line to pass to clang.
+ std::vector<std::string> AdditionalNonPathCommandLine;
+
+ /// Gets the full additional command line suitable for appending to the
+ /// original command line to pass to clang.
+ ///
+ /// \param LookupPCMPath this function is called to fill in `-fmodule-file=`
+ /// flags and for the `-o` flag. It needs to return a
+ /// path for where the PCM for the given module is to
+ /// be located.
+ /// \param LookupModuleDeps this function is called to collect the full
+ /// transitive set of dependencies for this
+ /// compilation.
+ std::vector<std::string> getAdditionalCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const;
+};
+
+struct FullDependenciesResult {
+ FullDependencies FullDeps;
+ std::vector<ModuleDeps> DiscoveredModules;
+};
+
/// The high-level implementation of the dependency discovery tool that runs on
/// an individual worker thread.
class DependencyScanningTool {
@@ -35,8 +91,23 @@ public:
getDependencyFile(const tooling::CompilationDatabase &Compilations,
StringRef CWD);
+ /// Collect the full module dependency graph for the input, ignoring any
+ /// modules which have already been seen.
+ ///
+ /// \param AlreadySeen this is used to not report modules that have previously
+ /// been reported. Use the same `llvm::StringSet<>` for all
+ /// calls to `getFullDependencies` for a single
+ /// `DependencyScanningTool` for a single build. Use a
+ /// different one for different tools, and clear it between
+ /// builds.
+ ///
+ /// \returns a \c StringError with the diagnostic output if clang errors
+ /// occurred, \c FullDependencies otherwise.
+ llvm::Expected<FullDependenciesResult>
+ getFullDependencies(const tooling::CompilationDatabase &Compilations,
+ StringRef CWD, const llvm::StringSet<> &AlreadySeen);
+
private:
- const ScanningOutputFormat Format;
DependencyScanningWorker Worker;
};
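
A sketch of the intended call pattern for the new getFullDependencies API; the working directory "/src" and the "/pcm-cache/<name>-<hash>.pcm" layout are assumptions, only the DependencyScanningTool and FullDependencies calls come from this header.

// Sketch: scan one command, remember discovered modules, and compute the
// extra arguments for the TU. The CWD and PCM path scheme are assumptions.
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Error.h"
#include <map>
#include <string>
#include <vector>

namespace deps = clang::tooling::dependencies;

static llvm::Error scanOneCommand(deps::DependencyScanningTool &Tool,
                                  const clang::tooling::CompilationDatabase &CDB,
                                  llvm::StringSet<> &AlreadySeen) {
  llvm::Expected<deps::FullDependenciesResult> Result =
      Tool.getFullDependencies(CDB, /*CWD=*/"/src", AlreadySeen);
  if (!Result)
    return Result.takeError();

  // Key modules by (name, context hash); same-named modules with different
  // hashes are distinct.
  std::map<std::string, const deps::ModuleDeps *> ModulesByID;
  for (const deps::ModuleDeps &MD : Result->DiscoveredModules)
    ModulesByID[MD.ModuleName + "-" + MD.ContextHash] = &MD;

  std::map<std::string, std::string> PCMPaths; // owns storage for StringRefs
  std::vector<std::string> ExtraArgs =
      Result->FullDeps.getAdditionalCommandLine(
          [&](deps::ClangModuleDep MD) -> llvm::StringRef {
            std::string &Path = PCMPaths[MD.ModuleName + "-" + MD.ContextHash];
            if (Path.empty())
              Path = "/pcm-cache/" + MD.ModuleName + "-" + MD.ContextHash + ".pcm";
            return Path;
          },
          [&](deps::ClangModuleDep MD) -> const deps::ModuleDeps & {
            return *ModulesByID.at(MD.ModuleName + "-" + MD.ContextHash);
          });
  // ExtraArgs is appended to the original compile command for an explicit build.
  (void)ExtraArgs;
  return llvm::Error::success();
}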
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index 7a9fc276fcaa..c490bb38c167 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -19,8 +19,8 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/raw_ostream.h"
-
#include <string>
+#include <unordered_map>
namespace clang {
namespace tooling {
@@ -28,16 +28,82 @@ namespace dependencies {
class DependencyConsumer;
+/// This is used to refer to a specific module.
+///
+/// See \c ModuleDeps for details about what these members mean.
+struct ClangModuleDep {
+ std::string ModuleName;
+ std::string ContextHash;
+};
+
struct ModuleDeps {
+ /// The name of the module. This may include `:` for C++20 module partitions,
+ /// or a header-name for C++20 header units.
std::string ModuleName;
- std::string ClangModuleMapFile;
- std::string ModulePCMPath;
+
+ /// The context hash of a module represents the set of compiler options that
+ /// may make one version of a module incompatible with another. This includes
+ /// things like language mode, predefined macros, header search paths, etc...
+ ///
+ /// Modules with the same name but a different \c ContextHash should be
+ /// treated as separate modules for the purpose of a build.
std::string ContextHash;
+
+ /// The path to the modulemap file which defines this module.
+ ///
+ /// This can be used to explicitly build this module. This file will
+ /// additionally appear in \c FileDeps as a dependency.
+ std::string ClangModuleMapFile;
+
+ /// The path to where an implicit build would put the PCM for this module.
+ std::string ImplicitModulePCMPath;
+
+ /// A collection of absolute paths to files that this module directly depends
+ /// on, not including transitive dependencies.
llvm::StringSet<> FileDeps;
- llvm::StringSet<> ClangModuleDeps;
+
+ /// A list of modules this module directly depends on, not including
+ /// transitive dependencies.
+ ///
+ /// This may include modules with a different context hash when it can be
+ /// determined that the differences are benign for this compilation.
+ std::vector<ClangModuleDep> ClangModuleDeps;
+
+ /// A partial command line that can be used to build this module.
+ ///
+ /// Call \c getFullCommandLine() to get a command line suitable for passing to
+ /// clang.
+ std::vector<std::string> NonPathCommandLine;
+
+ // Used to track which of the discovered modules were directly imported by
+ // the primary TU.
bool ImportedByMainFile = false;
+
+ /// Gets the full command line suitable for passing to clang.
+ ///
+ /// \param LookupPCMPath this function is called to fill in `-fmodule-file=`
+ /// flags and for the `-o` flag. It needs to return a
+ /// path for where the PCM for the given module is to
+ /// be located.
+ /// \param LookupModuleDeps this function is called to collect the full
+ /// transitive set of dependencies for this
+ /// compilation.
+ std::vector<std::string> getFullCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const;
};
+namespace detail {
+/// Append the `-fmodule-file=` and `-fmodule-map-file=` arguments for the
+/// modules in \c Modules transitively, along with other needed arguments to
+/// use explicitly built modules.
+void appendCommonModuleArguments(
+ llvm::ArrayRef<ClangModuleDep> Modules,
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps,
+ std::vector<std::string> &Result);
+} // namespace detail
+
class ModuleDepCollector;
class ModuleDepCollectorPP final : public PPCallbacks {
@@ -54,6 +120,8 @@ public:
StringRef SearchPath, StringRef RelativePath,
const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
+ void moduleImport(SourceLocation ImportLoc, ModuleIdPath Path,
+ const Module *Imported) override;
void EndOfMainFile() override;
@@ -62,16 +130,18 @@ private:
ModuleDepCollector &MDC;
llvm::DenseSet<const Module *> DirectDeps;
+ void handleImport(const Module *Imported);
void handleTopLevelModule(const Module *M);
- void addAllSubmoduleDeps(const Module *M, ModuleDeps &MD);
- void addModuleDep(const Module *M, ModuleDeps &MD);
-
- void addDirectDependencies(const Module *Mod);
+ void addAllSubmoduleDeps(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules);
+ void addModuleDep(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules);
};
class ModuleDepCollector final : public DependencyCollector {
public:
- ModuleDepCollector(CompilerInstance &I, DependencyConsumer &C);
+ ModuleDepCollector(std::unique_ptr<DependencyOutputOptions> Opts,
+ CompilerInstance &I, DependencyConsumer &C);
void attachToPreprocessor(Preprocessor &PP) override;
void attachToASTReader(ASTReader &R) override;
@@ -85,6 +155,7 @@ private:
std::string ContextHash;
std::vector<std::string> MainDeps;
std::unordered_map<std::string, ModuleDeps> Deps;
+ std::unique_ptr<DependencyOutputOptions> Opts;
};
} // end namespace dependencies
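
A short sketch of the per-module counterpart: computing the explicit-build command line for each discovered module via getFullCommandLine. The lookup callbacks follow the same (name, context hash) keying as the translation-unit sketch above and their path scheme is an assumption, not part of this header.

// Sketch: explicit-build command lines for discovered modules.
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include <functional>
#include <string>
#include <vector>

using clang::tooling::dependencies::ClangModuleDep;
using clang::tooling::dependencies::ModuleDeps;

static std::vector<std::vector<std::string>> commandLinesForModules(
    const std::vector<ModuleDeps> &Discovered,
    std::function<llvm::StringRef(ClangModuleDep)> LookupPCMPath,
    std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) {
  std::vector<std::vector<std::string>> CommandLines;
  for (const ModuleDeps &MD : Discovered)
    // NonPathCommandLine plus -fmodule-file=/-fmodule-map-file= flags for the
    // transitive dependencies of MD, as described above.
    CommandLines.push_back(MD.getFullCommandLine(LookupPCMPath, LookupModuleDeps));
  return CommandLines;
}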
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h b/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
index 366ee6f6703b..38fbcfc1da95 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
@@ -22,10 +22,19 @@
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::Diagnostic)
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::DiagnosticMessage)
+LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::FileByteRange)
namespace llvm {
namespace yaml {
+template <> struct MappingTraits<clang::tooling::FileByteRange> {
+ static void mapping(IO &Io, clang::tooling::FileByteRange &R) {
+ Io.mapRequired("FilePath", R.FilePath);
+ Io.mapRequired("FileOffset", R.FileOffset);
+ Io.mapRequired("Length", R.Length);
+ }
+};
+
template <> struct MappingTraits<clang::tooling::DiagnosticMessage> {
static void mapping(IO &Io, clang::tooling::DiagnosticMessage &M) {
Io.mapRequired("Message", M.Message);
@@ -58,19 +67,20 @@ template <> struct MappingTraits<clang::tooling::Diagnostic> {
NormalizedDiagnostic(const IO &, const clang::tooling::Diagnostic &D)
: DiagnosticName(D.DiagnosticName), Message(D.Message), Notes(D.Notes),
- DiagLevel(D.DiagLevel), BuildDirectory(D.BuildDirectory) {}
+ DiagLevel(D.DiagLevel), BuildDirectory(D.BuildDirectory),
+ Ranges(D.Ranges) {}
clang::tooling::Diagnostic denormalize(const IO &) {
return clang::tooling::Diagnostic(DiagnosticName, Message, Notes,
- DiagLevel, BuildDirectory);
+ DiagLevel, BuildDirectory, Ranges);
}
std::string DiagnosticName;
clang::tooling::DiagnosticMessage Message;
- llvm::StringMap<clang::tooling::Replacements> Fix;
SmallVector<clang::tooling::DiagnosticMessage, 1> Notes;
clang::tooling::Diagnostic::Level DiagLevel;
std::string BuildDirectory;
+ SmallVector<clang::tooling::FileByteRange, 1> Ranges;
};
static void mapping(IO &Io, clang::tooling::Diagnostic &D) {
@@ -79,8 +89,9 @@ template <> struct MappingTraits<clang::tooling::Diagnostic> {
Io.mapRequired("DiagnosticName", Keys->DiagnosticName);
Io.mapRequired("DiagnosticMessage", Keys->Message);
Io.mapOptional("Notes", Keys->Notes);
-
- // FIXME: Export properly all the different fields.
+ Io.mapOptional("Level", Keys->DiagLevel);
+ Io.mapOptional("BuildDirectory", Keys->BuildDirectory);
+ Io.mapOptional("Ranges", Keys->Ranges);
}
};
@@ -92,6 +103,14 @@ template <> struct MappingTraits<clang::tooling::TranslationUnitDiagnostics> {
Io.mapRequired("Diagnostics", Doc.Diagnostics);
}
};
+
+template <> struct ScalarEnumerationTraits<clang::tooling::Diagnostic::Level> {
+ static void enumeration(IO &IO, clang::tooling::Diagnostic::Level &Value) {
+ IO.enumCase(Value, "Warning", clang::tooling::Diagnostic::Warning);
+ IO.enumCase(Value, "Error", clang::tooling::Diagnostic::Error);
+ }
+};
+
} // end namespace yaml
} // end namespace llvm
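
A sketch exercising the new FileByteRange mapping and the added Level/BuildDirectory/Ranges keys; the file name, offsets, and message text are made-up example values, and the TranslationUnitDiagnostics fields are assumed from the Diagnostic.h header.

// Sketch: emit one diagnostic with an extra source range as YAML.
#include "clang/Tooling/Core/Diagnostic.h"
#include "clang/Tooling/DiagnosticsYaml.h"
#include "llvm/Support/raw_ostream.h"

static void emitExampleYaml() {
  using namespace clang::tooling;

  DiagnosticMessage Message;
  Message.Message = "example warning";
  Message.FilePath = "input.cpp";
  Message.FileOffset = 12;

  FileByteRange Range;
  Range.FilePath = "input.cpp";
  Range.FileOffset = 12;
  Range.Length = 3;

  Diagnostic D("example-check", Message, /*Notes=*/{}, Diagnostic::Warning,
               /*BuildDirectory=*/"/build", /*Ranges=*/{Range});

  TranslationUnitDiagnostics TUD;
  TUD.MainSourceFile = "input.cpp";
  TUD.Diagnostics.push_back(D);

  llvm::yaml::Output YAML(llvm::outs());
  YAML << TUD;
}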
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
index 9122b5c73c98..239be36012c3 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
@@ -13,6 +13,7 @@
#include "clang/AST/Stmt.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/raw_ostream.h"
#include <vector>
namespace clang {
@@ -48,12 +49,11 @@ enum class SourceSelectionKind {
/// actually be selected, e.g. a statement in macro whose child is in a macro
/// argument.
struct SelectedASTNode {
- ast_type_traits::DynTypedNode Node;
+ DynTypedNode Node;
SourceSelectionKind SelectionKind;
std::vector<SelectedASTNode> Children;
- SelectedASTNode(const ast_type_traits::DynTypedNode &Node,
- SourceSelectionKind SelectionKind)
+ SelectedASTNode(const DynTypedNode &Node, SourceSelectionKind SelectionKind)
: Node(Node), SelectionKind(SelectionKind) {}
SelectedASTNode(SelectedASTNode &&) = default;
SelectedASTNode &operator=(SelectedASTNode &&) = default;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
index 32e4624fc8e7..f1034a3d0579 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
@@ -17,6 +17,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"
+#include "llvm/ADT/Any.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
@@ -41,6 +42,9 @@ public:
/// is being changed, e.g. the call to a refactored method.
AtomicChange(const SourceManager &SM, SourceLocation KeyPosition);
+ AtomicChange(const SourceManager &SM, SourceLocation KeyPosition,
+ llvm::Any Metadata);
+
/// Creates an atomic change for \p FilePath with a customized key.
AtomicChange(llvm::StringRef FilePath, llvm::StringRef Key)
: Key(Key), FilePath(FilePath) {}
@@ -70,7 +74,7 @@ public:
/// conflicts among replacements, use this to set an error description.
/// Thereby, places that cannot be fixed automatically can be gathered when
/// applying changes.
- void setError(llvm::StringRef Error) { this->Error = Error; }
+ void setError(llvm::StringRef Error) { this->Error = std::string(Error); }
/// Returns whether an error has been set on this list.
bool hasError() const { return !Error.empty(); }
@@ -120,6 +124,8 @@ public:
return RemovedHeaders;
}
+ const llvm::Any &getMetadata() const { return Metadata; }
+
private:
AtomicChange() {}
@@ -135,6 +141,12 @@ private:
std::vector<std::string> InsertedHeaders;
std::vector<std::string> RemovedHeaders;
tooling::Replacements Replaces;
+
+ // This field stores metadata which is ignored for the purposes of applying
+ // edits to source, but may be useful for other consumers of AtomicChanges. In
+ // particular, consumers can use this to direct how they want to consume each
+ // edit.
+ llvm::Any Metadata;
};
using AtomicChanges = std::vector<AtomicChange>;
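
A sketch of a consumer reading the new per-change metadata; the EditTag struct is an invented example payload, only getMetadata() and llvm::Any come from the headers above.

// Sketch: recover a typed payload from an AtomicChange, if one was attached.
#include "clang/Tooling/Refactoring/AtomicChange.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
#include <string>

struct EditTag { // hypothetical payload attached by the producer of the change
  std::string Category;
};

static llvm::Optional<EditTag>
getEditTag(const clang::tooling::AtomicChange &Change) {
  const llvm::Any &Metadata = Change.getMetadata();
  if (llvm::any_isa<EditTag>(Metadata))
    return llvm::any_cast<EditTag>(Metadata);
  return llvm::None; // change carries no (or a different) payload
}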
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
index f25f526e146c..84122b111ee1 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
@@ -20,8 +20,8 @@ namespace clang {
namespace tooling {
/// A refactoring option that stores a value of type \c T.
-template <typename T, typename = typename std::enable_if<
- traits::IsValidOptionType<T>::value>::type>
+template <typename T,
+ typename = std::enable_if_t<traits::IsValidOptionType<T>::value>>
class OptionalRefactoringOption : public RefactoringOption {
public:
void passToVisitor(RefactoringOptionVisitor &Visitor) final override {
@@ -39,8 +39,8 @@ protected:
};
/// A required refactoring option that stores a value of type \c T.
-template <typename T, typename = typename std::enable_if<
- traits::IsValidOptionType<T>::value>::type>
+template <typename T,
+ typename = std::enable_if_t<traits::IsValidOptionType<T>::value>>
class RequiredRefactoringOption : public OptionalRefactoringOption<T> {
public:
using ValueType = T;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
index 2e3e401652e2..83e35d623255 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
@@ -35,13 +35,7 @@ template <> struct MappingTraits<clang::tooling::Replacement> {
NormalizedReplacement(const IO &, const clang::tooling::Replacement &R)
: FilePath(R.getFilePath()), Offset(R.getOffset()),
- Length(R.getLength()), ReplacementText(R.getReplacementText()) {
- size_t lineBreakPos = ReplacementText.find('\n');
- while (lineBreakPos != std::string::npos) {
- ReplacementText.replace(lineBreakPos, 1, "\n\n");
- lineBreakPos = ReplacementText.find('\n', lineBreakPos + 2);
- }
- }
+ Length(R.getLength()), ReplacementText(R.getReplacementText()) {}
clang::tooling::Replacement denormalize(const IO &) {
return clang::tooling::Replacement(FilePath, Offset, Length,
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
index 25acc1757428..d97b127638bb 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
@@ -38,10 +38,25 @@ enum class NodeKind : uint16_t {
Leaf,
TranslationUnit,
- // Expressions
+ // Expressions.
UnknownExpression,
-
- // Statements
+ PrefixUnaryOperatorExpression,
+ PostfixUnaryOperatorExpression,
+ BinaryOperatorExpression,
+ ParenExpression,
+ IntegerLiteralExpression,
+ CharacterLiteralExpression,
+ FloatingLiteralExpression,
+ StringLiteralExpression,
+ BoolLiteralExpression,
+ CxxNullPtrExpression,
+ IntegerUserDefinedLiteralExpression,
+ FloatUserDefinedLiteralExpression,
+ CharUserDefinedLiteralExpression,
+ StringUserDefinedLiteralExpression,
+ IdExpression,
+
+ // Statements.
UnknownStatement,
DeclarationStatement,
EmptyStatement,
@@ -58,23 +73,54 @@ enum class NodeKind : uint16_t {
ExpressionStatement,
CompoundStatement,
- // Declarations
+ // Declarations.
UnknownDeclaration,
EmptyDeclaration,
StaticAssertDeclaration,
LinkageSpecificationDeclaration,
SimpleDeclaration,
+ TemplateDeclaration,
+ ExplicitTemplateInstantiation,
NamespaceDefinition,
NamespaceAliasDefinition,
UsingNamespaceDirective,
UsingDeclaration,
- TypeAliasDeclaration
+ TypeAliasDeclaration,
+
+ // Declarators.
+ SimpleDeclarator,
+ ParenDeclarator,
+
+ ArraySubscript,
+ TrailingReturnType,
+ ParametersAndQualifiers,
+ MemberPointer,
+ NestedNameSpecifier,
+ NameSpecifier,
+ UnqualifiedId
};
/// For debugging purposes.
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, NodeKind K);
/// A relation between a parent and child node, e.g. 'left-hand-side of
/// a binary expression'. Used for implementing accessors.
+///
+/// Some roles describe parent/child relations that occur multiple times in
+/// language grammar. We define only one role to describe all instances of such
+/// recurring relations. For example, grammar for both "if" and "while"
+/// statements requires an opening paren and a closing paren. The opening
+/// paren token is assigned the OpenParen role regardless of whether it appears
+/// as a child of an IfStatement or a WhileStatement node. More generally, when
+/// grammar requires a certain fixed token (like a specific keyword, or an
+/// opening paren), we define a role for this token and use it across all
+/// grammar rules with the same requirement. Names of such reusable roles end
+/// with a ~Token or a ~Keyword suffix.
+///
+/// Some roles are assigned only to child nodes of one specific parent syntax
+/// node type. Names of such roles start with the name of the parent syntax tree
+/// node type. For example, a syntax node with a role
+/// BinaryOperatorExpression_leftHandSide can only appear as a child of a
+/// BinaryOperatorExpression node.
enum class NodeRole : uint8_t {
// Roles common to multiple node kinds.
/// A node without a parent
@@ -87,12 +133,21 @@ enum class NodeRole : uint8_t {
CloseParen,
/// A keyword that introduces some grammar construct, e.g. 'if', 'try', etc.
IntroducerKeyword,
+ /// A token that represents a literal, e.g. 'nullptr', '1', 'true', etc.
+ LiteralToken,
+ /// Tokens or Keywords
+ ArrowToken,
+ ExternKeyword,
/// An inner statement for those that have only a single child of kind
/// statement, e.g. loop body for while, for, etc; inner statement for case,
/// default, etc.
BodyStatement,
// Roles specific to particular node kinds.
+ OperatorExpression_operatorToken,
+ UnaryOperatorExpression_operand,
+ BinaryOperatorExpression_leftHandSide,
+ BinaryOperatorExpression_rightHandSide,
CaseStatement_value,
IfStatement_thenStatement,
IfStatement_elseKeyword,
@@ -101,11 +156,24 @@ enum class NodeRole : uint8_t {
ExpressionStatement_expression,
CompoundStatement_statement,
StaticAssertDeclaration_condition,
- StaticAssertDeclaration_message
+ StaticAssertDeclaration_message,
+ SimpleDeclaration_declarator,
+ TemplateDeclaration_declaration,
+ ExplicitTemplateInstantiation_declaration,
+ ArraySubscript_sizeExpression,
+ TrailingReturnType_declarator,
+ ParametersAndQualifiers_parameter,
+ ParametersAndQualifiers_trailingReturn,
+ IdExpression_id,
+ IdExpression_qualifier,
+ NestedNameSpecifier_specifier,
+ ParenExpression_subExpression
};
/// For debugging purposes.
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, NodeRole R);
+class SimpleDeclarator;
+
/// A root node for a translation unit. Parent is always null.
class TranslationUnit final : public Tree {
public:
@@ -126,6 +194,56 @@ public:
}
};
+/// A sequence of these specifiers makes a `nested-name-specifier`.
+/// e.g. the `std::` or `vector<int>::` in `std::vector<int>::size`.
+class NameSpecifier final : public Tree {
+public:
+ NameSpecifier() : Tree(NodeKind::NameSpecifier) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::NameSpecifier;
+ }
+};
+
+/// Models a `nested-name-specifier`. C++ [expr.prim.id.qual]
+/// e.g. the `std::vector<int>::` in `std::vector<int>::size`.
+class NestedNameSpecifier final : public Tree {
+public:
+ NestedNameSpecifier() : Tree(NodeKind::NestedNameSpecifier) {}
+ static bool classof(const Node *N) {
+ return N->kind() <= NodeKind::NestedNameSpecifier;
+ }
+ std::vector<syntax::NameSpecifier *> specifiers();
+};
+
+/// Models an `unqualified-id`. C++ [expr.prim.id.unqual]
+/// e.g. the `size` in `std::vector<int>::size`.
+class UnqualifiedId final : public Tree {
+public:
+ UnqualifiedId() : Tree(NodeKind::UnqualifiedId) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::UnqualifiedId;
+ }
+};
+
+/// Models an `id-expression`, e.g. `std::vector<int>::size`.
+/// C++ [expr.prim.id]
+/// id-expression:
+/// unqualified-id
+/// qualified-id
+/// qualified-id:
+/// nested-name-specifier template_opt unqualified-id
+class IdExpression final : public Expression {
+public:
+ IdExpression() : Expression(NodeKind::IdExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::IdExpression;
+ }
+ syntax::NestedNameSpecifier *qualifier();
+ // TODO: after exposing `id-expression` from `DependentScopeDeclRefExpr`,
+ // add an accessor for `template_opt`.
+ syntax::UnqualifiedId *unqualifiedId();
+};
+
/// An expression of an unknown kind, i.e. one not currently handled by the
/// syntax tree.
class UnknownExpression final : public Expression {
@@ -136,6 +254,209 @@ public:
}
};
+/// Models a parenthesized expression `(E)`. C++ [expr.prim.paren]
+/// e.g. `(3 + 2)` in `a = 1 + (3 + 2);`
+class ParenExpression final : public Expression {
+public:
+ ParenExpression() : Expression(NodeKind::ParenExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::ParenExpression;
+ }
+ syntax::Leaf *openParen();
+ syntax::Expression *subExpression();
+ syntax::Leaf *closeParen();
+};
+
+/// Expression for integer literals. C++ [lex.icon]
+class IntegerLiteralExpression final : public Expression {
+public:
+ IntegerLiteralExpression() : Expression(NodeKind::IntegerLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::IntegerLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for character literals. C++ [lex.ccon]
+class CharacterLiteralExpression final : public Expression {
+public:
+ CharacterLiteralExpression()
+ : Expression(NodeKind::CharacterLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::CharacterLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for floating-point literals. C++ [lex.fcon]
+class FloatingLiteralExpression final : public Expression {
+public:
+ FloatingLiteralExpression()
+ : Expression(NodeKind::FloatingLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::FloatingLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for string-literals. C++ [lex.string]
+class StringLiteralExpression final : public Expression {
+public:
+ StringLiteralExpression() : Expression(NodeKind::StringLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::StringLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for boolean literals. C++ [lex.bool]
+class BoolLiteralExpression final : public Expression {
+public:
+ BoolLiteralExpression() : Expression(NodeKind::BoolLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::BoolLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for the `nullptr` literal. C++ [lex.nullptr]
+class CxxNullPtrExpression final : public Expression {
+public:
+ CxxNullPtrExpression() : Expression(NodeKind::CxxNullPtrExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::CxxNullPtrExpression;
+ }
+ syntax::Leaf *nullPtrKeyword();
+};
+
+/// Expression for user-defined literal. C++ [lex.ext]
+/// user-defined-literal:
+/// user-defined-integer-literal
+/// user-defined-floating-point-literal
+/// user-defined-string-literal
+/// user-defined-character-literal
+class UserDefinedLiteralExpression : public Expression {
+public:
+ UserDefinedLiteralExpression(NodeKind K) : Expression(K) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::IntegerUserDefinedLiteralExpression ||
+ N->kind() == NodeKind::FloatUserDefinedLiteralExpression ||
+ N->kind() == NodeKind::CharUserDefinedLiteralExpression ||
+ N->kind() == NodeKind::StringUserDefinedLiteralExpression;
+ }
+ syntax::Leaf *literalToken();
+};
+
+/// Expression for user-defined-integer-literal. C++ [lex.ext]
+class IntegerUserDefinedLiteralExpression final
+ : public UserDefinedLiteralExpression {
+public:
+ IntegerUserDefinedLiteralExpression()
+ : UserDefinedLiteralExpression(
+ NodeKind::IntegerUserDefinedLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::IntegerUserDefinedLiteralExpression;
+ }
+};
+
+/// Expression for user-defined-floating-point-literal. C++ [lex.ext]
+class FloatUserDefinedLiteralExpression final
+ : public UserDefinedLiteralExpression {
+public:
+ FloatUserDefinedLiteralExpression()
+ : UserDefinedLiteralExpression(
+ NodeKind::FloatUserDefinedLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::FloatUserDefinedLiteralExpression;
+ }
+};
+
+/// Expression for user-defined-character-literal. C++ [lex.ext]
+class CharUserDefinedLiteralExpression final
+ : public UserDefinedLiteralExpression {
+public:
+ CharUserDefinedLiteralExpression()
+ : UserDefinedLiteralExpression(
+ NodeKind::CharUserDefinedLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::CharUserDefinedLiteralExpression;
+ }
+};
+
+/// Expression for user-defined-string-literal. C++ [lex.ext]
+class StringUserDefinedLiteralExpression final
+ : public UserDefinedLiteralExpression {
+public:
+ StringUserDefinedLiteralExpression()
+ : UserDefinedLiteralExpression(
+ NodeKind::StringUserDefinedLiteralExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::StringUserDefinedLiteralExpression;
+ }
+};
+
+/// An abstract class for prefix and postfix unary operators.
+class UnaryOperatorExpression : public Expression {
+public:
+ UnaryOperatorExpression(NodeKind K) : Expression(K) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::PrefixUnaryOperatorExpression ||
+ N->kind() == NodeKind::PostfixUnaryOperatorExpression;
+ }
+ syntax::Leaf *operatorToken();
+ syntax::Expression *operand();
+};
+
+/// <operator> <operand>
+///
+/// For example:
+/// +a -b
+/// !c not c
+/// ~d compl d
+/// *e &f
+/// ++h --h
+/// __real i __imag i
+class PrefixUnaryOperatorExpression final : public UnaryOperatorExpression {
+public:
+ PrefixUnaryOperatorExpression()
+ : UnaryOperatorExpression(NodeKind::PrefixUnaryOperatorExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::PrefixUnaryOperatorExpression;
+ }
+};
+
+/// <operand> <operator>
+///
+/// For example:
+/// a++
+/// b--
+class PostfixUnaryOperatorExpression final : public UnaryOperatorExpression {
+public:
+ PostfixUnaryOperatorExpression()
+ : UnaryOperatorExpression(NodeKind::PostfixUnaryOperatorExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::PostfixUnaryOperatorExpression;
+ }
+};
+
+/// <lhs> <operator> <rhs>
+///
+/// For example:
+/// a + b
+/// a bitor 1
+/// a |= b
+/// a and_eq b
+class BinaryOperatorExpression final : public Expression {
+public:
+ BinaryOperatorExpression() : Expression(NodeKind::BinaryOperatorExpression) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::BinaryOperatorExpression;
+ }
+ syntax::Expression *lhs();
+ syntax::Leaf *operatorToken();
+ syntax::Expression *rhs();
+};
+
/// An abstract node for C++ statements, e.g. 'while', 'if', etc.
/// FIXME: add accessors for semicolon of statements that have it.
class Statement : public Tree {
@@ -375,6 +696,36 @@ public:
static bool classof(const Node *N) {
return N->kind() == NodeKind::SimpleDeclaration;
}
+ /// FIXME: use custom iterator instead of 'vector'.
+ std::vector<syntax::SimpleDeclarator *> declarators();
+};
+
+/// template <template-parameters> <declaration>
+class TemplateDeclaration final : public Declaration {
+public:
+ TemplateDeclaration() : Declaration(NodeKind::TemplateDeclaration) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::TemplateDeclaration;
+ }
+ syntax::Leaf *templateKeyword();
+ syntax::Declaration *declaration();
+};
+
+/// template <declaration>
+/// Examples:
+/// template struct X<int>
+/// template void foo<int>()
+/// template int var<double>
+class ExplicitTemplateInstantiation final : public Declaration {
+public:
+ ExplicitTemplateInstantiation()
+ : Declaration(NodeKind::ExplicitTemplateInstantiation) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::ExplicitTemplateInstantiation;
+ }
+ syntax::Leaf *templateKeyword();
+ syntax::Leaf *externKeyword();
+ syntax::Declaration *declaration();
};
/// namespace <name> { <decls> }
@@ -424,6 +775,113 @@ public:
}
};
+/// Covers a name, an initializer and a part of the type outside declaration
+/// specifiers. Examples are:
+/// `*a` in `int *a`
+/// `a[10]` in `int a[10]`
+/// `*a = nullptr` in `int *a = nullptr`
+/// Declarators can be unnamed too:
+/// `**` in `new int**`
+/// `* = nullptr` in `void foo(int* = nullptr)`
+/// Most declarators you encounter are instances of SimpleDeclarator. They may
+/// contain an inner declarator inside parentheses; we represent it as
+/// ParenDeclarator. E.g.
+/// `(*a)` in `int (*a) = 10`
+class Declarator : public Tree {
+public:
+ Declarator(NodeKind K) : Tree(K) {}
+ static bool classof(const Node *N) {
+ return NodeKind::SimpleDeclarator <= N->kind() &&
+ N->kind() <= NodeKind::ParenDeclarator;
+ }
+};
+
+/// A top-level declarator without parentheses. See comment of Declarator for
+/// more details.
+class SimpleDeclarator final : public Declarator {
+public:
+ SimpleDeclarator() : Declarator(NodeKind::SimpleDeclarator) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::SimpleDeclarator;
+ }
+};
+
+/// Declarator inside parentheses.
+/// E.g. `(***a)` from `int (***a) = nullptr;`
+/// See comment of Declarator for more details.
+class ParenDeclarator final : public Declarator {
+public:
+ ParenDeclarator() : Declarator(NodeKind::ParenDeclarator) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::ParenDeclarator;
+ }
+ syntax::Leaf *lparen();
+ syntax::Leaf *rparen();
+};
+
+/// Array size specified inside a declarator.
+/// E.g:
+/// `[10]` in `int a[10];`
+/// `[static 10]` in `void f(int xs[static 10]);`
+class ArraySubscript final : public Tree {
+public:
+ ArraySubscript() : Tree(NodeKind::ArraySubscript) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::ArraySubscript;
+ }
+ // TODO: add an accessor for the "static" keyword.
+ syntax::Leaf *lbracket();
+ syntax::Expression *sizeExpression();
+ syntax::Leaf *rbracket();
+};
+
+/// Trailing return type after the parameter list, including the arrow token.
+/// E.g. `-> int***`.
+class TrailingReturnType final : public Tree {
+public:
+ TrailingReturnType() : Tree(NodeKind::TrailingReturnType) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::TrailingReturnType;
+ }
+ // TODO: add accessors for specifiers.
+ syntax::Leaf *arrowToken();
+ syntax::SimpleDeclarator *declarator();
+};
+
+/// Parameter list for a function type and a trailing return type, if the
+/// function has one.
+/// E.g.:
+/// `(int a) volatile ` in `int foo(int a) volatile;`
+/// `(int a) &&` in `int foo(int a) &&;`
+/// `() -> int` in `auto foo() -> int;`
+/// `() const` in `int foo() const;`
+/// `() noexcept` in `int foo() noexcept;`
+/// `() throw()` in `int foo() throw();`
+///
+/// (!) override doesn't belong here.
+class ParametersAndQualifiers final : public Tree {
+public:
+ ParametersAndQualifiers() : Tree(NodeKind::ParametersAndQualifiers) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::ParametersAndQualifiers;
+ }
+ syntax::Leaf *lparen();
+ /// FIXME: use custom iterator instead of 'vector'.
+ std::vector<syntax::SimpleDeclaration *> parameters();
+ syntax::Leaf *rparen();
+ syntax::TrailingReturnType *trailingReturn();
+};
+
+/// Member pointer inside a declarator
+/// E.g. `X::*` in `int X::* a = 0;`
+class MemberPointer final : public Tree {
+public:
+ MemberPointer() : Tree(NodeKind::MemberPointer) {}
+ static bool classof(const Node *N) {
+ return N->kind() == NodeKind::MemberPointer;
+ }
+};
+
} // namespace syntax
} // namespace clang
#endif
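
A sketch of classifying nodes of the syntax tree above with llvm::dyn_cast; it assumes a Node* obtained from an already-built tree, since building the tree is outside this header.

// Sketch: use the classof-based hierarchy to pick apart operator expressions.
#include "clang/Tooling/Syntax/Nodes.h"
#include "llvm/Support/Casting.h"

// Returns the left-hand side of a binary operator expression, the operand of
// a prefix/postfix unary operator, or nullptr for any other node.
static clang::syntax::Expression *mainOperand(clang::syntax::Node *N) {
  using namespace clang::syntax;
  if (auto *BO = llvm::dyn_cast_or_null<BinaryOperatorExpression>(N))
    return BO->lhs();
  if (auto *UO = llvm::dyn_cast_or_null<UnaryOperatorExpression>(N))
    return UO->operand();
  return nullptr;
}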
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
index a210815d49f9..a7f9369ddfff 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
@@ -171,11 +171,16 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Token &T);
/// To build a token buffer use the TokenCollector class. You can also compute
/// the spelled tokens of a file using the tokenize() helper.
///
-/// FIXME: allow to map from spelled to expanded tokens when use-case shows up.
/// FIXME: allow mappings into macro arguments.
class TokenBuffer {
public:
TokenBuffer(const SourceManager &SourceMgr) : SourceMgr(&SourceMgr) {}
+
+ TokenBuffer(TokenBuffer &&) = default;
+ TokenBuffer(const TokenBuffer &) = delete;
+ TokenBuffer &operator=(TokenBuffer &&) = default;
+ TokenBuffer &operator=(const TokenBuffer &) = delete;
+
/// All tokens produced by the preprocessor after all macro replacements,
/// directives, etc. Source locations found in the clang AST will always
/// point to one of these tokens.
@@ -191,18 +196,20 @@ public:
/// token range R.
llvm::ArrayRef<syntax::Token> expandedTokens(SourceRange R) const;
- /// Find the subrange of spelled tokens that produced the corresponding \p
- /// Expanded tokens.
+ /// Returns the subrange of spelled tokens corresponding to AST node spanning
+ /// \p Expanded. This is the text that should be replaced if a refactoring
+ /// were to rewrite the node. If \p Expanded is empty, the returned value is
+ /// llvm::None.
///
- /// EXPECTS: \p Expanded is a subrange of expandedTokens().
- ///
- /// Will fail if the expanded tokens do not correspond to a
- /// sequence of spelled tokens. E.g. for the following example:
+ /// Will fail if the expanded tokens do not correspond to a sequence of
+ /// spelled tokens. E.g. for the following example:
///
/// #define FIRST f1 f2 f3
/// #define SECOND s1 s2 s3
+ /// #define ID2(X, Y) X Y
///
/// a FIRST b SECOND c // expanded tokens are: a f1 f2 f3 b s1 s2 s3 c
+ /// d ID2(e f g, h) i // expanded tokens are: d e f g h i
///
/// the results would be:
/// expanded => spelled
@@ -212,12 +219,44 @@ public:
/// a f1 f2 f3 => a FIRST
/// a f1 => can't map
/// s1 s2 => can't map
+ /// e f => e f
+ /// g h => can't map
///
- /// If \p Expanded is empty, the returned value is llvm::None.
+ /// EXPECTS: \p Expanded is a subrange of expandedTokens().
/// Complexity is logarithmic.
llvm::Optional<llvm::ArrayRef<syntax::Token>>
spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const;
+ /// Find the subranges of expanded tokens, corresponding to \p Spelled.
+ ///
+ /// Some spelled tokens may not be present in the expanded token stream, so
+ /// this function can return an empty vector, e.g. for tokens of macro
+ /// directives or disabled preprocessor branches.
+ ///
+ /// Some spelled tokens can be duplicated in the expanded token stream
+ /// multiple times and this function will return multiple results in those
+ /// cases. This happens when \p Spelled is inside a macro argument.
+ ///
+ /// FIXME: return correct results on macro arguments. For now, we return an
+ /// empty list.
+ ///
+ /// (!) will return an empty vector for tokens from a #define body:
+ /// E.g. for the following example:
+ ///
+ /// #define FIRST(A) f1 A = A f2
+ /// #define SECOND s
+ ///
+ /// a FIRST(arg) b SECOND c // expanded tokens are: a f1 arg = arg f2 b s
+ /// The results would be
+ /// spelled => expanded
+ /// ------------------------
+ /// #define FIRST => {}
+ /// a FIRST(arg) => {a f1 arg = arg f2}
+ /// arg => {arg, arg} // arg #1 is before `=` and arg #2 is
+ /// // after `=` in the expanded tokens.
+ llvm::SmallVector<llvm::ArrayRef<syntax::Token>, 1>
+ expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const;
+
/// An expansion produced by the preprocessor, includes macro expansions and
/// preprocessor directives. Preprocessor always maps a non-empty range of
/// spelled tokens to a (possibly empty) range of expanded tokens. Here is a
@@ -245,6 +284,10 @@ public:
/// "DECL", "(", "a", ")", ";"}
llvm::ArrayRef<syntax::Token> spelledTokens(FileID FID) const;
+ /// Returns the spelled Token starting at Loc, or nullptr if there is no
+ /// such token.
+ const syntax::Token *spelledTokenAt(SourceLocation Loc) const;
+
/// Get all tokens that expand a macro in \p FID. For the following input
/// #define FOO B
/// #define FOO2(X) int X
@@ -303,6 +346,12 @@ private:
std::pair<const syntax::Token *, const Mapping *>
spelledForExpandedToken(const syntax::Token *Expanded) const;
+ /// Returns a mapping starting before \p Spelled token, or nullptr if no
+ /// such mapping exists.
+ static const Mapping *
+ mappingStartingBeforeSpelled(const MarkedFile &F,
+ const syntax::Token *Spelled);
+
/// Token stream produced after preprocessing, conceptually this captures the
/// same stream as 'clang -E' (excluding the preprocessor directives like
/// #file, etc.).
@@ -317,11 +366,16 @@ private:
/// This always returns 0-2 tokens.
llvm::ArrayRef<syntax::Token>
spelledTokensTouching(SourceLocation Loc, const syntax::TokenBuffer &Tokens);
+llvm::ArrayRef<syntax::Token>
+spelledTokensTouching(SourceLocation Loc, llvm::ArrayRef<syntax::Token> Tokens);
/// The identifier token that overlaps or touches a spelling location Loc.
/// If there is none, returns nullptr.
const syntax::Token *
spelledIdentifierTouching(SourceLocation Loc,
+ llvm::ArrayRef<syntax::Token> Tokens);
+const syntax::Token *
+spelledIdentifierTouching(SourceLocation Loc,
const syntax::TokenBuffer &Tokens);
/// Lex the text buffer, corresponding to \p FID, in raw mode and record the
@@ -334,6 +388,12 @@ spelledIdentifierTouching(SourceLocation Loc,
/// The result will *not* have a 'eof' token at the end.
std::vector<syntax::Token> tokenize(FileID FID, const SourceManager &SM,
const LangOptions &LO);
+/// Similar to the overload above, but tokenizes only a part of the file. Note
+/// that the first token might be incomplete if FR.startOffset is not at the
+/// beginning of a token, and the last token returned will start before
+/// FR.endOffset but might end after it.
+std::vector<syntax::Token>
+tokenize(const FileRange &FR, const SourceManager &SM, const LangOptions &LO);
/// Collects tokens for the main file while running the frontend action. An
/// instance of this object should be created on
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
index 8702fe60ce1b..bc581004c46e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
@@ -126,6 +126,8 @@ private:
// FactoryImpl sets CanModify flag.
friend class FactoryImpl;
+ void setRole(NodeRole NR);
+
Tree *Parent;
Node *NextSibling;
unsigned Kind : 16;
@@ -171,8 +173,11 @@ private:
/// Prepend \p Child to the list of children and sets the parent pointer.
/// A very low-level operation that does not check any invariants, only used
/// by TreeBuilder and FactoryImpl.
- /// EXPECTS: Role != NodeRoleDetached.
+ /// EXPECTS: Role != Detached.
void prependChildLowLevel(Node *Child, NodeRole Role);
+ /// Like the previous overload, but does not set role for \p Child.
+ /// EXPECTS: Child->Role != Detached
+ void prependChildLowLevel(Node *Child);
friend class TreeBuilder;
friend class FactoryImpl;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
index f759789170d9..4fb0c18be95e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
@@ -225,7 +225,8 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
std::shared_ptr<PCHContainerOperations> PCHContainerOps =
std::make_shared<PCHContainerOperations>(),
ArgumentsAdjuster Adjuster = getClangStripDependencyFileAdjuster(),
- const FileContentMappings &VirtualMappedFiles = FileContentMappings());
+ const FileContentMappings &VirtualMappedFiles = FileContentMappings(),
+ DiagnosticConsumer *DiagConsumer = nullptr);
/// Utility to run a FrontendAction in a single clang invocation.
class ToolInvocation {
@@ -504,7 +505,8 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
/// Creates a \c CompilerInvocation.
CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
- const llvm::opt::ArgStringList &CC1Args);
+ const llvm::opt::ArgStringList &CC1Args,
+ const char *const BinaryName);
} // namespace tooling
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
new file mode 100644
index 000000000000..8e51f595cd5b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
@@ -0,0 +1,41 @@
+//===--- Parsing.h - Parsing library for Transformer ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines parsing functions for Transformer types.
+/// FIXME: Currently, only supports `RangeSelectors` but parsers for other
+/// Transformer types are under development.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+#define LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Tooling/Transformer/RangeSelector.h"
+#include "llvm/Support/Error.h"
+#include <functional>
+#include <string>
+
+namespace clang {
+namespace transformer {
+
+/// Parses a string representation of a \c RangeSelector. The grammar of these
+/// strings is closely based on the (sub)grammar of \c RangeSelectors as they'd
+/// appear in C++ code. However, this language constrains the set of permissible
+/// strings (for node ids) -- it does not support escapes in the
+/// string. Additionally, the \c charRange combinator is not supported, because
+/// there is no representation of values of type \c CharSourceRange in this
+/// (little) language.
+llvm::Expected<RangeSelector> parseRangeSelector(llvm::StringRef Input);
+
+} // namespace transformer
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
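
A sketch of parsing a RangeSelector from text, e.g. read from a tool's configuration; the example input string is an assumption about the little language described above (mirroring the C++ spelling of the `name` combinator), and only parseRangeSelector itself comes from this header.

// Sketch: turn a textual selector into a RangeSelector, propagating errors.
#include "clang/Tooling/Transformer/Parsing.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "llvm/Support/Error.h"

static llvm::Expected<clang::transformer::RangeSelector>
selectorFromConfig(llvm::StringRef Text) {
  return clang::transformer::parseRangeSelector(Text);
}

// Example call (assumed spelling): selectorFromConfig("name(\"fun\")")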
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
index 9f556d206321..2807037bc208 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
@@ -32,10 +32,20 @@ inline RangeSelector charRange(CharSourceRange R) {
}
/// Selects from the start of \p Begin and to the end of \p End.
-RangeSelector range(RangeSelector Begin, RangeSelector End);
+RangeSelector enclose(RangeSelector Begin, RangeSelector End);
/// Convenience version of \c range where end-points are bound nodes.
-RangeSelector range(std::string BeginID, std::string EndID);
+RangeSelector encloseNodes(std::string BeginID, std::string EndID);
+
+/// DEPRECATED. Use `enclose`.
+inline RangeSelector range(RangeSelector Begin, RangeSelector End) {
+ return enclose(std::move(Begin), std::move(End));
+}
+
+/// DEPRECATED. Use `encloseNodes`.
+inline RangeSelector range(std::string BeginID, std::string EndID) {
+ return encloseNodes(std::move(BeginID), std::move(EndID));
+}
/// Selects the (empty) range [B,B) when \p Selector selects the range [B,E).
RangeSelector before(RangeSelector Selector);
@@ -43,7 +53,7 @@ RangeSelector before(RangeSelector Selector);
/// Selects the point immediately following \p Selector. That is, the
/// (empty) range [E,E), when \p Selector selects either
/// * the CharRange [B,E) or
-/// * the TokenRange [B,E'] where the token at E' spans the range [E,E').
+/// * the TokenRange [B,E'] where the token at E' spans the range [E',E).
RangeSelector after(RangeSelector Selector);
/// Selects a node, including trailing semicolon (for non-expression
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
index 7daf6ea154be..d9e68717d5c8 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
@@ -21,6 +21,7 @@
#include "clang/Tooling/Refactoring/AtomicChange.h"
#include "clang/Tooling/Transformer/MatchConsumer.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
+#include "llvm/ADT/Any.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Error.h"
@@ -30,6 +31,20 @@
namespace clang {
namespace transformer {
+/// A concrete description of a source edit, represented by a character range in
+/// the source to be replaced and a corresponding replacement string.
+struct Edit {
+ CharSourceRange Range;
+ std::string Replacement;
+ llvm::Any Metadata;
+};
+
+/// Maps a match result to a list of concrete edits (with possible
+/// failure). This type is a building block of rewrite rules, but users will
+/// generally work in terms of `ASTEdit`s (below) rather than directly in terms
+/// of `EditGenerator`.
+using EditGenerator = MatchConsumer<llvm::SmallVector<Edit, 1>>;
+
using TextGenerator = std::shared_ptr<MatchComputation<std::string>>;
// Description of a source-code edit, expressed in terms of an AST node.
@@ -72,8 +87,22 @@ struct ASTEdit {
RangeSelector TargetRange;
TextGenerator Replacement;
TextGenerator Note;
+ llvm::Any Metadata;
};
+/// Lifts a list of `ASTEdit`s into an `EditGenerator`.
+///
+/// The `EditGenerator` will return an empty vector if any of the edits apply to
+/// portions of the source that are ineligible for rewriting (certain
+/// interactions with macros, for example) and it will fail if any invariants
+/// are violated relating to bound nodes in the match. However, it does not
+/// fail in the case of conflicting edits -- conflict handling is left to
+/// clients. We recommend use of the \c AtomicChange or \c Replacements classes
+/// for assistance in detecting such conflicts.
+EditGenerator editList(llvm::SmallVector<ASTEdit, 1> Edits);
+// Convenience form of `editList` for a single edit.
+EditGenerator edit(ASTEdit);
+
/// Format of the path in an include directive -- angle brackets or quotes.
enum class IncludeFormat {
Quoted,
@@ -106,7 +135,7 @@ enum class IncludeFormat {
struct RewriteRule {
struct Case {
ast_matchers::internal::DynTypedMatcher Matcher;
- SmallVector<ASTEdit, 1> Edits;
+ EditGenerator Edits;
TextGenerator Explanation;
// Include paths to add to the file affected by this case. These are
// bundled with the `Case`, rather than the `RewriteRule`, because each case
@@ -123,16 +152,22 @@ struct RewriteRule {
/// Convenience function for constructing a simple \c RewriteRule.
RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
- SmallVector<ASTEdit, 1> Edits,
- TextGenerator Explanation = nullptr);
+ EditGenerator Edits, TextGenerator Explanation = nullptr);
+
+/// Convenience function for constructing a \c RewriteRule from multiple
+/// `ASTEdit`s.
+inline RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
+ llvm::SmallVector<ASTEdit, 1> Edits,
+ TextGenerator Explanation = nullptr) {
+ return makeRule(std::move(M), editList(std::move(Edits)),
+ std::move(Explanation));
+}
/// Convenience overload of \c makeRule for common case of only one edit.
inline RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
ASTEdit Edit,
TextGenerator Explanation = nullptr) {
- SmallVector<ASTEdit, 1> Edits;
- Edits.emplace_back(std::move(Edit));
- return makeRule(std::move(M), std::move(Edits), std::move(Explanation));
+ return makeRule(std::move(M), edit(std::move(Edit)), std::move(Explanation));
}
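// Illustrative sketch (not part of this patch): bundling several edits into a
// single rule with `editList`. The matcher, the node ID "call", and the
// replacement text are hypothetical; `cat` comes from Stencil.h.
inline RewriteRule renameAndAnnotateExample() {
  using namespace ::clang::ast_matchers;
  return makeRule(
      callExpr(callee(functionDecl(hasName("oldF")))).bind("call"),
      editList({changeTo(node("call"), cat("newF()")),
                insertBefore(statement("call"), cat("/* migrated */ "))}));
}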
/// For every case in Rule, adds an include directive for the given header. The
@@ -203,7 +238,8 @@ inline ASTEdit change(RangeSelector Target, TextGenerator Replacement) {
/// changeTo(cat("bar()")))
/// \endcode
inline ASTEdit changeTo(TextGenerator Replacement) {
- return changeTo(node(RewriteRule::RootID), std::move(Replacement));
+ return changeTo(node(std::string(RewriteRule::RootID)),
+ std::move(Replacement));
}
/// DEPRECATED: use \c changeTo.
inline ASTEdit change(TextGenerator Replacement) {
@@ -225,6 +261,11 @@ inline ASTEdit insertAfter(RangeSelector S, TextGenerator Replacement) {
/// Removes the source selected by \p S.
ASTEdit remove(RangeSelector S);
+inline ASTEdit withMetadata(ASTEdit edit, llvm::Any Metadata) {
+ edit.Metadata = std::move(Metadata);
+ return edit;
+}
+
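// Illustrative sketch (not part of this patch): attaching client-defined
// metadata to an edit and reading it back from the resulting `Edit`. The
// `MigrationTag` type and the node ID "call" are hypothetical.
struct MigrationTag { int Severity; };

inline ASTEdit taggedEditExample() {
  return withMetadata(changeTo(node("call"), cat("newF()")), MigrationTag{2});
}

// A client that receives a concrete `Edit` E can later recover the value:
//   if (llvm::any_isa<MigrationTag>(E.Metadata))
//     int Severity = llvm::any_cast<MigrationTag>(E.Metadata).Severity;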
/// The following three functions are a low-level part of the RewriteRule
/// API. We expose them for use in implementing the fixtures that interpret
/// RewriteRule, like Transformer and TransformerTidy, or for more advanced
@@ -240,11 +281,13 @@ namespace detail {
/// supports mixing matchers of different kinds.
ast_matchers::internal::DynTypedMatcher buildMatcher(const RewriteRule &Rule);
-/// Builds a set of matchers that cover the rule (one for each distinct node
-/// matcher base kind: Stmt, Decl, etc.). Node-matchers for `QualType` and
-/// `Type` are not permitted, since such nodes carry no source location
-/// information and are therefore not relevant for rewriting. If any such
-/// matchers are included, will return an empty vector.
+/// Builds a set of matchers that cover the rule.
+///
+/// One matcher is built for each distinct node matcher base kind: Stmt, Decl,
+/// etc. Node-matchers for `QualType` and `Type` are not permitted, since such
+/// nodes carry no source location information and are therefore not relevant
+/// for rewriting. If any such matchers are included, will return an empty
+/// vector.
std::vector<ast_matchers::internal::DynTypedMatcher>
buildMatchers(const RewriteRule &Rule);
@@ -259,28 +302,6 @@ getRuleMatchLoc(const ast_matchers::MatchFinder::MatchResult &Result);
const RewriteRule::Case &
findSelectedCase(const ast_matchers::MatchFinder::MatchResult &Result,
const RewriteRule &Rule);
-
-/// A source "transformation," represented by a character range in the source to
-/// be replaced and a corresponding replacement string.
-struct Transformation {
- CharSourceRange Range;
- std::string Replacement;
-};
-
-/// Attempts to translate `Edits`, which are in terms of AST nodes bound in the
-/// match `Result`, into Transformations, which are in terms of the source code
-/// text.
-///
-/// Returns an empty vector if any of the edits apply to portions of the source
-/// that are ineligible for rewriting (certain interactions with macros, for
-/// example). Fails if any invariants are violated relating to bound nodes in
-/// the match. However, it does not fail in the case of conflicting edits --
-/// conflict handling is left to clients. We recommend use of the \c
-/// AtomicChange or \c Replacements classes for assistance in detecting such
-/// conflicts.
-Expected<SmallVector<Transformation, 1>>
-translateEdits(const ast_matchers::MatchFinder::MatchResult &Result,
- llvm::ArrayRef<ASTEdit> Edits);
} // namespace detail
} // namespace transformer
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
index bc9cc3d2a258..2c7eb65371cf 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
@@ -20,9 +20,10 @@
namespace clang {
namespace tooling {
-/// Extends \p Range to include the token \p Next, if it immediately follows the
-/// end of the range. Otherwise, returns \p Range unchanged.
-CharSourceRange maybeExtendRange(CharSourceRange Range, tok::TokenKind Next,
+/// Extends \p Range to include the token \p Terminator, if it immediately
+/// follows the end of the range. Otherwise, returns \p Range unchanged.
+CharSourceRange maybeExtendRange(CharSourceRange Range,
+ tok::TokenKind Terminator,
ASTContext &Context);
/// Returns the source range spanning the node, extended to include \p Next, if
@@ -35,6 +36,13 @@ CharSourceRange getExtendedRange(const T &Node, tok::TokenKind Next,
Next, Context);
}
+/// Returns the logical source range of the node extended to include associated
+/// comments and whitespace before and after the node, and associated
+/// terminators. The returned range consists of file locations, if valid file
+/// locations can be found for the associated content; otherwise, an invalid
+/// range is returned.
+CharSourceRange getAssociatedRange(const Decl &D, ASTContext &Context);
+
/// Returns the source-code text in the specified range.
StringRef getText(CharSourceRange Range, const ASTContext &Context);
@@ -73,13 +81,18 @@ StringRef getExtendedText(const T &Node, tok::TokenKind Next,
return getText(getExtendedRange(Node, Next, Context), Context);
}
-// Attempts to resolve the given range to one that can be edited by a rewrite;
-// generally, one that starts and ends within a particular file. It supports
-// a limited set of cases involving source locations in macro expansions.
+/// Determines whether \p Range is one that can be edited by a rewrite;
+/// generally, one that starts and ends within a particular file.
+llvm::Error validateEditRange(const CharSourceRange &Range,
+ const SourceManager &SM);
+
+/// Attempts to resolve the given range to one that can be edited by a rewrite;
+/// generally, one that starts and ends within a particular file. It supports a
+/// limited set of cases involving source locations in macro expansions. If a
+/// value is returned, it satisfies \c validateEditRange.
llvm::Optional<CharSourceRange>
getRangeForEdit(const CharSourceRange &EditRange, const SourceManager &SM,
const LangOptions &LangOpts);
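// Illustrative sketch (not part of this patch): composing `getRangeForEdit`
// and `getText`. Assumes the usual Expr and raw_ostream includes; the caller
// supplies the node and context (e.g. from a MatchFinder callback).
inline void printEditableTextExample(const Expr &E, const ASTContext &Context) {
  CharSourceRange Range = CharSourceRange::getTokenRange(E.getSourceRange());
  if (llvm::Optional<CharSourceRange> Editable = getRangeForEdit(Range, Context))
    llvm::errs() << getText(*Editable, Context) << "\n";
}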
-
inline llvm::Optional<CharSourceRange>
getRangeForEdit(const CharSourceRange &EditRange, const ASTContext &Context) {
return getRangeForEdit(EditRange, Context.getSourceManager(),
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
index 0363b689dc5b..1b50a670f70b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
@@ -69,14 +69,6 @@ template <typename... Ts> Stencil cat(Ts &&... Parts) {
// Functions for conveniently building stencils.
//
-/// DEPRECATED: Use `cat` instead.
-/// \returns exactly the text provided.
-Stencil text(llvm::StringRef Text);
-
-/// DEPRECATED: Use `cat` instead.
-/// \returns the source corresponding to the selected range.
-Stencil selection(RangeSelector Selector);
-
/// Generates the source of the expression bound to \p Id, wrapping it in
/// parentheses if it may parse differently depending on context. For example, a
/// binary operation is always wrapped, while a variable reference is never
@@ -112,7 +104,7 @@ Stencil maybeAddressOf(llvm::StringRef ExprId);
/// Additionally, `e` is wrapped in parentheses, if needed.
Stencil access(llvm::StringRef BaseId, Stencil Member);
inline Stencil access(llvm::StringRef BaseId, llvm::StringRef Member) {
- return access(BaseId, text(Member));
+ return access(BaseId, detail::makeStencil(Member));
}
/// Chooses between the two stencil parts, based on whether \p ID is bound in
@@ -123,7 +115,8 @@ Stencil ifBound(llvm::StringRef Id, Stencil TrueStencil, Stencil FalseStencil);
/// match.
inline Stencil ifBound(llvm::StringRef Id, llvm::StringRef TrueText,
llvm::StringRef FalseText) {
- return ifBound(Id, text(TrueText), text(FalseText));
+ return ifBound(Id, detail::makeStencil(TrueText),
+ detail::makeStencil(FalseText));
}
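// Illustrative sketch (not part of this patch): composing the remaining
// helpers. The bound ID "obj", the bound ID "verbose", and the replacement
// text are hypothetical.
inline Stencil logCallExample() {
  return cat("log(", access("obj", "name"), ", ",
             ifBound("verbose", "Level::Debug", "Level::Info"), ")");
}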
/// Wraps a \c MatchConsumer in a \c Stencil, so that it can be used in a \c
diff --git a/contrib/llvm-project/clang/include/clang/module.modulemap b/contrib/llvm-project/clang/include/clang/module.modulemap
index b3e2108d3fa6..13d4dbf9dc2e 100644
--- a/contrib/llvm-project/clang/include/clang/module.modulemap
+++ b/contrib/llvm-project/clang/include/clang/module.modulemap
@@ -38,11 +38,14 @@ module Clang_Basic {
textual header "Basic/BuiltinsBPF.def"
textual header "Basic/Builtins.def"
textual header "Basic/BuiltinsHexagon.def"
+ textual header "Basic/BuiltinsHexagonDep.def"
+ textual header "Basic/BuiltinsHexagonMapCustomDep.def"
textual header "Basic/BuiltinsLe64.def"
textual header "Basic/BuiltinsMips.def"
textual header "Basic/BuiltinsNEON.def"
textual header "Basic/BuiltinsNVPTX.def"
textual header "Basic/BuiltinsPPC.def"
+ textual header "Basic/BuiltinsSVE.def"
textual header "Basic/BuiltinsSystemZ.def"
textual header "Basic/BuiltinsWebAssembly.def"
textual header "Basic/BuiltinsX86.def"
@@ -51,6 +54,7 @@ module Clang_Basic {
textual header "Basic/CodeGenOptions.def"
textual header "Basic/DiagnosticOptions.def"
textual header "Basic/Features.def"
+ textual header "Basic/FPOptions.def"
textual header "Basic/MSP430Target.def"
textual header "Basic/LangOptions.def"
textual header "Basic/OpenCLExtensions.def"
@@ -149,6 +153,12 @@ module Clang_StaticAnalyzer_Frontend {
module * { export * }
}
+module Clang_Testing {
+ requires cplusplus
+ umbrella "Testing"
+ module * { export * }
+}
+
module Clang_Tooling {
requires cplusplus umbrella "Tooling" module * { export * }
// FIXME: Exclude these headers to avoid pulling all of the AST matchers
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
index a9018c1c4bdf..e18def8a0b19 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Internals.h"
+#include "clang/ARCMigrate/ARCMT.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Frontend/ASTUnit.h"
@@ -189,7 +190,7 @@ createInvocationForMigration(CompilerInvocation &origCI,
PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
PPOpts.ImplicitPCHInclude.clear();
}
- std::string define = getARCMTMacroName();
+ std::string define = std::string(getARCMTMacroName());
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
CInvok->getLangOpts()->ObjCAutoRefCount = true;
@@ -296,7 +297,7 @@ bool arcmt::checkForManualIssues(
for (CapturedDiagList::iterator
I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I)
arcDiags.push_back(*I);
- writeARCDiagsToPlist(plistOut, arcDiags,
+ writeARCDiagsToPlist(std::string(plistOut), arcDiags,
Ctx.getSourceManager(), Ctx.getLangOpts());
}
@@ -598,7 +599,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
RewriteBuffer &buf = I->second;
const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
assert(file);
- std::string newFname = file->getName();
+ std::string newFname = std::string(file->getName());
newFname += "-trans";
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
index a031fe22ac13..0222583c015b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -43,7 +43,7 @@ std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
assert(!outputDir.empty());
SmallString<128> InfoFile = outputDir;
llvm::sys::path::append(InfoFile, "remap");
- return InfoFile.str();
+ return std::string(InfoFile.str());
}
bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
@@ -56,7 +56,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
bool ignoreIfFilesChanged) {
assert(FromToMappings.empty() &&
"initFromDisk should be called before any remap calls");
- std::string infoFile = filePath;
+ std::string infoFile = std::string(filePath);
if (!llvm::sys::fs::exists(infoFile))
return false;
@@ -120,7 +120,7 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
using namespace llvm::sys;
std::error_code EC;
- std::string infoFile = outputPath;
+ std::string infoFile = std::string(outputPath);
llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::OF_None);
if (EC)
return report(EC.message(), Diag);
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
index 47fc09317500..ed0136e4867a 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
@@ -9,13 +9,15 @@
#ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
-#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/MigratorOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include <list>
namespace clang {
+ class ASTContext;
class Sema;
class Stmt;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
index 4c6e9f2432f6..51c4a460cc25 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -114,21 +114,15 @@ public:
return *Summaries;
}
- ObjCMigrateASTConsumer(StringRef migrateDir,
- unsigned astMigrateActions,
- FileRemapper &remapper,
- FileManager &fileMgr,
+ ObjCMigrateASTConsumer(StringRef migrateDir, unsigned astMigrateActions,
+ FileRemapper &remapper, FileManager &fileMgr,
const PPConditionalDirectiveRecord *PPRec,
- Preprocessor &PP,
- bool isOutputFile,
+ Preprocessor &PP, bool isOutputFile,
ArrayRef<std::string> WhiteList)
- : MigrateDir(migrateDir),
- ASTMigrateActions(astMigrateActions),
- NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
- Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
- IsOutputFile(isOutputFile),
- FoundationIncluded(false){
-
+ : MigrateDir(migrateDir), ASTMigrateActions(astMigrateActions),
+ NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
+ Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
+ IsOutputFile(isOutputFile), FoundationIncluded(false) {
// FIXME: StringSet should have insert(iter, iter) to use here.
for (const std::string &Val : WhiteList)
WhiteListFilenames.insert(Val);
@@ -191,12 +185,10 @@ protected:
} // end anonymous namespace
ObjCMigrateAction::ObjCMigrateAction(
- std::unique_ptr<FrontendAction> WrappedAction,
- StringRef migrateDir,
- unsigned migrateAction)
- : WrapperFrontendAction(std::move(WrappedAction)), MigrateDir(migrateDir),
- ObjCMigAction(migrateAction),
- CompInst(nullptr) {
+ std::unique_ptr<FrontendAction> WrappedAction, StringRef migrateDir,
+ unsigned migrateAction)
+ : WrapperFrontendAction(std::move(WrappedAction)), MigrateDir(migrateDir),
+ ObjCMigAction(migrateAction), CompInst(nullptr) {
if (MigrateDir.empty())
MigrateDir = "."; // user current directory if none is given.
}
@@ -533,7 +525,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
// after that; e.g. isContinuous will become continuous.
StringRef PropertyNameStringRef(PropertyNameString);
PropertyNameStringRef = PropertyNameStringRef.drop_front(LengthOfPrefix);
- PropertyNameString = PropertyNameStringRef;
+ PropertyNameString = std::string(PropertyNameStringRef);
bool NoLowering = (isUppercase(PropertyNameString[0]) &&
PropertyNameString.size() > 1 &&
isUppercase(PropertyNameString[1]));
@@ -994,7 +986,7 @@ static void ReplaceWithClasstype(const ObjCMigrateASTConsumer &ASTC,
if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
TypeLoc TL = TSInfo->getTypeLoc();
R = SourceRange(TL.getBeginLoc(), TL.getEndLoc()); {
- ClassString = IDecl->getName();
+ ClassString = std::string(IDecl->getName());
ClassString += "*";
}
}
@@ -1320,7 +1312,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
if (!IDecl)
return;
- std::string StringClassName = IDecl->getName();
+ std::string StringClassName = std::string(IDecl->getName());
StringRef LoweredClassName(StringClassName);
std::string StringLoweredClassName = LoweredClassName.lower();
LoweredClassName = StringLoweredClassName;
@@ -1330,7 +1322,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
if (!MethodIdName)
return;
- std::string MethodName = MethodIdName->getName();
+ std::string MethodName = std::string(MethodIdName->getName());
if (OIT_Family == OIT_Singleton || OIT_Family == OIT_ReturnsSelf) {
StringRef STRefMethodName(MethodName);
size_t len = 0;
@@ -1342,7 +1334,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
len = strlen("default");
else
return;
- MethodName = STRefMethodName.substr(len);
+ MethodName = std::string(STRefMethodName.substr(len));
}
std::string MethodNameSubStr = MethodName.substr(0, 3);
StringRef MethodNamePrefix(MethodNameSubStr);
@@ -1351,7 +1343,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
size_t Ix = LoweredClassName.rfind(MethodNamePrefix);
if (Ix == StringRef::npos)
return;
- std::string ClassNamePostfix = LoweredClassName.substr(Ix);
+ std::string ClassNamePostfix = std::string(LoweredClassName.substr(Ix));
StringRef LoweredMethodName(MethodName);
std::string StringLoweredMethodName = LoweredMethodName.lower();
LoweredMethodName = StringLoweredMethodName;
@@ -2010,7 +2002,7 @@ static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) {
directory_iterator DE;
for (; !EC && DI != DE; DI = DI.increment(EC)) {
if (is_regular_file(DI->path()))
- Filenames.push_back(filename(DI->path()));
+ Filenames.push_back(std::string(filename(DI->path())));
}
return Filenames;
@@ -2153,7 +2145,7 @@ private:
if (Val.getAsInteger(10, Entry.RemoveLen))
Ignore = true;
} else if (Key == "text") {
- Entry.Text = Val;
+ Entry.Text = std::string(Val);
}
}
@@ -2224,7 +2216,7 @@ static std::string applyEditsToTemp(const FileEntry *FE,
TmpOut.write(NewText.data(), NewText.size());
TmpOut.close();
- return TempPath.str();
+ return std::string(TempPath.str());
}
bool arcmt::getFileRemappingsFromFileList(
@@ -2277,7 +2269,7 @@ bool arcmt::getFileRemappingsFromFileList(
continue;
}
- remap.emplace_back(I->first->getName(), TempFile);
+ remap.emplace_back(std::string(I->first->getName()), TempFile);
}
return hasErrorOccurred;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 5e3162197ed1..8f5f3cff17cb 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -231,8 +231,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
bool hasWeak = false, hasStrong = false;
- ObjCPropertyDecl::PropertyAttributeKind
- Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
+ ObjCPropertyAttribute::Kind Attrs = ObjCPropertyAttribute::kind_noattr;
for (IndivPropsTy::iterator
PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
ObjCPropertyDecl *PD = *PI;
@@ -274,7 +273,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
else
toAttr = "unsafe_unretained";
}
- if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ if (Attrs & ObjCPropertyAttribute::kind_assign)
MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
else
MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
@@ -302,8 +301,8 @@ static void checkAllProps(MigrationContext &MigrateCtx,
for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
ObjCPropertyDecl *PD = AllProps[i];
if (PD->getPropertyAttributesAsWritten() &
- (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_readonly)) {
+ (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_readonly)) {
SourceLocation AtLoc = PD->getAtLoc();
if (AtLoc.isInvalid())
continue;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
index 0675fb0baeb8..cba2256ef97b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
@@ -168,22 +168,22 @@ private:
}
void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
- ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+ ObjCPropertyAttribute::Kind propAttrs = getPropertyAttrs(props);
- if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_weak))
+ if (propAttrs &
+ (ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_strong | ObjCPropertyAttribute::kind_weak))
return;
- if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
+ if (propAttrs & ObjCPropertyAttribute::kind_retain) {
// strong is the default.
return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
}
bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);
- if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (propAttrs & ObjCPropertyAttribute::kind_assign) {
if (HasIvarAssignedAPlusOneObject)
return doPropAction(PropAction_AssignRemoved, props, atLoc);
return doPropAction(PropAction_AssignRewritten, props, atLoc);
@@ -287,7 +287,10 @@ private:
public:
PlusOneAssign(ObjCIvarDecl *D) : Ivar(D) {}
- bool VisitBinAssign(BinaryOperator *E) {
+ bool VisitBinaryOperator(BinaryOperator *E) {
+ if (E->getOpcode() != BO_Assign)
+ return true;
+
Expr *lhs = E->getLHS()->IgnoreParenImpCasts();
if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(lhs)) {
if (RE->getDecl() != Ivar)
@@ -354,11 +357,10 @@ private:
return ty;
}
- ObjCPropertyDecl::PropertyAttributeKind
- getPropertyAttrs(PropsTy &props) const {
+ ObjCPropertyAttribute::Kind getPropertyAttrs(PropsTy &props) const {
assert(!props.empty());
- ObjCPropertyDecl::PropertyAttributeKind
- attrs = props[0].PropD->getPropertyAttributesAsWritten();
+ ObjCPropertyAttribute::Kind attrs =
+ props[0].PropD->getPropertyAttributesAsWritten();
#ifndef NDEBUG
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransProtectedScope.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransProtectedScope.cpp
index 9e9e9cb7a96d..154e0b54800f 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransProtectedScope.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransProtectedScope.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Transforms.h"
#include "Internals.h"
+#include "Transforms.h"
#include "clang/AST/ASTContext.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index d28bd378acc1..81e67628fb1f 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -118,13 +118,11 @@ public:
ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
if (!(setterM && setterM->isDefined())) {
- ObjCPropertyDecl::PropertyAttributeKind AttrKind =
- PD->getPropertyAttributes();
- if (AttrKind &
- (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_strong))
- SynthesizedProperties[PD] = PID;
+ ObjCPropertyAttribute::Kind AttrKind = PD->getPropertyAttributes();
+ if (AttrKind & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_strong))
+ SynthesizedProperties[PD] = PID;
}
}
}
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
index 59b80a917e56..e274a540e408 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
@@ -8,6 +8,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/ARCMigrate/ARCMT.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
diff --git a/contrib/llvm-project/clang/lib/AST/APValue.cpp b/contrib/llvm-project/clang/lib/AST/APValue.cpp
index 50f8d05dacb4..f3828bb54c1d 100644
--- a/contrib/llvm-project/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm-project/clang/lib/AST/APValue.cpp
@@ -378,11 +378,6 @@ void APValue::swap(APValue &RHS) {
memcpy(RHS.Data.buffer, TmpData, DataSize);
}
-LLVM_DUMP_METHOD void APValue::dump() const {
- dump(llvm::errs());
- llvm::errs() << '\n';
-}
-
static double GetApproxValue(const llvm::APFloat &F) {
llvm::APFloat V = F;
bool ignored;
@@ -391,85 +386,6 @@ static double GetApproxValue(const llvm::APFloat &F) {
return V.convertToDouble();
}
-void APValue::dump(raw_ostream &OS) const {
- switch (getKind()) {
- case None:
- OS << "None";
- return;
- case Indeterminate:
- OS << "Indeterminate";
- return;
- case Int:
- OS << "Int: " << getInt();
- return;
- case Float:
- OS << "Float: " << GetApproxValue(getFloat());
- return;
- case FixedPoint:
- OS << "FixedPoint : " << getFixedPoint();
- return;
- case Vector:
- OS << "Vector: ";
- getVectorElt(0).dump(OS);
- for (unsigned i = 1; i != getVectorLength(); ++i) {
- OS << ", ";
- getVectorElt(i).dump(OS);
- }
- return;
- case ComplexInt:
- OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
- return;
- case ComplexFloat:
- OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
- << ", " << GetApproxValue(getComplexFloatImag());
- return;
- case LValue:
- OS << "LValue: <todo>";
- return;
- case Array:
- OS << "Array: ";
- for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
- getArrayInitializedElt(I).dump(OS);
- if (I != getArraySize() - 1) OS << ", ";
- }
- if (hasArrayFiller()) {
- OS << getArraySize() - getArrayInitializedElts() << " x ";
- getArrayFiller().dump(OS);
- }
- return;
- case Struct:
- OS << "Struct ";
- if (unsigned N = getStructNumBases()) {
- OS << " bases: ";
- getStructBase(0).dump(OS);
- for (unsigned I = 1; I != N; ++I) {
- OS << ", ";
- getStructBase(I).dump(OS);
- }
- }
- if (unsigned N = getStructNumFields()) {
- OS << " fields: ";
- getStructField(0).dump(OS);
- for (unsigned I = 1; I != N; ++I) {
- OS << ", ";
- getStructField(I).dump(OS);
- }
- }
- return;
- case Union:
- OS << "Union: ";
- getUnionValue().dump(OS);
- return;
- case MemberPointer:
- OS << "MemberPointer: <todo>";
- return;
- case AddrLabelDiff:
- OS << "AddrLabelDiff: <todo>";
- return;
- }
- llvm_unreachable("Unknown APValue kind!");
-}
-
void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
QualType Ty) const {
switch (getKind()) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index 1be72efe4de8..2ba643f12a82 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -29,15 +29,17 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -54,6 +56,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
@@ -97,34 +100,8 @@
using namespace clang;
enum FloatingRank {
- Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
+ BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};
-const Expr *ASTContext::traverseIgnored(const Expr *E) const {
- return traverseIgnored(const_cast<Expr *>(E));
-}
-
-Expr *ASTContext::traverseIgnored(Expr *E) const {
- if (!E)
- return nullptr;
-
- switch (Traversal) {
- case ast_type_traits::TK_AsIs:
- return E;
- case ast_type_traits::TK_IgnoreImplicitCastsAndParentheses:
- return E->IgnoreParenImpCasts();
- case ast_type_traits::TK_IgnoreUnlessSpelledInSource:
- return E->IgnoreUnlessSpelledInSource();
- }
- llvm_unreachable("Invalid Traversal type!");
-}
-
-ast_type_traits::DynTypedNode
-ASTContext::traverseIgnored(const ast_type_traits::DynTypedNode &N) const {
- if (const auto *E = N.get<Expr>()) {
- return ast_type_traits::DynTypedNode::create(*traverseIgnored(E));
- }
- return N;
-}
/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
@@ -321,6 +298,12 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}
+void ASTContext::addComment(const RawComment &RC) {
+ assert(LangOpts.RetainCommentsFromSystemHeaders ||
+ !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
+ Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
+}
+
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
@@ -493,10 +476,20 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
if (Comments.empty() || Decls.empty())
return;
- // See if there are any new comments that are not attached to a decl.
- // The location doesn't have to be precise - we care only about the file.
- const FileID File =
- SourceMgr.getDecomposedLoc((*Decls.begin())->getLocation()).first;
+ FileID File;
+ for (Decl *D : Decls) {
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isValid()) {
+ // See if there are any new comments that are not attached to a decl.
+ // The location doesn't have to be precise - we care only about the file.
+ File = SourceMgr.getDecomposedLoc(Loc).first;
+ break;
+ }
+ }
+
+ if (File.isInvalid())
+ return;
+
auto CommentsInThisFile = Comments.getCommentsInFile(File);
if (!CommentsInThisFile || CommentsInThisFile->empty() ||
CommentsInThisFile->rbegin()->second->isAttached())
@@ -661,7 +654,7 @@ comments::FullComment *ASTContext::getCommentForDecl(
return FC;
}
-void
+void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &C,
TemplateTemplateParmDecl *Parm) {
@@ -899,6 +892,7 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(*this);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(*this);
@@ -913,6 +907,12 @@ interp::Context &ASTContext::getInterpContext() {
return *InterpContext.get();
}
+ParentMapContext &ASTContext::getParentMapContext() {
+ if (!ParentMapCtx)
+ ParentMapCtx.reset(new ParentMapContext(*this));
+ return *ParentMapCtx.get();
+}
+
static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
const LangOptions &LOpts) {
if (LOpts.FakeAddressSpaceMap) {
@@ -1008,80 +1008,9 @@ ASTContext::~ASTContext() {
Value->~APValue();
}
-class ASTContext::ParentMap {
- /// Contains parents of a node.
- using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
-
- /// Maps from a node to its parents. This is used for nodes that have
- /// pointer identity only, which are more common and we can save space by
- /// only storing a unique pointer to them.
- using ParentMapPointers = llvm::DenseMap<
- const void *,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- /// Parent map for nodes without pointer identity. We store a full
- /// DynTypedNode for all keys.
- using ParentMapOtherNodes = llvm::DenseMap<
- ast_type_traits::DynTypedNode,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- ParentMapPointers PointerParents;
- ParentMapOtherNodes OtherParents;
- class ASTVisitor;
-
- static ast_type_traits::DynTypedNode
- getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
- if (const auto *D = U.dyn_cast<const Decl *>())
- return ast_type_traits::DynTypedNode::create(*D);
- if (const auto *S = U.dyn_cast<const Stmt *>())
- return ast_type_traits::DynTypedNode::create(*S);
- return *U.get<ast_type_traits::DynTypedNode *>();
- }
-
- template <typename NodeTy, typename MapTy>
- static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
- const MapTy &Map) {
- auto I = Map.find(Node);
- if (I == Map.end()) {
- return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
- }
- if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
- return llvm::makeArrayRef(*V);
- }
- return getSingleDynTypedNodeFromParentMap(I->second);
- }
-
-public:
- ParentMap(ASTContext &Ctx);
- ~ParentMap() {
- for (const auto &Entry : PointerParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- for (const auto &Entry : OtherParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- }
-
- DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
- if (Node.getNodeKind().hasPointerIdentity())
- return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
- return getDynNodeFromMap(Node, OtherParents);
- }
-};
-
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
TraversalScope = TopLevelDecls;
- Parents.clear();
+ getParentMapContext().clear();
}
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
@@ -1176,6 +1105,15 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
}
+ArrayRef<Module *>
+ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
+ auto MergedIt =
+ MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
+ if (MergedIt == MergedDefModules.end())
+ return None;
+ return MergedIt->second;
+}
+
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
if (LazyInitializers.empty())
return;
@@ -1445,8 +1383,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
// Placeholder type for OMP array sections.
- if (LangOpts.OpenMP)
+ if (LangOpts.OpenMP) {
InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
+ InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
+ }
+ if (LangOpts.MatrixTypes)
+ InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
@@ -1505,8 +1448,16 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// half type (OpenCL 6.1.1.1) / ARM NEON __fp16
InitBuiltinType(HalfTy, BuiltinType::Half);
+ InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
+
// Builtin type used to help define __builtin_va_list.
VaListTagDecl = nullptr;
+
+ // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
+ if (LangOpts.MicrosoftExt || LangOpts.Borland) {
+ MSGuidTagDecl = buildImplicitRecord("_GUID");
+ TUDecl->addDecl(MSGuidTagDecl);
+ }
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
@@ -1679,7 +1630,8 @@ void ASTContext::getOverriddenMethods(
}
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
- assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->getNextLocalImport() &&
+ "Import declaration already in the chain");
assert(!Import->isFromASTFile() && "Non-local import declaration");
if (!FirstLocalImport) {
FirstLocalImport = Import;
@@ -1687,7 +1639,7 @@ void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
return;
}
- LastLocalImport->NextLocalImport = Import;
+ LastLocalImport->setNextLocalImport(Import);
LastLocalImport = Import;
}
@@ -1701,6 +1653,8 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
switch (T->castAs<BuiltinType>()->getKind()) {
default:
llvm_unreachable("Not a floating point type!");
+ case BuiltinType::BFloat16:
+ return Target->getBFloat16Format();
case BuiltinType::Float16:
case BuiltinType::Half:
return Target->getHalfFormat();
@@ -1813,6 +1767,10 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
return toCharUnitsFromBits(Align);
}
+CharUnits ASTContext::getExnObjectAlignment() const {
+ return toCharUnitsFromBits(Target->getExnObjectAlignment());
+}
+
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
@@ -1943,24 +1901,24 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::IncompleteArray:
case Type::VariableArray:
- Width = 0;
- Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
- break;
-
case Type::ConstantArray: {
- const auto *CAT = cast<ConstantArrayType>(T);
+ // Model non-constant sized arrays as size zero, but track the alignment.
+ uint64_t Size = 0;
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
+ Size = CAT->getSize().getZExtValue();
- TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
- uint64_t Size = CAT->getSize().getZExtValue();
+ TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
"Overflow in array type bit size evaluation");
Width = EltInfo.Width * Size;
Align = EltInfo.Align;
+ AlignIsRequired = EltInfo.AlignIsRequired;
if (!getTargetInfo().getCXXABI().isMicrosoft() ||
getTargetInfo().getPointerWidth(0) == 64)
Width = llvm::alignTo(Width, Align);
break;
}
+
case Type::ExtVector:
case Type::Vector: {
const auto *VT = cast<VectorType>(T);
@@ -1980,6 +1938,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
+ case Type::ConstantMatrix: {
+ const auto *MT = cast<ConstantMatrixType>(T);
+ TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
+ // The internal layout of a matrix value is implementation defined.
+ // Initially be ABI compatible with arrays with respect to alignment and
+ // size.
+ Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
+ Align = ElementInfo.Align;
+ break;
+ }
+
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("Unknown builtin type!");
@@ -2080,6 +2049,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Target->getLongFractWidth();
Align = Target->getLongFractAlign();
break;
+ case BuiltinType::BFloat16:
+ Width = Target->getBFloat16Width();
+ Align = Target->getBFloat16Align();
+ break;
case BuiltinType::Float16:
case BuiltinType::Half:
if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
@@ -2158,16 +2131,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// Because the length is only known at runtime, we use a dummy value
// of 0 for the static length. The alignment values are those defined
// by the Procedure Call Standard for the Arm Architecture.
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
- case BuiltinType::Id: \
- Width = 0; \
- Align = 128; \
- break;
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
- case BuiltinType::Id: \
- Width = 0; \
- Align = 16; \
- break;
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 128; \
+ break;
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 16; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
}
break;
@@ -2215,11 +2189,25 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
case Type::ObjCInterface: {
const auto *ObjCI = cast<ObjCInterfaceType>(T);
+ if (ObjCI->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
break;
}
+ case Type::ExtInt: {
+ const auto *EIT = cast<ExtIntType>(T);
+ Align =
+ std::min(static_cast<unsigned>(std::max(
+ getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
+ Target->getLongLongAlign());
+ Width = llvm::alignTo(EIT->getNumBits(), Align);
+ break;
+ }
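  // Worked example (editorial note, not part of the patch): for _ExtInt(37)
  // on a typical 64-bit target (CharWidth == 8, LongLongAlign == 64):
  //   PowerOf2Ceil(37) = 64, max(8, 64) = 64, min(64, 64) = 64  => Align = 64,
  //   Width = alignTo(37, 64) = 64 bits.
  // For _ExtInt(200): PowerOf2Ceil(200) = 256 is clamped to 64 => Align = 64,
  //   Width = alignTo(200, 64) = 256 bits.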
case Type::Record:
case Type::Enum: {
const auto *TT = cast<TagType>(T);
@@ -3396,6 +3384,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::DependentVector:
case Type::ExtVector:
case Type::DependentSizedExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::DependentAddressSpace:
case Type::ObjCObject:
case Type::ObjCInterface:
@@ -3416,6 +3406,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -3642,6 +3634,33 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
return QualType(newType, 0);
}
+/// getScalableVectorType - Return the unique reference to a scalable vector
+/// type of the specified element type and size. VectorType must be a built-in
+/// type.
+QualType ASTContext::getScalableVectorType(QualType EltTy,
+ unsigned NumElts) const {
+ if (Target->hasAArch64SVETypes()) {
+ uint64_t EltTySize = getTypeSize(EltTy);
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
+ IsFP && !IsBF) || \
+ (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
+ IsBF && !IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls) { \
+ return SingletonId; \
+ }
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ }
+ return QualType();
+}
+
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
@@ -3701,10 +3720,10 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
(void)CanonCheck;
DependentVectorTypes.InsertNode(New, InsertPos);
} else {
- QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
- SourceLocation());
+ QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
+ SourceLocation(), VecKind);
New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, CanonExtTy, SizeExpr, AttrLoc, VecKind);
+ *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
}
}
@@ -3785,6 +3804,78 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType,
return QualType(New, 0);
}
+QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
+ unsigned NumColumns) const {
+ llvm::FoldingSetNodeID ID;
+ ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
+ Type::ConstantMatrix);
+
+ assert(MatrixType::isValidElementType(ElementTy) &&
+ "need a valid element type");
+ assert(ConstantMatrixType::isDimensionValid(NumRows) &&
+ ConstantMatrixType::isDimensionValid(NumColumns) &&
+ "need valid matrix dimensions");
+ void *InsertPos = nullptr;
+ if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(MTP, 0);
+
+ QualType Canonical;
+ if (!ElementTy.isCanonical()) {
+ Canonical =
+ getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
+
+ ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Matrix type shouldn't already exist in the map");
+ (void)NewIP;
+ }
+
+ auto *New = new (*this, TypeAlignment)
+ ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
+ MatrixTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
+ Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttrLoc) const {
+ QualType CanonElementTy = getCanonicalType(ElementTy);
+ llvm::FoldingSetNodeID ID;
+ DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
+ ColumnExpr);
+
+ void *InsertPos = nullptr;
+ DependentSizedMatrixType *Canon =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Canon) {
+ Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
+ *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
+#ifndef NDEBUG
+ DependentSizedMatrixType *CanonCheck =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
+#endif
+ DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
+ Types.push_back(Canon);
+ }
+
+ // Already have a canonical version of the matrix type
+ //
+ // If it exactly matches the requested type, use it directly.
+ if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
+ Canon->getColumnExpr() == ColumnExpr)
+ return QualType(Canon, 0);
+
+ // Use Canon as the canonical type for newly-built type.
+ DependentSizedMatrixType *New = new (*this, TypeAlignment)
+ DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
+ ColumnExpr, AttrLoc);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
Expr *AddrSpaceExpr,
SourceLocation AttrLoc) const {
@@ -4088,6 +4179,39 @@ QualType ASTContext::getWritePipeType(QualType T) const {
return getPipeType(T, false);
}
+QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
+ llvm::FoldingSetNodeID ID;
+ ExtIntType::Profile(ID, IsUnsigned, NumBits);
+
+ void *InsertPos = nullptr;
+ if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(EIT, 0);
+
+ auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
+ ExtIntTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
+ Expr *NumBitsExpr) const {
+ assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
+ llvm::FoldingSetNodeID ID;
+ DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
+
+ void *InsertPos = nullptr;
+ if (DependentExtIntType *Existing =
+ DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Existing, 0);
+
+ auto *New = new (*this, TypeAlignment)
+ DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
+ DependentExtIntTypes.InsertNode(New, InsertPos);
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
if (!isa<CXXRecordDecl>(D)) return false;
@@ -4600,7 +4724,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Expr *E = new (*this) DeclRefExpr(
*this, NTTP, /*enclosing*/ false,
- NTTP->getType().getNonLValueExprType(*this),
+ NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this),
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
if (NTTP->isParameterPack())
@@ -4872,7 +4996,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
ArrayRef<ObjCProtocolDecl *> protocols) const {
// Look in the folding set for an existing type.
llvm::FoldingSetNodeID ID;
- ObjCTypeParamType::Profile(ID, Decl, protocols);
+ ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
void *InsertPos = nullptr;
if (ObjCTypeParamType *TypeParam =
ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
@@ -4898,6 +5022,17 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
return QualType(newType, 0);
}
+void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
+ ObjCTypeParamDecl *New) const {
+ New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
+ // Update TypeForDecl after updating TypeSourceInfo.
+ auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
+ SmallVector<ObjCProtocolDecl *, 8> protocols;
+ protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
+ QualType UpdatedTy = getObjCTypeParamType(New, protocols);
+ New->setTypeForDecl(UpdatedTy.getTypePtr());
+}
+
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
@@ -5158,8 +5293,12 @@ ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
void *Mem = Allocate(sizeof(AutoType) +
sizeof(TemplateArgument) * TypeConstraintArgs.size(),
TypeAlignment);
- auto *AT = new (Mem) AutoType(DeducedType, Keyword, IsDependent, IsPack,
- TypeConstraintConcept, TypeConstraintArgs);
+ auto *AT = new (Mem) AutoType(
+ DeducedType, Keyword,
+ (IsDependent ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) |
+ (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
+ TypeConstraintConcept, TypeConstraintArgs);
Types.push_back(AT);
if (InsertPos)
AutoTypes.InsertNode(AT, InsertPos);
@@ -5219,11 +5358,11 @@ QualType ASTContext::getAtomicType(QualType T) const {
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
- AutoDeductTy = QualType(
- new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
- /*dependent*/false, /*pack*/false,
- /*concept*/nullptr, /*args*/{}),
- 0);
+ AutoDeductTy = QualType(new (*this, TypeAlignment)
+ AutoType(QualType(), AutoTypeKeyword::Auto,
+ TypeDependence::None,
+ /*concept*/ nullptr, /*args*/ {}),
+ 0);
return AutoDeductTy;
}
@@ -5859,6 +5998,7 @@ static FloatingRank getFloatingRank(QualType T) {
case BuiltinType::Double: return DoubleRank;
case BuiltinType::LongDouble: return LongDoubleRank;
case BuiltinType::Float128: return Float128Rank;
+ case BuiltinType::BFloat16: return BFloat16Rank;
}
}
@@ -5871,6 +6011,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
FloatingRank EltRank = getFloatingRank(Size);
if (Domain->isComplexType()) {
switch (EltRank) {
+ case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
case Float16Rank:
case HalfRank: llvm_unreachable("Complex half is not supported");
case FloatRank: return FloatComplexTy;
@@ -5883,6 +6024,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
assert(Domain->isRealFloatingType() && "Unknown domain!");
switch (EltRank) {
case Float16Rank: return HalfTy;
+ case BFloat16Rank: return BFloat16Ty;
case HalfRank: return HalfTy;
case FloatRank: return FloatTy;
case DoubleRank: return DoubleTy;
@@ -5919,6 +6061,11 @@ int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
unsigned ASTContext::getIntegerRank(const Type *T) const {
assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+ // Results in this 'losing' to any type of the same size, but winning if
+ // larger.
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return 0 + (EIT->getNumBits() << 3);
+
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("getIntegerRank(): not a built-in integer");
case BuiltinType::Bool:
@@ -6309,39 +6456,39 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
return getTagDeclType(BlockDescriptorExtendedType);
}
-TargetInfo::OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
+OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
const auto *BT = dyn_cast<BuiltinType>(T);
if (!BT) {
if (isa<PipeType>(T))
- return TargetInfo::OCLTK_Pipe;
+ return OCLTK_Pipe;
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
- return TargetInfo::OCLTK_Image;
+ return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLClkEvent:
- return TargetInfo::OCLTK_ClkEvent;
+ return OCLTK_ClkEvent;
case BuiltinType::OCLEvent:
- return TargetInfo::OCLTK_Event;
+ return OCLTK_Event;
case BuiltinType::OCLQueue:
- return TargetInfo::OCLTK_Queue;
+ return OCLTK_Queue;
case BuiltinType::OCLReserveID:
- return TargetInfo::OCLTK_ReserveID;
+ return OCLTK_ReserveID;
case BuiltinType::OCLSampler:
- return TargetInfo::OCLTK_Sampler;
+ return OCLTK_Sampler;
default:
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
}
@@ -6414,6 +6561,24 @@ bool ASTContext::getByrefLifetime(QualType Ty,
return true;
}
+CanQualType ASTContext::getNSUIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return UnsignedLongLongTy;
+ return UnsignedLongTy;
+}
+
+CanQualType ASTContext::getNSIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return LongLongTy;
+ return LongTy;
+}
+
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
if (!ObjCInstanceTypeDecl)
ObjCInstanceTypeDecl =
@@ -6717,11 +6882,11 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (PD->isReadOnly()) {
S += ",R";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
S += ",C";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
S += ",&";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
S += ",W";
} else {
switch (PD->getSetterKind()) {
@@ -6737,15 +6902,15 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (Dynamic)
S += ",D";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
S += ",N";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
S += ",G";
S += PD->getGetterName().getAsString();
}
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
S += ",S";
S += PD->getSetterName().getAsString();
}
@@ -6837,6 +7002,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
case BuiltinType::LongDouble: return 'D';
case BuiltinType::NullPtr: return '*'; // like char*
+ case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::Half:
@@ -7277,6 +7443,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
*NotEncodedT = T;
return;
+ case Type::ConstantMatrix:
+ if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
@@ -7284,6 +7455,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
+ case Type::ExtInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
@@ -7805,6 +7977,57 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
+static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 3;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // void *CurrentSavedRegisterArea;
+ FieldTypes[0] = Context->getPointerType(Context->VoidTy);
+ FieldNames[0] = "__current_saved_reg_area_pointer";
+
+ // void *SavedRegAreaEnd;
+ FieldTypes[1] = Context->getPointerType(Context->VoidTy);
+ FieldNames[1] = "__saved_reg_area_end_pointer";
+
+ // void *OverflowArea;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "__overflow_area_pointer";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(
+ const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
+ /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
+ QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType = Context->getConstantArrayType(
+ VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
+
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
TargetInfo::BuiltinVaListKind Kind) {
switch (Kind) {
@@ -7824,6 +8047,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
return CreateAAPCSABIBuiltinVaListDecl(Context);
case TargetInfo::SystemZBuiltinVaList:
return CreateSystemZBuiltinVaListDecl(Context);
+ case TargetInfo::HexagonBuiltinVaList:
+ return CreateHexagonBuiltinVaListDecl(Context);
}
llvm_unreachable("Unhandled __builtin_va_list type kind");
@@ -8102,6 +8327,16 @@ static bool areCompatVectorTypes(const VectorType *LHS,
LHS->getNumElements() == RHS->getNumElements();
}
+/// areCompatMatrixTypes - Return true if the two specified matrix types are
+/// compatible.
+static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
+ const ConstantMatrixType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumRows() == RHS->getNumRows() &&
+ LHS->getNumColumns() == RHS->getNumColumns();
+}
+
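A small illustration of what counts as compatible here; it assumes -fenable-matrix, and the typedef names are invented for the example.

typedef float m4x4_t __attribute__((matrix_type(4, 4)));
typedef float m4x3_t __attribute__((matrix_type(4, 3)));
// Same element type but different column counts, so areCompatMatrixTypes()
// returns false and the ConstantMatrix case added to mergeTypes() further
// down finds no common type for the two.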
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
QualType SecondVec) {
assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
@@ -8384,10 +8619,18 @@ bool ASTContext::canAssignObjCInterfacesInBlockPointer(
RHSOPT->isObjCQualifiedIdType());
}
- if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
- return finish(ObjCQualifiedIdTypesAreCompatible(
- (BlockReturnType ? LHSOPT : RHSOPT),
- (BlockReturnType ? RHSOPT : LHSOPT), false));
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
+ if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
+ // For block parameters, use the previous type checking for compatibility.
+ return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
+ // Or corrected type checking as in non-compat mode.
+ (!BlockReturnType &&
+ ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
+ else
+ return finish(ObjCQualifiedIdTypesAreCompatible(
+ (BlockReturnType ? LHSOPT : RHSOPT),
+ (BlockReturnType ? RHSOPT : LHSOPT), false));
+ }
const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
@@ -8738,8 +8981,8 @@ bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
return canAssignObjCInterfaces(
- getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
- getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+ getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}
/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
@@ -8805,8 +9048,8 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
}
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
- bool OfBlockPointer,
- bool Unqualified) {
+ bool OfBlockPointer, bool Unqualified,
+ bool AllowCXX) {
const auto *lbase = lhs->castAs<FunctionType>();
const auto *rbase = rhs->castAs<FunctionType>();
const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
@@ -8880,7 +9123,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
if (lproto && rproto) { // two C99 style function prototypes
- assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ assert((AllowCXX ||
+ (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
"C++ shouldn't be here");
// Compatible functions must have the same number of parameters
if (lproto->getNumParams() != rproto->getNumParams())
@@ -8944,7 +9188,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
const FunctionProtoType *proto = lproto ? lproto : rproto;
if (proto) {
- assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
if (proto->isVariadic())
return {};
// Check that the types are compatible with the types that
@@ -9298,6 +9542,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
RHSCan->castAs<VectorType>()))
return LHS;
return {};
+ case Type::ConstantMatrix:
+ if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
+ RHSCan->castAs<ConstantMatrixType>()))
+ return LHS;
+ return {};
case Type::ObjCObject: {
// Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
@@ -9323,6 +9572,21 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
+ case Type::ExtInt: {
+ // Merge two ext-int types, while trying to preserve typedef info.
+ bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
+ bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
+ unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
+ unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
+
+ // Like unsigned/int, shouldn't have a type if they don't match.
+ if (LHSUnsigned != RHSUnsigned)
+ return {};
+
+ if (LHSBits != RHSBits)
+ return {};
+ return LHS;
+ }
}
llvm_unreachable("Invalid Type::Class!");
@@ -9463,6 +9727,8 @@ unsigned ASTContext::getIntWidth(QualType T) const {
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
+ if (const auto *EIT = T->getAs<ExtIntType>())
+ return EIT->getNumBits();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
}
@@ -9644,6 +9910,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
default: llvm_unreachable("Unknown builtin type letter!");
+ case 'y':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'y'!");
+ Type = Context.BFloat16Ty;
+ break;
case 'v':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'v'!");
@@ -9739,6 +10010,19 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
else
Type = Context.getLValueReferenceType(Type);
break;
+ case 'q': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ Type = Context.getScalableVectorType(ElementType, NumElements);
+ break;
+ }
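To see what the new 'q' letter does, here is the decoding of an invented signature fragment "q16Sc", assuming an ASTContext named Context as in the surrounding code.

// 'q'  -> a scalable vector follows
// "16" -> minimum number of elements
// "Sc" -> element type, signed char
// The fragment therefore decodes to roughly:
QualType VecTy = Context.getScalableVectorType(Context.SignedCharTy, 16);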
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
@@ -10131,6 +10415,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
else if (isa<PragmaDetectMismatchDecl>(D))
return true;
+ else if (isa<OMPRequiresDecl>(D))
+ return true;
else if (isa<OMPThreadPrivateDecl>(D))
return !D->getDeclContext()->isDependentContext();
else if (isa<OMPAllocateDecl>(D))
@@ -10320,10 +10606,15 @@ bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
VTableContextBase *ASTContext::getVTableContext() {
if (!VTContext.get()) {
- if (Target->getCXXABI().isMicrosoft())
+ auto ABI = Target->getCXXABI();
+ if (ABI.isMicrosoft())
VTContext.reset(new MicrosoftVTableContext(*this));
- else
- VTContext.reset(new ItaniumVTableContext(*this));
+ else {
+ auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
+ ? ItaniumVTableContext::Relative
+ : ItaniumVTableContext::Pointer;
+ VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
+ }
}
return VTContext.get();
}
@@ -10341,6 +10632,7 @@ MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
case TargetCXXABI::iOS64:
case TargetCXXABI::WebAssembly:
case TargetCXXABI::WatchOS:
+ case TargetCXXABI::XL:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -10382,8 +10674,10 @@ QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there are no appropriate target types.
-QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
- TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth);
+QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
+ bool ExplicitIEEE) const {
+ TargetInfo::RealType Ty =
+ getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
switch (Ty) {
case TargetInfo::Float:
return FloatTy;
@@ -10512,6 +10806,23 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
return Result;
}
+MSGuidDecl *
+ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
+ assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
+
+ llvm::FoldingSetNodeID ID;
+ MSGuidDecl::Profile(ID, Parts);
+
+ void *InsertPos;
+ if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ QualType GUIDType = getMSGuidType().withConst();
+ MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
+ MSGuidDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
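A sketch of the uniquing behaviour of the new getMSGuidDecl, assuming Microsoft extensions are enabled; the demo function name is invented, and the brace-init order is assumed to match MSGuidDecl::Parts (Part1, Part2, Part3, Part4And5).

#include <cassert>
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
using namespace clang;

void demoGuidUniquing(ASTContext &Ctx) {
  MSGuidDecl::Parts P = {0x12345678, 0x1234, 0x5678,
                         {0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78}};
  // Equal GUID parts hash to the same FoldingSet node, so both calls return
  // the same canonical declaration.
  assert(Ctx.getMSGuidDecl(P) == Ctx.getMSGuidDecl(P));
}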
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
const llvm::Triple &T = getTargetInfo().getTriple();
if (!T.isOSDarwin())
@@ -10530,146 +10841,6 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
-/// Template specializations to abstract away from pointers and TypeLocs.
-/// @{
-template <typename T>
-static ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
- return ast_type_traits::DynTypedNode::create(*Node);
-}
-template <>
-ast_type_traits::DynTypedNode createDynTypedNode(const TypeLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-template <>
-ast_type_traits::DynTypedNode
-createDynTypedNode(const NestedNameSpecifierLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-/// @}
-
-/// A \c RecursiveASTVisitor that builds a map from nodes to their
-/// parents as defined by the \c RecursiveASTVisitor.
-///
-/// Note that the relationship described here is purely in terms of AST
-/// traversal - there are other relationships (for example declaration context)
-/// in the AST that are better modeled by special matchers.
-///
-/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
-class ASTContext::ParentMap::ASTVisitor
- : public RecursiveASTVisitor<ASTVisitor> {
-public:
- ASTVisitor(ParentMap &Map, ASTContext &Context)
- : Map(Map), Context(Context) {}
-
-private:
- friend class RecursiveASTVisitor<ASTVisitor>;
-
- using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
-
- bool shouldVisitTemplateInstantiations() const { return true; }
-
- bool shouldVisitImplicitCode() const { return true; }
-
- template <typename T, typename MapNodeTy, typename BaseTraverseFn,
- typename MapTy>
- bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
- MapTy *Parents) {
- if (!Node)
- return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ParentVector *>()) {
- auto *Vector = new ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector = NodeOrVector.template get<ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
- }
- }
- ParentStack.push_back(createDynTypedNode(Node));
- bool Result = BaseTraverse();
- ParentStack.pop_back();
- return Result;
- }
-
- bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(
- DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
- &Map.PointerParents);
- }
-
- bool TraverseStmt(Stmt *StmtNode) {
- Stmt *FilteredNode = StmtNode;
- if (auto *ExprNode = dyn_cast_or_null<Expr>(FilteredNode))
- FilteredNode = Context.traverseIgnored(ExprNode);
- return TraverseNode(FilteredNode, FilteredNode,
- [&] { return VisitorBase::TraverseStmt(FilteredNode); },
- &Map.PointerParents);
- }
-
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
- return TraverseNode(
- TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
- &Map.OtherParents);
- }
-
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
- return TraverseNode(
- NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
- [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
- &Map.OtherParents);
- }
-
- ParentMap &Map;
- ASTContext &Context;
- llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
-};
-
-ASTContext::ParentMap::ParentMap(ASTContext &Ctx) {
- ASTVisitor(*this, Ctx).TraverseAST(Ctx);
-}
-
-ASTContext::DynTypedNodeList
-ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- std::unique_ptr<ParentMap> &P = Parents[Traversal];
- if (!P)
- // We build the parent map for the traversal scope (usually whole TU), as
- // hasAncestor can escape any subtree.
- P = std::make_unique<ParentMap>(*this);
- return P->getParents(Node);
-}
-
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
const ObjCMethodDecl *MethodImpl) {
@@ -10980,3 +11151,16 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().Features);
}
}
+
+OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
+ OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
+ return *OMPTraitInfoVector.back();
+}
+
+const DiagnosticBuilder &
+clang::operator<<(const DiagnosticBuilder &DB,
+ const ASTContext::SectionInfo &Section) {
+ if (Section.Decl)
+ return DB << Section.Decl;
+ return DB << "a prior #pragma section";
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index ea4d0dea58a3..05adf226bae3 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -1715,8 +1715,9 @@ class TemplateDiff {
bool FromDefault, bool ToDefault, bool Same) {
assert((FromTD || ToTD) && "Only one template argument may be missing.");
- std::string FromName = FromTD ? FromTD->getName() : "(no argument)";
- std::string ToName = ToTD ? ToTD->getName() : "(no argument)";
+ std::string FromName =
+ std::string(FromTD ? FromTD->getName() : "(no argument)");
+ std::string ToName = std::string(ToTD ? ToTD->getName() : "(no argument)");
if (FromTD && ToTD && FromName == ToName) {
FromName = FromTD->getQualifiedNameAsString();
ToName = ToTD->getQualifiedNameAsString();
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
index 22196a1a2600..284e5bdbc6b0 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
@@ -54,7 +54,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
NodeDumper.AddChild([=] {
NodeDumper.dumpBareDeclRef(*RI);
- if ((*RI)->isHidden())
+ if (!(*RI)->isUnconditionallyVisible())
OS << " hidden";
// If requested, dump the redecl chain for this lookup.
@@ -159,17 +159,22 @@ void QualType::dump(const char *msg) const {
dump();
}
-LLVM_DUMP_METHOD void QualType::dump() const { dump(llvm::errs()); }
+LLVM_DUMP_METHOD void QualType::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this);
+}
-LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS) const {
- ASTDumper Dumper(OS, nullptr, nullptr);
+LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
Dumper.Visit(*this);
}
-LLVM_DUMP_METHOD void Type::dump() const { dump(llvm::errs()); }
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
-LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
- QualType(this, 0).dump(OS);
+LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ QualType(this, 0).dump(OS, Context);
}
//===----------------------------------------------------------------------===//
@@ -189,8 +194,7 @@ LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize,
(void)Deserialize; // FIXME?
P.Visit(this);
} else {
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &SM,
- SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
+ ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
P.setDeserialize(Deserialize);
P.Visit(this);
}
@@ -198,9 +202,7 @@ LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize,
LLVM_DUMP_METHOD void Decl::dumpColor() const {
const ASTContext &Ctx = getASTContext();
- ASTDumper P(llvm::errs(), &Ctx.getCommentCommandTraits(),
- &Ctx.getSourceManager(), /*ShowColors*/ true,
- Ctx.getPrintingPolicy());
+ ASTDumper P(llvm::errs(), Ctx, /*ShowColors=*/true);
P.Visit(this);
}
@@ -214,10 +216,8 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
DC = DC->getParent();
- ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
- const SourceManager &SM = Ctx.getSourceManager();
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager(),
- SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
+ const ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
P.setDeserialize(Deserialize);
P.dumpLookups(this, DumpDecls);
}
@@ -226,27 +226,19 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
// Stmt method implementations
//===----------------------------------------------------------------------===//
-LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
- dump(llvm::errs(), SM);
-}
-
-LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
- ASTDumper P(OS, nullptr, &SM);
- P.Visit(this);
-}
-
-LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
- ASTDumper P(OS, nullptr, nullptr);
+LLVM_DUMP_METHOD void Stmt::dump() const {
+ ASTDumper P(llvm::errs(), /*ShowColors=*/false);
P.Visit(this);
}
-LLVM_DUMP_METHOD void Stmt::dump() const {
- ASTDumper P(llvm::errs(), nullptr, nullptr);
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper P(OS, Context, Context.getDiagnostics().getShowColors());
P.Visit(this);
}
LLVM_DUMP_METHOD void Stmt::dumpColor() const {
- ASTDumper P(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
+ ASTDumper P(llvm::errs(), /*ShowColors=*/true);
P.Visit(this);
}
@@ -255,27 +247,42 @@ LLVM_DUMP_METHOD void Stmt::dumpColor() const {
//===----------------------------------------------------------------------===//
LLVM_DUMP_METHOD void Comment::dump() const {
- dump(llvm::errs(), nullptr, nullptr);
-}
-
-LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
- dump(llvm::errs(), &Context.getCommentCommandTraits(),
- &Context.getSourceManager());
+ const auto *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(FC, FC);
}
-void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM) const {
- const FullComment *FC = dyn_cast<FullComment>(this);
+LLVM_DUMP_METHOD void Comment::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ const auto *FC = dyn_cast<FullComment>(this);
if (!FC)
return;
- ASTDumper D(OS, Traits, SM);
- D.Visit(FC, FC);
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
+ Dumper.Visit(FC, FC);
}
LLVM_DUMP_METHOD void Comment::dumpColor() const {
- const FullComment *FC = dyn_cast<FullComment>(this);
+ const auto *FC = dyn_cast<FullComment>(this);
if (!FC)
return;
- ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- D.Visit(FC, FC);
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/true);
+ Dumper.Visit(FC, FC);
+}
+
+//===----------------------------------------------------------------------===//
+// APValue method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void APValue::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this, /*Ty=*/QualType());
+}
+
+LLVM_DUMP_METHOD void APValue::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(OS, Context,
+ Context.getDiagnostics().getShowColors());
+ Dumper.Visit(*this, /*Ty=*/Context.getPointerType(Context.CharTy));
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index ab4e961dcd4c..3779e0cb872b 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -186,36 +186,25 @@ namespace clang {
return import(*From);
}
- template <class T>
- Expected<std::tuple<T>>
- importSeq(const T &From) {
- Expected<T> ToOrErr = import(From);
- if (!ToOrErr)
- return ToOrErr.takeError();
- return std::make_tuple<T>(std::move(*ToOrErr));
- }
-
- // Import multiple objects with a single function call.
- // This should work for every type for which a variant of `import` exists.
- // The arguments are processed from left to right and import is stopped on
- // first error.
- template <class THead, class... TTail>
- Expected<std::tuple<THead, TTail...>>
- importSeq(const THead &FromHead, const TTail &...FromTail) {
- Expected<std::tuple<THead>> ToHeadOrErr = importSeq(FromHead);
- if (!ToHeadOrErr)
- return ToHeadOrErr.takeError();
- Expected<std::tuple<TTail...>> ToTailOrErr = importSeq(FromTail...);
- if (!ToTailOrErr)
- return ToTailOrErr.takeError();
- return std::tuple_cat(*ToHeadOrErr, *ToTailOrErr);
+ // Helper for chaining together multiple imports. If an error is detected,
+ // subsequent imports will return default constructed nodes, so that failure
+ // can be detected with a single conditional branch after a sequence of
+ // imports.
+ template <typename T> T importChecked(Error &Err, const T &From) {
+ // Don't attempt to import nodes if we hit an error earlier.
+ if (Err)
+ return T{};
+ Expected<T> MaybeVal = import(From);
+ if (!MaybeVal) {
+ Err = MaybeVal.takeError();
+ return T{};
+ }
+ return *MaybeVal;
}
-// Wrapper for an overload set.
+ // Wrapper for an overload set.
template <typename ToDeclT> struct CallOverloadedCreateFun {
- template <typename... Args>
- auto operator()(Args &&... args)
- -> decltype(ToDeclT::Create(std::forward<Args>(args)...)) {
+ template <typename... Args> decltype(auto) operator()(Args &&... args) {
return ToDeclT::Create(std::forward<Args>(args)...);
}
};
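Condensed, the pattern the new importChecked helper enables looks like this; the concrete instances appear throughout the importer hunks below.

Error Err = Error::success();
auto ToType = importChecked(Err, D->getType());
auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
if (Err)
  return std::move(Err);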
@@ -474,7 +463,7 @@ namespace clang {
ParmVarDecl *ToParam);
template <typename T>
- bool hasSameVisibilityContext(T *Found, T *From);
+ bool hasSameVisibilityContextAndLinkage(T *Found, T *From);
bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
@@ -599,6 +588,7 @@ namespace clang {
ExpectedStmt VisitIntegerLiteral(IntegerLiteral *E);
ExpectedStmt VisitFloatingLiteral(FloatingLiteral *E);
ExpectedStmt VisitImaginaryLiteral(ImaginaryLiteral *E);
+ ExpectedStmt VisitFixedPointLiteral(FixedPointLiteral *E);
ExpectedStmt VisitCharacterLiteral(CharacterLiteral *E);
ExpectedStmt VisitStringLiteral(StringLiteral *E);
ExpectedStmt VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
@@ -659,7 +649,7 @@ namespace clang {
template<typename IIter, typename OIter>
Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
- using ItemT = typename std::remove_reference<decltype(*Obegin)>::type;
+ using ItemT = std::remove_reference_t<decltype(*Obegin)>;
for (; Ibegin != Iend; ++Ibegin, ++Obegin) {
Expected<ItemT> ToOrErr = import(*Ibegin);
if (!ToOrErr)
@@ -984,7 +974,10 @@ Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) {
}
template <typename T>
-bool ASTNodeImporter::hasSameVisibilityContext(T *Found, T *From) {
+bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(T *Found, T *From) {
+ if (Found->getLinkageInternal() != From->getLinkageInternal())
+ return false;
+
if (From->hasExternalFormalLinkage())
return Found->hasExternalFormalLinkage();
if (Importer.GetFromTU(Found) != From->getTranslationUnitDecl())
@@ -997,8 +990,11 @@ bool ASTNodeImporter::hasSameVisibilityContext(T *Found, T *From) {
}
template <>
-bool ASTNodeImporter::hasSameVisibilityContext(TypedefNameDecl *Found,
+bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(TypedefNameDecl *Found,
TypedefNameDecl *From) {
+ if (Found->getLinkageInternal() != From->getLinkageInternal())
+ return false;
+
if (From->isInAnonymousNamespace() && Found->isInAnonymousNamespace())
return Importer.GetFromTU(Found) == From->getTranslationUnitDecl();
return From->isInAnonymousNamespace() == Found->isInAnonymousNamespace();
@@ -1149,12 +1145,11 @@ ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
ExpectedType
ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
- QualType ToElementType;
- const Expr *ToSizeExpr;
- if (auto Imp = importSeq(T->getElementType(), T->getSizeExpr()))
- std::tie(ToElementType, ToSizeExpr) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToElementType = importChecked(Err, T->getElementType());
+ auto ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getConstantArrayType(
ToElementType, T->getSize(), ToSizeExpr, T->getSizeModifier(),
@@ -1174,15 +1169,12 @@ ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
ExpectedType
ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
- QualType ToElementType;
- Expr *ToSizeExpr;
- SourceRange ToBracketsRange;
- if (auto Imp = importSeq(
- T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
- std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
- else
- return Imp.takeError();
-
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getVariableArrayType(
ToElementType, ToSizeExpr, T->getSizeModifier(),
T->getIndexTypeCVRQualifiers(), ToBracketsRange);
@@ -1190,14 +1182,12 @@ ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
ExpectedType ASTNodeImporter::VisitDependentSizedArrayType(
const DependentSizedArrayType *T) {
- QualType ToElementType;
- Expr *ToSizeExpr;
- SourceRange ToBracketsRange;
- if (auto Imp = importSeq(
- T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
- std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange());
+ if (Err)
+ return std::move(Err);
// SizeExpr may be null if size is not specified directly.
// For example, 'int a[]'.
@@ -1262,26 +1252,24 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
}
FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
+ Error Err = Error::success();
FunctionProtoType::ExtProtoInfo ToEPI;
-
- auto Imp = importSeq(
- FromEPI.ExceptionSpec.NoexceptExpr,
- FromEPI.ExceptionSpec.SourceDecl,
- FromEPI.ExceptionSpec.SourceTemplate);
- if (!Imp)
- return Imp.takeError();
-
ToEPI.ExtInfo = FromEPI.ExtInfo;
ToEPI.Variadic = FromEPI.Variadic;
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
ToEPI.TypeQuals = FromEPI.TypeQuals;
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
+ ToEPI.ExceptionSpec.NoexceptExpr =
+ importChecked(Err, FromEPI.ExceptionSpec.NoexceptExpr);
+ ToEPI.ExceptionSpec.SourceDecl =
+ importChecked(Err, FromEPI.ExceptionSpec.SourceDecl);
+ ToEPI.ExceptionSpec.SourceTemplate =
+ importChecked(Err, FromEPI.ExceptionSpec.SourceTemplate);
ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
- std::tie(
- ToEPI.ExceptionSpec.NoexceptExpr,
- ToEPI.ExceptionSpec.SourceDecl,
- ToEPI.ExceptionSpec.SourceTemplate) = *Imp;
+
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getFunctionType(
*ToReturnTypeOrErr, ArgTypes, ToEPI);
@@ -1289,12 +1277,11 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
- UnresolvedUsingTypenameDecl *ToD;
- Decl *ToPrevD;
- if (auto Imp = importSeq(T->getDecl(), T->getDecl()->getPreviousDecl()))
- std::tie(ToD, ToPrevD) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToD = importChecked(Err, T->getDecl());
+ auto ToPrevD = importChecked(Err, T->getDecl()->getPreviousDecl());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getTypeDeclType(
ToD, cast_or_null<TypeDecl>(ToPrevD));
@@ -1753,7 +1740,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// fix since operations such as code generation will expect this to be so.
if (ImportedOrErr) {
FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
- Decl *ImportedDecl = (Decl*)*ImportedOrErr;
+ Decl *ImportedDecl = *ImportedOrErr;
FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
if (FieldFrom && FieldTo) {
const RecordType *RecordFrom = FieldFrom->getType()->getAs<RecordType>();
@@ -1903,6 +1890,19 @@ Error ASTNodeImporter::ImportDefinition(
// set in CXXRecordDecl::CreateLambda. We must import the contained
// decls here and finish the definition.
(To->isLambda() && shouldForceImportDeclContext(Kind))) {
+ if (To->isLambda()) {
+ auto *FromCXXRD = cast<CXXRecordDecl>(From);
+ SmallVector<LambdaCapture, 8> ToCaptures;
+ ToCaptures.reserve(FromCXXRD->capture_size());
+ for (const auto &FromCapture : FromCXXRD->captures()) {
+ if (auto ToCaptureOrErr = import(FromCapture))
+ ToCaptures.push_back(*ToCaptureOrErr);
+ else
+ return ToCaptureOrErr.takeError();
+ }
+ cast<CXXRecordDecl>(To)->setCaptures(ToCaptures);
+ }
+
Error Result = ImportDeclContext(From, /*ForceImport=*/true);
// Finish the definition of the lambda, set isBeingDefined to false.
if (To->isLambda())
@@ -2248,14 +2248,13 @@ ExpectedDecl ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
DeclContext *DC = *DCOrErr;
DeclContext *LexicalDC = DC;
- SourceLocation ToLocation, ToRParenLoc;
- Expr *ToAssertExpr;
- StringLiteral *ToMessage;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAssertExpr(), D->getMessage(), D->getRParenLoc()))
- std::tie(ToLocation, ToAssertExpr, ToMessage, ToRParenLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToRParenLoc = importChecked(Err, D->getRParenLoc());
+ auto ToAssertExpr = importChecked(Err, D->getAssertExpr());
+ auto ToMessage = importChecked(Err, D->getMessage());
+ if (Err)
+ return std::move(Err);
StaticAssertDecl *ToD;
if (GetImportedOrCreateDecl(
@@ -2364,17 +2363,15 @@ ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
// NOTE: No conflict resolution is done for namespace aliases now.
- SourceLocation ToNamespaceLoc, ToAliasLoc, ToTargetNameLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- NamespaceDecl *ToNamespace;
- if (auto Imp = importSeq(
- D->getNamespaceLoc(), D->getAliasLoc(), D->getQualifierLoc(),
- D->getTargetNameLoc(), D->getNamespace()))
- std::tie(
- ToNamespaceLoc, ToAliasLoc, ToQualifierLoc, ToTargetNameLoc,
- ToNamespace) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToNamespaceLoc = importChecked(Err, D->getNamespaceLoc());
+ auto ToAliasLoc = importChecked(Err, D->getAliasLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToTargetNameLoc = importChecked(Err, D->getTargetNameLoc());
+ auto ToNamespace = importChecked(Err, D->getNamespace());
+ if (Err)
+ return std::move(Err);
+
IdentifierInfo *ToIdentifier = Importer.Import(D->getIdentifier());
NamespaceAliasDecl *ToD;
@@ -2415,7 +2412,7 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
if (auto *FoundTypedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundTypedef, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTypedef, D))
continue;
QualType FromUT = D->getUnderlyingType();
@@ -2443,17 +2440,16 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
}
}
- QualType ToUnderlyingType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToBeginLoc;
- if (auto Imp = importSeq(
- D->getUnderlyingType(), D->getTypeSourceInfo(), D->getBeginLoc()))
- std::tie(ToUnderlyingType, ToTypeSourceInfo, ToBeginLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToUnderlyingType = importChecked(Err, D->getUnderlyingType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ if (Err)
+ return std::move(Err);
// Create the new typedef node.
// FIXME: ToUnderlyingType is not used.
+ (void)ToUnderlyingType;
TypedefNameDecl *ToTypedef;
if (IsAlias) {
if (GetImportedOrCreateDecl<TypeAliasDecl>(
@@ -2521,12 +2517,11 @@ ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
}
}
- TemplateParameterList *ToTemplateParameters;
- TypeAliasDecl *ToTemplatedDecl;
- if (auto Imp = importSeq(D->getTemplateParameters(), D->getTemplatedDecl()))
- std::tie(ToTemplateParameters, ToTemplatedDecl) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToTemplateParameters = importChecked(Err, D->getTemplateParameters());
+ auto ToTemplatedDecl = importChecked(Err, D->getTemplatedDecl());
+ if (Err)
+ return std::move(Err);
TypeAliasTemplateDecl *ToAlias;
if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc,
@@ -2603,6 +2598,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
IDNS |= Decl::IDNS_Ordinary;
// We may already have an enum of the same name; try to find and match it.
+ EnumDecl *PrevDecl = nullptr;
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls =
@@ -2617,10 +2613,15 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundEnum, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundEnum, D))
continue;
- if (IsStructuralMatch(D, FoundEnum))
- return Importer.MapImported(D, FoundEnum);
+ if (IsStructuralMatch(D, FoundEnum)) {
+ EnumDecl *FoundDef = FoundEnum->getDefinition();
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ return Importer.MapImported(D, FoundDef);
+ PrevDecl = FoundEnum->getMostRecentDecl();
+ break;
+ }
ConflictingDecls.push_back(FoundDecl);
}
}
@@ -2636,21 +2637,19 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
}
- SourceLocation ToBeginLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- QualType ToIntegerType;
- SourceRange ToBraceRange;
- if (auto Imp = importSeq(D->getBeginLoc(), D->getQualifierLoc(),
- D->getIntegerType(), D->getBraceRange()))
- std::tie(ToBeginLoc, ToQualifierLoc, ToIntegerType, ToBraceRange) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToIntegerType = importChecked(Err, D->getIntegerType());
+ auto ToBraceRange = importChecked(Err, D->getBraceRange());
+ if (Err)
+ return std::move(Err);
// Create the enum declaration.
EnumDecl *D2;
if (GetImportedOrCreateDecl(
D2, D, Importer.getToContext(), DC, ToBeginLoc,
- Loc, Name.getAsIdentifierInfo(), nullptr, D->isScoped(),
+ Loc, Name.getAsIdentifierInfo(), PrevDecl, D->isScoped(),
D->isScopedUsingClassTag(), D->isFixed()))
return D2;
@@ -2736,7 +2735,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!IsStructuralMatch(D, FoundRecord, false))
continue;
- if (!hasSameVisibilityContext(FoundRecord, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundRecord, D))
continue;
if (IsStructuralMatch(D, FoundRecord)) {
@@ -2796,7 +2795,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return CDeclOrErr.takeError();
D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr,
DCXX->hasKnownLambdaInternalLinkage());
- } else if (DCXX->isInjectedClassName()) {
+ } else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
const bool DelayTypeCreation = true;
@@ -3184,7 +3183,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
continue;
if (auto *FoundFunction = dyn_cast<FunctionDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundFunction, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundFunction, D))
continue;
if (IsStructuralMatch(D, FoundFunction)) {
@@ -3287,18 +3286,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
FromReturnTy, FromFPT->getParamTypes(), FromEPI);
}
- QualType T;
- TypeSourceInfo *TInfo;
- SourceLocation ToInnerLocStart, ToEndLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- Expr *TrailingRequiresClause;
- if (auto Imp = importSeq(
- FromTy, D->getTypeSourceInfo(), D->getInnerLocStart(),
- D->getQualifierLoc(), D->getEndLoc(), D->getTrailingRequiresClause()))
- std::tie(T, TInfo, ToInnerLocStart, ToQualifierLoc, ToEndLoc,
- TrailingRequiresClause) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto T = importChecked(Err, FromTy);
+ auto TInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToEndLoc = importChecked(Err, D->getEndLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto TrailingRequiresClause =
+ importChecked(Err, D->getTrailingRequiresClause());
+ if (Err)
+ return std::move(Err);
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
@@ -3314,10 +3311,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
Expr *ExplicitExpr = nullptr;
if (FromConstructor->getExplicitSpecifier().getExpr()) {
- auto Imp = importSeq(FromConstructor->getExplicitSpecifier().getExpr());
+ auto Imp = import(FromConstructor->getExplicitSpecifier().getExpr());
if (!Imp)
return Imp.takeError();
- std::tie(ExplicitExpr) = *Imp;
+ ExplicitExpr = *Imp;
}
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3332,16 +3329,12 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
return ToFunction;
} else if (CXXDestructorDecl *FromDtor = dyn_cast<CXXDestructorDecl>(D)) {
- auto Imp =
- importSeq(const_cast<FunctionDecl *>(FromDtor->getOperatorDelete()),
- FromDtor->getOperatorDeleteThisArg());
-
- if (!Imp)
- return Imp.takeError();
-
- FunctionDecl *ToOperatorDelete;
- Expr *ToThisArg;
- std::tie(ToOperatorDelete, ToThisArg) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorDelete = importChecked(
+ Err, const_cast<FunctionDecl *>(FromDtor->getOperatorDelete()));
+ auto ToThisArg = importChecked(Err, FromDtor->getOperatorDeleteThisArg());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3356,10 +3349,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
dyn_cast<CXXConversionDecl>(D)) {
Expr *ExplicitExpr = nullptr;
if (FromConversion->getExplicitSpecifier().getExpr()) {
- auto Imp = importSeq(FromConversion->getExplicitSpecifier().getExpr());
+ auto Imp = import(FromConversion->getExplicitSpecifier().getExpr());
if (!Imp)
return Imp.takeError();
- std::tie(ExplicitExpr) = *Imp;
+ ExplicitExpr = *Imp;
}
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3558,18 +3551,14 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTInfo;
- Expr *ToBitWidth;
- SourceLocation ToInnerLocStart;
- Expr *ToInitializer;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getBitWidth(),
- D->getInnerLocStart(), D->getInClassInitializer()))
- std::tie(
- ToType, ToTInfo, ToBitWidth, ToInnerLocStart, ToInitializer) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBitWidth = importChecked(Err, D->getBitWidth());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToInitializer = importChecked(Err, D->getInClassInitializer());
+ if (Err)
+ return std::move(Err);
FieldDecl *ToField;
if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
@@ -3657,6 +3646,54 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
return ToIndirectField;
}
+/// Used as return type of getFriendCountAndPosition.
+struct FriendCountAndPosition {
+ /// Number of similar looking friends.
+ unsigned int TotalCount;
+ /// Index of the specific FriendDecl.
+ unsigned int IndexOfDecl;
+};
+
+template <class T>
+static FriendCountAndPosition getFriendCountAndPosition(
+ const FriendDecl *FD,
+ llvm::function_ref<T(const FriendDecl *)> GetCanTypeOrDecl) {
+ unsigned int FriendCount = 0;
+ llvm::Optional<unsigned int> FriendPosition;
+ const auto *RD = cast<CXXRecordDecl>(FD->getLexicalDeclContext());
+
+ T TypeOrDecl = GetCanTypeOrDecl(FD);
+
+ for (const FriendDecl *FoundFriend : RD->friends()) {
+ if (FoundFriend == FD) {
+ FriendPosition = FriendCount;
+ ++FriendCount;
+ } else if (!FoundFriend->getFriendDecl() == !FD->getFriendDecl() &&
+ GetCanTypeOrDecl(FoundFriend) == TypeOrDecl) {
+ ++FriendCount;
+ }
+ }
+
+ assert(FriendPosition && "Friend decl not found in own parent.");
+
+ return {FriendCount, *FriendPosition};
+}
+
+static FriendCountAndPosition getFriendCountAndPosition(const FriendDecl *FD) {
+ if (FD->getFriendType())
+ return getFriendCountAndPosition<QualType>(FD, [](const FriendDecl *F) {
+ if (TypeSourceInfo *TSI = F->getFriendType())
+ return TSI->getType().getCanonicalType();
+ llvm_unreachable("Wrong friend object type.");
+ });
+ else
+ return getFriendCountAndPosition<Decl *>(FD, [](const FriendDecl *F) {
+ if (Decl *D = F->getFriendDecl())
+ return D->getCanonicalDecl();
+ llvm_unreachable("Wrong friend object type.");
+ });
+}
+
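A hypothetical input that exercises getFriendCountAndPosition: the same friend is declared twice, so each redundant FriendDecl has to be mapped to the friend at the same position in the already-imported class, which is what the VisitFriendDecl changes below arrange.

class X;
class Y {
  friend class X;   // TotalCount == 2, IndexOfDecl == 0
  friend class X;   // TotalCount == 2, IndexOfDecl == 1
};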
ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Import the major distinguishing characteristics of a declaration.
DeclContext *DC, *LexicalDC;
@@ -3665,25 +3702,37 @@ ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Determine whether we've already imported this decl.
// FriendDecl is not a NamedDecl so we cannot use lookup.
- auto *RD = cast<CXXRecordDecl>(DC);
+ // We try to maintain order and count of redundant friend declarations.
+ const auto *RD = cast<CXXRecordDecl>(DC);
FriendDecl *ImportedFriend = RD->getFirstFriend();
+ SmallVector<FriendDecl *, 2> ImportedEquivalentFriends;
while (ImportedFriend) {
+ bool Match = false;
if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) {
- if (IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
- /*Complain=*/false))
- return Importer.MapImported(D, ImportedFriend);
-
+ Match =
+ IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
+ /*Complain=*/false);
} else if (D->getFriendType() && ImportedFriend->getFriendType()) {
- if (Importer.IsStructurallyEquivalent(
- D->getFriendType()->getType(),
- ImportedFriend->getFriendType()->getType(), true))
- return Importer.MapImported(D, ImportedFriend);
+ Match = Importer.IsStructurallyEquivalent(
+ D->getFriendType()->getType(),
+ ImportedFriend->getFriendType()->getType(), /*Complain=*/false);
}
+ if (Match)
+ ImportedEquivalentFriends.push_back(ImportedFriend);
+
ImportedFriend = ImportedFriend->getNextFriend();
}
+ FriendCountAndPosition CountAndPosition = getFriendCountAndPosition(D);
+
+ assert(ImportedEquivalentFriends.size() <= CountAndPosition.TotalCount &&
+ "Class with non-matching friends is imported, ODR check wrong?");
+ if (ImportedEquivalentFriends.size() == CountAndPosition.TotalCount)
+ return Importer.MapImported(
+ D, ImportedEquivalentFriends[CountAndPosition.IndexOfDecl]);
// Not found. Create it.
+ // The declarations will be put into order later by ImportDeclContext.
FriendDecl::FriendUnion ToFU;
if (NamedDecl *FriendD = D->getFriendDecl()) {
NamedDecl *ToFriendD;
@@ -3760,15 +3809,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- Expr *ToBitWidth;
- SourceLocation ToInnerLocStart;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getBitWidth(), D->getInnerLocStart()))
- std::tie(ToType, ToTypeSourceInfo, ToBitWidth, ToInnerLocStart) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBitWidth = importChecked(Err, D->getBitWidth());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ if (Err)
+ return std::move(Err);
ObjCIvarDecl *ToIvar;
if (GetImportedOrCreateDecl(
@@ -3818,7 +3865,7 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
continue;
if (auto *FoundVar = dyn_cast<VarDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundVar, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundVar, D))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundVar->getType())) {
@@ -3882,16 +3929,13 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToInnerLocStart;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getInnerLocStart(),
- D->getQualifierLoc()))
- std::tie(ToType, ToTypeSourceInfo, ToInnerLocStart, ToQualifierLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ if (Err)
+ return std::move(Err);
// Create the imported variable.
VarDecl *ToVar;
@@ -3911,6 +3955,13 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ToVar->setPreviousDecl(Recent);
}
+ // Import the described template, if any.
+ if (D->getDescribedVarTemplate()) {
+ auto ToVTOrErr = import(D->getDescribedVarTemplate());
+ if (!ToVTOrErr)
+ return ToVTOrErr.takeError();
+ }
+
if (Error Err = ImportInitializer(D, ToVar))
return std::move(Err);
@@ -3934,13 +3985,12 @@ ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- DeclarationName ToDeclName;
- SourceLocation ToLocation;
- QualType ToType;
- if (auto Imp = importSeq(D->getDeclName(), D->getLocation(), D->getType()))
- std::tie(ToDeclName, ToLocation, ToType) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToType = importChecked(Err, D->getType());
+ if (Err)
+ return std::move(Err);
// Create the imported parameter.
ImplicitParamDecl *ToParm = nullptr;
@@ -3978,18 +4028,14 @@ ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- DeclarationName ToDeclName;
- SourceLocation ToLocation, ToInnerLocStart;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getDeclName(), D->getLocation(), D->getType(), D->getInnerLocStart(),
- D->getTypeSourceInfo()))
- std::tie(
- ToDeclName, ToLocation, ToType, ToInnerLocStart,
- ToTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ParmVarDecl *ToParm;
if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
@@ -4092,14 +4138,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
}
- SourceLocation ToEndLoc;
- QualType ToReturnType;
- TypeSourceInfo *ToReturnTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getEndLoc(), D->getReturnType(), D->getReturnTypeSourceInfo()))
- std::tie(ToEndLoc, ToReturnType, ToReturnTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToEndLoc = importChecked(Err, D->getEndLoc());
+ auto ToReturnType = importChecked(Err, D->getReturnType());
+ auto ToReturnTypeSourceInfo =
+ importChecked(Err, D->getReturnTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ObjCMethodDecl *ToMethod;
if (GetImportedOrCreateDecl(
@@ -4160,14 +4205,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
if (ToD)
return ToD;
- SourceLocation ToVarianceLoc, ToLocation, ToColonLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getVarianceLoc(), D->getLocation(), D->getColonLoc(),
- D->getTypeSourceInfo()))
- std::tie(ToVarianceLoc, ToLocation, ToColonLoc, ToTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToVarianceLoc = importChecked(Err, D->getVarianceLoc());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToColonLoc = importChecked(Err, D->getColonLoc());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ObjCTypeParamDecl *Result;
if (GetImportedOrCreateDecl(
@@ -4201,16 +4245,14 @@ ExpectedDecl ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
- SourceLocation ToAtStartLoc, ToCategoryNameLoc;
- SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
- if (auto Imp = importSeq(
- D->getAtStartLoc(), D->getCategoryNameLoc(),
- D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
- std::tie(
- ToAtStartLoc, ToCategoryNameLoc,
- ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc());
+ auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc());
+ auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC,
ToAtStartLoc, Loc,
@@ -4419,13 +4461,12 @@ ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
if (ToD)
return ToD;
- SourceLocation ToLoc, ToUsingLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc()))
- std::tie(ToLoc, ToUsingLoc, ToQualifierLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLoc = importChecked(Err, D->getNameInfo().getLoc());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo NameInfo(Name, ToLoc);
if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
@@ -4516,18 +4557,15 @@ ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
if (!ToComAncestorOrErr)
return ToComAncestorOrErr.takeError();
- NamespaceDecl *ToNominatedNamespace;
- SourceLocation ToUsingLoc, ToNamespaceKeyLocation, ToIdentLocation;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNominatedNamespace(), D->getUsingLoc(),
- D->getNamespaceKeyLocation(), D->getQualifierLoc(),
- D->getIdentLocation()))
- std::tie(
- ToNominatedNamespace, ToUsingLoc, ToNamespaceKeyLocation,
- ToQualifierLoc, ToIdentLocation) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToNominatedNamespace = importChecked(Err, D->getNominatedNamespace());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToNamespaceKeyLocation =
+ importChecked(Err, D->getNamespaceKeyLocation());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToIdentLocation = importChecked(Err, D->getIdentLocation());
+ if (Err)
+ return std::move(Err);
UsingDirectiveDecl *ToUsingDir;
if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC,
@@ -4555,14 +4593,13 @@ ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
if (ToD)
return ToD;
- SourceLocation ToLoc, ToUsingLoc, ToEllipsisLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc(),
- D->getEllipsisLoc()))
- std::tie(ToLoc, ToUsingLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLoc = importChecked(Err, D->getNameInfo().getLoc());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo NameInfo(Name, ToLoc);
if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
@@ -4592,14 +4629,13 @@ ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
if (ToD)
return ToD;
- SourceLocation ToUsingLoc, ToTypenameLoc, ToEllipsisLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getUsingLoc(), D->getTypenameLoc(), D->getQualifierLoc(),
- D->getEllipsisLoc()))
- std::tie(ToUsingLoc, ToTypenameLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToTypenameLoc = importChecked(Err, D->getTypenameLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
UnresolvedUsingTypenameDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
@@ -4834,12 +4870,12 @@ ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- SourceLocation ToLocation, ToAtStartLoc, ToCategoryNameLoc;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAtStartLoc(), D->getCategoryNameLoc()))
- std::tie(ToLocation, ToAtStartLoc, ToCategoryNameLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(
ToImpl, D, Importer.getToContext(), DC,
@@ -4879,16 +4915,14 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- SourceLocation ToLocation, ToAtStartLoc, ToSuperClassLoc;
- SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAtStartLoc(), D->getSuperClassLoc(),
- D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
- std::tie(
- ToLocation, ToAtStartLoc, ToSuperClassLoc,
- ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToSuperClassLoc = importChecked(Err, D->getSuperClassLoc());
+ auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc());
+ auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(),
DC, Iface, Super,
@@ -4978,14 +5012,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToAtLoc, ToLParenLoc;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getAtLoc(), D->getLParenLoc()))
- std::tie(ToType, ToTypeSourceInfo, ToAtLoc, ToLParenLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToAtLoc = importChecked(Err, D->getAtLoc());
+ auto ToLParenLoc = importChecked(Err, D->getLParenLoc());
+ if (Err)
+ return std::move(Err);
// Create the new property.
ObjCPropertyDecl *ToProperty;
@@ -4996,22 +5029,15 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
ToTypeSourceInfo, D->getPropertyImplementation()))
return ToProperty;
- Selector ToGetterName, ToSetterName;
- SourceLocation ToGetterNameLoc, ToSetterNameLoc;
- ObjCMethodDecl *ToGetterMethodDecl, *ToSetterMethodDecl;
- ObjCIvarDecl *ToPropertyIvarDecl;
- if (auto Imp = importSeq(
- D->getGetterName(), D->getSetterName(),
- D->getGetterNameLoc(), D->getSetterNameLoc(),
- D->getGetterMethodDecl(), D->getSetterMethodDecl(),
- D->getPropertyIvarDecl()))
- std::tie(
- ToGetterName, ToSetterName,
- ToGetterNameLoc, ToSetterNameLoc,
- ToGetterMethodDecl, ToSetterMethodDecl,
- ToPropertyIvarDecl) = *Imp;
- else
- return Imp.takeError();
+ auto ToGetterName = importChecked(Err, D->getGetterName());
+ auto ToSetterName = importChecked(Err, D->getSetterName());
+ auto ToGetterNameLoc = importChecked(Err, D->getGetterNameLoc());
+ auto ToSetterNameLoc = importChecked(Err, D->getSetterNameLoc());
+ auto ToGetterMethodDecl = importChecked(Err, D->getGetterMethodDecl());
+ auto ToSetterMethodDecl = importChecked(Err, D->getSetterMethodDecl());
+ auto ToPropertyIvarDecl = importChecked(Err, D->getPropertyIvarDecl());
+ if (Err)
+ return std::move(Err);
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
@@ -5048,12 +5074,14 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
= InImpl->FindPropertyImplDecl(Property->getIdentifier(),
Property->getQueryKind());
if (!ToImpl) {
- SourceLocation ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc;
- if (auto Imp = importSeq(
- D->getBeginLoc(), D->getLocation(), D->getPropertyIvarDeclLoc()))
- std::tie(ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToPropertyIvarDeclLoc =
+ importChecked(Err, D->getPropertyIvarDeclLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC,
ToBeginLoc,
@@ -5131,20 +5159,16 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// Import the type-constraint
if (const TypeConstraint *TC = D->getTypeConstraint()) {
- NestedNameSpecifierLoc ToNNS;
- DeclarationName ToName;
- SourceLocation ToNameLoc;
- NamedDecl *ToFoundDecl;
- ConceptDecl *ToNamedConcept;
- Expr *ToIDC;
- if (auto Imp = importSeq(TC->getNestedNameSpecifierLoc(),
- TC->getConceptNameInfo().getName(), TC->getConceptNameInfo().getLoc(),
- TC->getFoundDecl(), TC->getNamedConcept(),
- TC->getImmediatelyDeclaredConstraint()))
- std::tie(ToNNS, ToName, ToNameLoc, ToFoundDecl, ToNamedConcept,
- ToIDC) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToNNS = importChecked(Err, TC->getNestedNameSpecifierLoc());
+ auto ToName = importChecked(Err, TC->getConceptNameInfo().getName());
+ auto ToNameLoc = importChecked(Err, TC->getConceptNameInfo().getLoc());
+ auto ToFoundDecl = importChecked(Err, TC->getFoundDecl());
+ auto ToNamedConcept = importChecked(Err, TC->getNamedConcept());
+ auto ToIDC = importChecked(Err, TC->getImmediatelyDeclaredConstraint());
+ if (Err)
+ return std::move(Err);
TemplateArgumentListInfo ToTAInfo;
const auto *ASTTemplateArgs = TC->getTemplateArgsAsWritten();
@@ -5166,18 +5190,15 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
ExpectedDecl
ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- DeclarationName ToDeclName;
- SourceLocation ToLocation, ToInnerLocStart;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getDeclName(), D->getLocation(), D->getType(), D->getTypeSourceInfo(),
- D->getInnerLocStart()))
- std::tie(
- ToDeclName, ToLocation, ToType, ToTypeSourceInfo,
- ToInnerLocStart) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ if (Err)
+ return std::move(Err);
// FIXME: Import default argument.
@@ -5257,7 +5278,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Decl *Found = FoundDecl;
auto *FoundTemplate = dyn_cast<ClassTemplateDecl>(Found);
if (FoundTemplate) {
- if (!hasSameVisibilityContext(FoundTemplate, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D))
continue;
if (IsStructuralMatch(D, FoundTemplate)) {
@@ -5520,20 +5541,6 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
}
ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
- // If this variable has a definition in the translation unit we're coming
- // from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- auto *Definition =
- cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
- if (Definition && Definition != D->getTemplatedDecl()) {
- if (ExpectedDecl ImportedDefOrErr = import(
- Definition->getDescribedVarTemplate()))
- return Importer.MapImported(D, *ImportedDefOrErr);
- else
- return ImportedDefOrErr.takeError();
- }
-
// Import the major distinguishing characteristics of this variable template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -5547,19 +5554,30 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
// We may already have a template of the same name; try to find and match it.
assert(!DC->isFunctionOrMethod() &&
"Variable templates cannot be declared at function scope");
+
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
+ VarTemplateDecl *FoundByLookup = nullptr;
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
- Decl *Found = FoundDecl;
- if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(FoundDecl)) {
+ // Use the templated decl, some linkage flags are set only there.
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate->getTemplatedDecl(),
+ D->getTemplatedDecl()))
+ continue;
if (IsStructuralMatch(D, FoundTemplate)) {
- // The variable templates structurally match; call it the same template.
- Importer.MapImported(D->getTemplatedDecl(),
- FoundTemplate->getTemplatedDecl());
- return Importer.MapImported(D, FoundTemplate);
+ // The Decl in the "From" context has a definition, but in the
+ // "To" context we already have a definition.
+ VarTemplateDecl *FoundDef = getTemplateDefinition(FoundTemplate);
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ // FIXME Check for ODR error if the two definitions have
+ // different initializers?
+ return Importer.MapImported(D, FoundDef);
+
+ FoundByLookup = FoundTemplate;
+ break;
}
ConflictingDecls.push_back(FoundDecl);
}
@@ -5604,6 +5622,18 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
ToVarTD->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToVarTD);
+ if (FoundByLookup) {
+ auto *Recent =
+ const_cast<VarTemplateDecl *>(FoundByLookup->getMostRecentDecl());
+ if (!ToTemplated->getPreviousDecl()) {
+ auto *PrevTemplated =
+ FoundByLookup->getTemplatedDecl()->getMostRecentDecl();
+ if (ToTemplated != PrevTemplated)
+ ToTemplated->setPreviousDecl(PrevTemplated);
+ }
+ ToVarTD->setPreviousDecl(Recent);
+ }
+
if (DTemplated->isThisDeclarationADefinition() &&
!ToTemplated->isThisDeclarationADefinition()) {
// FIXME: Import definition!
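
Beyond the mechanical importChecked conversion, this VisitVarTemplateDecl hunk changes the merge strategy: the unconditional "jump to the definition in the source TU" shortcut is removed, the declaration found by lookup is remembered as FoundByLookup, an imported definition is mapped onto a definition that already exists in the target context, and otherwise the new declaration is linked into the existing redeclaration chain via setPreviousDecl(). The toy below sketches only that decision; the types are deliberately simplified stand-ins and are not clang's Redeclarable machinery.

#include <string>
#include <vector>

// Toy declaration: only what is needed to show the linking decision.
// (Ownership and cleanup are deliberately ignored in this sketch.)
struct ToyDecl {
  std::string Name;
  bool IsDefinition = false;
  ToyDecl *Previous = nullptr; // older redeclaration, if any
};

// Import `From` into a context that may already contain a matching
// declaration: reuse an existing definition outright, otherwise chain the
// new declaration onto the one found by lookup instead of adding an
// unrelated duplicate.
ToyDecl *importVarTemplate(std::vector<ToyDecl *> &ToContext,
                           const ToyDecl &From) {
  ToyDecl *FoundByLookup = nullptr;
  for (ToyDecl *D : ToContext)
    if (D->Name == From.Name)
      FoundByLookup = D; // keep the most recent match

  // If the imported declaration is a definition and the existing chain
  // already contains one, just map to that definition.
  if (From.IsDefinition)
    for (ToyDecl *D = FoundByLookup; D; D = D->Previous)
      if (D->IsDefinition)
        return D;

  auto *Imported = new ToyDecl{From.Name, From.IsDefinition, nullptr};
  if (FoundByLookup)
    Imported->Previous = FoundByLookup; // setPreviousDecl() in the real code
  ToContext.push_back(Imported);
  return Imported;
}
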
@@ -5785,7 +5815,7 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
continue;
if (auto *FoundTemplate = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundTemplate, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D))
continue;
if (IsStructuralMatch(D, FoundTemplate)) {
FunctionTemplateDecl *TemplateWithDef =
@@ -5931,14 +5961,13 @@ ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
- auto Imp = importSeq(S->getDeclGroup(), S->getBeginLoc(), S->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- DeclGroupRef ToDG;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToDG, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToDG = importChecked(Err, S->getDeclGroup());
+ auto ToBeginLoc = importChecked(Err, S->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, S->getEndLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DeclStmt(ToDG, ToBeginLoc, ToEndLoc);
}
@@ -5970,17 +5999,16 @@ ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
- auto Imp = importSeq(
- S->getLHS(), S->getRHS(), S->getSubStmt(), S->getCaseLoc(),
- S->getEllipsisLoc(), S->getColonLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- Stmt *ToSubStmt;
- SourceLocation ToCaseLoc, ToEllipsisLoc, ToColonLoc;
- std::tie(ToLHS, ToRHS, ToSubStmt, ToCaseLoc, ToEllipsisLoc, ToColonLoc) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, S->getLHS());
+ auto ToRHS = importChecked(Err, S->getRHS());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ auto ToCaseLoc = importChecked(Err, S->getCaseLoc());
+ auto ToEllipsisLoc = importChecked(Err, S->getEllipsisLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ if (Err)
+ return std::move(Err);
auto *ToStmt = CaseStmt::Create(Importer.getToContext(), ToLHS, ToRHS,
ToCaseLoc, ToEllipsisLoc, ToColonLoc);
@@ -5990,27 +6018,26 @@ ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
- auto Imp = importSeq(S->getDefaultLoc(), S->getColonLoc(), S->getSubStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToDefaultLoc, ToColonLoc;
- Stmt *ToSubStmt;
- std::tie(ToDefaultLoc, ToColonLoc, ToSubStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToDefaultLoc = importChecked(Err, S->getDefaultLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DefaultStmt(
ToDefaultLoc, ToColonLoc, ToSubStmt);
}
ExpectedStmt ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
- auto Imp = importSeq(S->getIdentLoc(), S->getDecl(), S->getSubStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToIdentLoc;
- LabelDecl *ToLabelDecl;
- Stmt *ToSubStmt;
- std::tie(ToIdentLoc, ToLabelDecl, ToSubStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToIdentLoc = importChecked(Err, S->getIdentLoc());
+ auto ToLabelDecl = importChecked(Err, S->getDecl());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) LabelStmt(
ToIdentLoc, ToLabelDecl, ToSubStmt);
@@ -6033,19 +6060,17 @@ ExpectedStmt ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
- auto Imp = importSeq(
- S->getIfLoc(), S->getInit(), S->getConditionVariable(), S->getCond(),
- S->getThen(), S->getElseLoc(), S->getElse());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToIfLoc, ToElseLoc;
- Stmt *ToInit, *ToThen, *ToElse;
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- std::tie(
- ToIfLoc, ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc, ToElse) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToIfLoc = importChecked(Err, S->getIfLoc());
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToThen = importChecked(Err, S->getThen());
+ auto ToElseLoc = importChecked(Err, S->getElseLoc());
+ auto ToElse = importChecked(Err, S->getElse());
+ if (Err)
+ return std::move(Err);
return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc,
@@ -6053,17 +6078,15 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
- auto Imp = importSeq(
- S->getInit(), S->getConditionVariable(), S->getCond(),
- S->getBody(), S->getSwitchLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToInit, *ToBody;
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- SourceLocation ToSwitchLoc;
- std::tie(ToInit, ToConditionVariable, ToCond, ToBody, ToSwitchLoc) = *Imp;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToSwitchLoc = importChecked(Err, S->getSwitchLoc());
+ if (Err)
+ return std::move(Err);
auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit,
ToConditionVariable, ToCond);
@@ -6088,52 +6111,49 @@ ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
- auto Imp = importSeq(
- S->getConditionVariable(), S->getCond(), S->getBody(), S->getWhileLoc());
- if (!Imp)
- return Imp.takeError();
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- Stmt *ToBody;
- SourceLocation ToWhileLoc;
- std::tie(ToConditionVariable, ToCond, ToBody, ToWhileLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToWhileLoc = importChecked(Err, S->getWhileLoc());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return WhileStmt::Create(Importer.getToContext(), ToConditionVariable, ToCond,
- ToBody, ToWhileLoc);
+ ToBody, ToWhileLoc, ToLParenLoc, ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitDoStmt(DoStmt *S) {
- auto Imp = importSeq(
- S->getBody(), S->getCond(), S->getDoLoc(), S->getWhileLoc(),
- S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- Stmt *ToBody;
- Expr *ToCond;
- SourceLocation ToDoLoc, ToWhileLoc, ToRParenLoc;
- std::tie(ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToDoLoc = importChecked(Err, S->getDoLoc());
+ auto ToWhileLoc = importChecked(Err, S->getWhileLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DoStmt(
ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
- auto Imp = importSeq(
- S->getInit(), S->getCond(), S->getConditionVariable(), S->getInc(),
- S->getBody(), S->getForLoc(), S->getLParenLoc(), S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToInit;
- Expr *ToCond, *ToInc;
- VarDecl *ToConditionVariable;
- Stmt *ToBody;
- SourceLocation ToForLoc, ToLParenLoc, ToRParenLoc;
- std::tie(
- ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc,
- ToLParenLoc, ToRParenLoc) = *Imp;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToInc = importChecked(Err, S->getInc());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ForStmt(
Importer.getToContext(),
@@ -6142,26 +6162,26 @@ ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
- auto Imp = importSeq(S->getLabel(), S->getGotoLoc(), S->getLabelLoc());
- if (!Imp)
- return Imp.takeError();
- LabelDecl *ToLabel;
- SourceLocation ToGotoLoc, ToLabelLoc;
- std::tie(ToLabel, ToGotoLoc, ToLabelLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLabel = importChecked(Err, S->getLabel());
+ auto ToGotoLoc = importChecked(Err, S->getGotoLoc());
+ auto ToLabelLoc = importChecked(Err, S->getLabelLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) GotoStmt(
ToLabel, ToGotoLoc, ToLabelLoc);
}
ExpectedStmt ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
- auto Imp = importSeq(S->getGotoLoc(), S->getStarLoc(), S->getTarget());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToGotoLoc, ToStarLoc;
- Expr *ToTarget;
- std::tie(ToGotoLoc, ToStarLoc, ToTarget) = *Imp;
+ Error Err = Error::success();
+ auto ToGotoLoc = importChecked(Err, S->getGotoLoc());
+ auto ToStarLoc = importChecked(Err, S->getStarLoc());
+ auto ToTarget = importChecked(Err, S->getTarget());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) IndirectGotoStmt(
ToGotoLoc, ToStarLoc, ToTarget);
@@ -6182,30 +6202,26 @@ ExpectedStmt ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
- auto Imp = importSeq(
- S->getReturnLoc(), S->getRetValue(), S->getNRVOCandidate());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToReturnLoc;
- Expr *ToRetValue;
- const VarDecl *ToNRVOCandidate;
- std::tie(ToReturnLoc, ToRetValue, ToNRVOCandidate) = *Imp;
+ Error Err = Error::success();
+ auto ToReturnLoc = importChecked(Err, S->getReturnLoc());
+ auto ToRetValue = importChecked(Err, S->getRetValue());
+ auto ToNRVOCandidate = importChecked(Err, S->getNRVOCandidate());
+ if (Err)
+ return std::move(Err);
return ReturnStmt::Create(Importer.getToContext(), ToReturnLoc, ToRetValue,
ToNRVOCandidate);
}
ExpectedStmt ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
- auto Imp = importSeq(
- S->getCatchLoc(), S->getExceptionDecl(), S->getHandlerBlock());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToCatchLoc;
- VarDecl *ToExceptionDecl;
- Stmt *ToHandlerBlock;
- std::tie(ToCatchLoc, ToExceptionDecl, ToHandlerBlock) = *Imp;
+ Error Err = Error::success();
+ auto ToCatchLoc = importChecked(Err, S->getCatchLoc());
+ auto ToExceptionDecl = importChecked(Err, S->getExceptionDecl());
+ auto ToHandlerBlock = importChecked(Err, S->getHandlerBlock());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXCatchStmt (
ToCatchLoc, ToExceptionDecl, ToHandlerBlock);
@@ -6234,24 +6250,22 @@ ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
- auto Imp1 = importSeq(
- S->getInit(), S->getRangeStmt(), S->getBeginStmt(), S->getEndStmt(),
- S->getCond(), S->getInc(), S->getLoopVarStmt(), S->getBody());
- if (!Imp1)
- return Imp1.takeError();
- auto Imp2 = importSeq(
- S->getForLoc(), S->getCoawaitLoc(), S->getColonLoc(), S->getRParenLoc());
- if (!Imp2)
- return Imp2.takeError();
-
- DeclStmt *ToRangeStmt, *ToBeginStmt, *ToEndStmt, *ToLoopVarStmt;
- Expr *ToCond, *ToInc;
- Stmt *ToInit, *ToBody;
- std::tie(
- ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
- ToBody) = *Imp1;
- SourceLocation ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc;
- std::tie(ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc) = *Imp2;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToRangeStmt = importChecked(Err, S->getRangeStmt());
+ auto ToBeginStmt = importChecked(Err, S->getBeginStmt());
+ auto ToEndStmt = importChecked(Err, S->getEndStmt());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToInc = importChecked(Err, S->getInc());
+ auto ToLoopVarStmt = importChecked(Err, S->getLoopVarStmt());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToCoawaitLoc = importChecked(Err, S->getCoawaitLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXForRangeStmt(
ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
@@ -6260,16 +6274,14 @@ ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
ExpectedStmt
ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
- auto Imp = importSeq(
- S->getElement(), S->getCollection(), S->getBody(),
- S->getForLoc(), S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToElement, *ToBody;
- Expr *ToCollection;
- SourceLocation ToForLoc, ToRParenLoc;
- std::tie(ToElement, ToCollection, ToBody, ToForLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToElement = importChecked(Err, S->getElement());
+ auto ToCollection = importChecked(Err, S->getCollection());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCForCollectionStmt(ToElement,
ToCollection,
@@ -6279,16 +6291,14 @@ ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- auto Imp = importSeq(
- S->getAtCatchLoc(), S->getRParenLoc(), S->getCatchParamDecl(),
- S->getCatchBody());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToAtCatchLoc, ToRParenLoc;
- VarDecl *ToCatchParamDecl;
- Stmt *ToCatchBody;
- std::tie(ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody) = *Imp;
+ Error Err = Error::success();
+ auto ToAtCatchLoc = importChecked(Err, S->getAtCatchLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ auto ToCatchParamDecl = importChecked(Err, S->getCatchParamDecl());
+ auto ToCatchBody = importChecked(Err, S->getCatchBody());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCAtCatchStmt (
ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody);
@@ -6306,14 +6316,13 @@ ExpectedStmt ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
- auto Imp = importSeq(
- S->getAtTryLoc(), S->getTryBody(), S->getFinallyStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToAtTryLoc;
- Stmt *ToTryBody, *ToFinallyStmt;
- std::tie(ToAtTryLoc, ToTryBody, ToFinallyStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToAtTryLoc = importChecked(Err, S->getAtTryLoc());
+ auto ToTryBody = importChecked(Err, S->getTryBody());
+ auto ToFinallyStmt = importChecked(Err, S->getFinallyStmt());
+ if (Err)
+ return std::move(Err);
SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
@@ -6330,17 +6339,15 @@ ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
ToFinallyStmt);
}
-ExpectedStmt ASTNodeImporter::VisitObjCAtSynchronizedStmt
- (ObjCAtSynchronizedStmt *S) {
- auto Imp = importSeq(
- S->getAtSynchronizedLoc(), S->getSynchExpr(), S->getSynchBody());
- if (!Imp)
- return Imp.takeError();
+ExpectedStmt
+ASTNodeImporter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
- SourceLocation ToAtSynchronizedLoc;
- Expr *ToSynchExpr;
- Stmt *ToSynchBody;
- std::tie(ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody) = *Imp;
+ Error Err = Error::success();
+ auto ToAtSynchronizedLoc = importChecked(Err, S->getAtSynchronizedLoc());
+ auto ToSynchExpr = importChecked(Err, S->getSynchExpr());
+ auto ToSynchBody = importChecked(Err, S->getSynchBody());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
@@ -6379,18 +6386,15 @@ ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
}
ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
- auto Imp = importSeq(
- E->getBuiltinLoc(), E->getSubExpr(), E->getWrittenTypeInfo(),
- E->getRParenLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- Expr *ToSubExpr;
- TypeSourceInfo *ToWrittenTypeInfo;
- QualType ToType;
- std::tie(ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToWrittenTypeInfo = importChecked(Err, E->getWrittenTypeInfo());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) VAArgExpr(
ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType,
@@ -6398,31 +6402,27 @@ ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) {
- auto Imp = importSeq(E->getCond(), E->getLHS(), E->getRHS(),
- E->getBuiltinLoc(), E->getRParenLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCond;
- Expr *ToLHS;
- Expr *ToRHS;
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- QualType ToType;
- std::tie(ToCond, ToLHS, ToRHS, ToBuiltinLoc, ToRParenLoc, ToType) = *Imp;
+
+ Error Err = Error::success();
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
ExprValueKind VK = E->getValueKind();
ExprObjectKind OK = E->getObjectKind();
- bool TypeDependent = ToCond->isTypeDependent();
- bool ValueDependent = ToCond->isValueDependent();
-
// The value of CondIsTrue only matters if the value is not
// condition-dependent.
bool CondIsTrue = !E->isConditionDependent() && E->isConditionTrue();
return new (Importer.getToContext())
ChooseExpr(ToBuiltinLoc, ToCond, ToLHS, ToRHS, ToType, VK, OK,
- ToRParenLoc, CondIsTrue, TypeDependent, ValueDependent);
+ ToRParenLoc, CondIsTrue);
}
ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
@@ -6438,33 +6438,28 @@ ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getType(), E->getFunctionName());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToBeginLoc;
- QualType ToType;
- StringLiteral *ToFunctionName;
- std::tie(ToBeginLoc, ToType, ToFunctionName) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToFunctionName = importChecked(Err, E->getFunctionName());
+ if (Err)
+ return std::move(Err);
return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
E->getIdentKind(), ToFunctionName);
}
ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
- auto Imp = importSeq(
- E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDecl(),
- E->getLocation(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- NestedNameSpecifierLoc ToQualifierLoc;
- SourceLocation ToTemplateKeywordLoc, ToLocation;
- ValueDecl *ToDecl;
- QualType ToType;
- std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDecl, ToLocation, ToType) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToDecl = importChecked(Err, E->getDecl());
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
NamedDecl *ToFoundD = nullptr;
if (E->getDecl() != E->getFoundDecl()) {
@@ -6584,6 +6579,20 @@ ExpectedStmt ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
*ToSubExprOrErr, *ToTypeOrErr);
}
+ExpectedStmt ASTNodeImporter::VisitFixedPointLiteral(FixedPointLiteral *E) {
+ auto ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) FixedPointLiteral(
+ Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr,
+ Importer.getToContext().getFixedPointScale(*ToTypeOrErr));
+}
+
ExpectedStmt ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
ExpectedType ToTypeOrErr = import(E->getType());
if (!ToTypeOrErr)
@@ -6613,17 +6622,14 @@ ExpectedStmt ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
}
ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- auto Imp = importSeq(
- E->getLParenLoc(), E->getTypeSourceInfo(), E->getType(),
- E->getInitializer());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParenLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- QualType ToType;
- Expr *ToInitializer;
- std::tie(ToLParenLoc, ToTypeSourceInfo, ToType, ToInitializer) = *Imp;
+
+ Error Err = Error::success();
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToInitializer = importChecked(Err, E->getInitializer());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CompoundLiteralExpr(
ToLParenLoc, ToTypeSourceInfo, ToType, E->getValueKind(),
@@ -6631,14 +6637,13 @@ ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
- auto Imp = importSeq(
- E->getBuiltinLoc(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- QualType ToType;
- std::tie(ToBuiltinLoc, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 6> ToExprs(E->getNumSubExprs());
if (Error Err = ImportArrayChecked(
@@ -6647,33 +6652,30 @@ ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
return std::move(Err);
return new (Importer.getToContext()) AtomicExpr(
+
ToBuiltinLoc, ToExprs, ToType, E->getOp(), ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
- auto Imp = importSeq(
- E->getAmpAmpLoc(), E->getLabelLoc(), E->getLabel(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToAmpAmpLoc, ToLabelLoc;
- LabelDecl *ToLabel;
- QualType ToType;
- std::tie(ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToAmpAmpLoc = importChecked(Err, E->getAmpAmpLoc());
+ auto ToLabelLoc = importChecked(Err, E->getLabelLoc());
+ auto ToLabel = importChecked(Err, E->getLabel());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) AddrLabelExpr(
ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType);
}
-
ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
- auto Imp = importSeq(E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- std::tie(ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
// TODO : Handle APValue::ValueKind that require importing.
+
APValue::ValueKind Kind = E->getResultAPValueKind();
if (Kind == APValue::Int || Kind == APValue::Float ||
Kind == APValue::FixedPoint || Kind == APValue::ComplexFloat ||
@@ -6682,15 +6684,13 @@ ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
E->getAPValueResult());
return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
}
-
ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
- auto Imp = importSeq(E->getLParen(), E->getRParen(), E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParen, ToRParen;
- Expr *ToSubExpr;
- std::tie(ToLParen, ToRParen, ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToLParen = importChecked(Err, E->getLParen());
+ auto ToRParen = importChecked(Err, E->getRParen());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext())
ParenExpr(ToLParen, ToRParen, ToSubExpr);
@@ -6714,15 +6714,13 @@ ExpectedStmt ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
- auto Imp = importSeq(
- E->getSubStmt(), E->getType(), E->getLParenLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- CompoundStmt *ToSubStmt;
- QualType ToType;
- SourceLocation ToLParenLoc, ToRParenLoc;
- std::tie(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubStmt = importChecked(Err, E->getSubStmt());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext())
StmtExpr(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc,
@@ -6730,30 +6728,28 @@ ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
- auto Imp = importSeq(
- E->getSubExpr(), E->getType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- QualType ToType;
- SourceLocation ToOperatorLoc;
- std::tie(ToSubExpr, ToType, ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) UnaryOperator(
- ToSubExpr, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(),
- ToOperatorLoc, E->canOverflow());
+ return UnaryOperator::Create(
+ Importer.getToContext(), ToSubExpr, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->canOverflow(),
+ E->getFPOptionsOverride());
}
ExpectedStmt
-ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
- auto Imp = importSeq(E->getType(), E->getOperatorLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- QualType ToType;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- std::tie(ToType, ToOperatorLoc, ToRParenLoc) = *Imp;
+ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
if (E->isArgumentType()) {
Expected<TypeSourceInfo *> ToArgumentTypeInfoOrErr =
@@ -6775,53 +6771,49 @@ ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- QualType ToType;
- SourceLocation ToOperatorLoc;
- std::tie(ToLHS, ToRHS, ToType, ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) BinaryOperator(
- ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
- E->getObjectKind(), ToOperatorLoc, E->getFPFeatures());
+ return BinaryOperator::Create(
+ Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
+ E->getFPFeatures(Importer.getFromContext().getLangOpts()));
}
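
A second recurring change in this diff: UnaryOperator, BinaryOperator and (below) CompoundAssignOperator are no longer constructed with placement new but through their Create factories, which take the floating-point options up front. The likely reason, sketched here as an assumption rather than anything stated in the diff, is that these nodes keep the FP option override in trailing storage that is allocated only when an override is actually present, so the allocation size must be known before construction. Toy types only; clang's real nodes use llvm::TrailingObjects, and cleanup is omitted.

#include <cstddef>
#include <new>

struct ToyFPOptionsOverride {
  unsigned OverrideMask = 0;
  bool requiresTrailingStorage() const { return OverrideMask != 0; }
};

struct ToyBinaryOperator {
  unsigned Opcode;
  bool HasStoredFPFeatures;

  // Factory mirrors the shape of a Create method: the floating-point options
  // must be known up front so the trailing slot is allocated only when
  // something is actually overridden.
  static ToyBinaryOperator *Create(unsigned Opc, ToyFPOptionsOverride FPO) {
    const bool NeedsFP = FPO.requiresTrailingStorage();
    const std::size_t Size =
        sizeof(ToyBinaryOperator) +
        (NeedsFP ? sizeof(ToyFPOptionsOverride) : 0);
    void *Mem = ::operator new(Size);
    auto *Node = new (Mem) ToyBinaryOperator{Opc, NeedsFP};
    if (NeedsFP)
      new (Node + 1) ToyFPOptionsOverride(FPO); // fill the trailing slot
    return Node;
  }
};
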
ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
- auto Imp = importSeq(
- E->getCond(), E->getQuestionLoc(), E->getLHS(), E->getColonLoc(),
- E->getRHS(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCond, *ToLHS, *ToRHS;
- SourceLocation ToQuestionLoc, ToColonLoc;
- QualType ToType;
- std::tie(ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc());
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToColonLoc = importChecked(Err, E->getColonLoc());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ConditionalOperator(
ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType,
E->getValueKind(), E->getObjectKind());
}
-ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
- BinaryConditionalOperator *E) {
- auto Imp = importSeq(
- E->getCommon(), E->getOpaqueValue(), E->getCond(), E->getTrueExpr(),
- E->getFalseExpr(), E->getQuestionLoc(), E->getColonLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCommon, *ToCond, *ToTrueExpr, *ToFalseExpr;
- OpaqueValueExpr *ToOpaqueValue;
- SourceLocation ToQuestionLoc, ToColonLoc;
- QualType ToType;
- std::tie(
- ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr, ToQuestionLoc,
- ToColonLoc, ToType) = *Imp;
+ExpectedStmt
+ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Error Err = Error::success();
+ auto ToCommon = importChecked(Err, E->getCommon());
+ auto ToOpaqueValue = importChecked(Err, E->getOpaqueValue());
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToTrueExpr = importChecked(Err, E->getTrueExpr());
+ auto ToFalseExpr = importChecked(Err, E->getFalseExpr());
+ auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc());
+ auto ToColonLoc = importChecked(Err, E->getColonLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) BinaryConditionalOperator(
ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr,
@@ -6830,19 +6822,15 @@ ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
}
ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getQueriedTypeSourceInfo(),
- E->getDimensionExpression(), E->getEndLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBeginLoc, ToEndLoc;
- TypeSourceInfo *ToQueriedTypeSourceInfo;
- Expr *ToDimensionExpression;
- QualType ToType;
- std::tie(
- ToBeginLoc, ToQueriedTypeSourceInfo, ToDimensionExpression, ToEndLoc,
- ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToQueriedTypeSourceInfo =
+ importChecked(Err, E->getQueriedTypeSourceInfo());
+ auto ToDimensionExpression = importChecked(Err, E->getDimensionExpression());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArrayTypeTraitExpr(
ToBeginLoc, E->getTrait(), ToQueriedTypeSourceInfo, E->getValue(),
@@ -6850,15 +6838,13 @@ ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getQueriedExpression(), E->getEndLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBeginLoc, ToEndLoc;
- Expr *ToQueriedExpression;
- QualType ToType;
- std::tie(ToBeginLoc, ToQueriedExpression, ToEndLoc, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToQueriedExpression = importChecked(Err, E->getQueriedExpression());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ExpressionTraitExpr(
ToBeginLoc, E->getTrait(), ToQueriedExpression, E->getValue(),
@@ -6866,30 +6852,25 @@ ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
- auto Imp = importSeq(
- E->getLocation(), E->getType(), E->getSourceExpr());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLocation;
- QualType ToType;
- Expr *ToSourceExpr;
- std::tie(ToLocation, ToType, ToSourceExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSourceExpr = importChecked(Err, E->getSourceExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) OpaqueValueExpr(
ToLocation, ToType, E->getValueKind(), E->getObjectKind(), ToSourceExpr);
}
ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getRBracketLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- SourceLocation ToRBracketLoc;
- QualType ToType;
- std::tie(ToLHS, ToRHS, ToType, ToRBracketLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRBracketLoc = importChecked(Err, E->getRBracketLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArraySubscriptExpr(
ToLHS, ToRHS, ToType, E->getValueKind(), E->getObjectKind(),
@@ -6898,22 +6879,22 @@ ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getComputationLHSType(),
- E->getComputationResultType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- QualType ToType, ToComputationLHSType, ToComputationResultType;
- SourceLocation ToOperatorLoc;
- std::tie(ToLHS, ToRHS, ToType, ToComputationLHSType, ToComputationResultType,
- ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToComputationLHSType = importChecked(Err, E->getComputationLHSType());
+ auto ToComputationResultType =
+ importChecked(Err, E->getComputationResultType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) CompoundAssignOperator(
- ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
- E->getObjectKind(), ToComputationLHSType, ToComputationResultType,
- ToOperatorLoc, E->getFPFeatures());
+ return CompoundAssignOperator::Create(
+ Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
+ E->getFPFeatures(Importer.getFromContext().getLangOpts()),
+ ToComputationLHSType, ToComputationResultType);
}
Expected<CXXCastPath>
@@ -6947,15 +6928,12 @@ ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
- auto Imp1 = importSeq(
- E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten());
- if (!Imp1)
- return Imp1.takeError();
-
- QualType ToType;
- Expr *ToSubExpr;
- TypeSourceInfo *ToTypeInfoAsWritten;
- std::tie(ToType, ToSubExpr, ToTypeInfoAsWritten) = *Imp1;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten());
+ if (Err)
+ return std::move(Err);
Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
if (!ToBasePathOrErr)
@@ -7015,11 +6993,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
const OffsetOfNode &FromNode = E->getComponent(I);
SourceLocation ToBeginLoc, ToEndLoc;
+
if (FromNode.getKind() != OffsetOfNode::Base) {
- auto Imp = importSeq(FromNode.getBeginLoc(), FromNode.getEndLoc());
- if (!Imp)
- return Imp.takeError();
- std::tie(ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ ToBeginLoc = importChecked(Err, FromNode.getBeginLoc());
+ ToEndLoc = importChecked(Err, FromNode.getEndLoc());
+ if (Err)
+ return std::move(Err);
}
switch (FromNode.getKind()) {
@@ -7057,16 +7037,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
ToExprs[I] = *ToIndexExprOrErr;
}
- auto Imp = importSeq(
- E->getType(), E->getTypeSourceInfo(), E->getOperatorLoc(),
- E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- std::tie(ToType, ToTypeSourceInfo, ToOperatorLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return OffsetOfExpr::Create(
Importer.getToContext(), ToType, ToOperatorLoc, ToTypeSourceInfo, ToNodes,
@@ -7074,15 +7051,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperand(), E->getBeginLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToOperand;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToType, ToOperand, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperand = importChecked(Err, E->getOperand());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
CanThrowResult ToCanThrow;
if (E->isValueDependent())
@@ -7095,14 +7070,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
- auto Imp = importSeq(E->getSubExpr(), E->getType(), E->getThrowLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- QualType ToType;
- SourceLocation ToThrowLoc;
- std::tie(ToSubExpr, ToType, ToThrowLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToThrowLoc = importChecked(Err, E->getThrowLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXThrowExpr(
ToSubExpr, ToType, ToThrowLoc, E->isThrownVariableInScope());
@@ -7142,15 +7115,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getTypeSourceInfo(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToRParenLoc;
- std::tie(ToType, ToTypeSourceInfo, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXScalarValueInitExpr(
ToType, ToTypeSourceInfo, ToRParenLoc);
@@ -7172,18 +7142,15 @@ ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
}
ExpectedStmt
+
ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
- auto Imp = importSeq(
- E->getConstructor(), E->getType(), E->getTypeSourceInfo(),
- E->getParenOrBraceRange());
- if (!Imp)
- return Imp.takeError();
-
- CXXConstructorDecl *ToConstructor;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceRange ToParenOrBraceRange;
- std::tie(ToConstructor, ToType, ToTypeSourceInfo, ToParenOrBraceRange) = *Imp;
+ Error Err = Error::success();
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7202,14 +7169,11 @@ ExpectedDecl ASTNodeImporter::VisitLifetimeExtendedTemporaryDecl(
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- auto Imp = importSeq(D->getTemporaryExpr(), D->getExtendingDecl());
- // FIXME: the APValue should be imported as well if present.
- if (!Imp)
- return Imp.takeError();
-
- Expr *Temporary;
- ValueDecl *ExtendingDecl;
- std::tie(Temporary, ExtendingDecl) = *Imp;
+ Error Err = Error::success();
+ auto Temporary = importChecked(Err, D->getTemporaryExpr());
+ auto ExtendingDecl = importChecked(Err, D->getExtendingDecl());
+ if (Err)
+ return std::move(Err);
// FIXME: Should ManglingNumber get numbers associated with 'to' context?
LifetimeExtendedTemporaryDecl *To;
@@ -7224,17 +7188,15 @@ ExpectedDecl ASTNodeImporter::VisitLifetimeExtendedTemporaryDecl(
ExpectedStmt
ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
- auto Imp = importSeq(E->getType(),
- E->getLifetimeExtendedTemporaryDecl() ? nullptr
- : E->getSubExpr(),
- E->getLifetimeExtendedTemporaryDecl());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToTemporaryExpr;
- LifetimeExtendedTemporaryDecl *ToMaterializedDecl;
- std::tie(ToType, ToTemporaryExpr, ToMaterializedDecl) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ Expr *ToTemporaryExpr = importChecked(
+ Err, E->getLifetimeExtendedTemporaryDecl() ? nullptr : E->getSubExpr());
+ auto ToMaterializedDecl =
+ importChecked(Err, E->getLifetimeExtendedTemporaryDecl());
+ if (Err)
+ return std::move(Err);
+
if (!ToTemporaryExpr)
ToTemporaryExpr = cast<Expr>(ToMaterializedDecl->getTemporaryExpr());
@@ -7246,29 +7208,25 @@ ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getPattern(), E->getEllipsisLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToPattern;
- SourceLocation ToEllipsisLoc;
- std::tie(ToType, ToPattern, ToEllipsisLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToPattern = importChecked(Err, E->getPattern());
+ auto ToEllipsisLoc = importChecked(Err, E->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) PackExpansionExpr(
ToType, ToPattern, ToEllipsisLoc, E->getNumExpansions());
}
ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- auto Imp = importSeq(
- E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToOperatorLoc, ToPackLoc, ToRParenLoc;
- NamedDecl *ToPack;
- std::tie(ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToPack = importChecked(Err, E->getPack());
+ auto ToPackLoc = importChecked(Err, E->getPackLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
Optional<unsigned> Length;
if (!E->isValueDependent())
@@ -7290,23 +7248,19 @@ ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
- auto Imp = importSeq(
- E->getOperatorNew(), E->getOperatorDelete(), E->getTypeIdParens(),
- E->getArraySize(), E->getInitializer(), E->getType(),
- E->getAllocatedTypeSourceInfo(), E->getSourceRange(),
- E->getDirectInitRange());
- if (!Imp)
- return Imp.takeError();
-
- FunctionDecl *ToOperatorNew, *ToOperatorDelete;
- SourceRange ToTypeIdParens, ToSourceRange, ToDirectInitRange;
- Optional<Expr *> ToArraySize;
- Expr *ToInitializer;
- QualType ToType;
- TypeSourceInfo *ToAllocatedTypeSourceInfo;
- std::tie(
- ToOperatorNew, ToOperatorDelete, ToTypeIdParens, ToArraySize, ToInitializer,
- ToType, ToAllocatedTypeSourceInfo, ToSourceRange, ToDirectInitRange) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorNew = importChecked(Err, E->getOperatorNew());
+ auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete());
+ auto ToTypeIdParens = importChecked(Err, E->getTypeIdParens());
+ auto ToArraySize = importChecked(Err, E->getArraySize());
+ auto ToInitializer = importChecked(Err, E->getInitializer());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToAllocatedTypeSourceInfo =
+ importChecked(Err, E->getAllocatedTypeSourceInfo());
+ auto ToSourceRange = importChecked(Err, E->getSourceRange());
+ auto ToDirectInitRange = importChecked(Err, E->getDirectInitRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToPlacementArgs(E->getNumPlacementArgs());
if (Error Err =
@@ -7322,16 +7276,13 @@ ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperatorDelete(), E->getArgument(), E->getBeginLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- FunctionDecl *ToOperatorDelete;
- Expr *ToArgument;
- SourceLocation ToBeginLoc;
- std::tie(ToType, ToOperatorDelete, ToArgument, ToBeginLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete());
+ auto ToArgument = importChecked(Err, E->getArgument());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXDeleteExpr(
ToType, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(),
@@ -7340,17 +7291,13 @@ ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getLocation(), E->getConstructor(),
- E->getParenOrBraceRange());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToLocation;
- CXXConstructorDecl *ToConstructor;
- SourceRange ToParenOrBraceRange;
- std::tie(ToType, ToLocation, ToConstructor, ToParenOrBraceRange) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 6> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7379,15 +7326,12 @@ ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- auto Imp = importSeq(
- E->getCallee(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCallee;
- QualType ToType;
- SourceLocation ToRParenLoc;
- std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToCallee = importChecked(Err, E->getCallee());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7424,30 +7368,18 @@ ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
- auto Imp1 = importSeq(
- E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc(), E->getMemberDecl(), E->getType());
- if (!Imp1)
- return Imp1.takeError();
-
- Expr *ToBase;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- ValueDecl *ToMemberDecl;
- QualType ToType;
- std::tie(
- ToBase, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl,
- ToType) = *Imp1;
-
- auto Imp2 = importSeq(
- E->getFoundDecl().getDecl(), E->getMemberNameInfo().getName(),
- E->getMemberNameInfo().getLoc(), E->getLAngleLoc(), E->getRAngleLoc());
- if (!Imp2)
- return Imp2.takeError();
- NamedDecl *ToDecl;
- DeclarationName ToName;
- SourceLocation ToLoc, ToLAngleLoc, ToRAngleLoc;
- std::tie(ToDecl, ToName, ToLoc, ToLAngleLoc, ToRAngleLoc) = *Imp2;
+ Error Err = Error::success();
+ auto ToBase = importChecked(Err, E->getBase());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToMemberDecl = importChecked(Err, E->getMemberDecl());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToDecl = importChecked(Err, E->getFoundDecl().getDecl());
+ auto ToName = importChecked(Err, E->getMemberNameInfo().getName());
+ auto ToLoc = importChecked(Err, E->getMemberNameInfo().getLoc());
+ if (Err)
+ return std::move(Err);
DeclAccessPair ToFoundDecl =
DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess());
@@ -7472,19 +7404,15 @@ ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
- auto Imp = importSeq(
- E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getScopeTypeInfo(), E->getColonColonLoc(), E->getTildeLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToBase;
- SourceLocation ToOperatorLoc, ToColonColonLoc, ToTildeLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- TypeSourceInfo *ToScopeTypeInfo;
- std::tie(
- ToBase, ToOperatorLoc, ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc,
- ToTildeLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBase = importChecked(Err, E->getBase());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToScopeTypeInfo = importChecked(Err, E->getScopeTypeInfo());
+ auto ToColonColonLoc = importChecked(Err, E->getColonColonLoc());
+ auto ToTildeLoc = importChecked(Err, E->getTildeLoc());
+ if (Err)
+ return std::move(Err);
PseudoDestructorTypeStorage Storage;
if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
@@ -7507,19 +7435,15 @@ ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc(), E->getFirstQualifierFoundInScope());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- NamedDecl *ToFirstQualifierFoundInScope;
- std::tie(
- ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
- ToFirstQualifierFoundInScope) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToFirstQualifierFoundInScope =
+ importChecked(Err, E->getFirstQualifierFoundInScope());
+ if (Err)
+ return std::move(Err);
Expr *ToBase = nullptr;
if (!E->isImplicitAccess()) {
@@ -7530,22 +7454,23 @@ ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
}
TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
+
if (E->hasExplicitTemplateArgs()) {
- if (Error Err = ImportTemplateArgumentListInfo(
- E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
- ToTAInfo))
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
+ E->template_arguments(), ToTAInfo))
return std::move(Err);
ResInfo = &ToTAInfo;
}
+ auto ToMember = importChecked(Err, E->getMember());
+ auto ToMemberLoc = importChecked(Err, E->getMemberLoc());
+ if (Err)
+ return std::move(Err);
+ DeclarationNameInfo ToMemberNameInfo(ToMember, ToMemberLoc);
- auto ToMemberNameInfoOrErr = importSeq(E->getMember(), E->getMemberLoc());
- if (!ToMemberNameInfoOrErr)
- return ToMemberNameInfoOrErr.takeError();
- DeclarationNameInfo ToMemberNameInfo(
- std::get<0>(*ToMemberNameInfoOrErr), std::get<1>(*ToMemberNameInfoOrErr));
// Import additional name location/type info.
- if (Error Err = ImportDeclarationNameLoc(
- E->getMemberNameInfo(), ToMemberNameInfo))
+ if (Error Err =
+ ImportDeclarationNameLoc(E->getMemberNameInfo(), ToMemberNameInfo))
return std::move(Err);
return CXXDependentScopeMemberExpr::Create(
@@ -7556,17 +7481,15 @@ ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
ExpectedStmt
ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
- auto Imp = importSeq(E->getQualifierLoc(), E->getTemplateKeywordLoc(),
- E->getDeclName(), E->getNameInfo().getLoc(),
- E->getLAngleLoc(), E->getRAngleLoc());
- if (!Imp)
- return Imp.takeError();
-
- NestedNameSpecifierLoc ToQualifierLoc;
- SourceLocation ToTemplateKeywordLoc, ToNameLoc, ToLAngleLoc, ToRAngleLoc;
- DeclarationName ToDeclName;
- std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDeclName, ToNameLoc,
- ToLAngleLoc, ToRAngleLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToDeclName = importChecked(Err, E->getDeclName());
+ auto ToNameLoc = importChecked(Err, E->getNameInfo().getLoc());
+ auto ToLAngleLoc = importChecked(Err, E->getLAngleLoc());
+ auto ToRAngleLoc = importChecked(Err, E->getRAngleLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo ToNameInfo(ToDeclName, ToNameLoc);
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
@@ -7588,14 +7511,12 @@ ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *E) {
- auto Imp = importSeq(
- E->getLParenLoc(), E->getRParenLoc(), E->getTypeSourceInfo());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParenLoc, ToRParenLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- std::tie(ToLParenLoc, ToRParenLoc, ToTypeSourceInfo) = *Imp;
+ Error Err = Error::success();
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 8> ToArgs(E->arg_size());
if (Error Err =
@@ -7617,11 +7538,13 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
if (!ToQualifierLocOrErr)
return ToQualifierLocOrErr.takeError();
- auto ToNameInfoOrErr = importSeq(E->getName(), E->getNameLoc());
- if (!ToNameInfoOrErr)
- return ToNameInfoOrErr.takeError();
- DeclarationNameInfo ToNameInfo(
- std::get<0>(*ToNameInfoOrErr), std::get<1>(*ToNameInfoOrErr));
+ Error Err = Error::success();
+ auto ToName = importChecked(Err, E->getName());
+ auto ToNameLoc = importChecked(Err, E->getNameLoc());
+ if (Err)
+ return std::move(Err);
+ DeclarationNameInfo ToNameInfo(ToName, ToNameLoc);
+
// Import additional name location/type info.
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
return std::move(Err);
@@ -7658,21 +7581,17 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
- auto Imp1 = importSeq(
- E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc());
- if (!Imp1)
- return Imp1.takeError();
-
- QualType ToType;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- std::tie(ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc) = *Imp1;
-
- auto Imp2 = importSeq(E->getName(), E->getNameLoc());
- if (!Imp2)
- return Imp2.takeError();
- DeclarationNameInfo ToNameInfo(std::get<0>(*Imp2), std::get<1>(*Imp2));
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToName = importChecked(Err, E->getName());
+ auto ToNameLoc = importChecked(Err, E->getNameLoc());
+ if (Err)
+ return std::move(Err);
+
+ DeclarationNameInfo ToNameInfo(ToName, ToNameLoc);
// Import additional name location/type info.
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
return std::move(Err);
@@ -7709,14 +7628,12 @@ ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) {
- auto Imp = importSeq(E->getCallee(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCallee;
- QualType ToType;
- SourceLocation ToRParenLoc;
- std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToCallee = importChecked(Err, E->getCallee());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
unsigned NumArgs = E->getNumArgs();
llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
@@ -7746,44 +7663,32 @@ ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
if (!ToCallOpOrErr)
return ToCallOpOrErr.takeError();
- SmallVector<LambdaCapture, 8> ToCaptures;
- ToCaptures.reserve(E->capture_size());
- for (const auto &FromCapture : E->captures()) {
- if (auto ToCaptureOrErr = import(FromCapture))
- ToCaptures.push_back(*ToCaptureOrErr);
- else
- return ToCaptureOrErr.takeError();
- }
-
SmallVector<Expr *, 8> ToCaptureInits(E->capture_size());
if (Error Err = ImportContainerChecked(E->capture_inits(), ToCaptureInits))
return std::move(Err);
- auto Imp = importSeq(
- E->getIntroducerRange(), E->getCaptureDefaultLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- SourceRange ToIntroducerRange;
- SourceLocation ToCaptureDefaultLoc, ToEndLoc;
- std::tie(ToIntroducerRange, ToCaptureDefaultLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToIntroducerRange = importChecked(Err, E->getIntroducerRange());
+ auto ToCaptureDefaultLoc = importChecked(Err, E->getCaptureDefaultLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
- return LambdaExpr::Create(
- Importer.getToContext(), ToClass, ToIntroducerRange,
- E->getCaptureDefault(), ToCaptureDefaultLoc, ToCaptures,
- E->hasExplicitParameters(), E->hasExplicitResultType(), ToCaptureInits,
- ToEndLoc, E->containsUnexpandedParameterPack());
+ return LambdaExpr::Create(Importer.getToContext(), ToClass, ToIntroducerRange,
+ E->getCaptureDefault(), ToCaptureDefaultLoc,
+ E->hasExplicitParameters(),
+ E->hasExplicitResultType(), ToCaptureInits,
+ ToEndLoc, E->containsUnexpandedParameterPack());
}
ExpectedStmt ASTNodeImporter::VisitInitListExpr(InitListExpr *E) {
- auto Imp = importSeq(E->getLBraceLoc(), E->getRBraceLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLBraceLoc, ToRBraceLoc;
- QualType ToType;
- std::tie(ToLBraceLoc, ToRBraceLoc, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToLBraceLoc = importChecked(Err, E->getLBraceLoc());
+ auto ToRBraceLoc = importChecked(Err, E->getRBraceLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToExprs(E->getNumInits());
if (Error Err = ImportContainerChecked(E->inits(), ToExprs))
@@ -7838,14 +7743,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXStdInitializerListExpr(
ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
CXXInheritedCtorInitExpr *E) {
- auto Imp = importSeq(E->getLocation(), E->getType(), E->getConstructor());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLocation;
- QualType ToType;
- CXXConstructorDecl *ToConstructor;
- std::tie(ToLocation, ToType, ToConstructor) = *Imp;
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXInheritedCtorInitExpr(
ToLocation, ToType, ToConstructor, E->constructsVBase(),
@@ -7853,13 +7756,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
}
ExpectedStmt ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
- auto Imp = importSeq(E->getType(), E->getCommonExpr(), E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToCommonExpr, *ToSubExpr;
- std::tie(ToType, ToCommonExpr, ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToCommonExpr = importChecked(Err, E->getCommonExpr());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArrayInitLoopExpr(
ToType, ToCommonExpr, ToSubExpr);
@@ -7890,20 +7792,15 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten(),
- E->getOperatorLoc(), E->getRParenLoc(), E->getAngleBrackets());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToSubExpr;
- TypeSourceInfo *ToTypeInfoAsWritten;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- SourceRange ToAngleBrackets;
- std::tie(
- ToType, ToSubExpr, ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc,
- ToAngleBrackets) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToAngleBrackets = importChecked(Err, E->getAngleBrackets());
+ if (Err)
+ return std::move(Err);
ExprValueKind VK = E->getValueKind();
CastKind CK = E->getCastKind();
@@ -7935,30 +7832,25 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getExprLoc(), E->getParameter(), E->getReplacement());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToExprLoc;
- NonTypeTemplateParmDecl *ToParameter;
- Expr *ToReplacement;
- std::tie(ToType, ToExprLoc, ToParameter, ToReplacement) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToExprLoc = importChecked(Err, E->getExprLoc());
+ auto ToParameter = importChecked(Err, E->getParameter());
+ auto ToReplacement = importChecked(Err, E->getReplacement());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement);
}
ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getBeginLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToType, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<TypeSourceInfo *, 4> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->getArgs(), ToArgs))
@@ -8100,6 +7992,18 @@ void ASTImporter::RegisterImportedDecl(Decl *FromD, Decl *ToD) {
MapImported(FromD, ToD);
}
+llvm::Expected<ExprWithCleanups::CleanupObject>
+ASTImporter::Import(ExprWithCleanups::CleanupObject From) {
+ if (auto *CLE = From.dyn_cast<CompoundLiteralExpr *>()) {
+ if (Expected<Expr *> R = Import(CLE))
+ return ExprWithCleanups::CleanupObject(cast<CompoundLiteralExpr>(*R));
+ }
+
+ // FIXME: Handle BlockDecl when we implement importing BlockExpr in
+ // ASTNodeImporter.
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+}
+
Expected<QualType> ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return QualType{};
@@ -8141,12 +8045,47 @@ Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
}
Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
- Attr *ToAttr = FromAttr->clone(ToContext);
- if (auto ToRangeOrErr = Import(FromAttr->getRange()))
- ToAttr->setRange(*ToRangeOrErr);
- else
- return ToRangeOrErr.takeError();
+ Attr *ToAttr = nullptr;
+ SourceRange ToRange;
+ if (Error Err = importInto(ToRange, FromAttr->getRange()))
+ return std::move(Err);
+ // FIXME: Is there some kind of AttrVisitor to use here?
+ switch (FromAttr->getKind()) {
+ case attr::Aligned: {
+ auto *From = cast<AlignedAttr>(FromAttr);
+ AlignedAttr *To;
+ auto CreateAlign = [&](bool IsAlignmentExpr, void *Alignment) {
+ return AlignedAttr::Create(ToContext, IsAlignmentExpr, Alignment, ToRange,
+ From->getSyntax(),
+ From->getSemanticSpelling());
+ };
+ if (From->isAlignmentExpr()) {
+ if (auto ToEOrErr = Import(From->getAlignmentExpr()))
+ To = CreateAlign(true, *ToEOrErr);
+ else
+ return ToEOrErr.takeError();
+ } else {
+ if (auto ToTOrErr = Import(From->getAlignmentType()))
+ To = CreateAlign(false, *ToTOrErr);
+ else
+ return ToTOrErr.takeError();
+ }
+ To->setInherited(From->isInherited());
+ To->setPackExpansion(From->isPackExpansion());
+ To->setImplicit(From->isImplicit());
+ ToAttr = To;
+ break;
+ }
+ default:
+ // FIXME: 'clone' copies every member but some of them should be imported.
+ // Handle other Attrs that have parameters that should be imported.
+ ToAttr = FromAttr->clone(ToContext);
+ ToAttr->setRange(ToRange);
+ break;
+ }
+ assert(ToAttr && "Attribute should be created.");
+
return ToAttr;
}
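
A condensed restatement of the dispatch above, since the reasoning is easy to lose in the diff: clone() copies an attribute's members verbatim, so for attributes whose arguments are AST nodes those pointers would keep referring into the source ASTContext. Only attr::Aligned is handled so far; everything else still takes the clone() path flagged by the FIXME.

    // case attr::Aligned:
    //   alignas(expr) -> Import(From->getAlignmentExpr())
    //   alignas(type) -> Import(From->getAlignmentType())
    //   then AlignedAttr::Create(ToContext, ...) builds a node owned by the
    //   destination context, and Inherited/PackExpansion/Implicit are copied.
    // default:
    //   FromAttr->clone(ToContext) + setRange(ToRange); any pointer-valued
    //   members still reference the source AST.
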
@@ -8254,7 +8193,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// FIXME Should we remove these Decls from the LookupTable,
// and from ImportedFromDecls?
}
- SavedImportPaths[FromD].clear();
+ SavedImportPaths.erase(FromD);
// Do not return ToDOrErr, error was taken out of it.
return make_error<ImportError>(ErrOut);
@@ -8287,7 +8226,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
Imported(FromD, ToD);
updateFlags(FromD, ToD);
- SavedImportPaths[FromD].clear();
+ SavedImportPaths.erase(FromD);
return ToDOrErr;
}
@@ -8304,15 +8243,22 @@ Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
// need it to have a definition.
if (auto *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
auto *FromRecord = cast<RecordDecl>(FromDC);
- if (ToRecord->isCompleteDefinition()) {
- // Do nothing.
- } else if (FromRecord->isCompleteDefinition()) {
+ if (ToRecord->isCompleteDefinition())
+ return ToDC;
+
+ // If FromRecord is not defined we need to force it to be.
+ // Simply calling CompleteDecl(...) for a RecordDecl will break some cases:
+ // it will start the definition but we never finish it.
+ // If there are base classes, they won't be imported and we will
+ // be missing anything that we inherit from those bases.
+ if (FromRecord->getASTContext().getExternalSource() &&
+ !FromRecord->isCompleteDefinition())
+ FromRecord->getASTContext().getExternalSource()->CompleteType(FromRecord);
+
+ if (FromRecord->isCompleteDefinition())
if (Error Err = ASTNodeImporter(*this).ImportDefinition(
FromRecord, ToRecord, ASTNodeImporter::IDK_Basic))
return std::move(Err);
- } else {
- CompleteDecl(ToRecord);
- }
} else if (auto *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
auto *FromEnum = cast<EnumDecl>(FromDC);
if (ToEnum->isCompleteDefinition()) {
@@ -8379,11 +8325,7 @@ Expected<Stmt *> ASTImporter::Import(Stmt *FromS) {
// constructors.
ToE->setValueKind(FromE->getValueKind());
ToE->setObjectKind(FromE->getObjectKind());
- ToE->setTypeDependent(FromE->isTypeDependent());
- ToE->setValueDependent(FromE->isValueDependent());
- ToE->setInstantiationDependent(FromE->isInstantiationDependent());
- ToE->setContainsUnexpandedParameterPack(
- FromE->containsUnexpandedParameterPack());
+ ToE->setDependence(FromE->getDependence());
}
// Record the imported statement object.
@@ -8685,12 +8627,21 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
} else {
const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
- if (!IsBuiltin) {
+ if (!IsBuiltin && !Cache->BufferOverridden) {
// Include location of this file.
ExpectedSLoc ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
if (!ToIncludeLoc)
return ToIncludeLoc.takeError();
+ // Every FileID that is not the main FileID needs to have a valid include
+ // location so that the include chain points to the main FileID. When
+ // importing the main FileID (which has no include location), we need to
+ // create a fake include location in the main file to keep this property
+ // intact.
+ SourceLocation ToIncludeLocOrFakeLoc = *ToIncludeLoc;
+ if (FromID == FromSM.getMainFileID())
+ ToIncludeLocOrFakeLoc = ToSM.getLocForStartOfFile(ToSM.getMainFileID());
+
if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
// FIXME: We probably want to use getVirtualFile(), so we don't hit the
// disk again
@@ -8702,7 +8653,7 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
// point to a valid file and we get no Entry here. In this case try with
// the memory buffer below.
if (Entry)
- ToID = ToSM.createFileID(*Entry, *ToIncludeLoc,
+ ToID = ToSM.createFileID(*Entry, ToIncludeLocOrFakeLoc,
FromSLoc.getFile().getFileCharacteristic());
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
index 7390329d4ed8..4d6fff8f3419 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -45,7 +45,11 @@ struct Builder : RecursiveASTVisitor<Builder> {
LT.add(RTy->getAsCXXRecordDecl());
else if (const auto *SpecTy = dyn_cast<TemplateSpecializationType>(Ty))
LT.add(SpecTy->getAsCXXRecordDecl());
- else if (isa<TypedefType>(Ty)) {
+ else if (const auto *SubstTy =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ if (SubstTy->getAsCXXRecordDecl())
+ LT.add(SubstTy->getAsCXXRecordDecl());
+ } else if (isa<TypedefType>(Ty)) {
// We do not put friend typedefs to the lookup table because
// ASTImporter does not organize typedefs into redecl chains.
} else {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index 91a2f3a8391b..8b5b2444f1e2 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -31,10 +31,9 @@
// }
// ```
// Indeed, it has its queue, which holds pairs of nodes, one from each graph,
-// this is the `DeclsToCheck` and it's pair is in `TentativeEquivalences`.
-// `TentativeEquivalences` also plays the role of the marking (`marked`)
-// functionality above, we use it to check whether we've already seen a pair of
-// nodes.
+// this is the `DeclsToCheck` member. `VisitedDecls` plays the role of the
+// marking (`marked`) functionality above; we use it to check whether we've
+// already seen a pair of nodes.
//
// We put the elements into the queue only in the toplevel decl check
// function:
@@ -57,11 +56,6 @@
// doing. Thus, static implementation functions must not call the **member**
// functions.
//
-// So, now `TentativeEquivalences` plays two roles. It is used to store the
-// second half of the decls which we want to compare, plus it plays a role in
-// closing the recursion. On a long term, we could refactor structural
-// equivalency to be more alike to the traditional BFS.
-//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTStructuralEquivalence.h"
@@ -623,6 +617,34 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
+ case Type::DependentSizedMatrix: {
+ const DependentSizedMatrixType *Mat1 = cast<DependentSizedMatrixType>(T1);
+ const DependentSizedMatrixType *Mat2 = cast<DependentSizedMatrixType>(T2);
+ // The element types, row and column expressions must be structurally
+ // equivalent.
+ if (!IsStructurallyEquivalent(Context, Mat1->getRowExpr(),
+ Mat2->getRowExpr()) ||
+ !IsStructurallyEquivalent(Context, Mat1->getColumnExpr(),
+ Mat2->getColumnExpr()) ||
+ !IsStructurallyEquivalent(Context, Mat1->getElementType(),
+ Mat2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *Mat1 = cast<ConstantMatrixType>(T1);
+ const ConstantMatrixType *Mat2 = cast<ConstantMatrixType>(T2);
+ // The element types must be structurally equivalent and the number of rows
+ // and columns must match.
+ if (!IsStructurallyEquivalent(Context, Mat1->getElementType(),
+ Mat2->getElementType()) ||
+ Mat1->getNumRows() != Mat2->getNumRows() ||
+ Mat1->getNumColumns() != Mat2->getNumColumns())
+ return false;
+ break;
+ }
+
case Type::FunctionProto: {
const auto *Proto1 = cast<FunctionProtoType>(T1);
const auto *Proto2 = cast<FunctionProtoType>(T2);
@@ -955,6 +977,24 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<PipeType>(T2)->getElementType()))
return false;
break;
+ case Type::ExtInt: {
+ const auto *Int1 = cast<ExtIntType>(T1);
+ const auto *Int2 = cast<ExtIntType>(T2);
+
+ if (Int1->isUnsigned() != Int2->isUnsigned() ||
+ Int1->getNumBits() != Int2->getNumBits())
+ return false;
+ break;
+ }
+ case Type::DependentExtInt: {
+ const auto *Int1 = cast<DependentExtIntType>(T1);
+ const auto *Int2 = cast<DependentExtIntType>(T2);
+
+ if (Int1->isUnsigned() != Int2->isUnsigned() ||
+ !IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(),
+ Int2->getNumBitsExpr()))
+ return false;
+ }
} // end switch
return true;
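
A concrete reading of the two new integer cases, with illustrative types only:

    //   _ExtInt(32)             vs _ExtInt(32)          -> structurally equivalent
    //   _ExtInt(32)             vs unsigned _ExtInt(32) -> not equivalent (signedness)
    //   _ExtInt(32)             vs _ExtInt(64)          -> not equivalent (bit width)
    //   _ExtInt(N), N dependent vs _ExtInt(M)           -> decided by structurally
    //                                                      comparing the NumBits
    //                                                      expressions
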
diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
index a5570c329ae8..34fc587694be 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
@@ -18,8 +18,7 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
-namespace clang {
-namespace ast_type_traits {
+using namespace clang;
const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "<None>" },
@@ -28,6 +27,7 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "NestedNameSpecifierLoc" },
{ NKI_None, "QualType" },
{ NKI_None, "TypeLoc" },
+ { NKI_None, "CXXBaseSpecifier" },
{ NKI_None, "CXXCtorInitializer" },
{ NKI_None, "NestedNameSpecifier" },
{ NKI_None, "Decl" },
@@ -40,8 +40,8 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
#include "clang/AST/TypeNodes.inc"
{ NKI_None, "OMPClause" },
-#define OPENMP_CLAUSE(TextualSpelling, Class) {NKI_OMPClause, #Class},
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class},
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
@@ -112,15 +112,15 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
switch (C.getClauseKind()) {
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: return ASTNodeKind(NKI_##Class);
-#include "clang/Basic/OpenMPKinds.def"
- case OMPC_threadprivate:
- case OMPC_uniform:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_unknown:
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case llvm::omp::Clause::Enum: \
+ return ASTNodeKind(NKI_##Class);
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Clause::Enum: \
llvm_unreachable("unexpected OpenMP clause kind");
+ default:
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("invalid stmt kind");
}
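
The switch above is generated by the usual .def-file X-macro technique: OMPKinds.def is a list of macro invocations, and each includer defines OMP_CLAUSE_CLASS / OMP_CLAUSE_NO_CLASS to stamp out whatever code it needs before including the file. A self-contained toy version of the same technique (hypothetical Color example, the list is inlined as a macro so the sketch compiles as a single file):

    #include <cstdio>

    // Stand-in for a .def file: a list of X(...) invocations.
    #define COLOR_LIST(X) X(Red, "red") X(Green, "green") X(Blue, "blue")

    enum class Color {
    #define COLOR_ENUM(Enum, Str) Enum,
      COLOR_LIST(COLOR_ENUM)
    #undef COLOR_ENUM
    };

    static const char *colorName(Color C) {
      switch (C) {
    #define COLOR_CASE(Enum, Str)                                              \
      case Color::Enum:                                                        \
        return Str;
        COLOR_LIST(COLOR_CASE)
    #undef COLOR_CASE
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", colorName(Color::Green)); }
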
@@ -152,13 +152,14 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
}
-void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const {
+void DynTypedNode::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
if (const Decl *D = get<Decl>())
D->dump(OS);
else if (const Stmt *S = get<Stmt>())
- S->dump(OS, SM);
+ S->dump(OS, Context);
else if (const Type *T = get<Type>())
- T->dump(OS);
+ T->dump(OS, Context);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
@@ -178,6 +179,3 @@ SourceRange DynTypedNode::getSourceRange() const {
return SourceRange(C->getBeginLoc(), C->getEndLoc());
return SourceRange();
}
-
-} // end namespace ast_type_traits
-} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index 0ef925ec1c90..7818fbb1918b 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -16,4 +16,155 @@
#include "clang/AST/Type.h"
using namespace clang;
+void LoopHintAttr::printPrettyPragma(raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ unsigned SpellingIndex = getAttributeSpellingListIndex();
+ // For "#pragma unroll" and "#pragma nounroll" the string "unroll" or
+ // "nounroll" is already emitted as the pragma name.
+ if (SpellingIndex == Pragma_nounroll ||
+ SpellingIndex == Pragma_nounroll_and_jam)
+ return;
+ else if (SpellingIndex == Pragma_unroll ||
+ SpellingIndex == Pragma_unroll_and_jam) {
+ OS << ' ' << getValueString(Policy);
+ return;
+ }
+
+ assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
+ OS << ' ' << getOptionName(option) << getValueString(Policy);
+}
+
+// Return a string containing the loop hint argument including the
+// enclosing parentheses.
+std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const {
+ std::string ValueName;
+ llvm::raw_string_ostream OS(ValueName);
+ OS << "(";
+ if (state == Numeric)
+ value->printPretty(OS, nullptr, Policy);
+ else if (state == Enable)
+ OS << "enable";
+ else if (state == Full)
+ OS << "full";
+ else if (state == AssumeSafety)
+ OS << "assume_safety";
+ else
+ OS << "disable";
+ OS << ")";
+ return OS.str();
+}
+
+// Return a string suitable for identifying this attribute in diagnostics.
+std::string
+LoopHintAttr::getDiagnosticName(const PrintingPolicy &Policy) const {
+ unsigned SpellingIndex = getAttributeSpellingListIndex();
+ if (SpellingIndex == Pragma_nounroll)
+ return "#pragma nounroll";
+ else if (SpellingIndex == Pragma_unroll)
+ return "#pragma unroll" +
+ (option == UnrollCount ? getValueString(Policy) : "");
+ else if (SpellingIndex == Pragma_nounroll_and_jam)
+ return "#pragma nounroll_and_jam";
+ else if (SpellingIndex == Pragma_unroll_and_jam)
+ return "#pragma unroll_and_jam" +
+ (option == UnrollAndJamCount ? getValueString(Policy) : "");
+
+ assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
+ return getOptionName(option) + getValueString(Policy);
+}
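
Worked examples of what the three helpers above print, derived from the code itself (the option spelling comes from getOptionName(), assumed here to be "vectorize_width"):

    // For "#pragma clang loop vectorize_width(4)":
    //   getValueString(Policy)     -> "(4)"
    //   printPrettyPragma(OS, ...) -> " vectorize_width(4)"
    //   getDiagnosticName(Policy)  -> "vectorize_width(4)"
    // For "#pragma unroll(8)":
    //   printPrettyPragma(OS, ...) -> " (8)"   (the pragma name itself is
    //                                           printed by the caller)
    //   getDiagnosticName(Policy)  -> "#pragma unroll(8)"
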
+
+void OMPDeclareSimdDeclAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ if (getBranchState() != BS_Undefined)
+ OS << ' ' << ConvertBranchStateTyToStr(getBranchState());
+ if (auto *E = getSimdlen()) {
+ OS << " simdlen(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ if (uniforms_size() > 0) {
+ OS << " uniform";
+ StringRef Sep = "(";
+ for (auto *E : uniforms()) {
+ OS << Sep;
+ E->printPretty(OS, nullptr, Policy);
+ Sep = ", ";
+ }
+ OS << ")";
+ }
+ alignments_iterator NI = alignments_begin();
+ for (auto *E : aligneds()) {
+ OS << " aligned(";
+ E->printPretty(OS, nullptr, Policy);
+ if (*NI) {
+ OS << ": ";
+ (*NI)->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+ ++NI;
+ }
+ steps_iterator I = steps_begin();
+ modifiers_iterator MI = modifiers_begin();
+ for (auto *E : linears()) {
+ OS << " linear(";
+ if (*MI != OMPC_LINEAR_unknown)
+ OS << getOpenMPSimpleClauseTypeName(llvm::omp::Clause::OMPC_linear, *MI)
+ << "(";
+ E->printPretty(OS, nullptr, Policy);
+ if (*MI != OMPC_LINEAR_unknown)
+ OS << ")";
+ if (*I) {
+ OS << ": ";
+ (*I)->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+ ++I;
+ ++MI;
+ }
+}
+
+void OMPDeclareTargetDeclAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ // Use fake syntax because it is for testing and debugging purposes only.
+ if (getDevType() != DT_Any)
+ OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")";
+ if (getMapType() != MT_To)
+ OS << ' ' << ConvertMapTypeTyToStr(getMapType());
+}
+
+llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
+ if (!VD->hasAttrs())
+ return llvm::None;
+ if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType();
+
+ return llvm::None;
+}
+
+llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
+OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
+ if (!VD->hasAttrs())
+ return llvm::None;
+ if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getDevType();
+
+ return llvm::None;
+}
+
+namespace clang {
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
+}
+
+void OMPDeclareVariantAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ if (const Expr *E = getVariantFuncRef()) {
+ OS << "(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ OS << " match(" << traitInfos << ")";
+}
+
#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
index 0377bd324cb6..8af97119e3cf 100644
--- a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
@@ -147,37 +147,27 @@ CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const {
return false;
}
-bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
- bool AllowShortCircuit) const {
+bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches) const {
SmallVector<const CXXRecordDecl*, 8> Queue;
const CXXRecordDecl *Record = this;
- bool AllMatches = true;
while (true) {
for (const auto &I : Record->bases()) {
const RecordType *Ty = I.getType()->getAs<RecordType>();
- if (!Ty) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
- }
+ if (!Ty)
+ return false;
CXXRecordDecl *Base =
cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
if (!Base ||
(Base->isDependentContext() &&
!Base->isCurrentInstantiation(Record))) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
+ return false;
}
Queue.push_back(Base);
- if (!BaseMatches(Base)) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
- }
+ if (!BaseMatches(Base))
+ return false;
}
if (Queue.empty())
@@ -185,7 +175,7 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
Record = Queue.pop_back_val(); // not actually a queue.
}
- return AllMatches;
+ return true;
}
bool CXXBasePaths::lookupInBases(ASTContext &Context,
diff --git a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
index b306fcbb154f..bdc0dd47fb7d 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
@@ -8,6 +8,7 @@
#include "clang/AST/CommentCommandTraits.h"
#include "llvm/ADT/STLExtras.h"
+#include <cassert>
namespace clang {
namespace comments {
diff --git a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
index 53c1832d1dd2..7642e73fa171 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
@@ -134,7 +135,9 @@ void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
unsigned DiagSelect;
switch (Comment->getCommandID()) {
case CommandTraits::KCI_class:
- DiagSelect = (!isClassOrStructDecl() && !isClassTemplateDecl()) ? 1 : 0;
+ DiagSelect =
+ (!isClassOrStructOrTagTypedefDecl() && !isClassTemplateDecl()) ? 1
+ : 0;
// Allow @class command on @interface declarations.
// FIXME. Currently, \class and @class are indistinguishable. So,
// \class is also allowed on an @interface declaration
@@ -148,7 +151,7 @@ void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
DiagSelect = !isObjCProtocolDecl() ? 3 : 0;
break;
case CommandTraits::KCI_struct:
- DiagSelect = !isClassOrStructDecl() ? 4 : 0;
+ DiagSelect = !isClassOrStructOrTagTypedefDecl() ? 4 : 0;
break;
case CommandTraits::KCI_union:
DiagSelect = !isUnionDecl() ? 5 : 0;
@@ -688,7 +691,7 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
FD->doesThisDeclarationHaveABody())
return;
- const LangOptions &LO = FD->getASTContext().getLangOpts();
+ const LangOptions &LO = FD->getLangOpts();
const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C2x;
StringRef AttributeSpelling =
DoubleSquareBracket ? "[[deprecated]]" : "__attribute__((deprecated))";
@@ -935,15 +938,50 @@ bool Sema::isUnionDecl() {
return RD->isUnion();
return false;
}
+static bool isClassOrStructDeclImpl(const Decl *D) {
+ if (auto *record = dyn_cast_or_null<RecordDecl>(D))
+ return !record->isUnion();
+
+ return false;
+}
bool Sema::isClassOrStructDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
- return ThisDeclInfo->CurrentDecl &&
- isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
- !isUnionDecl();
+
+ if (!ThisDeclInfo->CurrentDecl)
+ return false;
+
+ return isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl);
+}
+
+bool Sema::isClassOrStructOrTagTypedefDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+
+ if (!ThisDeclInfo->CurrentDecl)
+ return false;
+
+ if (isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl))
+ return true;
+
+ if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl)) {
+ auto UnderlyingType = ThisTypedefDecl->getUnderlyingType();
+ if (auto ThisElaboratedType = dyn_cast<ElaboratedType>(UnderlyingType)) {
+ auto DesugaredType = ThisElaboratedType->desugar();
+ if (auto *DesugaredTypePtr = DesugaredType.getTypePtrOrNull()) {
+ if (auto *ThisRecordType = dyn_cast<RecordType>(DesugaredTypePtr)) {
+ return isClassOrStructDeclImpl(ThisRecordType->getAsRecordDecl());
+ }
+ }
+ }
+ }
+
+ return false;
}
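
A hedged example of the kind of declaration the new typedef branch is meant to accept for \struct and \class (illustrative code, not part of the patch):

    /// \struct point_t
    /// A 2-D point.
    typedef struct point { int x, y; } point_t;

The typedef's underlying type is the elaborated `struct point`, which desugars to a RecordType whose declaration is a non-union RecordDecl, so isClassOrStructOrTagTypedefDecl() returns true and the command no longer selects the mismatch diagnostic.
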
bool Sema::isClassTemplateDecl() {
diff --git a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
index 07673230357f..6b6826c02a12 100644
--- a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ComparisonCategories.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
new file mode 100644
index 000000000000..2333993dbeb4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
@@ -0,0 +1,796 @@
+//===- ComputeDependence.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "llvm/ADT/ArrayRef.h"
+
+using namespace clang;
+
+ExprDependence clang::computeDependence(FullExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(OpaqueValueExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (auto *S = E->getSourceExpr())
+ D |= S->getDependence();
+ assert(!(D & ExprDependence::UnexpandedPack));
+ return D;
+}
+
+ExprDependence clang::computeDependence(ParenExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(UnaryOperator *E) {
+ return toExprDependence(E->getType()->getDependence()) |
+ E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(UnaryExprOrTypeTraitExpr *E) {
+ // Never type-dependent (C++ [temp.dep.expr]p3).
+ // Value-dependent if the argument is type-dependent.
+ if (E->isArgumentType())
+ return turnTypeToValueDependence(
+ toExprDependence(E->getArgumentType()->getDependence()));
+
+ auto ArgDeps = E->getArgumentExpr()->getDependence();
+ auto Deps = ArgDeps & ~ExprDependence::TypeValue;
+ // Value-dependent if the argument is type-dependent.
+ if (ArgDeps & ExprDependence::Type)
+ Deps |= ExprDependence::Value;
+ // Check to see if we are in the situation where alignof(decl) should be
+ // dependent because decl's alignment is dependent.
+ auto ExprKind = E->getKind();
+ if (ExprKind != UETT_AlignOf && ExprKind != UETT_PreferredAlignOf)
+ return Deps;
+ if ((Deps & ExprDependence::Value) && (Deps & ExprDependence::Instantiation))
+ return Deps;
+
+ auto *NoParens = E->getArgumentExpr()->IgnoreParens();
+ const ValueDecl *D = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(NoParens))
+ D = DRE->getDecl();
+ else if (const auto *ME = dyn_cast<MemberExpr>(NoParens))
+ D = ME->getMemberDecl();
+ if (!D)
+ return Deps;
+ for (const auto *I : D->specific_attrs<AlignedAttr>()) {
+ if (I->isAlignmentErrorDependent())
+ Deps |= ExprDependence::Error;
+ if (I->isAlignmentDependent())
+ Deps |= ExprDependence::ValueInstantiation;
+ }
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(ArraySubscriptExpr *E) {
+ return E->getLHS()->getDependence() | E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(MatrixSubscriptExpr *E) {
+ return E->getBase()->getDependence() | E->getRowIdx()->getDependence() |
+ (E->getColumnIdx() ? E->getColumnIdx()->getDependence()
+ : ExprDependence::None);
+}
+
+ExprDependence clang::computeDependence(CompoundLiteralExpr *E) {
+ return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()) |
+ turnTypeToValueDependence(E->getInitializer()->getDependence());
+}
+
+ExprDependence clang::computeDependence(CastExpr *E) {
+ // Cast expressions are type-dependent if the type is
+ // dependent (C++ [temp.dep.expr]p3).
+ // Cast expressions are value-dependent if the type is
+ // dependent or if the subexpression is value-dependent.
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (E->getStmtClass() == Stmt::ImplicitCastExprClass) {
+ // An implicit cast expression doesn't (lexically) contain an
+ // unexpanded pack, even if its target type does.
+ D &= ~ExprDependence::UnexpandedPack;
+ }
+ if (auto *S = E->getSubExpr())
+ D |= S->getDependence() & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(BinaryOperator *E) {
+ return E->getLHS()->getDependence() | E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ConditionalOperator *E) {
+ // The type of the conditional operator depends on the type of the conditional
+ // to support the GCC vector conditional extension. Additionally,
+ // [temp.dep.expr] does specify that this should be dependent on ALL
+ // subexpressions.
+ return E->getCond()->getDependence() | E->getLHS()->getDependence() |
+ E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(BinaryConditionalOperator *E) {
+ return E->getCommon()->getDependence() | E->getFalseExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ // Propagate dependence of the result.
+ if (const auto *CompoundExprResult =
+ dyn_cast_or_null<ValueStmt>(E->getSubStmt()->getStmtExprResult()))
+ if (const Expr *ResultExpr = CompoundExprResult->getExprStmt())
+ D |= ResultExpr->getDependence();
+ // Note: we treat a statement-expression in a dependent context as always
+ // being value- and instantiation-dependent. This matches the behavior of
+ // lambda-expressions and GCC.
+ if (TemplateDepth)
+ D |= ExprDependence::ValueInstantiation;
+ // A param pack cannot be expanded over stmtexpr boundaries.
+ return D & ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(ConvertVectorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ E->getSrcExpr()->getDependence();
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(ChooseExpr *E) {
+ if (E->isConditionDependent())
+ return ExprDependence::TypeValueInstantiation |
+ E->getCond()->getDependence() | E->getLHS()->getDependence() |
+ E->getRHS()->getDependence();
+
+ auto Cond = E->getCond()->getDependence();
+ auto Active = E->getLHS()->getDependence();
+ auto Inactive = E->getRHS()->getDependence();
+ if (!E->isConditionTrue())
+ std::swap(Active, Inactive);
+ // Take type- and value- dependency from the active branch. Propagate all
+ // other flags from all branches.
+ return (Active & ExprDependence::TypeValue) |
+ ((Cond | Active | Inactive) & ~ExprDependence::TypeValue);
+}
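
A worked example of the flag algebra used here and throughout this file (ExprDependence is the bitmask from DependenceFlags.h; the concrete flags below are illustrative only):

    //   Cond     = Instantiation
    //   Active   = Type | Instantiation        (the branch that is taken)
    //   Inactive = Value | UnexpandedPack      (the discarded branch)
    //
    //   result = (Active & TypeValue)
    //          | ((Cond | Active | Inactive) & ~TypeValue)
    //          = Type | Instantiation | UnexpandedPack
    //
    // Type- and value-dependence are taken only from the chosen branch, while
    // instantiation-dependence and unexpanded-pack flags propagate from all
    // three operands.
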
+
+ExprDependence clang::computeDependence(ParenListExpr *P) {
+ auto D = ExprDependence::None;
+ for (auto *E : P->exprs())
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(VAArgExpr *E) {
+ auto D =
+ toExprDependence(E->getWrittenTypeInfo()->getType()->getDependence()) |
+ (E->getSubExpr()->getDependence() & ~ExprDependence::Type);
+ return D & ~ExprDependence::Value;
+}
+
+ExprDependence clang::computeDependence(NoInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ (ExprDependence::Instantiation | ExprDependence::Error);
+}
+
+ExprDependence clang::computeDependence(ArrayInitLoopExpr *E) {
+ auto D = E->getCommonExpr()->getDependence() |
+ E->getSubExpr()->getDependence() | ExprDependence::Instantiation;
+ if (!E->getType()->isInstantiationDependentType())
+ D &= ~ExprDependence::Instantiation;
+ return turnTypeToValueDependence(D);
+}
+
+ExprDependence clang::computeDependence(ImplicitValueInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ExprDependence::Instantiation;
+}
+
+ExprDependence clang::computeDependence(ExtVectorElementExpr *E) {
+ return E->getBase()->getDependence();
+}
+
+ExprDependence clang::computeDependence(BlockExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (E->getBlockDecl()->isDependentContext())
+ D |= ExprDependence::Instantiation;
+ return D & ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(AsTypeExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ E->getSrcExpr()->getDependence();
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXRewrittenBinaryOperator *E) {
+ return E->getSemanticForm()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXStdInitializerListExpr *E) {
+ auto D = turnTypeToValueDependence(E->getSubExpr()->getDependence());
+ D |= toExprDependence(E->getType()->getDependence()) &
+ (ExprDependence::Type | ExprDependence::Error);
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXTypeidExpr *E) {
+ auto D = ExprDependence::None;
+ if (E->isTypeOperand())
+ D = toExprDependence(
+ E->getTypeOperandSourceInfo()->getType()->getDependence());
+ else
+ D = turnTypeToValueDependence(E->getExprOperand()->getDependence());
+ // typeid is never type-dependent (C++ [temp.dep.expr]p4)
+ return D & ~ExprDependence::Type;
+}
+
+ExprDependence clang::computeDependence(MSPropertyRefExpr *E) {
+ return E->getBaseExpr()->getDependence() & ~ExprDependence::Type;
+}
+
+ExprDependence clang::computeDependence(MSPropertySubscriptExpr *E) {
+ return E->getIdx()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXUuidofExpr *E) {
+ if (E->isTypeOperand())
+ return turnTypeToValueDependence(toExprDependence(
+ E->getTypeOperandSourceInfo()->getType()->getDependence()));
+
+ return turnTypeToValueDependence(E->getExprOperand()->getDependence());
+}
+
+ExprDependence clang::computeDependence(CXXThisExpr *E) {
+ // 'this' is type-dependent if the class type of the enclosing
+ // member function is dependent (C++ [temp.dep.expr]p2)
+ auto D = toExprDependence(E->getType()->getDependence());
+ assert(!(D & ExprDependence::UnexpandedPack));
+ return D;
+}
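+// For illustration (hypothetical snippet):
+//
+//   template <typename T> struct S {
+//     void f() { (void)this; }   // 'this' has type S<T>*, a dependent type,
+//   };                           // so the expression is type-dependent.
+//
+// In a non-template member function 'this' has a non-dependent type.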
+
+ExprDependence clang::computeDependence(CXXThrowExpr *E) {
+ auto *Op = E->getSubExpr();
+ if (!Op)
+ return ExprDependence::None;
+ return Op->getDependence() & ~ExprDependence::TypeValue;
+}
+
+ExprDependence clang::computeDependence(CXXBindTemporaryExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXScalarValueInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ~ExprDependence::TypeValue;
+}
+
+ExprDependence clang::computeDependence(CXXDeleteExpr *E) {
+ return turnTypeToValueDependence(E->getArgument()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ArrayTypeTraitExpr *E) {
+ auto D = toExprDependence(E->getQueriedType()->getDependence());
+ if (auto *Dim = E->getDimensionExpression())
+ D |= Dim->getDependence();
+ return turnTypeToValueDependence(D);
+}
+
+ExprDependence clang::computeDependence(ExpressionTraitExpr *E) {
+ // Never type-dependent.
+ auto D = E->getQueriedExpression()->getDependence() & ~ExprDependence::Type;
+ // Value-dependent if the argument is type-dependent.
+ if (E->getQueriedExpression()->isTypeDependent())
+ D |= ExprDependence::Value;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXNoexceptExpr *E, CanThrowResult CT) {
+ auto D = E->getOperand()->getDependence() & ~ExprDependence::TypeValue;
+ if (CT == CT_Dependent)
+ D |= ExprDependence::ValueInstantiation;
+ return D;
+}
+
+ExprDependence clang::computeDependence(PackExpansionExpr *E) {
+ return (E->getPattern()->getDependence() & ~ExprDependence::UnexpandedPack) |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(SubstNonTypeTemplateParmExpr *E) {
+ return E->getReplacement()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CoroutineSuspendExpr *E) {
+ if (auto *Resume = E->getResumeExpr())
+ return (Resume->getDependence() &
+ (ExprDependence::TypeValue | ExprDependence::Error)) |
+ (E->getCommonExpr()->getDependence() & ~ExprDependence::TypeValue);
+ return E->getCommonExpr()->getDependence() |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(DependentCoawaitExpr *E) {
+ return E->getOperand()->getDependence() |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(ObjCBoxedExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ObjCEncodeExpr *E) {
+ return toExprDependence(E->getEncodedType()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ObjCIvarRefExpr *E) {
+ return turnTypeToValueDependence(E->getBase()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ObjCPropertyRefExpr *E) {
+ if (E->isObjectReceiver())
+ return E->getBase()->getDependence() & ~ExprDependence::Type;
+ if (E->isSuperReceiver())
+ return toExprDependence(E->getSuperReceiverType()->getDependence()) &
+ ~ExprDependence::TypeValue;
+ assert(E->isClassReceiver());
+ return ExprDependence::None;
+}
+
+ExprDependence clang::computeDependence(ObjCSubscriptRefExpr *E) {
+ return E->getBaseExpr()->getDependence() | E->getKeyExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ObjCIsaExpr *E) {
+ return E->getBase()->getDependence() & ~ExprDependence::Type &
+ ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(ObjCIndirectCopyRestoreExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(OMPArraySectionExpr *E) {
+ auto D = E->getBase()->getDependence();
+ if (auto *LB = E->getLowerBound())
+ D |= LB->getDependence();
+ if (auto *Len = E->getLength())
+ D |= Len->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OMPArrayShapingExpr *E) {
+ auto D = E->getBase()->getDependence() |
+ toExprDependence(E->getType()->getDependence());
+ for (Expr *Dim: E->getDimensions())
+ if (Dim)
+ D |= Dim->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OMPIteratorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ if (auto *VD = cast_or_null<ValueDecl>(E->getIteratorDecl(I)))
+ D |= toExprDependence(VD->getType()->getDependence());
+ OMPIteratorExpr::IteratorRange IR = E->getIteratorRange(I);
+ if (Expr *BE = IR.Begin)
+ D |= BE->getDependence();
+ if (Expr *EE = IR.End)
+ D |= EE->getDependence();
+ if (Expr *SE = IR.Step)
+ D |= SE->getDependence();
+ }
+ return D;
+}
+
+/// Compute the type-, value-, and instantiation-dependence of a declaration
+/// reference based on the declaration being referenced.
+ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
+ auto Deps = ExprDependence::None;
+
+ if (auto *NNS = E->getQualifier())
+ Deps |= toExprDependence(NNS->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+
+ if (auto *FirstArg = E->getTemplateArgs()) {
+ unsigned NumArgs = E->getNumTemplateArgs();
+ for (auto *Arg = FirstArg, *End = FirstArg + NumArgs; Arg < End; ++Arg)
+ Deps |= toExprDependence(Arg->getArgument().getDependence());
+ }
+
+ auto *Decl = E->getDecl();
+ auto Type = E->getType();
+
+ if (Decl->isParameterPack())
+ Deps |= ExprDependence::UnexpandedPack;
+ Deps |= toExprDependence(Type->getDependence()) & ExprDependence::Error;
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+
+ // (TD) - an identifier that was declared with dependent type
+ // (VD) - a name declared with a dependent type,
+ if (Type->isDependentType())
+ return Deps | ExprDependence::TypeValueInstantiation;
+ else if (Type->isInstantiationDependentType())
+ Deps |= ExprDependence::Instantiation;
+
+ // (TD) - a conversion-function-id that specifies a dependent type
+ if (Decl->getDeclName().getNameKind() ==
+ DeclarationName::CXXConversionFunctionName) {
+ QualType T = Decl->getDeclName().getCXXNameType();
+ if (T->isDependentType())
+ return Deps | ExprDependence::TypeValueInstantiation;
+
+ if (T->isInstantiationDependentType())
+ Deps |= ExprDependence::Instantiation;
+ }
+
+ // (VD) - the name of a non-type template parameter,
+ if (isa<NonTypeTemplateParmDecl>(Decl))
+ return Deps | ExprDependence::ValueInstantiation;
+
+ // (VD) - a constant with integral or enumeration type and is
+ // initialized with an expression that is value-dependent.
+ // (VD) - a constant with literal type and is initialized with an
+ // expression that is value-dependent [C++11].
+ // (VD) - FIXME: Missing from the standard:
+ // - an entity with reference type and is initialized with an
+ // expression that is value-dependent [C++11]
+ if (VarDecl *Var = dyn_cast<VarDecl>(Decl)) {
+ if ((Ctx.getLangOpts().CPlusPlus11
+ ? Var->getType()->isLiteralType(Ctx)
+ : Var->getType()->isIntegralOrEnumerationType()) &&
+ (Var->getType().isConstQualified() ||
+ Var->getType()->isReferenceType())) {
+ if (const Expr *Init = Var->getAnyInitializer())
+ if (Init->isValueDependent()) {
+ Deps |= ExprDependence::ValueInstantiation;
+ }
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext()) {
+ Deps |= ExprDependence::ValueInstantiation;
+ TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
+ if (TInfo->getType()->isIncompleteArrayType())
+ Deps |= ExprDependence::Type;
+ }
+
+ return Deps;
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (isa<CXXMethodDecl>(Decl) && Decl->getDeclContext()->isDependentContext())
+ Deps |= ExprDependence::ValueInstantiation;
+ return Deps;
+}
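+// Two small illustrations of the (TD)/(VD) rules above (hypothetical names):
+//
+//   template <int N, typename T> void f(T t) {
+//     int a[N];    // 'N' names a non-type template parameter: value-dependent
+//     T copy = t;  // 't' was declared with the dependent type T: type- and
+//   }              // value-dependent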
+
+ExprDependence clang::computeDependence(RecoveryExpr *E) {
+ // RecoveryExpr is
+ // - always value-dependent, and therefore instantiation dependent
+ // - contains errors (ExprDependence::Error), by definition
+ // - type-dependent if we don't know the type (fallback to an opaque
+ // dependent type), or the type is known and dependent, or it has
+ // type-dependent subexpressions.
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ ExprDependence::ValueInstantiation | ExprDependence::Error;
+ // FIXME: remove the type-dependent bit from subexpressions, if the
+ // RecoveryExpr has a non-dependent type.
+ for (auto *S : E->subExpressions())
+ D |= S->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(PredefinedExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(CallExpr *E,
+ llvm::ArrayRef<Expr *> PreArgs) {
+ auto D = E->getCallee()->getDependence();
+ for (auto *A : llvm::makeArrayRef(E->getArgs(), E->getNumArgs())) {
+ if (A)
+ D |= A->getDependence();
+ }
+ for (auto *A : PreArgs)
+ D |= A->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OffsetOfExpr *E) {
+ auto D = turnTypeToValueDependence(
+ toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()));
+ for (unsigned I = 0, N = E->getNumExpressions(); I < N; ++I)
+ D |= turnTypeToValueDependence(E->getIndexExpr(I)->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(MemberExpr *E) {
+ auto *MemberDecl = E->getMemberDecl();
+ auto D = E->getBase()->getDependence();
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
+ DeclContext *DC = MemberDecl->getDeclContext();
+ // dyn_cast_or_null is used to handle ObjC variables, which do not
+ // have a declaration context.
+ CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC);
+ if (RD && RD->isDependentContext() && RD->isCurrentInstantiation(DC)) {
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ }
+
+ // Bitfield with value-dependent width is type-dependent.
+ if (FD && FD->isBitField() && FD->getBitWidth()->isValueDependent()) {
+ D |= ExprDependence::Type;
+ }
+ }
+ // FIXME: move remaining dependence computation from MemberExpr::Create()
+ return D;
+}
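+// The bit-field case above arises in code like (hypothetical):
+//
+//   template <int N> struct B {
+//     int Bits : N;
+//     int get() { return Bits; }  // 'Bits' is type-dependent: integral
+//   };                            // promotion of a bit-field depends on its
+//                                 // width, and the width is value-dependent.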
+
+ExprDependence clang::computeDependence(InitListExpr *E) {
+ auto D = ExprDependence::None;
+ for (auto *A : E->inits())
+ D |= A->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(ShuffleVectorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (auto *C : llvm::makeArrayRef(E->getSubExprs(), E->getNumSubExprs()))
+ D |= C->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(GenericSelectionExpr *E,
+ bool ContainsUnexpandedPack) {
+ auto D = ContainsUnexpandedPack ? ExprDependence::UnexpandedPack
+ : ExprDependence::None;
+ for (auto *AE : E->getAssocExprs())
+ D |= AE->getDependence() & ExprDependence::Error;
+ D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+
+ if (E->isResultDependent())
+ return D | ExprDependence::TypeValueInstantiation;
+ return D | (E->getResultExpr()->getDependence() &
+ ~ExprDependence::UnexpandedPack);
+}
+
+ExprDependence clang::computeDependence(DesignatedInitExpr *E) {
+ auto Deps = E->getInit()->getDependence();
+ for (auto D : E->designators()) {
+ auto DesignatorDeps = ExprDependence::None;
+ if (D.isArrayDesignator())
+ DesignatorDeps |= E->getArrayIndex(D)->getDependence();
+ else if (D.isArrayRangeDesignator())
+ DesignatorDeps |= E->getArrayRangeStart(D)->getDependence() |
+ E->getArrayRangeEnd(D)->getDependence();
+ Deps |= DesignatorDeps;
+ if (DesignatorDeps & ExprDependence::TypeValue)
+ Deps |= ExprDependence::TypeValueInstantiation;
+ }
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(PseudoObjectExpr *O) {
+ auto D = O->getSyntacticForm()->getDependence();
+ for (auto *E : O->semantics())
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(AtomicExpr *A) {
+ auto D = ExprDependence::None;
+ for (auto *E : llvm::makeArrayRef(A->getSubExprs(), A->getNumSubExprs()))
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXNewExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ auto Size = E->getArraySize();
+ if (Size.hasValue() && *Size)
+ D |= turnTypeToValueDependence((*Size)->getDependence());
+ if (auto *I = E->getInitializer())
+ D |= turnTypeToValueDependence(I->getDependence());
+ for (auto *A : E->placement_arguments())
+ D |= turnTypeToValueDependence(A->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
+ auto D = E->getBase()->getDependence();
+ if (!E->getDestroyedType().isNull())
+ D |= toExprDependence(E->getDestroyedType()->getDependence());
+ if (auto *ST = E->getScopeTypeInfo())
+ D |= turnTypeToValueDependence(
+ toExprDependence(ST->getType()->getDependence()));
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+ return D;
+}
+
+static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
+ auto D = ExprDependence::None;
+ if (Name.isInstantiationDependent())
+ D |= ExprDependence::Instantiation;
+ if (Name.containsUnexpandedParameterPack())
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
+ExprDependence
+clang::computeDependence(OverloadExpr *E, bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack) {
+ auto Deps = ExprDependence::None;
+ if (KnownDependent)
+ Deps |= ExprDependence::TypeValue;
+ if (KnownInstantiationDependent)
+ Deps |= ExprDependence::Instantiation;
+ if (KnownContainsUnexpandedParameterPack)
+ Deps |= ExprDependence::UnexpandedPack;
+ Deps |= getDependenceInExpr(E->getNameInfo());
+ if (auto *Q = E->getQualifier())
+ Deps |= toExprDependence(Q->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+ for (auto *D : E->decls()) {
+ if (D->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(D))
+ Deps |= ExprDependence::TypeValueInstantiation;
+ }
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ for (auto A : E->template_arguments())
+ Deps |= toExprDependence(A.getArgument().getDependence());
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
+ auto D = ExprDependence::TypeValue;
+ D |= getDependenceInExpr(E->getNameInfo());
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence());
+ for (auto A : E->template_arguments())
+ D |= toExprDependence(A.getArgument().getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXConstructExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (auto *A : E->arguments())
+ D |= A->getDependence() & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(LambdaExpr *E,
+ bool ContainsUnexpandedParameterPack) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (ContainsUnexpandedParameterPack)
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXUnresolvedConstructExpr *E) {
+ auto D = ExprDependence::ValueInstantiation;
+ D |= toExprDependence(E->getType()->getDependence());
+ if (E->getType()->getContainedDeducedType())
+ D |= ExprDependence::Type;
+ for (auto *A : E->arguments())
+ D |= A->getDependence() &
+ (ExprDependence::UnexpandedPack | ExprDependence::Error);
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
+ auto D = ExprDependence::TypeValueInstantiation;
+ if (!E->isImplicitAccess())
+ D |= E->getBase()->getDependence();
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence());
+ D |= getDependenceInExpr(E->getMemberNameInfo());
+ for (auto A : E->template_arguments())
+ D |= toExprDependence(A.getArgument().getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(MaterializeTemporaryExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXFoldExpr *E) {
+ auto D = ExprDependence::TypeValueInstantiation;
+ for (const auto *C : {E->getLHS(), E->getRHS()}) {
+ if (C)
+ D |= C->getDependence() & ~ExprDependence::UnexpandedPack;
+ }
+ return D;
+}
+
+ExprDependence clang::computeDependence(TypeTraitExpr *E) {
+ auto D = ExprDependence::None;
+ for (const auto *A : E->getArgs())
+ D |=
+ toExprDependence(A->getType()->getDependence()) & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(ConceptSpecializationExpr *E,
+ bool ValueDependent) {
+ auto TA = TemplateArgumentDependence::None;
+ const auto InterestingDeps = TemplateArgumentDependence::Instantiation |
+ TemplateArgumentDependence::UnexpandedPack;
+ for (const TemplateArgumentLoc &ArgLoc :
+ E->getTemplateArgsAsWritten()->arguments()) {
+ TA |= ArgLoc.getArgument().getDependence() & InterestingDeps;
+ if (TA == InterestingDeps)
+ break;
+ }
+
+ ExprDependence D =
+ ValueDependent ? ExprDependence::Value : ExprDependence::None;
+ return D | toExprDependence(TA);
+}
+
+ExprDependence clang::computeDependence(ObjCArrayLiteral *E) {
+ auto D = ExprDependence::None;
+ Expr **Elements = E->getElements();
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I)
+ D |= turnTypeToValueDependence(Elements[I]->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(ObjCDictionaryLiteral *E) {
+ auto Deps = ExprDependence::None;
+ for (unsigned I = 0, N = E->getNumElements(); I < N; ++I) {
+ auto KV = E->getKeyValueElement(I);
+ auto KVDeps = turnTypeToValueDependence(KV.Key->getDependence() |
+ KV.Value->getDependence());
+ if (KV.EllipsisLoc.isValid())
+ KVDeps &= ~ExprDependence::UnexpandedPack;
+ Deps |= KVDeps;
+ }
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(ObjCMessageExpr *E) {
+ auto D = ExprDependence::None;
+ if (auto *R = E->getInstanceReceiver())
+ D |= R->getDependence();
+ else
+ D |= toExprDependence(E->getType()->getDependence());
+ for (auto *A : E->arguments())
+ D |= A->getDependence();
+ return D;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/DataCollection.cpp b/contrib/llvm-project/clang/lib/AST/DataCollection.cpp
index 8e67c101dee1..d3f2c22e9cc3 100644
--- a/contrib/llvm-project/clang/lib/AST/DataCollection.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DataCollection.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/DataCollection.h"
-
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 0d30f64b992e..5c0a98815dd7 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -892,6 +892,10 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (!TD->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
return LinkageInfo::none();
+ } else if (isa<MSGuidDecl>(D)) {
+ // A GUID behaves like an inline variable with external linkage. Fall
+ // through.
+
// Everything not covered here has no linkage.
} else {
return LinkageInfo::none();
@@ -1318,19 +1322,6 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
LV.isVisibilityExplicit());
}
-static inline const CXXRecordDecl*
-getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
- const CXXRecordDecl *Ret = Record;
- while (Record && Record->isLambda()) {
- Ret = Record;
- if (!Record->getParent()) break;
- // Get the Containing Class of this Lambda Class
- Record = dyn_cast_or_null<CXXRecordDecl>(
- Record->getParent()->getParent());
- }
- return Ret;
-}
-
LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
LVComputationKind computation,
bool IgnoreVarTypeLinkage) {
@@ -1396,25 +1387,9 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
return getInternalLinkageFor(D);
}
- // This lambda has its linkage/visibility determined:
- // - either by the outermost lambda if that lambda has no mangling
- // number.
- // - or by the parent of the outer most lambda
- // This prevents infinite recursion in settings such as nested lambdas
- // used in NSDMI's, for e.g.
- // struct L {
- // int t{};
- // int t2 = ([](int a) { return [](int b) { return b; };})(t)(t);
- // };
- const CXXRecordDecl *OuterMostLambda =
- getOutermostEnclosingLambda(Record);
- if (OuterMostLambda->hasKnownLambdaInternalLinkage() ||
- !OuterMostLambda->getLambdaManglingNumber())
- return getInternalLinkageFor(D);
-
return getLVForClosure(
- OuterMostLambda->getDeclContext()->getRedeclContext(),
- OuterMostLambda->getLambdaContextDecl(), computation);
+ Record->getDeclContext()->getRedeclContext(),
+ Record->getLambdaContextDecl(), computation);
}
break;
@@ -1571,10 +1546,19 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
return;
}
printNestedNameSpecifier(OS, P);
- if (getDeclName() || isa<DecompositionDecl>(this))
+ if (getDeclName())
OS << *this;
- else
- OS << "(anonymous)";
+ else {
+ // Give the printName override a chance to pick a different name before we
+ // fall back to "(anonymous)".
+ SmallString<64> NameBuffer;
+ llvm::raw_svector_ostream NameOS(NameBuffer);
+ printName(NameOS);
+ if (NameBuffer.empty())
+ OS << "(anonymous)";
+ else
+ OS << NameBuffer;
+ }
}
void NamedDecl::printNestedNameSpecifier(raw_ostream &OS) const {
@@ -1587,13 +1571,16 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
// For ObjC methods and properties, look through categories and use the
// interface as context.
- if (auto *MD = dyn_cast<ObjCMethodDecl>(this))
+ if (auto *MD = dyn_cast<ObjCMethodDecl>(this)) {
if (auto *ID = MD->getClassInterface())
Ctx = ID;
- if (auto *PD = dyn_cast<ObjCPropertyDecl>(this)) {
+ } else if (auto *PD = dyn_cast<ObjCPropertyDecl>(this)) {
if (auto *MD = PD->getGetterMethodDecl())
if (auto *ID = MD->getClassInterface())
Ctx = ID;
+ } else if (auto *ID = dyn_cast<ObjCIvarDecl>(this)) {
+ if (auto *CI = ID->getContainingInterface())
+ Ctx = CI;
}
if (Ctx->isFunctionOrMethod())
@@ -2981,7 +2968,8 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
}
-bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const {
+bool FunctionDecl::isReplaceableGlobalAllocationFunction(
+ Optional<unsigned> *AlignmentParam, bool *IsNothrow) const {
if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
return false;
if (getDeclName().getCXXOverloadedOperator() != OO_New &&
@@ -3028,9 +3016,9 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const
// In C++17, the next parameter can be a 'std::align_val_t' for aligned
// new/delete.
if (Ctx.getLangOpts().AlignedAllocation && !Ty.isNull() && Ty->isAlignValT()) {
- if (IsAligned)
- *IsAligned = true;
Consume();
+ if (AlignmentParam)
+ *AlignmentParam = Params;
}
// Finally, if this is not a sized delete, the final parameter can
@@ -3039,8 +3027,11 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const
Ty = Ty->getPointeeType();
if (Ty.getCVRQualifiers() != Qualifiers::Const)
return false;
- if (Ty->isNothrowT())
+ if (Ty->isNothrowT()) {
+ if (IsNothrow)
+ *IsNothrow = true;
Consume();
+ }
}
return Params == FPT->getNumParams();
@@ -3173,8 +3164,8 @@ FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
unsigned BuiltinID;
- if (const auto *AMAA = getAttr<ArmMveAliasAttr>()) {
- BuiltinID = AMAA->getBuiltinName()->getBuiltinID();
+ if (const auto *ABAA = getAttr<ArmBuiltinAliasAttr>()) {
+ BuiltinID = ABAA->getBuiltinName()->getBuiltinID();
} else {
if (!getIdentifier())
return 0;
@@ -3206,7 +3197,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
if (!ConsiderWrapperFunctions && hasAttr<OverloadableAttr>() &&
- !hasAttr<ArmMveAliasAttr>())
+ !hasAttr<ArmBuiltinAliasAttr>())
return 0;
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -3233,6 +3224,15 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
!(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
return 0;
+ // As the AMDGCN implementation of OpenMP does not have a device-side
+ // standard library, none of the predefined library functions except printf
+ // and malloc should be treated as a builtin, i.e. 0 should be returned for
+ // them.
+ if (Context.getTargetInfo().getTriple().isAMDGCN() &&
+ Context.getLangOpts().OpenMPIsDevice &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
+ !(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
+ return 0;
+
return BuiltinID;
}
@@ -3264,13 +3264,27 @@ unsigned FunctionDecl::getMinRequiredArguments() const {
if (!getASTContext().getLangOpts().CPlusPlus)
return getNumParams();
+ // Note that it is possible for a parameter with no default argument to
+ // follow a parameter with a default argument.
unsigned NumRequiredArgs = 0;
- for (auto *Param : parameters())
- if (!Param->isParameterPack() && !Param->hasDefaultArg())
- ++NumRequiredArgs;
+ unsigned MinParamsSoFar = 0;
+ for (auto *Param : parameters()) {
+ if (!Param->isParameterPack()) {
+ ++MinParamsSoFar;
+ if (!Param->hasDefaultArg())
+ NumRequiredArgs = MinParamsSoFar;
+ }
+ }
return NumRequiredArgs;
}
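+// An example of the note above (hypothetical names): given
+//
+//   template <typename ...T> struct A { void f(int = 0, T...); };
+//
+// instantiating A<int> expands the pack into a trailing parameter with no
+// default argument, so A<int>::f has a defaulted parameter followed by a
+// non-defaulted one and requires two arguments.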
+bool FunctionDecl::hasOneParamOrDefaultArgs() const {
+ return getNumParams() == 1 ||
+ (getNumParams() > 1 &&
+ std::all_of(param_begin() + 1, param_end(),
+ [](ParmVarDecl *P) { return P->hasDefaultArg(); }));
+}
+
/// The combination of the extern and inline keywords under MSVC forces
/// the function to be required.
///
@@ -3609,7 +3623,8 @@ bool FunctionDecl::isTemplateInstantiation() const {
return clang::isTemplateInstantiation(getTemplateSpecializationKind());
}
-FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+FunctionDecl *
+FunctionDecl::getTemplateInstantiationPattern(bool ForDefinition) const {
// If this is a generic lambda call operator specialization, its
// instantiation pattern is always its primary template's pattern
// even if its primary template was instantiated from another
@@ -3626,18 +3641,20 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
}
if (MemberSpecializationInfo *Info = getMemberSpecializationInfo()) {
- if (!clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
+ if (ForDefinition &&
+ !clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
return nullptr;
return getDefinitionOrSelf(cast<FunctionDecl>(Info->getInstantiatedFrom()));
}
- if (!clang::isTemplateInstantiation(getTemplateSpecializationKind()))
+ if (ForDefinition &&
+ !clang::isTemplateInstantiation(getTemplateSpecializationKind()))
return nullptr;
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
// If we hit a point where the user provided a specialization of this
// template, we're done looking.
- while (!Primary->isMemberSpecialization()) {
+ while (!ForDefinition || !Primary->isMemberSpecialization()) {
auto *NewPrimary = Primary->getInstantiatedFromMemberTemplate();
if (!NewPrimary)
break;
@@ -4422,6 +4439,21 @@ void RecordDecl::setCapturedRecord() {
addAttr(CapturedRecordAttr::CreateImplicit(getASTContext()));
}
+bool RecordDecl::isOrContainsUnion() const {
+ if (isUnion())
+ return true;
+
+ if (const RecordDecl *Def = getDefinition()) {
+ for (const FieldDecl *FD : Def->fields()) {
+ const RecordType *RT = FD->getType()->getAs<RecordType>();
+ if (RT && RT->getDecl()->isOrContainsUnion())
+ return true;
+ }
+ }
+
+ return false;
+}
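+// For example (hypothetical types):
+//
+//   union U { int i; };
+//   struct Inner { U u; };
+//   struct Outer { Inner in; };
+//
+// isOrContainsUnion() returns true for U, Inner, and Outer.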
+
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage())
LoadFieldsFromExternalStorage();
@@ -4493,11 +4525,11 @@ bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
ReasonToReject = 5; // is standard layout.
else if (Blacklist.isBlacklistedLocation(EnabledAsanMask, getLocation(),
"field-padding"))
- ReasonToReject = 6; // is in a blacklisted file.
+ ReasonToReject = 6; // is in an excluded file.
else if (Blacklist.isBlacklistedType(EnabledAsanMask,
getQualifiedNameAsString(),
"field-padding"))
- ReasonToReject = 7; // is blacklisted.
+ ReasonToReject = 7; // The type is excluded.
if (EmitRemark) {
if (ReasonToReject >= 0)
@@ -4921,7 +4953,8 @@ static unsigned getNumModuleIdentifiers(Module *Mod) {
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs)
- : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true) {
+ : Decl(Import, DC, StartLoc), ImportedModule(Imported),
+ NextLocalImportAndComplete(nullptr, true) {
assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
auto *StoredLocs = getTrailingObjects<SourceLocation>();
std::uninitialized_copy(IdentifierLocs.begin(), IdentifierLocs.end(),
@@ -4930,7 +4963,8 @@ ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported, SourceLocation EndLoc)
- : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false) {
+ : Decl(Import, DC, StartLoc), ImportedModule(Imported),
+ NextLocalImportAndComplete(nullptr, false) {
*getTrailingObjects<SourceLocation>() = EndLoc;
}
@@ -4959,7 +4993,7 @@ ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
}
ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
- if (!ImportedAndComplete.getInt())
+ if (!isImportComplete())
return None;
const auto *StoredLocs = getTrailingObjects<SourceLocation>();
@@ -4968,7 +5002,7 @@ ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
}
SourceRange ImportDecl::getSourceRange() const {
- if (!ImportedAndComplete.getInt())
+ if (!isImportComplete())
return SourceRange(getLocation(), *getTrailingObjects<SourceLocation>());
return SourceRange(getLocation(), getIdentifierLocs().back());
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index cb7c7fcbd4b8..da1eadd9d931 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -240,15 +240,47 @@ TemplateDecl *Decl::getDescribedTemplate() const {
return nullptr;
}
+const TemplateParameterList *Decl::getDescribedTemplateParams() const {
+ if (auto *TD = getDescribedTemplate())
+ return TD->getTemplateParameters();
+ if (auto *CTPSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(this))
+ return CTPSD->getTemplateParameters();
+ if (auto *VTPSD = dyn_cast<VarTemplatePartialSpecializationDecl>(this))
+ return VTPSD->getTemplateParameters();
+ return nullptr;
+}
+
bool Decl::isTemplated() const {
- // A declaration is dependent if it is a template or a template pattern, or
+ // A declaration is templated if it is a template or a template pattern, or
// is within (lexcially for a friend, semantically otherwise) a dependent
// context.
// FIXME: Should local extern declarations be treated like friends?
if (auto *AsDC = dyn_cast<DeclContext>(this))
return AsDC->isDependentContext();
auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
- return DC->isDependentContext() || isTemplateDecl() || getDescribedTemplate();
+ return DC->isDependentContext() || isTemplateDecl() ||
+ getDescribedTemplateParams();
+}
+
+unsigned Decl::getTemplateDepth() const {
+ if (auto *DC = dyn_cast<DeclContext>(this))
+ if (DC->isFileContext())
+ return 0;
+
+ if (auto *TPL = getDescribedTemplateParams())
+ return TPL->getDepth() + 1;
+
+ // If this is a dependent lambda, there might be an enclosing variable
+ // template. In this case, the next step is not the parent DeclContext (or
+ // even a DeclContext at all).
+ auto *RD = dyn_cast<CXXRecordDecl>(this);
+ if (RD && RD->isDependentLambda())
+ if (Decl *Context = RD->getLambdaContextDecl())
+ return Context->getTemplateDepth();
+
+ const DeclContext *DC =
+ getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
+ return cast<Decl>(DC)->getTemplateDepth();
}
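+// The lambda special case covers declarations such as (hypothetical):
+//
+//   template <typename T> auto v = [](T x) { return x; };
+//
+// The closure type's semantic DeclContext is the enclosing namespace, which
+// would give depth 0; going through the lambda context decl 'v' yields the
+// depth of the variable template instead.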
const DeclContext *Decl::getParentFunctionOrMethod() const {
@@ -332,8 +364,10 @@ void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
}
}
-bool Decl::isInLocalScope() const {
+bool Decl::isInLocalScopeForInstantiation() const {
const DeclContext *LDC = getLexicalDeclContext();
+ if (!LDC->isDependentContext())
+ return false;
while (true) {
if (LDC->isFunctionOrMethod())
return true;
@@ -381,6 +415,12 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
+/// Helper to get the language options from the ASTContext.
+/// Defined out of line to avoid depending on ASTContext.h.
+const LangOptions &Decl::getLangOpts() const {
+ return getASTContext().getLangOpts();
+}
+
ASTMutationListener *Decl::getASTMutationListener() const {
return getASTContext().getASTMutationListener();
}
@@ -393,8 +433,10 @@ unsigned Decl::getMaxAlignment() const {
const AttrVec &V = getAttrs();
ASTContext &Ctx = getASTContext();
specific_attr_iterator<AlignedAttr> I(V.begin()), E(V.end());
- for (; I != E; ++I)
- Align = std::max(Align, I->getAlignment(Ctx));
+ for (; I != E; ++I) {
+ if (!I->isAlignmentErrorDependent())
+ Align = std::max(Align, I->getAlignment(Ctx));
+ }
return Align;
}
@@ -457,7 +499,8 @@ ExternalSourceSymbolAttr *Decl::getExternalSourceSymbolAttr() const {
}
bool Decl::hasDefiningAttr() const {
- return hasAttr<AliasAttr>() || hasAttr<IFuncAttr>();
+ return hasAttr<AliasAttr>() || hasAttr<IFuncAttr>() ||
+ hasAttr<LoaderUninitializedAttr>();
}
const Attr *Decl::getDefiningAttr() const {
@@ -465,6 +508,8 @@ const Attr *Decl::getDefiningAttr() const {
return AA;
if (auto *IFA = getAttr<IFuncAttr>())
return IFA;
+ if (auto *NZA = getAttr<LoaderUninitializedAttr>())
+ return NZA;
return nullptr;
}
@@ -590,7 +635,7 @@ AvailabilityResult Decl::getAvailability(std::string *Message,
continue;
if (Message)
- ResultMessage = Deprecated->getMessage();
+ ResultMessage = std::string(Deprecated->getMessage());
Result = AR_Deprecated;
continue;
@@ -598,7 +643,7 @@ AvailabilityResult Decl::getAvailability(std::string *Message,
if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
if (Message)
- *Message = Unavailable->getMessage();
+ *Message = std::string(Unavailable->getMessage());
return AR_Unavailable;
}
@@ -789,6 +834,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case TranslationUnit:
case ExternCContext:
case Decomposition:
+ case MSGuid:
case UsingDirective:
case BuiltinTemplate:
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index 227fe80ccab4..6f1fd2f14ede 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -42,6 +42,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -83,10 +84,12 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasInheritedConstructor(false), HasInheritedAssignment(false),
NeedOverloadResolutionForCopyConstructor(false),
NeedOverloadResolutionForMoveConstructor(false),
+ NeedOverloadResolutionForCopyAssignment(false),
NeedOverloadResolutionForMoveAssignment(false),
NeedOverloadResolutionForDestructor(false),
DefaultedCopyConstructorIsDeleted(false),
DefaultedMoveConstructorIsDeleted(false),
+ DefaultedCopyAssignmentIsDeleted(false),
DefaultedMoveAssignmentIsDeleted(false),
DefaultedDestructorIsDeleted(false), HasTrivialSpecialMembers(SMF_All),
HasTrivialSpecialMembersForCall(SMF_All),
@@ -434,10 +437,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
// Keep track of the presence of mutable fields.
- if (BaseClassDecl->hasMutableFields()) {
+ if (BaseClassDecl->hasMutableFields())
data().HasMutableFields = true;
- data().NeedOverloadResolutionForCopyConstructor = true;
- }
if (BaseClassDecl->hasUninitializedReferenceMember())
data().HasUninitializedReferenceMember = true;
@@ -510,6 +511,8 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// -- a direct or virtual base class B that cannot be copied/moved [...]
// -- a non-static data member of class type M (or array thereof)
// that cannot be copied or moved [...]
+ if (!Subobj->hasSimpleCopyAssignment())
+ data().NeedOverloadResolutionForCopyAssignment = true;
if (!Subobj->hasSimpleMoveAssignment())
data().NeedOverloadResolutionForMoveAssignment = true;
@@ -663,10 +666,9 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const {
// C++17 [expr.prim.lambda]p21:
// The closure type associated with a lambda-expression has no default
// constructor and a deleted copy assignment operator.
- if (getLambdaCaptureDefault() != LCD_None ||
- getLambdaData().NumCaptures != 0)
+ if (getLambdaCaptureDefault() != LCD_None || capture_size() != 0)
return false;
- return getASTContext().getLangOpts().CPlusPlus2a;
+ return getASTContext().getLangOpts().CPlusPlus20;
}
void CXXRecordDecl::addedMember(Decl *D) {
@@ -782,7 +784,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// C++20 [dcl.init.aggr]p1:
// An aggregate is an array or a class with no user-declared [...]
// constructors
- if (getASTContext().getLangOpts().CPlusPlus2a
+ if (getASTContext().getLangOpts().CPlusPlus20
? !Constructor->isImplicit()
: (Constructor->isUserProvided() || Constructor->isExplicit()))
data().Aggregate = false;
@@ -978,10 +980,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Keep track of the presence of mutable fields.
- if (Field->isMutable()) {
+ if (Field->isMutable())
data().HasMutableFields = true;
- data().NeedOverloadResolutionForCopyConstructor = true;
- }
// C++11 [class.union]p8, DR1460:
// If X is a union, a non-static data member of X that is not an anonymous
@@ -1025,10 +1025,12 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (isUnion()) {
data().DefaultedCopyConstructorIsDeleted = true;
data().DefaultedMoveConstructorIsDeleted = true;
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
data().DefaultedDestructorIsDeleted = true;
data().NeedOverloadResolutionForCopyConstructor = true;
data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
data().NeedOverloadResolutionForMoveAssignment = true;
data().NeedOverloadResolutionForDestructor = true;
}
@@ -1095,8 +1097,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
// A defaulted copy/move assignment operator for a class X is defined
// as deleted if X has:
// -- a non-static data member of reference type
- if (T->isReferenceType())
+ if (T->isReferenceType()) {
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
+ }
// Bitfields of length 0 are also zero-sized, but we already bailed out for
// those because they are always unnamed.
@@ -1115,6 +1119,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// parameter.
data().NeedOverloadResolutionForCopyConstructor = true;
data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
data().NeedOverloadResolutionForMoveAssignment = true;
}
@@ -1128,6 +1133,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().DefaultedCopyConstructorIsDeleted = true;
if (FieldRec->hasNonTrivialMoveConstructor())
data().DefaultedMoveConstructorIsDeleted = true;
+ if (FieldRec->hasNonTrivialCopyAssignment())
+ data().DefaultedCopyAssignmentIsDeleted = true;
if (FieldRec->hasNonTrivialMoveAssignment())
data().DefaultedMoveAssignmentIsDeleted = true;
if (FieldRec->hasNonTrivialDestructor())
@@ -1141,6 +1148,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
FieldRec->data().NeedOverloadResolutionForCopyConstructor;
data().NeedOverloadResolutionForMoveConstructor |=
FieldRec->data().NeedOverloadResolutionForMoveConstructor;
+ data().NeedOverloadResolutionForCopyAssignment |=
+ FieldRec->data().NeedOverloadResolutionForCopyAssignment;
data().NeedOverloadResolutionForMoveAssignment |=
FieldRec->data().NeedOverloadResolutionForMoveAssignment;
data().NeedOverloadResolutionForDestructor |=
@@ -1238,9 +1247,15 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Keep track of the presence of mutable fields.
- if (FieldRec->hasMutableFields()) {
+ if (FieldRec->hasMutableFields())
data().HasMutableFields = true;
+
+ if (Field->isMutable()) {
+ // Our copy constructor/assignment might call something other than
+ // the subobject's copy constructor/assignment if it's mutable and of
+ // class type.
data().NeedOverloadResolutionForCopyConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
}
// C++11 [class.copy]p13:
@@ -1288,7 +1303,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// Base element type of field is a non-class type.
if (!T->isLiteralType(Context) ||
(!Field->hasInClassInitializer() && !isUnion() &&
- !Context.getLangOpts().CPlusPlus2a))
+ !Context.getLangOpts().CPlusPlus20))
data().DefaultedDefaultConstructorIsConstexpr = false;
// C++11 [class.copy]p23:
@@ -1296,8 +1311,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
// as deleted if X has:
// -- a non-static data member of const non-class type (or array
// thereof)
- if (T.isConstQualified())
+ if (T.isConstQualified()) {
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
+ }
}
// C++14 [meta.unary.prop]p4:
@@ -1366,6 +1383,27 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
data().DeclaredNonTrivialSpecialMembers |= SMKind;
}
+void CXXRecordDecl::setCaptures(ArrayRef<LambdaCapture> Captures) {
+ ASTContext &Context = getASTContext();
+ CXXRecordDecl::LambdaDefinitionData &Data = getLambdaData();
+
+ // Copy captures.
+ Data.NumCaptures = Captures.size();
+ Data.NumExplicitCaptures = 0;
+ Data.Captures = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) *
+ Captures.size());
+ LambdaCapture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ if (!lambdaIsDefaultConstructibleAndAssignable())
+ Data.DefaultedCopyAssignmentIsDeleted = true;
+}
+
void CXXRecordDecl::setTrivialForCallFlags(CXXMethodDecl *D) {
unsigned SMKind = 0;
@@ -1923,6 +1961,18 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
}
+bool CXXRecordDecl::isEffectivelyFinal() const {
+ auto *Def = getDefinition();
+ if (!Def)
+ return false;
+ if (Def->hasAttr<FinalAttr>())
+ return true;
+ if (const auto *Dtor = Def->getDestructor())
+ if (Dtor->hasAttr<FinalAttr>())
+ return true;
+ return false;
+}
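+// For example (hypothetical): a class like
+//
+//   struct A { virtual ~A() final; };
+//
+// is effectively final even without marking the class itself 'final': any
+// derived class's destructor would override a 'final' function, so nothing
+// can validly derive from A and calls through A can be devirtualized.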
+
void CXXDeductionGuideDecl::anchor() {}
bool ExplicitSpecifier::isEquivalent(const ExplicitSpecifier Other) const {
@@ -2140,14 +2190,10 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
return DevirtualizedMethod;
// Similarly, if the class itself or its destructor is marked 'final',
- // the class can't be derived from and we can therefore devirtualize the
+ // the class can't be derived from and we can therefore devirtualize the
// member function call.
- if (BestDynamicDecl->hasAttr<FinalAttr>())
+ if (BestDynamicDecl->isEffectivelyFinal())
return DevirtualizedMethod;
- if (const auto *dtor = BestDynamicDecl->getDestructor()) {
- if (dtor->hasAttr<FinalAttr>())
- return DevirtualizedMethod;
- }
if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
@@ -2356,17 +2402,15 @@ QualType CXXMethodDecl::getThisType() const {
// volatile X*, and if the member function is declared const volatile,
// the type of this is const volatile X*.
assert(isInstance() && "No 'this' for static methods!");
-
- return CXXMethodDecl::getThisType(getType()->getAs<FunctionProtoType>(),
+ return CXXMethodDecl::getThisType(getType()->castAs<FunctionProtoType>(),
getParent());
}
QualType CXXMethodDecl::getThisObjectType() const {
// Ditto getThisType.
assert(isInstance() && "No 'this' for static methods!");
-
- return CXXMethodDecl::getThisObjectType(getType()->getAs<FunctionProtoType>(),
- getParent());
+ return CXXMethodDecl::getThisObjectType(
+ getType()->castAs<FunctionProtoType>(), getParent());
}
bool CXXMethodDecl::hasInlineBody() const {
@@ -2542,11 +2586,11 @@ CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
}
bool CXXConstructorDecl::isDefaultConstructor() const {
- // C++ [class.ctor]p5:
- // A default constructor for a class X is a constructor of class
- // X that can be called without an argument.
- return (getNumParams() == 0) ||
- (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg());
+ // C++ [class.default.ctor]p1:
+ // A default constructor for a class X is a constructor of class X for
+ // which each parameter that is not a function parameter pack has a default
+ // argument (including the case of a constructor with no parameters)
+ return getMinRequiredArguments() == 0;
}
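+// For example (hypothetical), 'struct X { X(int = 0); };' declares a default
+// constructor: every parameter has a default argument, so
+// getMinRequiredArguments() is 0 and 'X x;' is well-formed.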
bool
@@ -2557,7 +2601,7 @@ CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
return isCopyOrMoveConstructor(TypeQuals) &&
- getParamDecl(0)->getType()->isRValueReferenceType();
+ getParamDecl(0)->getType()->isRValueReferenceType();
}
/// Determine whether this is a copy or move constructor.
@@ -2572,10 +2616,8 @@ bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// first parameter is of type X&&, const X&&, volatile X&&, or
// const volatile X&&, and either there are no other parameters or else
// all other parameters have default arguments.
- if ((getNumParams() < 1) ||
- (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
- (getPrimaryTemplate() != nullptr) ||
- (getDescribedFunctionTemplate() != nullptr))
+ if (!hasOneParamOrDefaultArgs() || getPrimaryTemplate() != nullptr ||
+ getDescribedFunctionTemplate() != nullptr)
return false;
const ParmVarDecl *Param = getParamDecl(0);
@@ -2612,18 +2654,16 @@ bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
if (isExplicit() && !AllowExplicit)
return false;
- return (getNumParams() == 0 &&
- getType()->castAs<FunctionProtoType>()->isVariadic()) ||
- (getNumParams() == 1) ||
- (getNumParams() > 1 &&
- (getParamDecl(1)->hasDefaultArg() ||
- getParamDecl(1)->isParameterPack()));
+ // FIXME: This has nothing to do with the definition of converting
+ // constructor, but is convenient for how we use this function in overload
+ // resolution.
+ return getNumParams() == 0
+ ? getType()->castAs<FunctionProtoType>()->isVariadic()
+ : getMinRequiredArguments() <= 1;
}
bool CXXConstructorDecl::isSpecializationCopyingObject() const {
- if ((getNumParams() < 1) ||
- (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
- (getDescribedFunctionTemplate() != nullptr))
+ if (!hasOneParamOrDefaultArgs() || getDescribedFunctionTemplate() != nullptr)
return false;
const ParmVarDecl *Param = getParamDecl(0);
@@ -3088,7 +3128,7 @@ VarDecl *BindingDecl::getHoldingVar() const {
if (!DRE)
return nullptr;
- auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ auto *VD = cast<VarDecl>(DRE->getDecl());
assert(VD->isImplicit() && "holding var for binding decl not implicit");
return VD;
}
@@ -3151,6 +3191,102 @@ MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
SourceLocation(), nullptr, nullptr);
}
+void MSGuidDecl::anchor() {}
+
+MSGuidDecl::MSGuidDecl(DeclContext *DC, QualType T, Parts P)
+ : ValueDecl(Decl::MSGuid, DC, SourceLocation(), DeclarationName(), T),
+ PartVal(P), APVal() {}
+
+MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) {
+ DeclContext *DC = C.getTranslationUnitDecl();
+ return new (C, DC) MSGuidDecl(DC, T, P);
+}
+
+MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) MSGuidDecl(nullptr, QualType(), Parts());
+}
+
+void MSGuidDecl::printName(llvm::raw_ostream &OS) const {
+ OS << llvm::format("GUID{%08" PRIx32 "-%04" PRIx16 "-%04" PRIx16 "-",
+ PartVal.Part1, PartVal.Part2, PartVal.Part3);
+ unsigned I = 0;
+ for (uint8_t Byte : PartVal.Part4And5) {
+ OS << llvm::format("%02" PRIx8, Byte);
+ if (++I == 2)
+ OS << '-';
+ }
+ OS << '}';
+}
+
+/// Determine if T is a valid 'struct _GUID' of the shape that we expect.
+static bool isValidStructGUID(ASTContext &Ctx, QualType T) {
+ // FIXME: We only need to check this once, not once each time we compute a
+ // GUID APValue.
+ using MatcherRef = llvm::function_ref<bool(QualType)>;
+
+ auto IsInt = [&Ctx](unsigned N) {
+ return [&Ctx, N](QualType T) {
+ return T->isUnsignedIntegerOrEnumerationType() &&
+ Ctx.getIntWidth(T) == N;
+ };
+ };
+
+ auto IsArray = [&Ctx](MatcherRef Elem, unsigned N) {
+ return [&Ctx, Elem, N](QualType T) {
+ const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(T);
+ return CAT && CAT->getSize() == N && Elem(CAT->getElementType());
+ };
+ };
+
+ auto IsStruct = [](std::initializer_list<MatcherRef> Fields) {
+ return [Fields](QualType T) {
+ const RecordDecl *RD = T->getAsRecordDecl();
+ if (!RD || RD->isUnion())
+ return false;
+ RD = RD->getDefinition();
+ if (!RD)
+ return false;
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->getNumBases())
+ return false;
+ auto MatcherIt = Fields.begin();
+ for (const FieldDecl *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield()) continue;
+ if (FD->isBitField() || MatcherIt == Fields.end() ||
+ !(*MatcherIt)(FD->getType()))
+ return false;
+ ++MatcherIt;
+ }
+ return MatcherIt == Fields.end();
+ };
+ };
+
+ // We expect an {i32, i16, i16, [8 x i8]}.
+ return IsStruct({IsInt(32), IsInt(16), IsInt(16), IsArray(IsInt(8), 8)})(T);
+}
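+// That shape matches a declaration along the lines of (assumed layout):
+//
+//   struct _GUID {
+//     uint32_t Data1;
+//     uint16_t Data2;
+//     uint16_t Data3;
+//     uint8_t  Data4[8];
+//   };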
+
+APValue &MSGuidDecl::getAsAPValue() const {
+ if (APVal.isAbsent() && isValidStructGUID(getASTContext(), getType())) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ APVal = APValue(APValue::UninitStruct(), 0, 4);
+ APVal.getStructField(0) = APValue(APSInt(APInt(32, PartVal.Part1), true));
+ APVal.getStructField(1) = APValue(APSInt(APInt(16, PartVal.Part2), true));
+ APVal.getStructField(2) = APValue(APSInt(APInt(16, PartVal.Part3), true));
+ APValue &Arr = APVal.getStructField(3) =
+ APValue(APValue::UninitArray(), 8, 8);
+ for (unsigned I = 0; I != 8; ++I) {
+ Arr.getArrayInitializedElt(I) =
+ APValue(APSInt(APInt(8, PartVal.Part4And5[I]), true));
+ }
+ // Register this APValue to be destroyed if necessary. (Note that the
+ // MSGuidDecl destructor is never run.)
+ getASTContext().addDestruction(&APVal);
+ }
+
+ return APVal;
+}
+
static const char *getAccessName(AccessSpecifier AS) {
switch (AS) {
case AS_none:
diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
index 9a84e3c4a510..5c8b34731f36 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
@@ -94,7 +94,7 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
// methods there.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden() && !AllowHidden)
+ if (!Def->isUnconditionallyVisible() && !AllowHidden)
return nullptr;
}
@@ -146,7 +146,8 @@ bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
// auto-synthesized).
for (const auto *P : Cat->properties())
if (P->getIdentifier() == Property->getIdentifier()) {
- if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
+ if (P->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readwrite)
return true;
break;
}
@@ -180,7 +181,7 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
// property.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden())
+ if (!Def->isUnconditionallyVisible())
return nullptr;
}
@@ -238,7 +239,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
// Don't find properties within hidden protocol definitions.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden())
+ if (!Def->isUnconditionallyVisible())
return nullptr;
}
@@ -1361,25 +1362,23 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
return Found;
} else {
// Determine whether the container is a class.
- ClassDecl = dyn_cast<ObjCInterfaceDecl>(Container);
+ ClassDecl = cast<ObjCInterfaceDecl>(Container);
}
+ assert(ClassDecl && "Failed to find main class");
// If we have a class, check its visible extensions.
- if (ClassDecl) {
- for (const auto *Ext : ClassDecl->visible_extensions()) {
- if (Ext == Container)
- continue;
-
- if (const auto *Found = findMatchingProperty(Ext))
- return Found;
- }
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (Ext == Container)
+ continue;
+ if (const auto *Found = findMatchingProperty(Ext))
+ return Found;
}
assert(isSynthesizedAccessorStub() && "expected an accessor stub");
+
for (const auto *Cat : ClassDecl->known_categories()) {
if (Cat == Container)
continue;
-
if (const auto *Found = findMatchingProperty(Cat))
return Found;
}
@@ -1920,7 +1919,7 @@ ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
// If there is no definition or the definition is hidden, we don't find
// anything.
const ObjCProtocolDecl *Def = getDefinition();
- if (!Def || Def->isHidden())
+ if (!Def || !Def->isUnconditionallyVisible())
return nullptr;
if ((MethodDecl = getMethod(Sel, isInstance)))
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index 4cedcbed0644..2e48b2b46c4d 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -105,6 +105,8 @@ namespace {
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
+ void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP);
+ void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *NTTP);
void printTemplateParameters(const TemplateParameterList *Params,
bool OmitTemplateKW = false);
@@ -287,12 +289,10 @@ void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
}
void DeclPrinter::Print(AccessSpecifier AS) {
- switch(AS) {
- case AS_none: llvm_unreachable("No access specifier!");
- case AS_public: Out << "public"; break;
- case AS_protected: Out << "protected"; break;
- case AS_private: Out << "private"; break;
- }
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ llvm_unreachable("No access specifier!");
+ Out << AccessSpelling;
}
void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
@@ -530,7 +530,7 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
Out << ' ' << *D;
- if (D->isFixed() && D->getASTContext().getLangOpts().CPlusPlus11)
+ if (D->isFixed())
Out << " : " << D->getIntegerType().stream(Policy);
if (D->isCompleteDefinition()) {
@@ -1051,37 +1051,10 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
else
NeedComma = true;
- if (auto TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
-
- if (const TypeConstraint *TC = TTP->getTypeConstraint())
- TC->print(Out, Policy);
- else if (TTP->wasDeclaredWithTypename())
- Out << "typename";
- else
- Out << "class";
-
- if (TTP->isParameterPack())
- Out << " ...";
- else if (!TTP->getName().empty())
- Out << ' ';
-
- Out << *TTP;
-
- if (TTP->hasDefaultArgument()) {
- Out << " = ";
- Out << TTP->getDefaultArgument().getAsString(Policy);
- };
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ VisitTemplateTypeParmDecl(TTP);
} else if (auto NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- StringRef Name;
- if (IdentifierInfo *II = NTTP->getIdentifier())
- Name = II->getName();
- printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
-
- if (NTTP->hasDefaultArgument()) {
- Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy,
- Indentation);
- }
+ VisitNonTypeTemplateParmDecl(NTTP);
} else if (auto TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
VisitTemplateDecl(TTPD);
// FIXME: print the default argument, if present.
@@ -1401,7 +1374,12 @@ void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
}
void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
- Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n";
+ Out << "@implementation ";
+ if (const auto *CID = PID->getClassInterface())
+ Out << *CID;
+ else
+ Out << "<<error-type>>";
+ Out << '(' << *PID << ")\n";
VisitDeclContext(PID, false);
Out << "@end";
@@ -1409,7 +1387,11 @@ void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
}
void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
- Out << "@interface " << *PID->getClassInterface();
+ Out << "@interface ";
+ if (const auto *CID = PID->getClassInterface())
+ Out << *CID;
+ else
+ Out << "<<error-type>>";
if (auto TypeParams = PID->getTypeParamList()) {
PrintObjCTypeParams(TypeParams);
}
@@ -1453,85 +1435,83 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
QualType T = PDecl->getType();
Out << "@property";
- if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ if (PDecl->getPropertyAttributes() != ObjCPropertyAttribute::kind_noattr) {
bool first = true;
Out << "(";
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_class) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_class) {
Out << (first ? "" : ", ") << "class";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_direct) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_direct) {
Out << (first ? "" : ", ") << "direct";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ ObjCPropertyAttribute::kind_nonatomic) {
Out << (first ? "" : ", ") << "nonatomic";
first = false;
}
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_atomic) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic) {
Out << (first ? "" : ", ") << "atomic";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_assign) {
Out << (first ? "" : ", ") << "assign";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) {
Out << (first ? "" : ", ") << "retain";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_strong) {
Out << (first ? "" : ", ") << "strong";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) {
Out << (first ? "" : ", ") << "copy";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) {
Out << (first ? "" : ", ") << "weak";
first = false;
}
- if (PDecl->getPropertyAttributes()
- & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_unsafe_unretained) {
Out << (first ? "" : ", ") << "unsafe_unretained";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readwrite) {
+ ObjCPropertyAttribute::kind_readwrite) {
Out << (first ? "" : ", ") << "readwrite";
first = false;
}
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly) {
Out << (first ? "" : ", ") << "readonly";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
Out << (first ? "" : ", ") << "getter = ";
PDecl->getGetterName().print(Out);
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
Out << (first ? "" : ", ") << "setter = ";
PDecl->getSetterName().print(Out);
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_nullability) {
+ ObjCPropertyAttribute::kind_nullability) {
if (auto nullability = AttributedType::stripOuterNullability(T)) {
if (*nullability == NullabilityKind::Unspecified &&
(PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable)) {
+ ObjCPropertyAttribute::kind_null_resettable)) {
Out << (first ? "" : ", ") << "null_resettable";
} else {
Out << (first ? "" : ", ")
@@ -1705,3 +1685,36 @@ void DeclPrinter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
D->getInit()->printPretty(Out, nullptr, Policy, Indentation);
}
+void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
+ if (const TypeConstraint *TC = TTP->getTypeConstraint())
+ TC->print(Out, Policy);
+ else if (TTP->wasDeclaredWithTypename())
+ Out << "typename";
+ else
+ Out << "class";
+
+ if (TTP->isParameterPack())
+ Out << " ...";
+ else if (!TTP->getName().empty())
+ Out << ' ';
+
+ Out << *TTP;
+
+ if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+ }
+}
+
+void DeclPrinter::VisitNonTypeTemplateParmDecl(
+ const NonTypeTemplateParmDecl *NTTP) {
+ StringRef Name;
+ if (IdentifierInfo *II = NTTP->getIdentifier())
+ Name = II->getName();
+ printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
+
+ if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation);
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index b5e4ec2d7f43..7857e75f57a1 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
@@ -693,7 +693,7 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
QualType T, bool ParameterPack,
TypeSourceInfo *TInfo) {
AutoType *AT =
- C.getLangOpts().CPlusPlus2a ? T->getContainedAutoType() : nullptr;
+ C.getLangOpts().CPlusPlus20 ? T->getContainedAutoType() : nullptr;
return new (C, DC,
additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>,
Expr *>(0,
@@ -1430,4 +1430,4 @@ void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const {
ArgLoc.getArgument().print(Policy, OS);
OS << ">";
}
-}
\ No newline at end of file
+}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
index 4eb11bc57e52..ecf676c9936d 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
@@ -138,8 +139,19 @@ void DeclarationName::print(raw_ostream &OS,
const PrintingPolicy &Policy) const {
switch (getNameKind()) {
case DeclarationName::Identifier:
- if (const IdentifierInfo *II = getAsIdentifierInfo())
- OS << II->getName();
+ if (const IdentifierInfo *II = getAsIdentifierInfo()) {
+ StringRef Name = II->getName();
+ // If this is a mangled OpenMP variant name we strip off the mangling for
+ // printing. It should not be visible to the user at all.
+ if (II->isMangledOpenMPVariantName()) {
+ std::pair<StringRef, StringRef> NameContextPair =
+ Name.split(getOpenMPVariantManglingSeparatorStr());
+ OS << NameContextPair.first << "["
+ << OMPTraitInfo(NameContextPair.second) << "]";
+ } else {
+ OS << Name;
+ }
+ }
return;
case DeclarationName::ObjCZeroArgSelector:
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index fea7d606f261..399e7e13c445 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -14,9 +14,11 @@
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
@@ -242,6 +244,7 @@ static void AssertResultStorageKind(ConstantExpr::ResultStorageKind Kind) {
assert((Kind == ConstantExpr::RSK_APValue ||
Kind == ConstantExpr::RSK_Int64 || Kind == ConstantExpr::RSK_None) &&
"Invalid StorageKind Value");
+ (void)Kind;
}
ConstantExpr::ResultStorageKind
@@ -266,29 +269,31 @@ ConstantExpr::getStorageKind(const Type *T, const ASTContext &Context) {
return ConstantExpr::RSK_APValue;
}
-void ConstantExpr::DefaultInit(ResultStorageKind StorageKind) {
+ConstantExpr::ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind,
+ bool IsImmediateInvocation)
+ : FullExpr(ConstantExprClass, SubExpr) {
ConstantExprBits.ResultKind = StorageKind;
ConstantExprBits.APValueKind = APValue::None;
+ ConstantExprBits.IsUnsigned = false;
+ ConstantExprBits.BitWidth = 0;
ConstantExprBits.HasCleanup = false;
+ ConstantExprBits.IsImmediateInvocation = IsImmediateInvocation;
+
if (StorageKind == ConstantExpr::RSK_APValue)
::new (getTrailingObjects<APValue>()) APValue();
}
-ConstantExpr::ConstantExpr(Expr *subexpr, ResultStorageKind StorageKind)
- : FullExpr(ConstantExprClass, subexpr) {
- DefaultInit(StorageKind);
-}
-
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
- ResultStorageKind StorageKind) {
+ ResultStorageKind StorageKind,
+ bool IsImmediateInvocation) {
assert(!isa<ConstantExpr>(E));
AssertResultStorageKind(StorageKind);
+
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
StorageKind == ConstantExpr::RSK_APValue,
StorageKind == ConstantExpr::RSK_Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
- ConstantExpr *Self = new (Mem) ConstantExpr(E, StorageKind);
- return Self;
+ return new (Mem) ConstantExpr(E, StorageKind, IsImmediateInvocation);
}
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
@@ -299,25 +304,27 @@ ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
return Self;
}
-ConstantExpr::ConstantExpr(ResultStorageKind StorageKind, EmptyShell Empty)
+ConstantExpr::ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind)
: FullExpr(ConstantExprClass, Empty) {
- DefaultInit(StorageKind);
+ ConstantExprBits.ResultKind = StorageKind;
+
+ if (StorageKind == ConstantExpr::RSK_APValue)
+ ::new (getTrailingObjects<APValue>()) APValue();
}
ConstantExpr *ConstantExpr::CreateEmpty(const ASTContext &Context,
- ResultStorageKind StorageKind,
- EmptyShell Empty) {
+ ResultStorageKind StorageKind) {
AssertResultStorageKind(StorageKind);
+
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
StorageKind == ConstantExpr::RSK_APValue,
StorageKind == ConstantExpr::RSK_Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
- ConstantExpr *Self = new (Mem) ConstantExpr(StorageKind, Empty);
- return Self;
+ return new (Mem) ConstantExpr(EmptyShell(), StorageKind);
}
void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) {
- assert(getStorageKind(Value) == ConstantExprBits.ResultKind &&
+ assert((unsigned)getStorageKind(Value) <= ConstantExprBits.ResultKind &&
"Invalid storage for this value kind");
ConstantExprBits.APValueKind = Value.getKind();
switch (ConstantExprBits.ResultKind) {
@@ -352,6 +359,8 @@ llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
}
APValue ConstantExpr::getAPValueResult() const {
+ assert(hasAPValueResult());
+
switch (ConstantExprBits.ResultKind) {
case ConstantExpr::RSK_APValue:
return APValueResult();
@@ -365,125 +374,12 @@ APValue ConstantExpr::getAPValueResult() const {
llvm_unreachable("invalid ResultKind");
}
-/// Compute the type-, value-, and instantiation-dependence of a
-/// declaration reference
-/// based on the declaration being referenced.
-static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
- QualType T, bool &TypeDependent,
- bool &ValueDependent,
- bool &InstantiationDependent) {
- TypeDependent = false;
- ValueDependent = false;
- InstantiationDependent = false;
-
- // (TD) C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- //
- // and
- //
- // (VD) C++ [temp.dep.constexpr]p2:
- // An identifier is value-dependent if it is:
-
- // (TD) - an identifier that was declared with dependent type
- // (VD) - a name declared with a dependent type,
- if (T->isDependentType()) {
- TypeDependent = true;
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- } else if (T->isInstantiationDependentType()) {
- InstantiationDependent = true;
- }
-
- // (TD) - a conversion-function-id that specifies a dependent type
- if (D->getDeclName().getNameKind()
- == DeclarationName::CXXConversionFunctionName) {
- QualType T = D->getDeclName().getCXXNameType();
- if (T->isDependentType()) {
- TypeDependent = true;
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- }
-
- if (T->isInstantiationDependentType())
- InstantiationDependent = true;
- }
-
- // (VD) - the name of a non-type template parameter,
- if (isa<NonTypeTemplateParmDecl>(D)) {
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- }
-
- // (VD) - a constant with integral or enumeration type and is
- // initialized with an expression that is value-dependent.
- // (VD) - a constant with literal type and is initialized with an
- // expression that is value-dependent [C++11].
- // (VD) - FIXME: Missing from the standard:
- // - an entity with reference type and is initialized with an
- // expression that is value-dependent [C++11]
- if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if ((Ctx.getLangOpts().CPlusPlus11 ?
- Var->getType()->isLiteralType(Ctx) :
- Var->getType()->isIntegralOrEnumerationType()) &&
- (Var->getType().isConstQualified() ||
- Var->getType()->isReferenceType())) {
- if (const Expr *Init = Var->getAnyInitializer())
- if (Init->isValueDependent()) {
- ValueDependent = true;
- InstantiationDependent = true;
- }
- }
-
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
- if (Var->isStaticDataMember() &&
- Var->getDeclContext()->isDependentContext()) {
- ValueDependent = true;
- InstantiationDependent = true;
- TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
- if (TInfo->getType()->isIncompleteArrayType())
- TypeDependent = true;
- }
-
- return;
- }
-
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
- if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
- ValueDependent = true;
- InstantiationDependent = true;
- }
-}
-
-void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
- bool TypeDependent = false;
- bool ValueDependent = false;
- bool InstantiationDependent = false;
- computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
- ValueDependent, InstantiationDependent);
-
- ExprBits.TypeDependent |= TypeDependent;
- ExprBits.ValueDependent |= ValueDependent;
- ExprBits.InstantiationDependent |= InstantiationDependent;
-
- // Is the declaration a parameter pack?
- if (getDecl()->isParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-}
-
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture, QualType T,
ExprValueKind VK, SourceLocation L,
const DeclarationNameLoc &LocInfo,
NonOdrUseReason NOUR)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), DNLoc(LocInfo) {
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary), D(D), DNLoc(LocInfo) {
DeclRefExprBits.HasQualifier = false;
DeclRefExprBits.HasTemplateKWAndArgsInfo = false;
DeclRefExprBits.HasFoundDecl = false;
@@ -492,7 +388,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
RefersToEnclosingVariableOrCapture;
DeclRefExprBits.NonOdrUseReason = NOUR;
DeclRefExprBits.Loc = L;
- computeDependence(Ctx);
+ setDependence(computeDependence(this, Ctx));
}
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
@@ -502,19 +398,13 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
const DeclarationNameInfo &NameInfo, NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK, NonOdrUseReason NOUR)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), DNLoc(NameInfo.getInfo()) {
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary), D(D),
+ DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.Loc = NameInfo.getLoc();
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
- if (QualifierLoc) {
+ if (QualifierLoc)
new (getTrailingObjects<NestedNameSpecifierLoc>())
NestedNameSpecifierLoc(QualifierLoc);
- auto *NNS = QualifierLoc.getNestedNameSpecifier();
- if (NNS->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (NNS->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
if (FoundD)
*getTrailingObjects<NamedDecl *>() = FoundD;
@@ -524,22 +414,18 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
RefersToEnclosingVariableOrCapture;
DeclRefExprBits.NonOdrUseReason = NOUR;
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- assert(!Dependent && "built a DeclRefExpr with dependent template args");
- ExprBits.InstantiationDependent |= InstantiationDependent;
- ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
+ Deps);
+ assert(!(Deps & TemplateArgumentDependence::Dependent) &&
+ "built a DeclRefExpr with dependent template args");
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
DeclRefExprBits.HadMultipleCandidates = 0;
-
- computeDependence(Ctx);
+ setDependence(computeDependence(this, Ctx));
}
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
@@ -611,10 +497,7 @@ SourceLocation DeclRefExpr::getEndLoc() const {
PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL)
- : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
- FNTy->isDependentType(), FNTy->isDependentType(),
- FNTy->isInstantiationDependentType(),
- /*ContainsUnexpandedParameterPack=*/false) {
+ : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) {
PredefinedExprBits.Kind = IK;
assert((getIdentKind() == IK) &&
"IdentKind do not fit in PredefinedExprBitfields!");
@@ -623,6 +506,35 @@ PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
PredefinedExprBits.Loc = L;
if (HasFunctionName)
setFunctionName(SL);
+ setDependence(computeDependence(this));
+}
+
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
+ TypeSourceInfo *Info)
+ : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind do not fit in PredefinedExprBitFields!");
+ assert(IK == UniqueStableNameType &&
+ "Constructor only valid with UniqueStableNameType");
+ PredefinedExprBits.HasFunctionName = false;
+ PredefinedExprBits.Loc = L;
+ setTypeSourceInfo(Info);
+ setDependence(computeDependence(this));
+}
+
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
+ Expr *E)
+ : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind do not fit in PredefinedExprBitFields!");
+ assert(IK == UniqueStableNameExpr &&
+ "Constructor only valid with UniqueStableNameExpr");
+ PredefinedExprBits.HasFunctionName = false;
+ PredefinedExprBits.Loc = L;
+ setExpr(E);
+ setDependence(computeDependence(this));
}
PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
@@ -634,15 +546,44 @@ PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
QualType FNTy, IdentKind IK,
StringLiteral *SL) {
bool HasFunctionName = SL != nullptr;
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
- alignof(PredefinedExpr));
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
+ alignof(PredefinedExpr));
return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
}
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL,
+ TypeSourceInfo *Info) {
+ assert(IK == UniqueStableNameType && "Only valid with UniqueStableNameType");
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
+ HasFunctionName, 0, !HasFunctionName),
+ alignof(PredefinedExpr));
+ if (HasFunctionName)
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, Info);
+}
+
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL, Expr *E) {
+ assert(IK == UniqueStableNameExpr && "Only valid with UniqueStableNameExpr");
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
+ HasFunctionName, !HasFunctionName, 0),
+ alignof(PredefinedExpr));
+ if (HasFunctionName)
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, E);
+}
+
PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
bool HasFunctionName) {
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
- alignof(PredefinedExpr));
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
+ alignof(PredefinedExpr));
return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
}
@@ -662,12 +603,28 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
return "__FUNCSIG__";
case LFuncSig:
return "L__FUNCSIG__";
+ case UniqueStableNameType:
+ case UniqueStableNameExpr:
+ return "__builtin_unique_stable_name";
case PrettyFunctionNoVirtual:
break;
}
llvm_unreachable("Unknown ident kind for PredefinedExpr");
}
+std::string PredefinedExpr::ComputeName(ASTContext &Context, IdentKind IK,
+ QualType Ty) {
+ std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
+ Context, Context.getDiagnostics(), /*IsUniqueNameMangler*/ true)};
+
+ Ty = Ty.getCanonicalType();
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Ctx->mangleTypeName(Ty, Out);
+ return std::string(Buffer.str());
+}
+
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
@@ -681,18 +638,22 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (MC->shouldMangleDeclName(ND)) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
+ GlobalDecl GD;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(ND))
- MC->mangleCXXCtor(CD, Ctor_Base, Out);
+ GD = GlobalDecl(CD, Ctor_Base);
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(ND))
- MC->mangleCXXDtor(DD, Dtor_Base, Out);
+ GD = GlobalDecl(DD, Dtor_Base);
+ else if (ND->hasAttr<CUDAGlobalAttr>())
+ GD = GlobalDecl(cast<FunctionDecl>(ND));
else
- MC->mangleName(ND, Out);
+ GD = GlobalDecl(ND);
+ MC->mangleName(GD, Out);
if (!Buffer.empty() && Buffer.front() == '\01')
- return Buffer.substr(1);
- return Buffer.str();
+ return std::string(Buffer.substr(1));
+ return std::string(Buffer.str());
} else
- return ND->getIdentifier()->getName();
+ return std::string(ND->getIdentifier()->getName());
}
return "";
}
@@ -711,7 +672,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
Out << ComputeName(IK, DCBlock);
else if (auto *DCDecl = dyn_cast<Decl>(DC))
Out << ComputeName(IK, DCDecl) << "_block_invoke";
- return Out.str();
+ return std::string(Out.str());
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual &&
@@ -856,7 +817,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
Out << Proto;
- return Name.str().str();
+ return std::string(Name);
}
if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) {
for (const DeclContext *DC = CD->getParent(); DC; DC = DC->getParent())
@@ -887,7 +848,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
MD->getSelector().print(Out);
Out << ']';
- return Name.str().str();
+ return std::string(Name);
}
if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) {
// __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
@@ -915,13 +876,12 @@ void APNumericStorage::setIntValue(const ASTContext &C,
IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l)
- : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Loc(l) {
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l) {
assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
assert(V.getBitWidth() == C.getIntWidth(type) &&
"Integer type is not the correct size for constant.");
setValue(C, V);
+ setDependence(ExprDependence::None);
}
IntegerLiteral *
@@ -938,13 +898,13 @@ IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
FixedPointLiteral::FixedPointLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l,
unsigned Scale)
- : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Loc(l), Scale(Scale) {
+ : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l),
+ Scale(Scale) {
assert(type->isFixedPointType() && "Illegal type in FixedPointLiteral");
assert(V.getBitWidth() == C.getTypeInfo(type).Width &&
"Fixed point type is not the correct size for constant.");
setValue(C, V);
+ setDependence(ExprDependence::None);
}
FixedPointLiteral *FixedPointLiteral::CreateFromRawInt(const ASTContext &C,
@@ -955,6 +915,11 @@ FixedPointLiteral *FixedPointLiteral::CreateFromRawInt(const ASTContext &C,
return new (C) FixedPointLiteral(C, V, type, l, Scale);
}
+FixedPointLiteral *FixedPointLiteral::Create(const ASTContext &C,
+ EmptyShell Empty) {
+ return new (C) FixedPointLiteral(Empty);
+}
+
std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
// Currently the longest decimal number that can be printed is the max for an
// unsigned long _Accum: 4294967295.99999999976716935634613037109375
@@ -962,16 +927,16 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
SmallString<64> S;
FixedPointValueToString(
S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale);
- return S.str();
+ return std::string(S.str());
}
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L)
- : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
- false, false), Loc(L) {
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary), Loc(L) {
setSemantics(V.getSemantics());
FloatingLiteralBits.IsExact = isexact;
setValue(C, V);
+ setDependence(ExprDependence::None);
}
FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty)
@@ -1031,8 +996,7 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
StringKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumConcatenated)
- : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
- false) {
+ : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary) {
assert(Ctx.getAsConstantArrayType(Ty) &&
"StringLiteral must be of constant array type!");
unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
@@ -1071,6 +1035,8 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
// Initialize the trailing array of char holding the string data.
std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+
+ setDependence(ExprDependence::None);
}
StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated,
@@ -1339,10 +1305,7 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
SourceLocation RParenLoc, unsigned MinNumArgs,
ADLCallKind UsesADL)
- : Expr(SC, Ty, VK, OK_Ordinary, Fn->isTypeDependent(),
- Fn->isValueDependent(), Fn->isInstantiationDependent(),
- Fn->containsUnexpandedParameterPack()),
- RParenLoc(RParenLoc) {
+ : Expr(SC, Ty, VK, OK_Ordinary), RParenLoc(RParenLoc) {
NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
unsigned NumPreArgs = PreArgs.size();
CallExprBits.NumPreArgs = NumPreArgs;
@@ -1356,17 +1319,14 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
CallExprBits.UsesADL = static_cast<bool>(UsesADL);
setCallee(Fn);
- for (unsigned I = 0; I != NumPreArgs; ++I) {
- updateDependenciesFromArg(PreArgs[I]);
+ for (unsigned I = 0; I != NumPreArgs; ++I)
setPreArg(I, PreArgs[I]);
- }
- for (unsigned I = 0; I != Args.size(); ++I) {
- updateDependenciesFromArg(Args[I]);
+ for (unsigned I = 0; I != Args.size(); ++I)
setArg(I, Args[I]);
- }
- for (unsigned I = Args.size(); I != NumArgs; ++I) {
+ for (unsigned I = Args.size(); I != NumArgs; ++I)
setArg(I, nullptr);
- }
+
+ setDependence(computeDependence(this, PreArgs));
}
CallExpr::CallExpr(StmtClass SC, unsigned NumPreArgs, unsigned NumArgs,
@@ -1400,7 +1360,8 @@ CallExpr *CallExpr::CreateTemporary(void *Mem, Expr *Fn, QualType Ty,
assert(!(reinterpret_cast<uintptr_t>(Mem) % alignof(CallExpr)) &&
"Misaligned memory in CallExpr::CreateTemporary!");
return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, /*Args=*/{}, Ty,
- VK, RParenLoc, /*MinNumArgs=*/0, UsesADL);
+ VK, RParenLoc,
+ /*MinNumArgs=*/0, UsesADL);
}
CallExpr *CallExpr::CreateEmpty(const ASTContext &Ctx, unsigned NumArgs,
@@ -1429,33 +1390,31 @@ unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
}
}
-void CallExpr::updateDependenciesFromArg(Expr *Arg) {
- if (Arg->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Arg->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Arg->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Arg->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-}
-
Decl *Expr::getReferencedDeclOfCallee() {
Expr *CEE = IgnoreParenImpCasts();
- while (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
- CEE = NTTP->getReplacement()->IgnoreParenCasts();
+ while (SubstNonTypeTemplateParmExpr *NTTP =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ CEE = NTTP->getReplacement()->IgnoreParenImpCasts();
}
// If we're calling a dereference, look at the pointer instead.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
- if (BO->isPtrMemOp())
- CEE = BO->getRHS()->IgnoreParenCasts();
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
- if (UO->getOpcode() == UO_Deref)
- CEE = UO->getSubExpr()->IgnoreParenCasts();
+ while (true) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp()) {
+ CEE = BO->getRHS()->IgnoreParenImpCasts();
+ continue;
+ }
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_AddrOf ||
+ UO->getOpcode() == UO_Plus) {
+ CEE = UO->getSubExpr()->IgnoreParenImpCasts();
+ continue;
+ }
+ }
+ break;
}
+
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
return DRE->getDecl();
if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
@@ -1466,28 +1425,11 @@ Decl *Expr::getReferencedDeclOfCallee() {
return nullptr;
}
-/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
-/// not, return 0.
+/// If this is a call to a builtin, return the builtin ID. If not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
- // All simple function calls (e.g. func()) are implicitly cast to pointer to
- // function. As a result, we try and obtain the DeclRefExpr from the
- // ImplicitCastExpr.
- const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
- if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
- return 0;
-
- const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
- if (!DRE)
- return 0;
-
- const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
- if (!FDecl)
- return 0;
-
- if (!FDecl->getIdentifier())
- return 0;
-
- return FDecl->getBuiltinID();
+ auto *FDecl =
+ dyn_cast_or_null<FunctionDecl>(getCallee()->getReferencedDeclOfCallee());
+ return FDecl ? FDecl->getBuiltinID() : 0;
}
bool CallExpr::isUnevaluatedBuiltinCall(const ASTContext &Ctx) const {
@@ -1569,28 +1511,17 @@ OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C,
OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
+ ArrayRef<OffsetOfNode> comps, ArrayRef<Expr *> exprs,
SourceLocation RParenLoc)
- : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- /*ValueDependent=*/tsi->getType()->isDependentType(),
- tsi->getType()->isInstantiationDependentType(),
- tsi->getType()->containsUnexpandedParameterPack()),
- OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
- NumComps(comps.size()), NumExprs(exprs.size())
-{
- for (unsigned i = 0; i != comps.size(); ++i) {
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary),
+ OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
+ NumComps(comps.size()), NumExprs(exprs.size()) {
+ for (unsigned i = 0; i != comps.size(); ++i)
setComponent(i, comps[i]);
- }
-
- for (unsigned i = 0; i != exprs.size(); ++i) {
- if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (exprs[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != exprs.size(); ++i)
setIndexExpr(i, exprs[i]);
- }
+
+ setDependence(computeDependence(this));
}
IdentifierInfo *OffsetOfNode::getFieldName() const {
@@ -1604,39 +1535,15 @@ IdentifierInfo *OffsetOfNode::getFieldName() const {
UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType,
SourceLocation op, SourceLocation rp)
- : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
- false, // Never type-dependent (C++ [temp.dep.expr]p3).
- // Value-dependent if the argument is type-dependent.
- E->isTypeDependent(), E->isInstantiationDependent(),
- E->containsUnexpandedParameterPack()),
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary),
OpLoc(op), RParenLoc(rp) {
+ assert(ExprKind <= UETT_Last && "invalid enum value!");
UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ assert(static_cast<unsigned>(ExprKind) == UnaryExprOrTypeTraitExprBits.Kind &&
+ "UnaryExprOrTypeTraitExprBits.Kind overflow!");
UnaryExprOrTypeTraitExprBits.IsType = false;
Argument.Ex = E;
-
- // Check to see if we are in the situation where alignof(decl) should be
- // dependent because decl's alignment is dependent.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
- if (!isValueDependent() || !isInstantiationDependent()) {
- E = E->IgnoreParens();
-
- const ValueDecl *D = nullptr;
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- D = DRE->getDecl();
- else if (const auto *ME = dyn_cast<MemberExpr>(E))
- D = ME->getMemberDecl();
-
- if (D) {
- for (const auto *I : D->specific_attrs<AlignedAttr>()) {
- if (I->isAlignmentDependent()) {
- setValueDependent(true);
- setInstantiationDependent(true);
- break;
- }
- }
- }
- }
- }
+ setDependence(computeDependence(this));
}
MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
@@ -1644,11 +1551,8 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
const DeclarationNameInfo &NameInfo, QualType T,
ExprValueKind VK, ExprObjectKind OK,
NonOdrUseReason NOUR)
- : Expr(MemberExprClass, T, VK, OK, Base->isTypeDependent(),
- Base->isValueDependent(), Base->isInstantiationDependent(),
- Base->containsUnexpandedParameterPack()),
- Base(Base), MemberDecl(MemberDecl), MemberDNLoc(NameInfo.getInfo()),
- MemberLoc(NameInfo.getLoc()) {
+ : Expr(MemberExprClass, T, VK, OK), Base(Base), MemberDecl(MemberDecl),
+ MemberDNLoc(NameInfo.getInfo()), MemberLoc(NameInfo.getLoc()) {
assert(!NameInfo.getName() ||
MemberDecl->getDeclName() == NameInfo.getName());
MemberExprBits.IsArrow = IsArrow;
@@ -1657,6 +1561,7 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
MemberExprBits.HadMultipleCandidates = false;
MemberExprBits.NonOdrUseReason = NOUR;
MemberExprBits.OperatorLoc = OperatorLoc;
+ setDependence(computeDependence(this));
}
MemberExpr *MemberExpr::Create(
@@ -1678,30 +1583,15 @@ MemberExpr *MemberExpr::Create(
MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
NameInfo, T, VK, OK, NOUR);
- if (isa<FieldDecl>(MemberDecl)) {
- DeclContext *DC = MemberDecl->getDeclContext();
- // dyn_cast_or_null is used to handle objC variables which do not
- // have a declaration context.
- CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC);
- if (RD && RD->isDependentContext() && RD->isCurrentInstantiation(DC))
- E->setTypeDependent(T->isDependentType());
-
- // Bitfield with value-dependent width is type-dependent.
- FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl);
- if (FD && FD->isBitField() && FD->getBitWidth()->isValueDependent())
- E->setTypeDependent(true);
- }
-
+ // FIXME: remove remaining dependence computation to computeDependence().
+ auto Deps = E->getDependence();
if (HasQualOrFound) {
// FIXME: Wrong. We should be looking at the member declaration we found.
- if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
- E->setValueDependent(true);
- E->setTypeDependent(true);
- E->setInstantiationDependent(true);
- }
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent())
+ Deps |= ExprDependence::TypeValueInstantiation;
else if (QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
- E->setInstantiationDependent(true);
+ Deps |= ExprDependence::Instantiation;
E->MemberExprBits.HasQualifierOrFoundDecl = true;
@@ -1715,19 +1605,17 @@ MemberExpr *MemberExpr::Create(
TemplateArgs || TemplateKWLoc.isValid();
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto TemplateArgDeps = TemplateArgumentDependence::None;
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs,
- E->getTrailingObjects<TemplateArgumentLoc>(), Dependent,
- InstantiationDependent, ContainsUnexpandedParameterPack);
- if (InstantiationDependent)
- E->setInstantiationDependent(true);
+ E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps);
+ if (TemplateArgDeps & TemplateArgumentDependence::Instantiation)
+ Deps |= ExprDependence::Instantiation;
} else if (TemplateKWLoc.isValid()) {
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ E->setDependence(Deps);
return E;
}
@@ -1828,12 +1716,13 @@ bool CastExpr::CastConsistency() const {
auto Ty = getType();
auto SETy = getSubExpr()->getType();
assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
- if (isRValue()) {
+ if (isRValue() && !Ty->isDependentType() && !SETy->isDependentType()) {
Ty = Ty->getPointeeType();
SETy = SETy->getPointeeType();
}
- assert(!Ty.isNull() && !SETy.isNull() &&
- Ty.getAddressSpace() != SETy.getAddressSpace());
+ assert((Ty->isDependentType() || SETy->isDependentType()) ||
+ (!Ty.isNull() && !SETy.isNull() &&
+ Ty.getAddressSpace() != SETy.getAddressSpace()));
goto CheckNoBasePath;
}
// These should not have an inheritance path.
@@ -2168,9 +2057,10 @@ SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
SourceLocation BLoc, SourceLocation RParenLoc,
DeclContext *ParentContext)
: Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
- VK_RValue, OK_Ordinary, false, false, false, false),
+ VK_RValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
SourceLocExprBits.Kind = Kind;
+ setDependence(ExprDependence::None);
}
StringRef SourceLocExpr::getBuiltinStr() const {
@@ -2234,25 +2124,14 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
}
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
- ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
- : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
- false, false),
- InitExprs(C, initExprs.size()),
- LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(nullptr, true)
-{
+ ArrayRef<Expr *> initExprs, SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary),
+ InitExprs(C, initExprs.size()), LBraceLoc(lbraceloc),
+ RBraceLoc(rbraceloc), AltForm(nullptr, true) {
sawArrayRangeDesignator(false);
- for (unsigned I = 0; I != initExprs.size(); ++I) {
- if (initExprs[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (initExprs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (initExprs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (initExprs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
-
InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
+
+ setDependence(computeDependence(this));
}
void InitListExpr::reserveInits(const ASTContext &C, unsigned NumInits) {
@@ -2393,6 +2272,64 @@ Stmt *BlockExpr::getBody() {
// Generic Expression Routines
//===----------------------------------------------------------------------===//
+bool Expr::isReadIfDiscardedInCPlusPlus11() const {
+ // In C++11, discarded-value expressions of a certain form are special,
+ // according to [expr]p10:
+ // The lvalue-to-rvalue conversion (4.1) is applied only if the
+ // expression is an lvalue of volatile-qualified type and it has
+ // one of the following forms:
+ if (!isGLValue() || !getType().isVolatileQualified())
+ return false;
+
+ const Expr *E = IgnoreParens();
+
+ // - id-expression (5.1.1),
+ if (isa<DeclRefExpr>(E))
+ return true;
+
+ // - subscripting (5.2.1),
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ // - class member access (5.2.5),
+ if (isa<MemberExpr>(E))
+ return true;
+
+ // - indirection (5.3.1),
+ if (auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ // - pointer-to-member operation (5.5),
+ if (BO->isPtrMemOp())
+ return true;
+
+ // - comma expression (5.18) where the right operand is one of the above.
+ if (BO->getOpcode() == BO_Comma)
+ return BO->getRHS()->isReadIfDiscardedInCPlusPlus11();
+ }
+
+ // - conditional expression (5.16) where both the second and the third
+ // operands are one of the above, or
+ if (auto *CO = dyn_cast<ConditionalOperator>(E))
+ return CO->getTrueExpr()->isReadIfDiscardedInCPlusPlus11() &&
+ CO->getFalseExpr()->isReadIfDiscardedInCPlusPlus11();
+ // The related edge case of "*x ?: *x".
+ if (auto *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
+ return OVE->getSourceExpr()->isReadIfDiscardedInCPlusPlus11() &&
+ BCO->getFalseExpr()->isReadIfDiscardedInCPlusPlus11();
+ }
+
+ // Objective-C++ extensions to the rule.
+ if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ return true;
+
+ return false;
+}
+
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
/// with location to warn on and the source range[s] to report with the
@@ -2581,6 +2518,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// If we don't know precisely what we're looking at, let's not warn.
case UnresolvedLookupExprClass:
case CXXUnresolvedConstructExprClass:
+ case RecoveryExprClass:
return false;
case CXXTemporaryObjectExprClass:
@@ -2680,20 +2618,31 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
}
case CXXFunctionalCastExprClass:
case CStyleCastExprClass: {
- // Ignore an explicit cast to void unless the operand is a non-trivial
- // volatile lvalue.
+ // Ignore an explicit cast to void, except in C++98 if the operand is a
+ // volatile glvalue for which we would trigger an implicit read in any
+ // other language mode. (Such an implicit read always happens as part of
+ // the lvalue conversion in C, and happens in C++ for expressions of all
+ // forms where it seems likely the user intended to trigger a volatile
+ // load.)
const CastExpr *CE = cast<CastExpr>(this);
+ const Expr *SubE = CE->getSubExpr()->IgnoreParens();
if (CE->getCastKind() == CK_ToVoid) {
- if (CE->getSubExpr()->isGLValue() &&
- CE->getSubExpr()->getType().isVolatileQualified()) {
- const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
- if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
- cast<VarDecl>(DRE->getDecl())->hasLocalStorage()) &&
- !isa<CallExpr>(CE->getSubExpr()->IgnoreParens())) {
- return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
- R1, R2, Ctx);
- }
+ if (Ctx.getLangOpts().CPlusPlus && !Ctx.getLangOpts().CPlusPlus11 &&
+ SubE->isReadIfDiscardedInCPlusPlus11()) {
+ // Suppress the "unused value" warning for idiomatic usage of
+ // '(void)var;' used to suppress "unused variable" warnings.
+ if (auto *DRE = dyn_cast<DeclRefExpr>(SubE))
+ if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (!VD->isExternallyVisible())
+ return false;
+
+ // The lvalue-to-rvalue conversion would have no effect for an array.
+ // It's implausible that the programmer expected this to result in a
+ // volatile array load, so don't warn.
+ if (SubE->getType()->isArrayType())
+ return false;
+
+ return SubE->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
return false;
}
@@ -2905,6 +2854,12 @@ static Expr *IgnoreImplicitAsWrittenSingleStep(Expr *E) {
return IgnoreImplicitSingleStep(E);
}
+static Expr *IgnoreParensOnlySingleStep(Expr *E) {
+ if (auto *PE = dyn_cast<ParenExpr>(E))
+ return PE->getSubExpr();
+ return E;
+}
+
static Expr *IgnoreParensSingleStep(Expr *E) {
if (auto *PE = dyn_cast<ParenExpr>(E))
return PE->getSubExpr();
@@ -2924,9 +2879,6 @@ static Expr *IgnoreParensSingleStep(Expr *E) {
return CE->getChosenSubExpr();
}
- else if (auto *CE = dyn_cast<ConstantExpr>(E))
- return CE->getSubExpr();
-
return E;
}
@@ -3031,12 +2983,16 @@ Expr *Expr::IgnoreUnlessSpelledInSource() {
Expr *LastE = nullptr;
while (E != LastE) {
LastE = E;
- E = E->IgnoreParenImpCasts();
+ E = IgnoreExprNodes(E, IgnoreImplicitSingleStep,
+ IgnoreImpCastsExtraSingleStep,
+ IgnoreParensOnlySingleStep);
auto SR = E->getSourceRange();
if (auto *C = dyn_cast<CXXConstructExpr>(E)) {
- if (C->getNumArgs() == 1) {
+ auto NumArgs = C->getNumArgs();
+ if (NumArgs == 1 ||
+ (NumArgs > 1 && isa<CXXDefaultArgExpr>(C->getArg(1)))) {
Expr *A = C->getArg(0);
if (A->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
E = A;
@@ -3044,7 +3000,18 @@ Expr *Expr::IgnoreUnlessSpelledInSource() {
}
if (auto *C = dyn_cast<CXXMemberCallExpr>(E)) {
- Expr *ExprNode = C->getImplicitObjectArgument()->IgnoreParenImpCasts();
+ Expr *ExprNode = C->getImplicitObjectArgument();
+ if (ExprNode->getSourceRange() == SR) {
+ E = ExprNode;
+ continue;
+ }
+ if (auto *PE = dyn_cast<ParenExpr>(ExprNode)) {
+ if (PE->getSourceRange() == C->getSourceRange()) {
+ E = PE;
+ continue;
+ }
+ }
+ ExprNode = ExprNode->IgnoreParenImpCasts();
if (ExprNode->getSourceRange() == SR)
E = ExprNode;
}
@@ -3211,6 +3178,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
switch (getStmtClass()) {
default: break;
+ case Stmt::ExprWithCleanupsClass:
+ return cast<ExprWithCleanups>(this)->getSubExpr()->isConstantInitializer(
+ Ctx, IsForRef, Culprit);
case StringLiteralClass:
case ObjCEncodeExprClass:
return true;
@@ -3324,6 +3294,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
case ObjCBridgedCastExprClass:
case CXXDynamicCastExprClass:
case CXXReinterpretCastExprClass:
+ case CXXAddrspaceCastExprClass:
case CXXConstCastExprClass: {
const CastExpr *CE = cast<CastExpr>(this);
@@ -3390,6 +3361,26 @@ namespace {
bool hasSideEffects() const { return HasSideEffects; }
+ void VisitDecl(const Decl *D) {
+ if (!D)
+ return;
+
+ // We assume the caller checks subexpressions (eg, the initializer, VLA
+ // bounds) for side-effects on our behalf.
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ // Registering a destructor is a side-effect.
+ if (IncludePossibleEffects && VD->isThisDeclarationADefinition() &&
+ VD->needsDestruction(Context))
+ HasSideEffects = true;
+ }
+ }
+
+ void VisitDeclStmt(const DeclStmt *DS) {
+ for (auto *D : DS->decls())
+ VisitDecl(D);
+ Inherited::VisitDeclStmt(DS);
+ }
+
void VisitExpr(const Expr *E) {
if (!HasSideEffects &&
E->HasSideEffects(Context, IncludePossibleEffects))
@@ -3426,6 +3417,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case SubstNonTypeTemplateParmPackExprClass:
case FunctionParmPackExprClass:
case TypoExprClass:
+ case RecoveryExprClass:
case CXXFoldExprClass:
llvm_unreachable("shouldn't see dependent / unresolved nodes here");
@@ -3521,7 +3513,10 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ParenExprClass:
case ArraySubscriptExprClass:
+ case MatrixSubscriptExprClass:
case OMPArraySectionExprClass:
+ case OMPArrayShapingExprClass:
+ case OMPIteratorExprClass:
case MemberExprClass:
case ConditionalOperatorClass:
case BinaryConditionalOperatorClass:
@@ -3592,6 +3587,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXStaticCastExprClass:
case CXXReinterpretCastExprClass:
case CXXConstCastExprClass:
+ case CXXAddrspaceCastExprClass:
case CXXFunctionalCastExprClass:
case BuiltinBitCastExprClass: {
// While volatile reads are side-effecting in both C and C++, we treat them
@@ -3633,7 +3629,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case LambdaExprClass: {
const LambdaExpr *LE = cast<LambdaExpr>(this);
for (Expr *E : LE->capture_inits())
- if (E->HasSideEffects(Ctx, IncludePossibleEffects))
+ if (E && E->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
return false;
}
@@ -3816,6 +3812,11 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return Source->isNullPointerConstant(Ctx, NPC);
}
+ // If the expression has no type information, it cannot be a null pointer
+ // constant.
+ if (getType().isNull())
+ return NPCK_NotNull;
+
// C++11 nullptr_t is always a null pointer constant.
if (getType()->isNullPtrType())
return NPCK_CXX11_nullptr;
@@ -4154,28 +4155,16 @@ void ExtVectorElementExpr::getEncodedElementAccess(
}
}
-ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
+ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr *> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
- : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
- Type->isDependentType(), Type->isDependentType(),
- Type->isInstantiationDependentType(),
- Type->containsUnexpandedParameterPack()),
- BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
-{
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size()) {
SubExprs = new (C) Stmt*[args.size()];
- for (unsigned i = 0; i != args.size(); i++) {
- if (args[i]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (args[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (args[i]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (args[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != args.size(); i++)
SubExprs[i] = args[i];
- }
+
+ setDependence(computeDependence(this));
}
void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
@@ -4193,11 +4182,7 @@ GenericSelectionExpr::GenericSelectionExpr(
bool ContainsUnexpandedParameterPack, unsigned ResultIndex)
: Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(),
AssocExprs[ResultIndex]->getValueKind(),
- AssocExprs[ResultIndex]->getObjectKind(),
- AssocExprs[ResultIndex]->isTypeDependent(),
- AssocExprs[ResultIndex]->isValueDependent(),
- AssocExprs[ResultIndex]->isInstantiationDependent(),
- ContainsUnexpandedParameterPack),
+ AssocExprs[ResultIndex]->getObjectKind()),
NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
@@ -4211,6 +4196,8 @@ GenericSelectionExpr::GenericSelectionExpr(
getTrailingObjects<Stmt *>() + AssocExprStartIndex);
std::copy(AssocTypes.begin(), AssocTypes.end(),
getTrailingObjects<TypeSourceInfo *>());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
GenericSelectionExpr::GenericSelectionExpr(
@@ -4219,10 +4206,7 @@ GenericSelectionExpr::GenericSelectionExpr(
SourceLocation DefaultLoc, SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack)
: Expr(GenericSelectionExprClass, Context.DependentTy, VK_RValue,
- OK_Ordinary,
- /*isTypeDependent=*/true,
- /*isValueDependent=*/true,
- /*isInstantiationDependent=*/true, ContainsUnexpandedParameterPack),
+ OK_Ordinary),
NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
@@ -4235,6 +4219,8 @@ GenericSelectionExpr::GenericSelectionExpr(
getTrailingObjects<Stmt *>() + AssocExprStartIndex);
std::copy(AssocTypes.begin(), AssocTypes.end(),
getTrailingObjects<TypeSourceInfo *>());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
GenericSelectionExpr::GenericSelectionExpr(EmptyShell Empty, unsigned NumAssocs)
@@ -4293,15 +4279,11 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
llvm::ArrayRef<Designator> Designators,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
- ArrayRef<Expr*> IndexExprs,
- Expr *Init)
- : Expr(DesignatedInitExprClass, Ty,
- Init->getValueKind(), Init->getObjectKind(),
- Init->isTypeDependent(), Init->isValueDependent(),
- Init->isInstantiationDependent(),
- Init->containsUnexpandedParameterPack()),
- EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
- NumDesignators(Designators.size()), NumSubExprs(IndexExprs.size() + 1) {
+ ArrayRef<Expr *> IndexExprs, Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty, Init->getValueKind(),
+ Init->getObjectKind()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(Designators.size()), NumSubExprs(IndexExprs.size() + 1) {
this->Designators = new (C) Designator[NumDesignators];
// Record the initializer itself.
@@ -4313,38 +4295,10 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
unsigned IndexIdx = 0;
for (unsigned I = 0; I != NumDesignators; ++I) {
this->Designators[I] = Designators[I];
-
if (this->Designators[I].isArrayDesignator()) {
- // Compute type- and value-dependence.
- Expr *Index = IndexExprs[IndexIdx];
- if (Index->isTypeDependent() || Index->isValueDependent())
- ExprBits.TypeDependent = ExprBits.ValueDependent = true;
- if (Index->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- // Propagate unexpanded parameter packs.
- if (Index->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
// Copy the index expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
} else if (this->Designators[I].isArrayRangeDesignator()) {
- // Compute type- and value-dependence.
- Expr *Start = IndexExprs[IndexIdx];
- Expr *End = IndexExprs[IndexIdx + 1];
- if (Start->isTypeDependent() || Start->isValueDependent() ||
- End->isTypeDependent() || End->isValueDependent()) {
- ExprBits.TypeDependent = ExprBits.ValueDependent = true;
- ExprBits.InstantiationDependent = true;
- } else if (Start->isInstantiationDependent() ||
- End->isInstantiationDependent()) {
- ExprBits.InstantiationDependent = true;
- }
-
- // Propagate unexpanded parameter packs.
- if (Start->containsUnexpandedParameterPack() ||
- End->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
// Copy the start/end expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
*Child++ = IndexExprs[IndexIdx++];
@@ -4352,6 +4306,7 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
}
assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
+ setDependence(computeDependence(this));
}
DesignatedInitExpr *
@@ -4455,14 +4410,19 @@ void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
}
DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
- SourceLocation lBraceLoc, Expr *baseExpr, SourceLocation rBraceLoc)
- : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
- OK_Ordinary, false, false, false, false) {
+ SourceLocation lBraceLoc,
+ Expr *baseExpr,
+ SourceLocation rBraceLoc)
+ : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
+ OK_Ordinary) {
BaseAndUpdaterExprs[0] = baseExpr;
InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc);
ILE->setType(baseExpr->getType());
BaseAndUpdaterExprs[1] = ILE;
+
+ // FIXME: this is wrong, set it correctly.
+ setDependence(ExprDependence::None);
}
SourceLocation DesignatedInitUpdateExpr::getBeginLoc() const {
@@ -4475,23 +4435,13 @@ SourceLocation DesignatedInitUpdateExpr::getEndLoc() const {
ParenListExpr::ParenListExpr(SourceLocation LParenLoc, ArrayRef<Expr *> Exprs,
SourceLocation RParenLoc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary),
LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
ParenListExprBits.NumExprs = Exprs.size();
- for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
- if (Exprs[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Exprs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Exprs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Exprs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0, N = Exprs.size(); I != N; ++I)
getTrailingObjects<Stmt *>()[I] = Exprs[I];
- }
+ setDependence(computeDependence(this));
}
ParenListExpr::ParenListExpr(EmptyShell Empty, unsigned NumExprs)
@@ -4515,6 +4465,115 @@ ParenListExpr *ParenListExpr::CreateEmpty(const ASTContext &Ctx,
return new (Mem) ParenListExpr(EmptyShell(), NumExprs);
}
+BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures)
+ : Expr(BinaryOperatorClass, ResTy, VK, OK) {
+ BinaryOperatorBits.Opc = opc;
+ assert(!isCompoundAssignmentOp() &&
+ "Use CompoundAssignOperator for compound assignments");
+ BinaryOperatorBits.OpLoc = opLoc;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ if (BinaryOperatorBits.HasFPFeatures)
+ *getTrailingFPFeatures() = FPFeatures;
+ setDependence(computeDependence(this));
+}
+
+BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures, bool dead2)
+ : Expr(CompoundAssignOperatorClass, ResTy, VK, OK) {
+ BinaryOperatorBits.Opc = opc;
+ assert(isCompoundAssignmentOp() &&
+ "Use CompoundAssignOperator for compound assignments");
+ BinaryOperatorBits.OpLoc = opLoc;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ if (BinaryOperatorBits.HasFPFeatures)
+ *getTrailingFPFeatures() = FPFeatures;
+ setDependence(computeDependence(this));
+}
+
+BinaryOperator *BinaryOperator::CreateEmpty(const ASTContext &C,
+ bool HasFPFeatures) {
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem =
+ C.Allocate(sizeof(BinaryOperator) + Extra, alignof(BinaryOperator));
+ return new (Mem) BinaryOperator(EmptyShell());
+}
+
+BinaryOperator *BinaryOperator::Create(const ASTContext &C, Expr *lhs,
+ Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc,
+ FPOptionsOverride FPFeatures) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem =
+ C.Allocate(sizeof(BinaryOperator) + Extra, alignof(BinaryOperator));
+ return new (Mem)
+ BinaryOperator(C, lhs, rhs, opc, ResTy, VK, OK, opLoc, FPFeatures);
+}
+
+CompoundAssignOperator *
+CompoundAssignOperator::CreateEmpty(const ASTContext &C, bool HasFPFeatures) {
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem = C.Allocate(sizeof(CompoundAssignOperator) + Extra,
+ alignof(CompoundAssignOperator));
+ return new (Mem) CompoundAssignOperator(C, EmptyShell(), HasFPFeatures);
+}
+
+CompoundAssignOperator *
+CompoundAssignOperator::Create(const ASTContext &C, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures,
+ QualType CompLHSType, QualType CompResultType) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem = C.Allocate(sizeof(CompoundAssignOperator) + Extra,
+ alignof(CompoundAssignOperator));
+ return new (Mem)
+ CompoundAssignOperator(C, lhs, rhs, opc, ResTy, VK, OK, opLoc, FPFeatures,
+ CompLHSType, CompResultType);
+}
+
+UnaryOperator *UnaryOperator::CreateEmpty(const ASTContext &C,
+ bool hasFPFeatures) {
+ void *Mem = C.Allocate(totalSizeToAlloc<FPOptionsOverride>(hasFPFeatures),
+ alignof(UnaryOperator));
+ return new (Mem) UnaryOperator(hasFPFeatures, EmptyShell());
+}
+
+UnaryOperator::UnaryOperator(const ASTContext &Ctx, Expr *input, Opcode opc,
+ QualType type, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, bool CanOverflow,
+ FPOptionsOverride FPFeatures)
+ : Expr(UnaryOperatorClass, type, VK, OK), Val(input) {
+ UnaryOperatorBits.Opc = opc;
+ UnaryOperatorBits.CanOverflow = CanOverflow;
+ UnaryOperatorBits.Loc = l;
+ UnaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ setDependence(computeDependence(this));
+}
+
+UnaryOperator *UnaryOperator::Create(const ASTContext &C, Expr *input,
+ Opcode opc, QualType type,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, bool CanOverflow,
+ FPOptionsOverride FPFeatures) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Size = totalSizeToAlloc<FPOptionsOverride>(HasFPFeatures);
+ void *Mem = C.Allocate(Size, alignof(UnaryOperator));
+ return new (Mem)
+ UnaryOperator(C, input, opc, type, VK, OK, l, CanOverflow, FPFeatures);
+}
+
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
e = ewc->getSubExpr();
@@ -4565,10 +4624,9 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
}
PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
- Expr *syntax, ArrayRef<Expr*> semantics,
+ Expr *syntax, ArrayRef<Expr *> semantics,
unsigned resultIndex)
- : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
- /*filled in at end of ctor*/ false, false, false, false) {
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary) {
PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
PseudoObjectExprBits.ResultIndex = resultIndex + 1;
@@ -4576,20 +4634,13 @@ PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
Expr *E = (i == 0 ? syntax : semantics[i-1]);
getSubExprsBuffer()[i] = E;
- if (E->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (E->isValueDependent())
- ExprBits.ValueDependent = true;
- if (E->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (E->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
if (isa<OpaqueValueExpr>(E))
assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != nullptr &&
"opaque-value semantic expressions for pseudo-object "
"operations must have sources");
}
+
+ setDependence(computeDependence(this));
}
//===----------------------------------------------------------------------===//
@@ -4616,25 +4667,14 @@ Stmt::const_child_range UnaryExprOrTypeTraitExpr::children() const {
return const_child_range(&Argument.Ex, &Argument.Ex + 1);
}
-AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
- QualType t, AtomicOp op, SourceLocation RP)
- : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
- false, false, false, false),
- NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
-{
+AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr *> args, QualType t,
+ AtomicOp op, SourceLocation RP)
+ : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary),
+ NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
- for (unsigned i = 0; i != args.size(); i++) {
- if (args[i]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (args[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (args[i]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (args[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != args.size(); i++)
SubExprs[i] = args[i];
- }
+ setDependence(computeDependence(this));
}
unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
@@ -4736,3 +4776,211 @@ QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) {
}
return OriginalTy;
}
+
+RecoveryExpr::RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> SubExprs)
+ : Expr(RecoveryExprClass, T.getNonReferenceType(),
+ T->isDependentType() ? VK_LValue : getValueKindForType(T),
+ OK_Ordinary),
+ BeginLoc(BeginLoc), EndLoc(EndLoc), NumExprs(SubExprs.size()) {
+ assert(!T.isNull());
+ assert(llvm::all_of(SubExprs, [](Expr* E) { return E != nullptr; }));
+
+ llvm::copy(SubExprs, getTrailingObjects<Expr *>());
+ setDependence(computeDependence(this));
+}
+
+RecoveryExpr *RecoveryExpr::Create(ASTContext &Ctx, QualType T,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> SubExprs) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(SubExprs.size()),
+ alignof(RecoveryExpr));
+ return new (Mem) RecoveryExpr(Ctx, T, BeginLoc, EndLoc, SubExprs);
+}
+
+RecoveryExpr *RecoveryExpr::CreateEmpty(ASTContext &Ctx, unsigned NumSubExprs) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(NumSubExprs),
+ alignof(RecoveryExpr));
+ return new (Mem) RecoveryExpr(EmptyShell(), NumSubExprs);
+}
+
+void OMPArrayShapingExpr::setDimensions(ArrayRef<Expr *> Dims) {
+ assert(
+ NumDims == Dims.size() &&
+ "Preallocated number of dimensions is different from the provided one.");
+ llvm::copy(Dims, getTrailingObjects<Expr *>());
+}
+
+void OMPArrayShapingExpr::setBracketsRanges(ArrayRef<SourceRange> BR) {
+ assert(
+ NumDims == BR.size() &&
+ "Preallocated number of dimensions is different from the provided one.");
+ llvm::copy(BR, getTrailingObjects<SourceRange>());
+}
+
+OMPArrayShapingExpr::OMPArrayShapingExpr(QualType ExprTy, Expr *Op,
+ SourceLocation L, SourceLocation R,
+ ArrayRef<Expr *> Dims)
+ : Expr(OMPArrayShapingExprClass, ExprTy, VK_LValue, OK_Ordinary), LPLoc(L),
+ RPLoc(R), NumDims(Dims.size()) {
+ setBase(Op);
+ setDimensions(Dims);
+ setDependence(computeDependence(this));
+}
+
+OMPArrayShapingExpr *
+OMPArrayShapingExpr::Create(const ASTContext &Context, QualType T, Expr *Op,
+ SourceLocation L, SourceLocation R,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> BracketRanges) {
+ assert(Dims.size() == BracketRanges.size() &&
+ "Different number of dimensions and brackets ranges.");
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Expr *, SourceRange>(Dims.size() + 1, Dims.size()),
+ alignof(OMPArrayShapingExpr));
+ auto *E = new (Mem) OMPArrayShapingExpr(T, Op, L, R, Dims);
+ E->setBracketsRanges(BracketRanges);
+ return E;
+}
+
+OMPArrayShapingExpr *OMPArrayShapingExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumDims) {
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Expr *, SourceRange>(NumDims + 1, NumDims),
+ alignof(OMPArrayShapingExpr));
+ return new (Mem) OMPArrayShapingExpr(EmptyShell(), NumDims);
+}
+
+void OMPIteratorExpr::setIteratorDeclaration(unsigned I, Decl *D) {
+ assert(I < NumIterators &&
+ "Idx is greater than or equal to the number of iterator definitions.");
+ getTrailingObjects<Decl *>()[I] = D;
+}
+
+void OMPIteratorExpr::setAssignmentLoc(unsigned I, SourceLocation Loc) {
+ assert(I < NumIterators &&
+ "Idx is greater than or equal to the number of iterator definitions.");
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::AssignLoc)] = Loc;
+}
+
+void OMPIteratorExpr::setIteratorRange(unsigned I, Expr *Begin,
+ SourceLocation ColonLoc, Expr *End,
+ SourceLocation SecondColonLoc,
+ Expr *Step) {
+ assert(I < NumIterators &&
+ "Idx is greater than or equal to the number of iterator definitions.");
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Begin)] =
+ Begin;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::End)] = End;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Step)] = Step;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::FirstColonLoc)] =
+ ColonLoc;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::SecondColonLoc)] =
+ SecondColonLoc;
+}
+
+Decl *OMPIteratorExpr::getIteratorDecl(unsigned I) {
+ return getTrailingObjects<Decl *>()[I];
+}
+
+OMPIteratorExpr::IteratorRange OMPIteratorExpr::getIteratorRange(unsigned I) {
+ IteratorRange Res;
+ Res.Begin =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Begin)];
+ Res.End =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::End)];
+ Res.Step =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Step)];
+ return Res;
+}
+
+SourceLocation OMPIteratorExpr::getAssignLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::AssignLoc)];
+}
+
+SourceLocation OMPIteratorExpr::getColonLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::FirstColonLoc)];
+}
+
+SourceLocation OMPIteratorExpr::getSecondColonLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::SecondColonLoc)];
+}
+
+void OMPIteratorExpr::setHelper(unsigned I, const OMPIteratorHelperData &D) {
+ getTrailingObjects<OMPIteratorHelperData>()[I] = D;
+}
+
+OMPIteratorHelperData &OMPIteratorExpr::getHelper(unsigned I) {
+ return getTrailingObjects<OMPIteratorHelperData>()[I];
+}
+
+const OMPIteratorHelperData &OMPIteratorExpr::getHelper(unsigned I) const {
+ return getTrailingObjects<OMPIteratorHelperData>()[I];
+}
+
+OMPIteratorExpr::OMPIteratorExpr(
+ QualType ExprTy, SourceLocation IteratorKwLoc, SourceLocation L,
+ SourceLocation R, ArrayRef<OMPIteratorExpr::IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers)
+ : Expr(OMPIteratorExprClass, ExprTy, VK_LValue, OK_Ordinary),
+ IteratorKwLoc(IteratorKwLoc), LPLoc(L), RPLoc(R),
+ NumIterators(Data.size()) {
+ for (unsigned I = 0, E = Data.size(); I < E; ++I) {
+ const IteratorDefinition &D = Data[I];
+ setIteratorDeclaration(I, D.IteratorDecl);
+ setAssignmentLoc(I, D.AssignmentLoc);
+ setIteratorRange(I, D.Range.Begin, D.ColonLoc, D.Range.End,
+ D.SecondColonLoc, D.Range.Step);
+ setHelper(I, Helpers[I]);
+ }
+ setDependence(computeDependence(this));
+}
+
+OMPIteratorExpr *
+OMPIteratorExpr::Create(const ASTContext &Context, QualType T,
+ SourceLocation IteratorKwLoc, SourceLocation L,
+ SourceLocation R,
+ ArrayRef<OMPIteratorExpr::IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers) {
+ assert(Data.size() == Helpers.size() &&
+ "Data and helpers must have the same size.");
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Decl *, Expr *, SourceLocation, OMPIteratorHelperData>(
+ Data.size(), Data.size() * static_cast<int>(RangeExprOffset::Total),
+ Data.size() * static_cast<int>(RangeLocOffset::Total),
+ Helpers.size()),
+ alignof(OMPIteratorExpr));
+ return new (Mem) OMPIteratorExpr(T, IteratorKwLoc, L, R, Data, Helpers);
+}
+
+OMPIteratorExpr *OMPIteratorExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumIterators) {
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Decl *, Expr *, SourceLocation, OMPIteratorHelperData>(
+ NumIterators, NumIterators * static_cast<int>(RangeExprOffset::Total),
+ NumIterators * static_cast<int>(RangeLocOffset::Total), NumIterators),
+ alignof(OMPIteratorExpr));
+ return new (Mem) OMPIteratorExpr(EmptyShell(), NumIterators);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
index e4bd218ae2d3..5d99f61c579f 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
@@ -13,12 +13,14 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclAccessPair.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
@@ -173,9 +175,7 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
Expr *Initializer, QualType Ty,
TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange)
- : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
- Ty->isDependentType(), Ty->isInstantiationDependentType(),
- Ty->containsUnexpandedParameterPack()),
+ : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary),
OperatorNew(OperatorNew), OperatorDelete(OperatorDelete),
AllocatedTypeInfo(AllocatedTypeInfo), Range(Range),
DirectInitRange(DirectInitRange) {
@@ -193,42 +193,13 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
CXXNewExprBits.IsParenTypeId = IsParenTypeId;
CXXNewExprBits.NumPlacementArgs = PlacementArgs.size();
- if (ArraySize) {
- if (Expr *SizeExpr = *ArraySize) {
- if (SizeExpr->isValueDependent())
- ExprBits.ValueDependent = true;
- if (SizeExpr->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (SizeExpr->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
-
+ if (ArraySize)
getTrailingObjects<Stmt *>()[arraySizeOffset()] = *ArraySize;
- }
-
- if (Initializer) {
- if (Initializer->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Initializer->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Initializer->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ if (Initializer)
getTrailingObjects<Stmt *>()[initExprOffset()] = Initializer;
- }
-
- for (unsigned I = 0; I != PlacementArgs.size(); ++I) {
- if (PlacementArgs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (PlacementArgs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (PlacementArgs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != PlacementArgs.size(); ++I)
getTrailingObjects<Stmt *>()[placementNewArgsOffset() + I] =
PlacementArgs[I];
- }
-
if (IsParenTypeId)
getTrailingObjects<SourceRange>()[0] = TypeIdParens;
@@ -244,6 +215,8 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
this->Range.setEnd(TypeIdParens.getEnd());
break;
}
+
+ setDependence(computeDependence(this));
}
CXXNewExpr::CXXNewExpr(EmptyShell Empty, bool IsArray,
@@ -331,40 +304,19 @@ PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
}
-CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context,
- Expr *Base, bool isArrow, SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
- SourceLocation ColonColonLoc, SourceLocation TildeLoc,
- PseudoDestructorTypeStorage DestroyedType)
- : Expr(CXXPseudoDestructorExprClass,
- Context.BoundMemberTy,
- VK_RValue, OK_Ordinary,
- /*isTypeDependent=*/(Base->isTypeDependent() ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
- /*isValueDependent=*/Base->isValueDependent(),
- (Base->isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
- (ScopeType &&
- ScopeType->getType()->isInstantiationDependentType()) ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()
- ->isInstantiationDependentType())),
- // ContainsUnexpandedParameterPack
- (Base->containsUnexpandedParameterPack() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
- (ScopeType &&
- ScopeType->getType()->containsUnexpandedParameterPack()) ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()
- ->containsUnexpandedParameterPack()))),
- Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
- OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
- ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
- DestroyedType(DestroyedType) {}
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(
+ const ASTContext &Context, Expr *Base, bool isArrow,
+ SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
+ TypeSourceInfo *ScopeType, SourceLocation ColonColonLoc,
+ SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass, Context.BoundMemberTy, VK_RValue,
+ OK_Ordinary),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) {
+ setDependence(computeDependence(this));
+}
QualType CXXPseudoDestructorExpr::getDestroyedType() const {
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
@@ -454,62 +406,31 @@ OverloadExpr::OverloadExpr(StmtClass SC, const ASTContext &Context,
UnresolvedSetIterator End, bool KnownDependent,
bool KnownInstantiationDependent,
bool KnownContainsUnexpandedParameterPack)
- : Expr(
- SC, Context.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
- KnownDependent,
- (KnownInstantiationDependent || NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (KnownContainsUnexpandedParameterPack ||
- NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
- NameInfo(NameInfo), QualifierLoc(QualifierLoc) {
+ : Expr(SC, Context.OverloadTy, VK_LValue, OK_Ordinary), NameInfo(NameInfo),
+ QualifierLoc(QualifierLoc) {
unsigned NumResults = End - Begin;
OverloadExprBits.NumResults = NumResults;
OverloadExprBits.HasTemplateKWAndArgsInfo =
(TemplateArgs != nullptr ) || TemplateKWLoc.isValid();
if (NumResults) {
- // Determine whether this expression is type-dependent.
- for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
- if ((*I)->getDeclContext()->isDependentContext() ||
- isa<UnresolvedUsingValueDecl>(*I)) {
- ExprBits.TypeDependent = true;
- ExprBits.ValueDependent = true;
- ExprBits.InstantiationDependent = true;
- }
- }
-
// Copy the results to the trailing array past UnresolvedLookupExpr
// or UnresolvedMemberExpr.
DeclAccessPair *Results = getTrailingResults();
memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
}
- // If we have explicit template arguments, check for dependent
- // template arguments and whether they contain any unexpanded pack
- // expansions.
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(
- TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
-
- if (Dependent) {
- ExprBits.TypeDependent = true;
- ExprBits.ValueDependent = true;
- }
- if (InstantiationDependent)
- ExprBits.InstantiationDependent = true;
- if (ContainsUnexpandedParameterPack)
- ExprBits.ContainsUnexpandedParameterPack = true;
+ TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(), Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
+ setDependence(computeDependence(this, KnownDependent,
+ KnownInstantiationDependent,
+ KnownContainsUnexpandedParameterPack));
if (isTypeDependent())
setType(Context.DependentTy);
}
@@ -526,31 +447,19 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(
QualType Ty, NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args)
- : Expr(
- DependentScopeDeclRefExprClass, Ty, VK_LValue, OK_Ordinary, true,
- true,
- (NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
+ : Expr(DependentScopeDeclRefExprClass, Ty, VK_LValue, OK_Ordinary),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {
DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo =
(Args != nullptr) || TemplateKWLoc.isValid();
if (Args) {
- bool Dependent = true;
- bool InstantiationDependent = true;
- bool ContainsUnexpandedParameterPack
- = ExprBits.ContainsUnexpandedParameterPack;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
- TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(), Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ setDependence(computeDependence(this));
}
DependentScopeDeclRefExpr *DependentScopeDeclRefExpr::Create(
@@ -616,27 +525,27 @@ CXXOperatorCallExpr::CXXOperatorCallExpr(OverloadedOperatorKind OpKind,
Expr *Fn, ArrayRef<Expr *> Args,
QualType Ty, ExprValueKind VK,
SourceLocation OperatorLoc,
- FPOptions FPFeatures,
+ FPOptionsOverride FPFeatures,
ADLCallKind UsesADL)
: CallExpr(CXXOperatorCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
OperatorLoc, /*MinNumArgs=*/0, UsesADL) {
CXXOperatorCallExprBits.OperatorKind = OpKind;
- CXXOperatorCallExprBits.FPFeatures = FPFeatures.getInt();
assert(
(CXXOperatorCallExprBits.OperatorKind == static_cast<unsigned>(OpKind)) &&
"OperatorKind overflow!");
- assert((CXXOperatorCallExprBits.FPFeatures == FPFeatures.getInt()) &&
- "FPFeatures overflow!");
Range = getSourceRangeImpl();
+ Overrides = FPFeatures;
}
CXXOperatorCallExpr::CXXOperatorCallExpr(unsigned NumArgs, EmptyShell Empty)
: CallExpr(CXXOperatorCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
-CXXOperatorCallExpr *CXXOperatorCallExpr::Create(
- const ASTContext &Ctx, OverloadedOperatorKind OpKind, Expr *Fn,
- ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation OperatorLoc, FPOptions FPFeatures, ADLCallKind UsesADL) {
+CXXOperatorCallExpr *
+CXXOperatorCallExpr::Create(const ASTContext &Ctx,
+ OverloadedOperatorKind OpKind, Expr *Fn,
+ ArrayRef<Expr *> Args, QualType Ty,
+ ExprValueKind VK, SourceLocation OperatorLoc,
+ FPOptionsOverride FPFeatures, ADLCallKind UsesADL) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = Args.size();
unsigned SizeOfTrailingObjects =
@@ -668,7 +577,7 @@ SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
// Postfix operator
return SourceRange(getArg(0)->getBeginLoc(), getOperatorLoc());
} else if (Kind == OO_Arrow) {
- return getArg(0)->getSourceRange();
+ return SourceRange(getArg(0)->getBeginLoc(), getOperatorLoc());
} else if (Kind == OO_Call) {
return SourceRange(getArg(0)->getBeginLoc(), getRParenLoc());
} else if (Kind == OO_Subscript) {
@@ -767,6 +676,7 @@ const char *CXXNamedCastExpr::getCastName() const {
case CXXDynamicCastExprClass: return "dynamic_cast";
case CXXReinterpretCastExprClass: return "reinterpret_cast";
case CXXConstCastExprClass: return "const_cast";
+ case CXXAddrspaceCastExprClass: return "addrspace_cast";
default: return "<invalid cast>";
}
}
@@ -891,6 +801,19 @@ CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(const ASTContext &C) {
return new (C) CXXConstCastExpr(EmptyShell());
}
+CXXAddrspaceCastExpr *
+CXXAddrspaceCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op, TypeSourceInfo *WrittenTy,
+ SourceLocation L, SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ return new (C) CXXAddrspaceCastExpr(T, VK, K, Op, WrittenTy, L, RParenLoc,
+ AngleBrackets);
+}
+
+CXXAddrspaceCastExpr *CXXAddrspaceCastExpr::CreateEmpty(const ASTContext &C) {
+ return new (C) CXXAddrspaceCastExpr(EmptyShell());
+}
+
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
TypeSourceInfo *Written, CastKind K, Expr *Op,
@@ -990,17 +913,19 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
-CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
- FieldDecl *Field, QualType Ty,
- DeclContext *UsedContext)
+CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
+ SourceLocation Loc, FieldDecl *Field,
+ QualType Ty, DeclContext *UsedContext)
: Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
- Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType()
- ? VK_XValue
- : VK_RValue,
- /*FIXME*/ OK_Ordinary, false, false, false, false),
+ Ty->isLValueReferenceType()
+ ? VK_LValue
+ : Ty->isRValueReferenceType() ? VK_XValue : VK_RValue,
+ /*FIXME*/ OK_Ordinary),
Field(Field), UsedContext(UsedContext) {
CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
+
+ setDependence(ExprDependence::None);
}
CXXTemporary *CXXTemporary::Create(const ASTContext &C,
@@ -1098,11 +1023,8 @@ CXXConstructExpr::CXXConstructExpr(
bool ListInitialization, bool StdInitListInitialization,
bool ZeroInitialization, ConstructionKind ConstructKind,
SourceRange ParenOrBraceRange)
- : Expr(SC, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
- Ty->isDependentType(), Ty->isInstantiationDependentType(),
- Ty->containsUnexpandedParameterPack()),
- Constructor(Ctor), ParenOrBraceRange(ParenOrBraceRange),
- NumArgs(Args.size()) {
+ : Expr(SC, Ty, VK_RValue, OK_Ordinary), Constructor(Ctor),
+ ParenOrBraceRange(ParenOrBraceRange), NumArgs(Args.size()) {
CXXConstructExprBits.Elidable = Elidable;
CXXConstructExprBits.HadMultipleCandidates = HadMultipleCandidates;
CXXConstructExprBits.ListInitialization = ListInitialization;
@@ -1114,16 +1036,10 @@ CXXConstructExpr::CXXConstructExpr(
Stmt **TrailingArgs = getTrailingArgs();
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
assert(Args[I] && "NULL argument in CXXConstructExpr!");
-
- if (Args[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
TrailingArgs[I] = Args[I];
}
+
+ setDependence(computeDependence(this));
}
CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty,
@@ -1171,37 +1087,22 @@ LambdaCaptureKind LambdaCapture::getCaptureKind() const {
LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
+ SourceLocation CaptureDefaultLoc, bool ExplicitParams,
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack)
- : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->isDependentType(),
- ContainsUnexpandedParameterPack),
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary),
IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
- NumCaptures(Captures.size()), CaptureDefault(CaptureDefault),
- ExplicitParams(ExplicitParams), ExplicitResultType(ExplicitResultType),
ClosingBrace(ClosingBrace) {
- assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ LambdaExprBits.NumCaptures = CaptureInits.size();
+ LambdaExprBits.CaptureDefault = CaptureDefault;
+ LambdaExprBits.ExplicitParams = ExplicitParams;
+ LambdaExprBits.ExplicitResultType = ExplicitResultType;
+
CXXRecordDecl *Class = getLambdaClass();
- CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
-
- // FIXME: Propagate "has unexpanded parameter pack" bit.
-
- // Copy captures.
- const ASTContext &Context = Class->getASTContext();
- Data.NumCaptures = NumCaptures;
- Data.NumExplicitCaptures = 0;
- Data.Captures =
- (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * NumCaptures);
- LambdaCapture *ToCapture = Data.Captures;
- for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
- if (Captures[I].isExplicit())
- ++Data.NumExplicitCaptures;
-
- *ToCapture++ = Captures[I];
- }
+ (void)Class;
+ assert(capture_size() == Class->capture_size() && "Wrong number of captures");
+ assert(getCaptureDefault() == Class->getLambdaCaptureDefault());
// Copy initialization expressions for the non-static data members.
Stmt **Stored = getStoredStmts();
@@ -1210,24 +1111,37 @@ LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
// Copy the body of the lambda.
*Stored++ = getCallOperator()->getBody();
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
+}
+
+LambdaExpr::LambdaExpr(EmptyShell Empty, unsigned NumCaptures)
+ : Expr(LambdaExprClass, Empty) {
+ LambdaExprBits.NumCaptures = NumCaptures;
+
+ // Initially don't initialize the body of the LambdaExpr. The body will
+ // be lazily deserialized when needed.
+ getStoredStmts()[NumCaptures] = nullptr; // Not one past the end.
}
-LambdaExpr *LambdaExpr::Create(
- const ASTContext &Context, CXXRecordDecl *Class,
- SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
- bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
- SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
+LambdaExpr *LambdaExpr::Create(const ASTContext &Context, CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ bool ExplicitParams, bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
QualType T = Context.getTypeDeclType(Class);
- unsigned Size = totalSizeToAlloc<Stmt *>(Captures.size() + 1);
+ unsigned Size = totalSizeToAlloc<Stmt *>(CaptureInits.size() + 1);
void *Mem = Context.Allocate(Size);
return new (Mem)
LambdaExpr(T, IntroducerRange, CaptureDefault, CaptureDefaultLoc,
- Captures, ExplicitParams, ExplicitResultType, CaptureInits,
- ClosingBrace, ContainsUnexpandedParameterPack);
+ ExplicitParams, ExplicitResultType, CaptureInits, ClosingBrace,
+ ContainsUnexpandedParameterPack);
}
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
@@ -1237,6 +1151,25 @@ LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
return new (Mem) LambdaExpr(EmptyShell(), NumCaptures);
}
+void LambdaExpr::initBodyIfNeeded() const {
+ if (!getStoredStmts()[capture_size()]) {
+ auto *This = const_cast<LambdaExpr *>(this);
+ This->getStoredStmts()[capture_size()] = getCallOperator()->getBody();
+ }
+}
+
+Stmt *LambdaExpr::getBody() const {
+ initBodyIfNeeded();
+ return getStoredStmts()[capture_size()];
+}
+
+const CompoundStmt *LambdaExpr::getCompoundStmtBody() const {
+ Stmt *Body = getBody();
+ if (const auto *CoroBody = dyn_cast<CoroutineBodyStmt>(Body))
+ return cast<CompoundStmt>(CoroBody->getBody());
+ return cast<CompoundStmt>(Body);
+}
+
bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
(getCallOperator() == C->getCapturedVar()->getDeclContext()));
@@ -1247,7 +1180,7 @@ LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
}
LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
- return capture_begin() + NumCaptures;
+ return capture_begin() + capture_size();
}
LambdaExpr::capture_range LambdaExpr::captures() const {
@@ -1304,19 +1237,17 @@ ArrayRef<NamedDecl *> LambdaExpr::getExplicitTemplateParameters() const {
return Record->getLambdaExplicitTemplateParameters();
}
-CompoundStmt *LambdaExpr::getBody() const {
- // FIXME: this mutation in getBody is bogus. It should be
- // initialized in ASTStmtReader::VisitLambdaExpr, but for reasons I
- // don't understand, that doesn't work.
- if (!getStoredStmts()[NumCaptures])
- *const_cast<Stmt **>(&getStoredStmts()[NumCaptures]) =
- getCallOperator()->getBody();
+bool LambdaExpr::isMutable() const { return !getCallOperator()->isConst(); }
- return static_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+LambdaExpr::child_range LambdaExpr::children() {
+ initBodyIfNeeded();
+ return child_range(getStoredStmts(), getStoredStmts() + capture_size() + 1);
}
-bool LambdaExpr::isMutable() const {
- return !getCallOperator()->isConst();
+LambdaExpr::const_child_range LambdaExpr::children() const {
+ initBodyIfNeeded();
+ return const_child_range(getStoredStmts(),
+ getStoredStmts() + capture_size() + 1);
}
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
@@ -1361,19 +1292,13 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *TSI,
? VK_LValue
: TSI->getType()->isRValueReferenceType() ? VK_XValue
: VK_RValue),
- OK_Ordinary,
- TSI->getType()->isDependentType() ||
- TSI->getType()->getContainedDeducedType(),
- true, true, TSI->getType()->containsUnexpandedParameterPack()),
+ OK_Ordinary),
TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
CXXUnresolvedConstructExprBits.NumArgs = Args.size();
auto **StoredArgs = getTrailingObjects<Expr *>();
- for (unsigned I = 0; I != Args.size(); ++I) {
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != Args.size(); ++I)
StoredArgs[I] = Args[I];
- }
+ setDependence(computeDependence(this));
}
CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
@@ -1401,11 +1326,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
: Expr(CXXDependentScopeMemberExprClass, Ctx.DependentTy, VK_LValue,
- OK_Ordinary, true, true, true,
- ((Base && Base->containsUnexpandedParameterPack()) ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
- MemberNameInfo.containsUnexpandedParameterPack())),
+ OK_Ordinary),
Base(Base), BaseType(BaseType), QualifierLoc(QualifierLoc),
MemberNameInfo(MemberNameInfo) {
CXXDependentScopeMemberExprBits.IsArrow = IsArrow;
@@ -1416,14 +1337,10 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
CXXDependentScopeMemberExprBits.OperatorLoc = OperatorLoc;
if (TemplateArgs) {
- bool Dependent = true;
- bool InstantiationDependent = true;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- if (ContainsUnexpandedParameterPack)
- ExprBits.ContainsUnexpandedParameterPack = true;
+ Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
@@ -1431,6 +1348,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
if (hasFirstQualifierFoundInScope())
*getTrailingObjects<NamedDecl *>() = FirstQualifierFoundInScope;
+ setDependence(computeDependence(this));
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
@@ -1612,16 +1530,15 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
}
-SubstNonTypeTemplateParmPackExpr::
-SubstNonTypeTemplateParmPackExpr(QualType T,
- ExprValueKind ValueKind,
- NonTypeTemplateParmDecl *Param,
- SourceLocation NameLoc,
- const TemplateArgument &ArgPack)
- : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary,
- true, true, true, true),
+SubstNonTypeTemplateParmPackExpr::SubstNonTypeTemplateParmPackExpr(
+ QualType T, ExprValueKind ValueKind, NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc, const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary),
Param(Param), Arguments(ArgPack.pack_begin()),
- NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {}
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {
+ setDependence(ExprDependence::TypeValueInstantiation |
+ ExprDependence::UnexpandedPack);
+}
TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
@@ -1631,12 +1548,13 @@ FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack,
SourceLocation NameLoc,
unsigned NumParams,
VarDecl *const *Params)
- : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, true, true,
- true, true),
+ : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary),
ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
if (Params)
std::uninitialized_copy(Params, Params + NumParams,
getTrailingObjects<VarDecl *>());
+ setDependence(ExprDependence::TypeValueInstantiation |
+ ExprDependence::UnexpandedPack);
}
FunctionParmPackExpr *
@@ -1658,16 +1576,14 @@ MaterializeTemporaryExpr::MaterializeTemporaryExpr(
QualType T, Expr *Temporary, bool BoundToLvalueReference,
LifetimeExtendedTemporaryDecl *MTD)
: Expr(MaterializeTemporaryExprClass, T,
- BoundToLvalueReference ? VK_LValue : VK_XValue, OK_Ordinary,
- Temporary->isTypeDependent(), Temporary->isValueDependent(),
- Temporary->isInstantiationDependent(),
- Temporary->containsUnexpandedParameterPack()) {
+ BoundToLvalueReference ? VK_LValue : VK_XValue, OK_Ordinary) {
if (MTD) {
State = MTD;
MTD->ExprWithTemporary = Temporary;
return;
}
State = Temporary;
+ setDependence(computeDependence(this));
}
void MaterializeTemporaryExpr::setExtendingDecl(ValueDecl *ExtendedBy,
@@ -1689,30 +1605,23 @@ void MaterializeTemporaryExpr::setExtendingDecl(ValueDecl *ExtendedBy,
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
ArrayRef<TypeSourceInfo *> Args,
- SourceLocation RParenLoc,
- bool Value)
- : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- /*ValueDependent=*/false,
- /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- Loc(Loc), RParenLoc(RParenLoc) {
+ SourceLocation RParenLoc, bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary), Loc(Loc),
+ RParenLoc(RParenLoc) {
+ assert(Kind <= TT_Last && "invalid enum value!");
TypeTraitExprBits.Kind = Kind;
+ assert(static_cast<unsigned>(Kind) == TypeTraitExprBits.Kind &&
+ "TypeTraitExprBits.Kind overflow!");
TypeTraitExprBits.Value = Value;
TypeTraitExprBits.NumArgs = Args.size();
+ assert(Args.size() == TypeTraitExprBits.NumArgs &&
+ "TypeTraitExprBits.NumArgs overflow!");
auto **ToArgs = getTrailingObjects<TypeSourceInfo *>();
-
- for (unsigned I = 0, N = Args.size(); I != N; ++I) {
- if (Args[I]->getType()->isDependentType())
- setValueDependent(true);
- if (Args[I]->getType()->isInstantiationDependentType())
- setInstantiationDependent(true);
- if (Args[I]->getType()->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack(true);
-
+ for (unsigned I = 0, N = Args.size(); I != N; ++I)
ToArgs[I] = Args[I];
- }
+
+ setDependence(computeDependence(this));
}
TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
diff --git a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
index d201af31f521..31aa734ffedb 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
@@ -124,6 +124,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCPropertyRefExprClass:
// C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
// Unresolved lookups and uncorrected typos get classified as lvalues.
// FIXME: Is this wise? Should they get their own kind?
case Expr::UnresolvedLookupExprClass:
@@ -139,6 +140,8 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
@@ -220,6 +223,10 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
}
return Cl::CL_LValue;
+ // Subscripting matrix types behaves like member accesses.
+ case Expr::MatrixSubscriptExprClass:
+ return ClassifyInternal(Ctx, cast<MatrixSubscriptExpr>(E)->getBase());
+
// C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
// function or variable and a prvalue otherwise.
case Expr::DeclRefExprClass:
@@ -268,6 +275,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_PRValue;
}
+ case Expr::RecoveryExprClass:
case Expr::OpaqueValueExprClass:
return ClassifyExprValueKind(Lang, E, E->getValueKind());
@@ -348,6 +356,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::ObjCBridgedCastExprClass:
case Expr::BuiltinBitCastExprClass:
// Only in C++ can casts be interesting at all.
@@ -402,9 +411,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_PRValue;
}
- case Expr::CXXUuidofExprClass:
- return Cl::CL_LValue;
-
case Expr::PackExpansionExprClass:
return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
@@ -452,6 +458,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
isa<BindingDecl>(D) ||
+ isa<MSGuidDecl>(D) ||
(Ctx.getLangOpts().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
isa<FunctionTemplateDecl>(D)));
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
index b5a3686dc99a..d00d8329095c 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
@@ -11,11 +11,13 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ExprConcepts.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
@@ -23,47 +25,33 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
-#include <utility>
#include <string>
+#include <utility>
using namespace clang;
-ConceptSpecializationExpr::ConceptSpecializationExpr(const ASTContext &C,
- NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
- DeclarationNameInfo ConceptNameInfo, NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept, const ASTTemplateArgumentListInfo *ArgsAsWritten,
+ConceptSpecializationExpr::ConceptSpecializationExpr(
+ const ASTContext &C, NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
+ NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten,
ArrayRef<TemplateArgument> ConvertedArgs,
const ConstraintSatisfaction *Satisfaction)
- : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- // All the flags below are set in setTemplateArguments.
- /*ValueDependent=*/!Satisfaction, /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPacks=*/false),
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl,
NamedConcept, ArgsAsWritten),
NumTemplateArgs(ConvertedArgs.size()),
- Satisfaction(Satisfaction ?
- ASTConstraintSatisfaction::Create(C, *Satisfaction) :
- nullptr) {
+ Satisfaction(Satisfaction
+ ? ASTConstraintSatisfaction::Create(C, *Satisfaction)
+ : nullptr) {
setTemplateArguments(ConvertedArgs);
- bool IsInstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
- for (const TemplateArgumentLoc& ArgLoc : ArgsAsWritten->arguments()) {
- if (ArgLoc.getArgument().isInstantiationDependent())
- IsInstantiationDependent = true;
- if (ArgLoc.getArgument().containsUnexpandedParameterPack())
- ContainsUnexpandedParameterPack = true;
- if (ContainsUnexpandedParameterPack && IsInstantiationDependent)
- break;
- }
+ setDependence(computeDependence(this, /*ValueDependent=*/!Satisfaction));
// Currently guaranteed by the fact concepts can only be at namespace-scope.
assert(!NestedNameSpec ||
(!NestedNameSpec.getNestedNameSpecifier()->isInstantiationDependent() &&
!NestedNameSpec.getNestedNameSpecifier()
->containsUnexpandedParameterPack()));
- setInstantiationDependent(IsInstantiationDependent);
- setContainsUnexpandedParameterPack(ContainsUnexpandedParameterPack);
assert((!isValueDependent() || isInstantiationDependent()) &&
"should not be value-dependent");
}
@@ -103,18 +91,23 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
ArrayRef<TemplateArgument> ConvertedArgs,
const ConstraintSatisfaction *Satisfaction, bool Dependent,
bool ContainsUnexpandedParameterPack)
- : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- /*ValueDependent=*/!Satisfaction, Dependent,
- ContainsUnexpandedParameterPack),
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
ConceptReference(NestedNameSpecifierLoc(), SourceLocation(),
- DeclarationNameInfo(), NamedConcept,
- NamedConcept, nullptr),
+ DeclarationNameInfo(), NamedConcept, NamedConcept,
+ nullptr),
NumTemplateArgs(ConvertedArgs.size()),
- Satisfaction(Satisfaction ?
- ASTConstraintSatisfaction::Create(C, *Satisfaction) :
- nullptr) {
+ Satisfaction(Satisfaction
+ ? ASTConstraintSatisfaction::Create(C, *Satisfaction)
+ : nullptr) {
setTemplateArguments(ConvertedArgs);
+ ExprDependence D = ExprDependence::None;
+ if (!Satisfaction)
+ D |= ExprDependence::Value;
+ if (Dependent)
+ D |= ExprDependence::Instantiation;
+ if (ContainsUnexpandedParameterPack)
+ D |= ExprDependence::UnexpandedPack;
+ setDependence(D);
}
ConceptSpecializationExpr *
@@ -153,11 +146,9 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation RBraceLoc)
- : Expr(RequiresExprClass, C.BoolTy, VK_RValue, OK_Ordinary,
- /*TD=*/false, /*VD=*/false, /*ID=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- NumLocalParameters(LocalParameters.size()),
- NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) {
+ : Expr(RequiresExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ NumLocalParameters(LocalParameters.size()),
+ NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) {
RequiresExprBits.IsSatisfied = false;
RequiresExprBits.RequiresKWLoc = RequiresKWLoc;
bool Dependent = false;
@@ -182,9 +173,15 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
std::copy(Requirements.begin(), Requirements.end(),
getTrailingObjects<concepts::Requirement *>());
RequiresExprBits.IsSatisfied |= Dependent;
- setValueDependent(Dependent);
- setInstantiationDependent(Dependent);
- setContainsUnexpandedParameterPack(ContainsUnexpandedParameterPack);
+ // FIXME: move the dependence-computing logic to ComputeDependence.h
+ if (ContainsUnexpandedParameterPack)
+ setDependence(getDependence() | ExprDependence::UnexpandedPack);
+ // FIXME: this is incorrect for cases where we have a non-dependent
+ // requirement, but its parameters are instantiation-dependent. RequiresExpr
+ // should be instantiation-dependent if it has instantiation-dependent
+ // parameters.
+ if (Dependent)
+ setDependence(getDependence() | ExprDependence::ValueInstantiation);
}
RequiresExpr::RequiresExpr(ASTContext &C, EmptyShell Empty,
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index afa4ae5d1374..d20c2382b6ac 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -54,6 +54,7 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
@@ -674,6 +675,7 @@ namespace {
None,
Bases,
AfterBases,
+ AfterFields,
Destroying,
DestroyingBases
};
@@ -821,6 +823,9 @@ namespace {
void finishedConstructingBases() {
EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
}
+ void finishedConstructingFields() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
+ }
~EvaluatingConstructorRAII() {
if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
}
@@ -1417,6 +1422,31 @@ static bool isFormalAccess(AccessKinds AK) {
return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy;
}
+/// Is this kind of access valid on an indeterminate object value?
+static bool isValidIndeterminateAccess(AccessKinds AK) {
+ switch (AK) {
+ case AK_Read:
+ case AK_Increment:
+ case AK_Decrement:
+ // These need the object's value.
+ return false;
+
+ case AK_ReadObjectRepresentation:
+ case AK_Assign:
+ case AK_Construct:
+ case AK_Destroy:
+ // Construction and destruction don't need the value.
+ return true;
+
+ case AK_MemberCall:
+ case AK_DynamicCast:
+ case AK_TypeId:
+ // These aren't really meaningful on scalars.
+ return true;
+ }
+ llvm_unreachable("unknown access kind");
+}
+
namespace {
struct ComplexValue {
private:
@@ -1865,7 +1895,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
// ... the address of a function,
- return isa<FunctionDecl>(D);
+ // ... the address of a GUID [MS extension],
+ return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
}
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
@@ -1888,7 +1919,6 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
case Expr::PredefinedExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCEncodeExprClass:
- case Expr::CXXUuidofExprClass:
return true;
case Expr::ObjCBoxedExprClass:
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
@@ -2005,6 +2035,17 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
APValue::LValueBase Base = LVal.getLValueBase();
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+ if (auto *VD = LVal.getLValueBase().dyn_cast<const ValueDecl *>()) {
+ if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
+ if (FD->isConsteval()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible)
+ << !Type->isAnyPointerType();
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
+ }
+ }
+ }
+
// Check that the object is a global. Note that the fake 'this' object we
// manufacture when checking potential constant expressions is conservatively
// assumed to be global here.
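The effect of the new consteval check, sketched with hypothetical user code: an immediate invocation may be evaluated, but the address of a consteval function may not escape into the result.

consteval int sq(int n) { return n * n; }

constexpr int nine = sq(3);   // OK: immediate invocation, fully evaluated
constexpr auto fp = &sq;      // error: the address of the consteval function 'sq'
                              // is not permitted in a constant expression result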
@@ -2114,6 +2155,11 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
+ if (FD->isConsteval()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
+ }
return Usage == Expr::EvaluateForMangling || FD->isVirtual() ||
!FD->hasAttr<DLLImportAttr>();
}
@@ -2533,7 +2579,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
- } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus2a) {
+ } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
// C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
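The rule cited in that comment plays out as follows in user code (hedged example):

constexpr int x = -1 << 3;   // C++20: well defined, folds to -8
                             // pre-C++20: left shift of a negative value is UB,
                             // so this is rejected in a constant expression
static_assert(x == -8);      // accepted under -std=c++20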
@@ -2618,6 +2664,155 @@ static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
return true;
}
+static bool handleLogicalOpForVector(const APInt &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APInt &RHSValue, APInt &Result) {
+ bool LHS = (LHSValue != 0);
+ bool RHS = (RHSValue != 0);
+
+ if (Opcode == BO_LAnd)
+ Result = LHS && RHS;
+ else
+ Result = LHS || RHS;
+ return true;
+}
+static bool handleLogicalOpForVector(const APFloat &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APFloat &RHSValue, APInt &Result) {
+ bool LHS = !LHSValue.isZero();
+ bool RHS = !RHSValue.isZero();
+
+ if (Opcode == BO_LAnd)
+ Result = LHS && RHS;
+ else
+ Result = LHS || RHS;
+ return true;
+}
+
+static bool handleLogicalOpForVector(const APValue &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APValue &RHSValue, APInt &Result) {
+ // The result is always an int type; the operand kind follows the first operand.
+ if (LHSValue.getKind() == APValue::Int)
+ return handleLogicalOpForVector(LHSValue.getInt(), Opcode,
+ RHSValue.getInt(), Result);
+ assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
+ return handleLogicalOpForVector(LHSValue.getFloat(), Opcode,
+ RHSValue.getFloat(), Result);
+}
+
+template <typename APTy>
+static bool
+handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
+ const APTy &RHSValue, APInt &Result) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("unsupported binary operator");
+ case BO_EQ:
+ Result = (LHSValue == RHSValue);
+ break;
+ case BO_NE:
+ Result = (LHSValue != RHSValue);
+ break;
+ case BO_LT:
+ Result = (LHSValue < RHSValue);
+ break;
+ case BO_GT:
+ Result = (LHSValue > RHSValue);
+ break;
+ case BO_LE:
+ Result = (LHSValue <= RHSValue);
+ break;
+ case BO_GE:
+ Result = (LHSValue >= RHSValue);
+ break;
+ }
+
+ return true;
+}
+
+static bool handleCompareOpForVector(const APValue &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APValue &RHSValue, APInt &Result) {
+ // The result is always an int type; the operand kind follows the first operand.
+ if (LHSValue.getKind() == APValue::Int)
+ return handleCompareOpForVectorHelper(LHSValue.getInt(), Opcode,
+ RHSValue.getInt(), Result);
+ assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
+ return handleCompareOpForVectorHelper(LHSValue.getFloat(), Opcode,
+ RHSValue.getFloat(), Result);
+}
+
+// Perform binary operations for vector types, in place on the LHS.
+static bool handleVectorVectorBinOp(EvalInfo &Info, const Expr *E,
+ BinaryOperatorKind Opcode,
+ APValue &LHSValue,
+ const APValue &RHSValue) {
+ assert(Opcode != BO_PtrMemD && Opcode != BO_PtrMemI &&
+ "Operation not supported on vector types");
+
+ const auto *VT = E->getType()->castAs<VectorType>();
+ unsigned NumElements = VT->getNumElements();
+ QualType EltTy = VT->getElementType();
+
+ // In cases (typically in C) where we are not evaluating a constant expression
+ // and the LHS has not been evaluated down to a vector value yet, just give
+ // up.
+ if (!LHSValue.isVector()) {
+ assert(LHSValue.isLValue() &&
+ "A vector result that isn't a vector OR uncalculated LValue");
+ Info.FFDiag(E);
+ return false;
+ }
+
+ assert(LHSValue.getVectorLength() == NumElements &&
+ RHSValue.getVectorLength() == NumElements && "Different vector sizes");
+
+ SmallVector<APValue, 4> ResultElements;
+
+ for (unsigned EltNum = 0; EltNum < NumElements; ++EltNum) {
+ APValue LHSElt = LHSValue.getVectorElt(EltNum);
+ APValue RHSElt = RHSValue.getVectorElt(EltNum);
+
+ if (EltTy->isIntegerType()) {
+ APSInt EltResult{Info.Ctx.getIntWidth(EltTy),
+ EltTy->isUnsignedIntegerType()};
+ bool Success = true;
+
+ if (BinaryOperator::isLogicalOp(Opcode))
+ Success = handleLogicalOpForVector(LHSElt, Opcode, RHSElt, EltResult);
+ else if (BinaryOperator::isComparisonOp(Opcode))
+ Success = handleCompareOpForVector(LHSElt, Opcode, RHSElt, EltResult);
+ else
+ Success = handleIntIntBinOp(Info, E, LHSElt.getInt(), Opcode,
+ RHSElt.getInt(), EltResult);
+
+ if (!Success) {
+ Info.FFDiag(E);
+ return false;
+ }
+ ResultElements.emplace_back(EltResult);
+
+ } else if (EltTy->isFloatingType()) {
+ assert(LHSElt.getKind() == APValue::Float &&
+ RHSElt.getKind() == APValue::Float &&
+ "Mismatched LHS/RHS/Result Type");
+ APFloat LHSFloat = LHSElt.getFloat();
+
+ if (!handleFloatFloatBinOp(Info, E, LHSFloat, Opcode,
+ RHSElt.getFloat())) {
+ Info.FFDiag(E);
+ return false;
+ }
+
+ ResultElements.emplace_back(LHSFloat);
+ }
+ }
+
+ LHSValue = APValue(ResultElements.data(), ResultElements.size());
+ return true;
+}
+
/// Cast an lvalue referring to a base subobject to a derived class, by
/// truncating the lvalue's path to the given length.
static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
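As a rough user-level sketch of what the vector handling above (handleVectorVectorBinOp and its helpers) enables, assuming the GCC vector extension syntax; whether a particular element access also folds depends on the rest of the evaluator:

typedef int v4si __attribute__((vector_size(16)));

constexpr v4si a = {1, 2, 3, 4};
constexpr v4si b = {10, 20, 30, 40};
constexpr v4si sum  = a + b;   // elementwise add, evaluated to {11, 22, 33, 44}
constexpr v4si prod = a * b;   // elementwise mul, evaluated to {10, 40, 90, 160}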
@@ -2830,7 +3025,7 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
if (Info.checkingPotentialConstantExpression())
return false;
if (!Frame || !Frame->Arguments) {
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << VD;
return false;
}
Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
@@ -2861,12 +3056,34 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
}
// Dig out the initializer, and use the declaration which it's attached to.
+ // FIXME: We should eventually check whether the variable has a reachable
+ // initializing declaration.
const Expr *Init = VD->getAnyInitializer(VD);
- if (!Init || Init->isValueDependent()) {
- // If we're checking a potential constant expression, the variable could be
- // initialized later.
- if (!Info.checkingPotentialConstantExpression())
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ if (!Init) {
+ // Don't diagnose during potential constant expression checking; an
+ // initializer might be added later.
+ if (!Info.checkingPotentialConstantExpression()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_unknown, 1)
+ << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ }
+ return false;
+ }
+
+ if (Init->isValueDependent()) {
+ // The DeclRefExpr is not value-dependent, but the variable it refers to
+ // has a value-dependent initializer. This should only happen in
+ // constant-folding cases, where the variable is not actually of a suitable
+ // type for use in a constant expression (otherwise the DeclRefExpr would
+ // have been value-dependent too), so diagnose that.
+ assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx));
+ if (!Info.checkingPotentialConstantExpression()) {
+ Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << VD->getType();
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ }
return false;
}
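The sharper notes surface in cases like the following (diagnostic wording paraphrased, not exact):

extern const int n;    // declared, but no initializer is visible here
constexpr int k = n;   // error: not a constant expression; the note now says the
                       // initializer of 'n' is unknown rather than the generic
                       // "invalid subexpression" note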
@@ -2877,13 +3094,6 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
return true;
}
- // Never evaluate the initializer of a weak variable. We can't be sure that
- // this is the definition which will be used.
- if (VD->isWeak()) {
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- return false;
- }
-
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
SmallVector<PartialDiagnosticAt, 8> Notes;
@@ -2893,13 +3103,24 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
return false;
- } else if (!VD->checkInitIsICE()) {
+ }
+
+ // Check that the variable is actually usable in constant expressions.
+ if (!VD->checkInitIsICE()) {
Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
Notes.size() + 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
}
+ // Never use the initializer of a weak variable, not even for constant
+ // folding. We can't be sure that this is the definition that will be used.
+ if (VD->isWeak()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_weak) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
Result = VD->getEvaluatedValue();
return true;
}
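The weak-variable rule likewise gets a dedicated note; a hedged sketch:

// Even though an initializer is visible, a weak definition may be replaced at
// link time, so its initializer is never used for folding.
extern const int w __attribute__((weak)) = 42;
constexpr int k = w;   // error: not a constant expression; note points at the
                       // weak variable 'w' (wording paraphrased)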
@@ -3006,15 +3227,22 @@ static void expandArray(APValue &Array, unsigned Index) {
/// is trivial. Note that this is never true for a union type with fields
/// (because the copy always "reads" the active member) and always true for
/// a non-class type.
+static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD);
static bool isReadByLvalueToRvalueConversion(QualType T) {
CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- if (!RD || (RD->isUnion() && !RD->field_empty()))
- return true;
+ return !RD || isReadByLvalueToRvalueConversion(RD);
+}
+static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) {
+ // FIXME: A trivial copy of a union copies the object representation, even if
+ // the union is empty.
+ if (RD->isUnion())
+ return !RD->field_empty();
if (RD->isEmpty())
return false;
for (auto *Field : RD->fields())
- if (isReadByLvalueToRvalueConversion(Field->getType()))
+ if (!Field->isUnnamedBitfield() &&
+ isReadByLvalueToRvalueConversion(Field->getType()))
return true;
for (auto &BaseSpec : RD->bases())
@@ -3124,6 +3352,13 @@ struct CompleteObject {
: Base(Base), Value(Value), Type(Type) {}
bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const {
+ // If this isn't a "real" access (e.g., if it's just accessing the type
+ // info), allow it. We assume the type doesn't change dynamically for
+ // subobjects of constexpr objects (even though we'd hit UB here if it
+ // did). FIXME: Is this right?
+ if (!isAnyAccess(AK))
+ return true;
+
// In C++14 onwards, it is permitted to read a mutable member whose
// lifetime began within the evaluation.
// FIXME: Should we also allow this in C++11?
@@ -3178,9 +3413,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
// Reading an indeterminate value is undefined, but assigning over one is OK.
if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
- (O->isIndeterminate() && handler.AccessKind != AK_Construct &&
- handler.AccessKind != AK_Assign &&
- handler.AccessKind != AK_ReadObjectRepresentation)) {
+ (O->isIndeterminate() &&
+ !isValidIndeterminateAccess(handler.AccessKind))) {
if (!Info.checkingPotentialConstantExpression())
Info.FFDiag(E, diag::note_constexpr_access_uninit)
<< handler.AccessKind << O->isIndeterminate();
@@ -3548,7 +3782,30 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
- if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ if (const ConstantExpr *CE =
+ dyn_cast_or_null<ConstantExpr>(LVal.Base.dyn_cast<const Expr *>())) {
+ /// Nested immediate invocation have been previously removed so if we found
+ /// a ConstantExpr it can only be the EvaluatingDecl.
+ assert(CE->isImmediateInvocation() && CE == Info.EvaluatingDecl);
+ (void)CE;
+ BaseVal = Info.EvaluatingDeclValue;
+ } else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) {
+ // Allow reading from a GUID declaration.
+ if (auto *GD = dyn_cast<MSGuidDecl>(D)) {
+ if (isModification(AK)) {
+ // All the remaining cases do not permit modification of the object.
+ Info.FFDiag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ }
+ APValue &V = GD->getAsAPValue();
+ if (V.isAbsent()) {
+ Info.FFDiag(E, diag::note_constexpr_unsupported_layout)
+ << GD->getType();
+ return CompleteObject();
+ }
+ return CompleteObject(LVal.Base, &V, GD->getType());
+ }
+
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
// In C++11, constexpr, non-volatile variables initialized with constant
// expressions are constant expressions too. Inside constexpr functions,
@@ -3566,6 +3823,11 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject();
}
+ // In OpenCL if a variable is in constant address space it is a const value.
+ bool IsConstant = BaseType.isConstQualified() ||
+ (Info.getLangOpts().OpenCL &&
+ BaseType.getAddressSpace() == LangAS::opencl_constant);
+
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
@@ -3583,9 +3845,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
} else if (BaseType->isIntegralOrEnumerationType()) {
// In OpenCL if a variable is in constant address space it is a const
// value.
- if (!(BaseType.isConstQualified() ||
- (Info.getLangOpts().OpenCL &&
- BaseType.getAddressSpace() == LangAS::opencl_constant))) {
+ if (!IsConstant) {
if (!IsAccess)
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
if (Info.getLangOpts().CPlusPlus) {
@@ -3598,27 +3858,29 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
} else if (!IsAccess) {
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
- } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
- // We support folding of const floating-point types, in order to make
- // static const data members of such types (supported as an extension)
- // more useful.
- if (Info.getLangOpts().CPlusPlus11) {
- Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ } else if (IsConstant && Info.checkingPotentialConstantExpression() &&
+ BaseType->isLiteralType(Info.Ctx) && !VD->hasDefinition()) {
+ // This variable might end up being constexpr. Don't diagnose it yet.
+ } else if (IsConstant) {
+ // Keep evaluating to see what we can do. In particular, we support
+ // folding of const floating-point types, in order to make static const
+ // data members of such types (supported as an extension) more useful.
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.CCEDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.CCEDiag(E);
}
- } else if (BaseType.isConstQualified() && VD->hasDefinition(Info.Ctx)) {
- Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr) << VD;
- // Keep evaluating to see what we can do.
} else {
- // FIXME: Allow folding of values of any literal type in all languages.
- if (Info.checkingPotentialConstantExpression() &&
- VD->getType().isConstQualified() && !VD->hasDefinition(Info.Ctx)) {
- // The definition of this variable could be constexpr. We can't
- // access it right now, but may be able to in future.
- } else if (Info.getLangOpts().CPlusPlus11) {
- Info.FFDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ // Never allow reading a non-const value.
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.FFDiag(E);
@@ -3828,12 +4090,26 @@ struct CompoundAssignSubobjectHandler {
return false;
case APValue::LValue:
return foundPointer(Subobj, SubobjType);
+ case APValue::Vector:
+ return foundVector(Subobj, SubobjType);
default:
// FIXME: can this happen?
Info.FFDiag(E);
return false;
}
}
+
+ bool foundVector(APValue &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ if (!SubobjType->isVectorType()) {
+ Info.FFDiag(E);
+ return false;
+ }
+ return handleVectorVectorBinOp(Info, E, Opcode, Value, RHS);
+ }
+
bool found(APSInt &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
@@ -4230,37 +4506,48 @@ static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
}
/// Get the value to use for a default-initialized object of type T.
-static APValue getDefaultInitValue(QualType T) {
+/// Return false if it encounters something invalid.
+static bool getDefaultInitValue(QualType T, APValue &Result) {
+ bool Success = true;
if (auto *RD = T->getAsCXXRecordDecl()) {
- if (RD->isUnion())
- return APValue((const FieldDecl*)nullptr);
-
- APValue Struct(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ if (RD->isInvalidDecl()) {
+ Result = APValue();
+ return false;
+ }
+ if (RD->isUnion()) {
+ Result = APValue((const FieldDecl *)nullptr);
+ return true;
+ }
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- End = RD->bases_end(); I != End; ++I, ++Index)
- Struct.getStructBase(Index) = getDefaultInitValue(I->getType());
+ End = RD->bases_end();
+ I != End; ++I, ++Index)
+ Success &= getDefaultInitValue(I->getType(), Result.getStructBase(Index));
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitfield())
continue;
- Struct.getStructField(I->getFieldIndex()) =
- getDefaultInitValue(I->getType());
+ Success &= getDefaultInitValue(I->getType(),
+ Result.getStructField(I->getFieldIndex()));
}
- return Struct;
+ return Success;
}
if (auto *AT =
dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) {
- APValue Array(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
- if (Array.hasArrayFiller())
- Array.getArrayFiller() = getDefaultInitValue(AT->getElementType());
- return Array;
+ Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
+ if (Result.hasArrayFiller())
+ Success &=
+ getDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
+
+ return Success;
}
- return APValue::IndeterminateValue();
+ Result = APValue::IndeterminateValue();
+ return true;
}
namespace {
@@ -4290,10 +4577,8 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
Info.CurrentCall->createTemporary(VD, VD->getType(), true, Result);
const Expr *InitE = VD->getInit();
- if (!InitE) {
- Val = getDefaultInitValue(VD->getType());
- return true;
- }
+ if (!InitE)
+ return getDefaultInitValue(VD->getType(), Val);
if (InitE->isValueDependent())
return false;
@@ -4901,7 +5186,7 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
// call.
- if (!Info.Ctx.getLangOpts().CPlusPlus2a && isa<CXXMethodDecl>(Declaration) &&
+ if (!Info.Ctx.getLangOpts().CPlusPlus20 && isa<CXXMethodDecl>(Declaration) &&
cast<CXXMethodDecl>(Declaration)->isVirtual())
Info.CCEDiag(CallLoc, diag::note_constexpr_virtual_call);
@@ -4910,6 +5195,13 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
+ if (const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(Definition)) {
+ for (const auto *InitExpr : CtorDecl->inits()) {
+ if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
+ return false;
+ }
+ }
+
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() && Body)
return true;
@@ -5060,6 +5352,7 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
case ConstructionPhase::None:
case ConstructionPhase::AfterBases:
+ case ConstructionPhase::AfterFields:
case ConstructionPhase::Destroying:
// We've finished constructing the base classes and not yet started
// destroying them again, so this is the dynamic type.
@@ -5278,12 +5571,15 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
namespace {
struct StartLifetimeOfUnionMemberHandler {
+ EvalInfo &Info;
+ const Expr *LHSExpr;
const FieldDecl *Field;
-
+ bool DuringInit;
+ bool Failed = false;
static const AccessKinds AccessKind = AK_Assign;
typedef bool result_type;
- bool failed() { return false; }
+ bool failed() { return Failed; }
bool found(APValue &Subobj, QualType SubobjType) {
// We are supposed to perform no initialization but begin the lifetime of
// the object. We interpret that as meaning to do what default
@@ -5294,9 +5590,22 @@ struct StartLifetimeOfUnionMemberHandler {
// * No variant members' lifetimes begin
// * All scalar subobjects whose lifetimes begin have indeterminate values
assert(SubobjType->isUnionType());
- if (!declaresSameEntity(Subobj.getUnionField(), Field) ||
- !Subobj.getUnionValue().hasValue())
- Subobj.setUnion(Field, getDefaultInitValue(Field->getType()));
+ if (declaresSameEntity(Subobj.getUnionField(), Field)) {
+ // This union member is already active. If it's also in-lifetime, there's
+ // nothing to do.
+ if (Subobj.getUnionValue().hasValue())
+ return true;
+ } else if (DuringInit) {
+ // We're currently in the process of initializing a different union
+ // member. If we carried on, that initialization would attempt to
+ // store to an inactive union member, resulting in undefined behavior.
+ Info.FFDiag(LHSExpr,
+ diag::note_constexpr_union_member_change_during_init);
+ return false;
+ }
+ APValue Result;
+ Failed = !getDefaultInitValue(Field->getType(), Result);
+ Subobj.setUnion(Field, Result);
return true;
}
bool found(APSInt &Value, QualType SubobjType) {
@@ -5399,7 +5708,10 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
SubobjectDesignator D = LHS.Designator;
D.truncate(Info.Ctx, LHS.Base, LengthAndField.first);
- StartLifetimeOfUnionMemberHandler StartLifetime{LengthAndField.second};
+ bool DuringInit = Info.isEvaluatingCtorDtor(LHS.Base, D.Entries) ==
+ ConstructionPhase::AfterBases;
+ StartLifetimeOfUnionMemberHandler StartLifetime{
+ Info, LHSExpr, LengthAndField.second, DuringInit};
if (!findSubobject(Info, LHSExpr, Obj, D, StartLifetime))
return false;
}
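This is the machinery behind C++20's rule that constant evaluation may change the active member of a union, plus a new diagnostic for changing it while another member is still being initialized. A small sketch:

union U { int a; int b; };

constexpr int switch_member() {
  U u{1};      // active member: a
  u.b = 2;     // C++20: assignment begins the lifetime of b and makes it the
               // active member (this is what HandleUnionActiveMemberChange does)
  return u.b;
}
static_assert(switch_member() == 2);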
@@ -5407,22 +5719,6 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
return true;
}
-/// Determine if a class has any fields that might need to be copied by a
-/// trivial copy or move operation.
-static bool hasFields(const CXXRecordDecl *RD) {
- if (!RD || RD->isEmpty())
- return false;
- for (auto *FD : RD->fields()) {
- if (FD->isUnnamedBitfield())
- continue;
- return true;
- }
- for (auto &Base : RD->bases())
- if (hasFields(Base.getType()->getAsCXXRecordDecl()))
- return true;
- return false;
-}
-
namespace {
typedef SmallVector<APValue, 8> ArgVector;
}
@@ -5447,6 +5743,8 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
}
}
}
+ // FIXME: This is the wrong evaluation order for an assignment operator
+ // called via operator syntax.
for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
if (!Evaluate(ArgValues[Idx], Info, Args[Idx])) {
// If we're checking for a potential constant expression, evaluate all
@@ -5491,7 +5789,8 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
if (MD && MD->isDefaulted() &&
(MD->getParent()->isUnion() ||
- (MD->isTrivial() && hasFields(MD->getParent())))) {
+ (MD->isTrivial() &&
+ isReadByLvalueToRvalueConversion(MD->getParent())))) {
assert(This &&
(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
LValue RHS;
@@ -5500,7 +5799,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), RHS,
RHSValue, MD->getParent()->isUnion()))
return false;
- if (Info.getLangOpts().CPlusPlus2a && MD->isTrivial() &&
+ if (Info.getLangOpts().CPlusPlus20 && MD->isTrivial() &&
!HandleUnionActiveMemberChange(Info, Args[0], *This))
return false;
if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
@@ -5578,7 +5877,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// actually read them.
if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
(Definition->getParent()->isUnion() ||
- (Definition->isTrivial() && hasFields(Definition->getParent())))) {
+ (Definition->isTrivial() &&
+ isReadByLvalueToRvalueConversion(Definition->getParent())))) {
LValue RHS;
RHS.setFrom(Info.Ctx, ArgValues[0]);
return handleLValueToRValueConversion(
@@ -5587,9 +5887,14 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
// Reserve space for the struct members.
- if (!RD->isUnion() && !Result.hasValue())
- Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ if (!Result.hasValue()) {
+ if (!RD->isUnion())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ else
+ // A union starts with no active member.
+ Result = APValue((const FieldDecl*)nullptr);
+ }
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
@@ -5616,8 +5921,9 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) {
assert(FieldIt != RD->field_end() && "missing field?");
if (!FieldIt->isUnnamedBitfield())
- Result.getStructField(FieldIt->getFieldIndex()) =
- getDefaultInitValue(FieldIt->getType());
+ Success &= getDefaultInitValue(
+ FieldIt->getType(),
+ Result.getStructField(FieldIt->getFieldIndex()));
}
++FieldIt;
};
@@ -5669,10 +5975,10 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (CD->isUnion())
*Value = APValue(FD);
else
- // FIXME: This immediately starts the lifetime of all members of an
- // anonymous struct. It would be preferable to strictly start member
- // lifetime in initialization order.
- *Value = getDefaultInitValue(Info.Ctx.getRecordType(CD));
+ // FIXME: This immediately starts the lifetime of all members of
+ // an anonymous struct. It would be preferable to strictly start
+ // member lifetime in initialization order.
+ Success &= getDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
}
// Store Subobject as its parent before updating it for the last element
// in the chain.
@@ -5719,11 +6025,14 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (!RD->isUnion()) {
for (; FieldIt != RD->field_end(); ++FieldIt) {
if (!FieldIt->isUnnamedBitfield())
- Result.getStructField(FieldIt->getFieldIndex()) =
- getDefaultInitValue(FieldIt->getType());
+ Success &= getDefaultInitValue(
+ FieldIt->getType(),
+ Result.getStructField(FieldIt->getFieldIndex()));
}
}
+ EvalObj.finishedConstructingFields();
+
return Success &&
EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed &&
LifetimeExtendedScope.destroy();
@@ -5964,7 +6273,7 @@ static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
// This is permitted only within a call to std::allocator<T>::allocate.
auto Caller = Info.getStdAllocatorCaller("allocate");
if (!Caller) {
- Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus2a
+ Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus20
? diag::note_constexpr_new_untyped
: diag::note_constexpr_new);
return false;
@@ -6697,8 +7006,13 @@ public:
return Error(E);
}
- bool VisitConstantExpr(const ConstantExpr *E)
- { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ bool VisitConstantExpr(const ConstantExpr *E) {
+ if (E->hasAPValueResult())
+ return DerivedSuccess(E->getAPValueResult(), E);
+
+ return StmtVisitorTy::Visit(E->getSubExpr());
+ }
+
bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryExtension(const UnaryOperator *E)
@@ -6741,7 +7055,7 @@ public:
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
- if (!Info.Ctx.getLangOpts().CPlusPlus2a)
+ if (!Info.Ctx.getLangOpts().CPlusPlus20)
CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
@@ -6900,12 +7214,10 @@ public:
return Error(Callee);
This = &ThisVal;
} else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Callee)) {
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(PDE, diag::note_constexpr_pseudo_destructor);
- // FIXME: If pseudo-destructor calls ever start ending the lifetime of
- // their callee, we should start calling HandleDestruction here.
- // For now, we just evaluate the object argument and discard it.
- return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal);
+ return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal) &&
+ HandleDestruction(Info, PDE, ThisVal, PDE->getDestroyedType());
} else
return Error(Callee);
FD = Member;
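With this change, a pseudo-destructor call ends the lifetime of its object during constant evaluation, matching the C++20 wording; roughly:

constexpr int f() {
  int n = 42;
  using T = int;
  n.~T();      // previously a no-op during evaluation; now destroys n
  return n;    // read of an object outside its lifetime: no longer constant
}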
@@ -7369,6 +7681,8 @@ public:
// from the AST (FIXME).
// * A MaterializeTemporaryExpr that has static storage duration, with no
// CallIndex, for a lifetime-extended temporary.
+// * The ConstantExpr that is currently being evaluated during evaluation of an
+// immediate invocation.
// plus an offset in bytes.
//===----------------------------------------------------------------------===//
namespace {
@@ -7448,6 +7762,8 @@ bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
return VisitVarDecl(E, VD);
if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl()))
return Visit(BD->getBinding());
+ if (const MSGuidDecl *GD = dyn_cast<MSGuidDecl>(E->getDecl()))
+ return Success(GD);
return Error(E);
}
@@ -7604,7 +7920,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
else
TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
} else {
- if (!Info.Ctx.getLangOpts().CPlusPlus2a) {
+ if (!Info.Ctx.getLangOpts().CPlusPlus20) {
Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic)
<< E->getExprOperand()->getType()
<< E->getExprOperand()->getSourceRange();
@@ -7626,7 +7942,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
}
bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Success(E);
+ return Success(E->getGuidDecl());
}
bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
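__uuidof expressions now resolve to an MSGuidDecl whose value can be read during constant evaluation. A sketch, assuming -fms-extensions and a _GUID definition with the layout Clang expects:

struct _GUID {
  unsigned long  Data1;
  unsigned short Data2;
  unsigned short Data3;
  unsigned char  Data4[8];
};

struct __declspec(uuid("12345678-1234-5678-9abc-123456789abc")) S {};

constexpr unsigned long D1 = __uuidof(S).Data1;   // reads through the MSGuidDecl
static_assert(D1 == 0x12345678);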
@@ -7740,7 +8056,7 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
if (!Evaluate(NewVal, this->Info, E->getRHS()))
return false;
- if (Info.getLangOpts().CPlusPlus2a &&
+ if (Info.getLangOpts().CPlusPlus20 &&
!HandleUnionActiveMemberChange(Info, E->getLHS(), Result))
return false;
@@ -8235,6 +8551,12 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
return visitNonBuiltinCallExpr(E);
}
+// Determine if T is a character type for which we guarantee that
+// sizeof(T) == 1.
+static bool isOneByteCharacterType(QualType T) {
+ return T->isCharType() || T->isChar8Type();
+}
+
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
switch (BuiltinOp) {
@@ -8385,8 +8707,12 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
// Give up on byte-oriented matching against multibyte elements.
// FIXME: We can compare the bytes in the correct order.
- if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One())
+ if (IsRawByte && !isOneByteCharacterType(CharTy)) {
+ Info.FFDiag(E, diag::note_constexpr_memchr_unsupported)
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << CharTy;
return false;
+ }
// Figure out what value we're actually looking for (after converting to
// the corresponding unsigned type if necessary).
uint64_t DesiredVal;
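In practice, raw-byte searches keep folding for genuine one-byte character types and otherwise fail with a targeted note (note_constexpr_memchr_unsupported) instead of failing silently; a hedged sketch:

static_assert(__builtin_memchr("abcd", 'c', 4) != nullptr);   // char: still folds

constexpr wchar_t ws[] = L"abcd";
// __builtin_memchr(ws, 0, sizeof(ws)) no longer folds; the diagnostic names the
// unsupported multibyte element type.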
@@ -8502,6 +8828,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
QualType T = Dest.Designator.getType(Info.Ctx);
QualType SrcT = Src.Designator.getType(Info.Ctx);
if (!Info.Ctx.hasSameUnqualifiedType(T, SrcT)) {
+ // FIXME: Consider using our bit_cast implementation to support this.
Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
return false;
}
@@ -8599,7 +8926,7 @@ static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
QualType AllocType);
bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(E, diag::note_constexpr_new);
// We cannot speculatively evaluate a delete expression.
@@ -8713,8 +9040,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// special handling for this case when we initialize.
if (InitBound != AllocBound)
ResizedArrayILE = cast<InitListExpr>(Init);
- } else if (Init) {
- ResizedArrayCCE = cast<CXXConstructExpr>(Init);
+ } else if (Init) {
+ ResizedArrayCCE = cast<CXXConstructExpr>(Init);
}
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
@@ -8786,8 +9113,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
} else if (Init) {
if (!EvaluateInPlace(*Val, Info, Result, Init))
return false;
- } else {
- *Val = getDefaultInitValue(AllocType);
+ } else if (!getDefaultInitValue(AllocType, *Val)) {
+ return false;
}
// Array new returns a pointer to the first element, not a pointer to the
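These paths feed C++20 constexpr dynamic allocation; a minimal example of what flows through the array-new handling and getDefaultInitValue, assuming -std=c++20:

constexpr int sum_first(int n) {
  int *a = new int[n];        // elements start out default-initialized
  for (int i = 0; i != n; ++i)
    a[i] = i;
  int s = 0;
  for (int i = 0; i != n; ++i)
    s += a[i];
  delete[] a;                 // the allocation must be freed before returning
  return s;
}
static_assert(sum_first(5) == 10);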
@@ -9137,6 +9464,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
}
}
+ EvalObj.finishedConstructingFields();
+
return Success;
}
@@ -9156,8 +9485,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
if (ZeroInit)
return ZeroInitialization(E, T);
- Result = getDefaultInitValue(T);
- return true;
+ return getDefaultInitValue(T, Result);
}
const FunctionDecl *Definition = nullptr;
@@ -9215,24 +9543,30 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
+ auto InvalidType = [&] {
+ Info.FFDiag(E, diag::note_constexpr_unsupported_layout)
+ << E->getType();
+ return false;
+ };
+
// FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
if (Field == Record->field_end())
- return Error(E);
+ return InvalidType();
// Start pointer.
if (!Field->getType()->isPointerType() ||
!Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType()))
- return Error(E);
+ return InvalidType();
// FIXME: What if the initializer_list type has base classes, etc?
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
if (++Field == Record->field_end())
- return Error(E);
+ return InvalidType();
if (Field->getType()->isPointerType() &&
Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -9247,10 +9581,10 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// Length.
Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
else
- return Error(E);
+ return InvalidType();
if (++Field != Record->field_end())
- return Error(E);
+ return InvalidType();
return true;
}
@@ -9387,10 +9721,9 @@ namespace {
bool VisitCastExpr(const CastExpr* E);
bool VisitInitListExpr(const InitListExpr *E);
bool VisitUnaryImag(const UnaryOperator *E);
- // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
- // binary comparisons, binary and/or/xor,
- // conditional operator (for GNU conditional select),
- // shufflevector, ExtVectorElementExpr
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, conditional operator (for GNU
+ // conditional select), shufflevector, ExtVectorElementExpr
};
} // end anonymous namespace
@@ -9538,6 +9871,41 @@ bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
return ZeroInitialization(E);
}
+bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ BinaryOperatorKind Op = E->getOpcode();
+ assert(Op != BO_PtrMemD && Op != BO_PtrMemI && Op != BO_Cmp &&
+ "Operation not supported on vector types");
+
+ if (Op == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ Expr *LHS = E->getLHS();
+ Expr *RHS = E->getRHS();
+
+ assert(LHS->getType()->isVectorType() && RHS->getType()->isVectorType() &&
+ "Must both be vector types");
+ // Checking just that the types match would be fine, except that shifts don't
+ // require matching operand types (you always shift by an int).
+ assert(LHS->getType()->getAs<VectorType>()->getNumElements() ==
+ E->getType()->getAs<VectorType>()->getNumElements() &&
+ RHS->getType()->getAs<VectorType>()->getNumElements() ==
+ E->getType()->getAs<VectorType>()->getNumElements() &&
+ "All operands must be the same size.");
+
+ APValue LHSValue;
+ APValue RHSValue;
+ bool LHSOK = Evaluate(LHSValue, Info, LHS);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
+ if (!Evaluate(RHSValue, Info, RHS) || !LHSOK)
+ return false;
+
+ if (!handleVectorVectorBinOp(Info, E, Op, LHSValue, RHSValue))
+ return false;
+
+ return Success(LHSValue, E);
+}
+
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
@@ -9561,8 +9929,18 @@ namespace {
bool ZeroInitialization(const Expr *E) {
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(E->getType());
- if (!CAT)
+ if (!CAT) {
+ if (const IncompleteArrayType *IAT =
+ Info.Ctx.getAsIncompleteArrayType(E->getType())) {
+ // We can be asked to zero-initialize a flexible array member; this
+ // is represented as an ImplicitValueInitExpr of incomplete array
+ // type. In this case, the array has zero elements.
+ Result = APValue(APValue::UninitArray(), 0, 0);
+ return true;
+ }
+ // FIXME: We could handle VLAs here.
return Error(E);
+ }
Result = APValue(APValue::UninitArray(), 0,
CAT->getSize().getZExtValue());
@@ -9620,7 +9998,7 @@ static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
// Return true iff the given array filler may depend on the element index.
static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
- // For now, just whitelist non-class value-initialization and initialization
+ // For now, just allow non-class value-initialization and initialization
// lists comprised of them.
if (isa<ImplicitValueInitExpr>(FillerExpr))
return false;
@@ -9857,8 +10235,6 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
- bool VisitConstantExpr(const ConstantExpr *E);
-
bool VisitIntegerLiteral(const IntegerLiteral *E) {
return Success(E->getValue(), E);
}
@@ -10221,10 +10597,12 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
case Type::BlockPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
+ case Type::ExtInt:
// GCC classifies vectors as None. We follow its lead and classify all
// other types that don't fit into the regular classification the same way.
return GCCTypeClass::None;
@@ -10308,7 +10686,7 @@ static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
ArgType->isAnyComplexType() || ArgType->isPointerType() ||
ArgType->isNullPtrType()) {
APValue V;
- if (!::EvaluateAsRValue(Info, Arg, V)) {
+ if (!::EvaluateAsRValue(Info, Arg, V) || Info.EvalStatus.HasSideEffects) {
Fold.keepDiagnostics();
return false;
}
@@ -10486,9 +10864,9 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
// the array at the end was flexible, or if it had 0 or 1 elements. This
// broke some common standard library extensions (PR30346), but was
// otherwise seemingly fine. It may be useful to reintroduce this behavior
- // with some sort of whitelist. OTOH, it seems that GCC is always
+ // with some sort of allow-list. OTOH, it seems that GCC is always
// conservative with the last element in structs (if it's an array), so our
- // current behavior is more compatible than a whitelisting approach would
+ // current behavior is more compatible than an explicit list approach would
// be.
return LVal.InvalidBase &&
Designator.Entries.size() == Designator.MostDerivedPathLength &&
@@ -10638,13 +11016,6 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
return true;
}
-bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
- llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
- if (E->getResultAPValueKind() != APValue::None)
- return Success(E->getAPValueResult(), E);
- return ExprEvaluatorBaseTy::VisitConstantExpr(E);
-}
-
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (unsigned BuiltinOp = E->getBuiltinCallee())
return VisitBuiltinCallExpr(E, BuiltinOp);
@@ -10681,7 +11052,7 @@ static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
- switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
+ switch (BuiltinOp) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -10870,6 +11241,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability:
return Visit(E->getArg(0));
case Builtin::BI__builtin_ffs:
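Like __builtin_expect, the new probability-carrying form is transparent to constant evaluation and simply yields its first argument; a hedged sketch:

constexpr long v = __builtin_expect_with_probability(7L, 7L, 1.0);
static_assert(v == 7);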
@@ -11063,6 +11435,17 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));
+ // For memcmp, allow comparing any arrays of '[[un]signed] char' or
+ // 'char8_t', but no other types.
+ if (IsRawByte &&
+ !(isOneByteCharacterType(CharTy1) && isOneByteCharacterType(CharTy2))) {
+ // FIXME: Consider using our bit_cast implementation to support this.
+ Info.FFDiag(E, diag::note_constexpr_memcmp_unsupported)
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << CharTy1 << CharTy2;
+ return false;
+ }
+
const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) &&
handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) &&
@@ -11073,57 +11456,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1);
};
- if (IsRawByte) {
- uint64_t BytesRemaining = MaxLength;
- // Pointers to const void may point to objects of incomplete type.
- if (CharTy1->isIncompleteType()) {
- Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1;
- return false;
- }
- if (CharTy2->isIncompleteType()) {
- Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2;
- return false;
- }
- uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)};
- CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width);
- // Give up on comparing between elements with disparate widths.
- if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2))
- return false;
- uint64_t BytesPerElement = CharTy1Size.getQuantity();
- assert(BytesRemaining && "BytesRemaining should not be zero: the "
- "following loop considers at least one element");
- while (true) {
- APValue Char1, Char2;
- if (!ReadCurElems(Char1, Char2))
- return false;
- // We have compatible in-memory widths, but a possible type and
- // (for `bool`) internal representation mismatch.
- // Assuming two's complement representation, including 0 for `false` and
- // 1 for `true`, we can check an appropriate number of elements for
- // equality even if they are not byte-sized.
- APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width);
- APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width);
- if (Char1InMem.ne(Char2InMem)) {
- // If the elements are byte-sized, then we can produce a three-way
- // comparison result in a straightforward manner.
- if (BytesPerElement == 1u) {
- // memcmp always compares unsigned chars.
- return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E);
- }
- // The result is byte-order sensitive, and we have multibyte elements.
- // FIXME: We can compare the remaining bytes in the correct order.
- return false;
- }
- if (!AdvanceElems())
- return false;
- if (BytesRemaining <= BytesPerElement)
- break;
- BytesRemaining -= BytesPerElement;
- }
- // Enough elements are equal to account for the memcmp limit.
- return Success(0, E);
- }
-
bool StopAtNull =
(BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
BuiltinOp != Builtin::BIwmemcmp &&
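With the bespoke raw-byte loop removed, the memcmp-family builtins fold only over one-byte character types and otherwise fail up front with the new note; for example:

static_assert(__builtin_memcmp("abc", "abd", 3) < 0);   // compares as unsigned char

constexpr int lhs[] = {1, 2, 3}, rhs[] = {1, 2, 3};
// __builtin_memcmp(lhs, rhs, sizeof(lhs)) no longer folds; the note states that
// comparison of 'const int' elements is unsupported (wording paraphrased).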
@@ -11141,7 +11473,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APValue Char1, Char2;
if (!ReadCurElems(Char1, Char2))
return false;
- if (Char1.getInt() != Char2.getInt()) {
+ if (Char1.getInt().ne(Char2.getInt())) {
if (IsWide) // wmemcmp compares with wchar_t signedness.
return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
// memcmp always compares unsigned chars.
@@ -11198,13 +11530,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
}
- // Avoid emiting call for runtime decision on PowerPC 32-bit
- // The lock free possibilities on this platform are covered by the lines
- // above and we know in advance other cases require lock
- if (Info.Ctx.getTargetInfo().getTriple().getArch() == llvm::Triple::ppc) {
- return Success(0, E);
- }
-
return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
Success(0, E) : Error(E);
}
@@ -12598,8 +12923,14 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
bool Overflowed;
APFixedPoint Result = Src.convert(DestFXSema, &Overflowed);
- if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
- return false;
+ if (Overflowed) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ }
return Success(Result, E);
}
case CK_IntegralToFixedPoint: {
@@ -12611,8 +12942,14 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
APFixedPoint IntResult = APFixedPoint::getFromIntValue(
Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed);
- if (Overflowed && !HandleOverflow(Info, E, IntResult, DestType))
- return false;
+ if (Overflowed) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << IntResult.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, IntResult, E->getType()))
+ return false;
+ }
return Success(IntResult, E);
}
@@ -12625,6 +12962,9 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
const Expr *LHS = E->getLHS();
const Expr *RHS = E->getRHS();
FixedPointSemantics ResultFXSema =
@@ -12637,20 +12977,45 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!EvaluateFixedPointOrInteger(RHS, RHSFX, Info))
return false;
+ bool OpOverflow = false, ConversionOverflow = false;
+ APFixedPoint Result(LHSFX.getSemantics());
switch (E->getOpcode()) {
case BO_Add: {
- bool AddOverflow, ConversionOverflow;
- APFixedPoint Result = LHSFX.add(RHSFX, &AddOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
- if ((AddOverflow || ConversionOverflow) &&
- !HandleOverflow(Info, E, Result, E->getType()))
+ Result = LHSFX.add(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Sub: {
+ Result = LHSFX.sub(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Mul: {
+ Result = LHSFX.mul(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Div: {
+ if (RHSFX.getValue() == 0) {
+ Info.FFDiag(E, diag::note_expr_divide_by_zero);
return false;
- return Success(Result, E);
+ }
+ Result = LHSFX.div(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
}
default:
return false;
}
- llvm_unreachable("Should've exited before this");
+ if (OpOverflow || ConversionOverflow) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ }
+ return Success(Result, E);
}
//===----------------------------------------------------------------------===//
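For the fixed-point changes, a rough C-level sketch (Clang's Embedded-C fixed-point support is C-only and enabled with -ffixed-point; exact behavior and warning text are paraphrased):

/* Compile as C with -ffixed-point. */
_Accum a = 2.5k * 2.0k;            /* multiplication now folds: 5.0 */
_Accum q = 5.0k / 2.0k;            /* division by a nonzero constant folds: 2.5 */
short _Fract f = 0.75hr + 0.75hr;  /* overflow in the constant is reported as a
                                      fixed-point overflow warning during
                                      UB checking instead of aborting the fold */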
@@ -13502,7 +13867,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
// This is the only case where we need to produce an extension warning:
// the only other way we can succeed is if we find a dynamic allocation,
// and we will have warned when we allocated it in that case.
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(E, diag::note_constexpr_new);
return true;
}
@@ -13855,7 +14220,7 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
}
bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
- const ASTContext &Ctx) const {
+ const ASTContext &Ctx, bool InPlace) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -13863,7 +14228,14 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
EvalInfo Info(Ctx, Result, EM);
Info.InConstantContext = true;
- if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
+ if (InPlace) {
+ Info.setEvaluatingDecl(this, Result.Val);
+ LValue LVal;
+ LVal.set(this);
+ if (!::EvaluateInPlace(Result.Val, Info, LVal, this) ||
+ Result.HasSideEffects)
+ return false;
+ } else if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
return false;
if (!Info.discardCleanups())
@@ -13906,18 +14278,6 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
LValue LVal;
LVal.set(VD);
- // C++11 [basic.start.init]p2:
- // Variables with static storage duration or thread storage duration shall
- // be zero-initialized before any other initialization takes place.
- // This behavior is not present in C.
- if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
- !DeclTy->isReferenceType()) {
- ImplicitValueInitExpr VIE(DeclTy);
- if (!EvaluateInPlace(Value, Info, LVal, &VIE,
- /*AllowNonLiteralTypes=*/true))
- return false;
- }
-
if (!EvaluateInPlace(Value, Info, LVal, this,
/*AllowNonLiteralTypes=*/true) ||
EStatus.HasSideEffects)
@@ -13936,14 +14296,17 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
bool VarDecl::evaluateDestruction(
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
- assert(getEvaluatedValue() && !getEvaluatedValue()->isAbsent() &&
- "cannot evaluate destruction of non-constant-initialized variable");
-
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- // Make a copy of the value for the destructor to mutate.
- APValue DestroyedValue = *getEvaluatedValue();
+ // Make a copy of the value for the destructor to mutate, if we know it.
+ // Otherwise, treat the value as default-initialized; if the destructor works
+ // anyway, then the destruction is constant (and must be essentially empty).
+ APValue DestroyedValue;
+ if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent())
+ DestroyedValue = *getEvaluatedValue();
+ else if (!getDefaultInitValue(getType(), DestroyedValue))
+ return false;
EvalInfo Info(getASTContext(), EStatus, EvalInfo::EM_ConstantExpression);
Info.setEvaluatingDecl(this, DestroyedValue,
@@ -13956,8 +14319,6 @@ bool VarDecl::evaluateDestruction(
LValue LVal;
LVal.set(this);
- // FIXME: Consider storing whether this variable has constant destruction in
- // the EvaluatedStmt so that CodeGen can query it.
if (!HandleDestruction(Info, DeclLoc, LVal.Base, DestroyedValue, DeclTy) ||
EStatus.HasSideEffects)
return false;
@@ -14105,7 +14466,10 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ImaginaryLiteralClass:
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
+ case Expr::MatrixSubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::MemberExprClass:
case Expr::CompoundAssignOperatorClass:
case Expr::CompoundLiteralExprClass:
@@ -14122,6 +14486,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::StmtExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CUDAKernelCallExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::CXXDynamicCastExprClass:
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
@@ -14136,6 +14501,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CXXPseudoDestructorExprClass:
case Expr::UnresolvedLookupExprClass:
case Expr::TypoExprClass:
+ case Expr::RecoveryExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXConstructExprClass:
case Expr::CXXInheritedCtorInitExprClass:
@@ -14660,6 +15026,15 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
if (FD->isDependentContext())
return true;
+ // Bail out if a constexpr constructor has an initializer that contains an
+ // error. We deliberately don't produce a diagnostic, as we have produced a
+ // relevant diagnostic when parsing the error initializer.
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
+ for (const auto *InitExpr : Ctor->inits()) {
+ if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
+ return false;
+ }
+ }
Expr::EvalStatus Status;
Status.Diag = &Diags;
diff --git a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
index 53d0e873f8c9..662bc325f12c 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
@@ -12,6 +12,8 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
@@ -25,20 +27,13 @@ using namespace clang;
ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, QualType T,
ObjCMethodDecl *Method, SourceRange SR)
- : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary),
NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {
Expr **SaveElements = getElements();
- for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
- if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Elements[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Elements[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I)
SaveElements[I] = Elements[I];
- }
+
+ setDependence(computeDependence(this));
}
ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
@@ -59,25 +54,13 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
bool HasPackExpansions, QualType T,
ObjCMethodDecl *method,
SourceRange SR)
- : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary),
NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
DictWithObjectsMethod(method) {
KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
ExpansionData *Expansions =
HasPackExpansions ? getTrailingObjects<ExpansionData>() : nullptr;
for (unsigned I = 0; I < NumElements; I++) {
- if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
- VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
- ExprBits.ValueDependent = true;
- if (VK[I].Key->isInstantiationDependent() ||
- VK[I].Value->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (VK[I].EllipsisLoc.isInvalid() &&
- (VK[I].Key->containsUnexpandedParameterPack() ||
- VK[I].Value->containsUnexpandedParameterPack()))
- ExprBits.ContainsUnexpandedParameterPack = true;
-
KeyValues[I].Key = VK[I].Key;
KeyValues[I].Value = VK[I].Value;
if (Expansions) {
@@ -88,6 +71,7 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
Expansions[I].NumExpansionsPlusOne = 0;
}
}
+ setDependence(computeDependence(this));
}
ObjCDictionaryLiteral *
@@ -127,10 +111,7 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
- /*TypeDependent=*/false, /*ValueDependent=*/false,
- /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(IsInstanceSuper ? SuperInstance : SuperClass),
@@ -139,6 +120,7 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(SuperType.getAsOpaquePtr());
+ setDependence(computeDependence(this));
}
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
@@ -148,15 +130,14 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->isInstantiationDependentType(),
- T->containsUnexpandedParameterPack()),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(Class), HasMethod(Method != nullptr), IsDelegateInitCall(false),
IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(Receiver);
+ setDependence(computeDependence(this));
}
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
@@ -165,16 +146,14 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
- Receiver->isTypeDependent(), Receiver->isTypeDependent(),
- Receiver->isInstantiationDependent(),
- Receiver->containsUnexpandedParameterPack()),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(Instance), HasMethod(Method != nullptr), IsDelegateInitCall(false),
IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(Receiver);
+ setDependence(computeDependence(this));
}
void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
@@ -182,18 +161,8 @@ void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
SelectorLocationsKind SelLocsK) {
setNumArgs(Args.size());
Expr **MyArgs = getArgs();
- for (unsigned I = 0; I != Args.size(); ++I) {
- if (Args[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Args[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != Args.size(); ++I)
MyArgs[I] = Args[I];
- }
SelLocsKind = SelLocsK;
if (!isImplicit()) {
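A hedged sketch of the pattern these constructors now rely on: dependence is no longer or-ed into ExprBits by hand but folded over the children via computeDependence(). Roughly (ignoring the type-vs-value adjustments the real helpers perform, and using plain casts instead of the overloaded bitmask operators):

#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/ArrayRef.h"

// Rough sketch only: accumulate the dependence bits of every child expression,
// which is what setDependence(computeDependence(this)) ends up recording.
static clang::ExprDependence
foldChildDependence(llvm::ArrayRef<const clang::Expr *> Children) {
  unsigned Bits = 0;
  for (const clang::Expr *Child : Children)
    Bits |= static_cast<unsigned>(Child->getDependence());
  return static_cast<clang::ExprDependence>(Bits);
}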
diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
index 837be5527fce..257833182621 100644
--- a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
@@ -15,9 +15,11 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/None.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
@@ -28,7 +30,7 @@ char ExternalASTSource::ID;
ExternalASTSource::~ExternalASTSource() = default;
-llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+llvm::Optional<ASTSourceDescriptor>
ExternalASTSource::getSourceDescriptor(unsigned ID) {
return None;
}
@@ -38,21 +40,6 @@ ExternalASTSource::hasExternalDefinitions(const Decl *D) {
return EK_ReplyHazy;
}
-ExternalASTSource::ASTSourceDescriptor::ASTSourceDescriptor(const Module &M)
- : Signature(M.Signature), ClangModule(&M) {
- if (M.Directory)
- Path = M.Directory->getName();
- if (auto *File = M.getASTFile())
- ASTFile = File->getName();
-}
-
-std::string ExternalASTSource::ASTSourceDescriptor::getModuleName() const {
- if (ClangModule)
- return ClangModule->Name;
- else
- return PCHModuleName;
-}
-
void ExternalASTSource::FindFileRegionDecls(FileID File, unsigned Offset,
unsigned Length,
SmallVectorImpl<Decl *> &Decls) {}
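With ASTSourceDescriptor now a standalone class (the nested definitions removed above live elsewhere after this change), callers keep using the same Optional-returning hook. A hedged usage sketch with a made-up fallback string:

#include "clang/AST/ExternalASTSource.h"
#include "clang/Basic/Module.h"
#include <string>

// Hedged sketch: ask an external AST source which module an AST came from,
// falling back when the source cannot say (the default returns None).
static std::string moduleNameFor(clang::ExternalASTSource &Source, unsigned ID) {
  if (llvm::Optional<clang::ASTSourceDescriptor> Desc =
          Source.getSourceDescriptor(ID))
    return Desc->getModuleName();
  return "<unknown module>";
}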
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index fcc0b3b11e25..83b952116a5e 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -419,7 +419,6 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
QualType pointeeTy = PT->getPointeeType();
if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
switch (BT->getKind()) {
- case BuiltinType::Void:
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::Char_S:
@@ -539,7 +538,7 @@ QualType ArgType::getRepresentativeType(ASTContext &C) const {
}
std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
- std::string S = getRepresentativeType(C).getAsString();
+ std::string S = getRepresentativeType(C).getAsString(C.getPrintingPolicy());
std::string Alias;
if (Name) {
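The one-line change above makes the printed type name follow the context's printing policy instead of the default one; a hedged sketch of the call shape:

#include "clang/AST/ASTContext.h"
#include <string>

// Hedged sketch: printing through ASTContext::getPrintingPolicy() keeps the
// spelling consistent with the rest of the diagnostics, rather than the
// default policy used by the no-argument QualType::getAsString().
static std::string printType(clang::QualType T, const clang::ASTContext &Ctx) {
  return T.getAsString(Ctx.getPrintingPolicy());
}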
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
index 3e6c8b5da9f0..2baa717311bc 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
@@ -85,14 +85,13 @@ class Boolean {
static Boolean max(unsigned NumBits) { return Boolean(true); }
template <typename T>
- static typename std::enable_if<std::is_integral<T>::value, Boolean>::type
- from(T Value) {
+ static std::enable_if_t<std::is_integral<T>::value, Boolean> from(T Value) {
return Boolean(Value != 0);
}
template <unsigned SrcBits, bool SrcSign>
- static typename std::enable_if<SrcBits != 0, Boolean>::type from(
- Integral<SrcBits, SrcSign> Value) {
+ static std::enable_if_t<SrcBits != 0, Boolean>
+ from(Integral<SrcBits, SrcSign> Value) {
return Boolean(!Value.isZero());
}
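The Boolean/Integral/Source changes in this import are pure modernization: the C++14 alias template std::enable_if_t replaces the verbose typename ...::type spelling with no change in SFINAE behavior. A tiny self-contained equivalence example:

#include <type_traits>

// Both declarations participate in overload resolution identically; the alias
// template merely abbreviates 'typename std::enable_if<...>::type'.
template <typename T>
static typename std::enable_if<std::is_integral<T>::value, bool>::type
isOddVerbose(T V) {
  return (V & 1) != 0;
}

template <typename T>
static std::enable_if_t<std::is_integral<T>::value, bool> isOdd(T V) {
  return (V & 1) != 0;
}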
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
index 1d0e34fc991f..716f28551e58 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -21,6 +21,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
index e7f9ba0f010a..3bfcdfcd4c58 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
@@ -17,6 +17,7 @@
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
using namespace clang;
using namespace clang::interp;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
index e77a825eb1f2..293fdd4b3256 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
@@ -16,6 +16,7 @@
#include "Program.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
using namespace clang;
using namespace clang::interp;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
index 7cc788070de8..46cd611ee389 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
@@ -156,13 +156,12 @@ public:
}
template <typename T>
- static typename std::enable_if<std::is_integral<T>::value, Integral>::type
- from(T Value) {
+ static std::enable_if_t<std::is_integral<T>::value, Integral> from(T Value) {
return Integral(Value);
}
template <unsigned SrcBits, bool SrcSign>
- static typename std::enable_if<SrcBits != 0, Integral>::type
+ static std::enable_if_t<SrcBits != 0, Integral>
from(Integral<SrcBits, SrcSign> Value) {
return Integral(Value.V);
}
@@ -206,52 +205,52 @@ public:
private:
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckAddUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckAddUB(T A, T B,
+ T &R) {
return llvm::AddOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckAddUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckAddUB(T A, T B,
+ T &R) {
R = A + B;
return false;
}
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckSubUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckSubUB(T A, T B,
+ T &R) {
return llvm::SubOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckSubUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckSubUB(T A, T B,
+ T &R) {
R = A - B;
return false;
}
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckMulUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckMulUB(T A, T B,
+ T &R) {
return llvm::MulOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckMulUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckMulUB(T A, T B,
+ T &R) {
R = A * B;
return false;
}
template <typename T, T Min, T Max>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
+ static std::enable_if_t<std::is_signed<T>::value, bool>
CheckRange(int64_t V) {
return Min <= V && V <= Max;
}
template <typename T, T Min, T Max>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+ static std::enable_if_t<std::is_unsigned<T>::value, bool>
CheckRange(int64_t V) {
return V >= 0 && static_cast<uint64_t>(V) <= Max;
}
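The CheckAddUB/CheckSubUB/CheckMulUB split above detects undefined behaviour only for signed operands and lets unsigned arithmetic wrap. A hedged standalone sketch of the same idea for addition, using the llvm::AddOverflow helper that the signed overload calls:

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Signed case: report overflow; the caller then discards the result.
static bool checkedAdd(int64_t A, int64_t B, int64_t &R) {
  return llvm::AddOverflow(A, B, R);
}

// Unsigned case: wrap-around is well defined, so there is nothing to report.
static bool checkedAdd(uint64_t A, uint64_t B, uint64_t &R) {
  R = A + B;
  return false;
}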
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
index 1a8109cedf76..cec3f6d6160e 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
@@ -334,7 +334,7 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
if (F->isVirtual()) {
- if (!S.getLangOpts().CPlusPlus2a) {
+ if (!S.getLangOpts().CPlusPlus20) {
S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
return false;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
index c12caa639da7..a63c5a871ba3 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
@@ -869,7 +869,7 @@ inline bool ShiftRight(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T>
inline bool ShiftLeft(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
- if (V.isSigned() && !S.getLangOpts().CPlusPlus2a) {
+ if (V.isSigned() && !S.getLangOpts().CPlusPlus20) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
// C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
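Only the LangOptions member name changes here (CPlusPlus2a became CPlusPlus20); the semantic point in the comment is unchanged. As a hedged illustration, compiled as C++20 on a target with 32-bit two's-complement int, the following is a valid constant expression, while a pre-C++20 constexpr evaluator has to reject it:

#include <climits>

// Well defined in C++20: the result is 1 * 2^31 reduced modulo 2^32,
// i.e. INT_MIN on a 32-bit int.
constexpr int Shifted = 1 << 31;
static_assert(Shifted == INT_MIN, "modular left-shift result");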
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Block.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
index 5fc93eb39f4e..ed6e8910194d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Block.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Block.h"
+#include "InterpBlock.h"
#include "Pointer.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Block.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
index 97fb9a3ca096..0ccdef221c83 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Block.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
@@ -1,4 +1,4 @@
-//===--- Block.h - Allocated blocks for the interpreter ---------*- C++ -*-===//
+//===-- InterpBlock.h - Allocated blocks for the interpreter ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
index b8391b0bcf92..304e2ad66537 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
@@ -45,16 +45,16 @@ public:
void popArgs();
/// Describes the frame with arguments for diagnostic purposes.
- void describe(llvm::raw_ostream &OS);
+ void describe(llvm::raw_ostream &OS) override;
/// Returns the parent frame object.
- Frame *getCaller() const;
+ Frame *getCaller() const override;
/// Returns the location of the call to the frame.
- SourceLocation getCallLocation() const;
+ SourceLocation getCallLocation() const override;
/// Returns the caller.
- const FunctionDecl *getCallee() const;
+ const FunctionDecl *getCallee() const override;
/// Returns the current function.
Function *getFunction() const { return Func; }
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
index 1a10723aaca5..ef2638e2a36b 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
@@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
#include "Pointer.h"
-#include "Block.h"
#include "Function.h"
+#include "InterpBlock.h"
#include "PrimType.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
index b8fa98e24faa..f2f6e0e76018 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
@@ -13,12 +13,12 @@
#ifndef LLVM_CLANG_AST_INTERP_POINTER_H
#define LLVM_CLANG_AST_INTERP_POINTER_H
-#include "Block.h"
#include "Descriptor.h"
+#include "InterpBlock.h"
+#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ComparisonCategories.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.h b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
index e591c3399d7c..19c652b7331a 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Source.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
@@ -56,14 +56,14 @@ private:
/// Helper to decode a value or a pointer.
template <typename T>
- static typename std::enable_if<!std::is_pointer<T>::value, T>::type
+ static std::enable_if_t<!std::is_pointer<T>::value, T>
ReadHelper(const char *Ptr) {
using namespace llvm::support;
return endian::read<T, endianness::native, 1>(Ptr);
}
template <typename T>
- static typename std::enable_if<std::is_pointer<T>::value, T>::type
+ static std::enable_if_t<std::is_pointer<T>::value, T>
ReadHelper(const char *Ptr) {
using namespace llvm::support;
auto Punned = endian::read<uintptr_t, endianness::native, 1>(Ptr);
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 5d485e000750..ddfbe9f86499 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -13,6 +13,7 @@
// http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
//
//===----------------------------------------------------------------------===//
+
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
@@ -27,6 +28,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
@@ -125,8 +127,9 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
public:
explicit ItaniumMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags)
- : ItaniumMangleContext(Context, Diags) {}
+ DiagnosticsEngine &Diags,
+ bool IsUniqueNameMangler)
+ : ItaniumMangleContext(Context, Diags, IsUniqueNameMangler) {}
/// @name Mangler Entry Points
/// @{
@@ -135,7 +138,7 @@ public:
bool shouldMangleStringLiteral(const StringLiteral *) override {
return false;
}
- void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
+ void mangleCXXName(GlobalDecl GD, raw_ostream &) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
@@ -150,10 +153,6 @@ public:
void mangleCXXRTTI(QualType T, raw_ostream &) override;
void mangleCXXRTTIName(QualType T, raw_ostream &) override;
void mangleTypeName(QualType T, raw_ostream &) override;
- void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &) override;
- void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &) override;
void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
@@ -161,6 +160,7 @@ public:
void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) override;
+ void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &Out) override;
void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
raw_ostream &Out) override;
void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
@@ -417,14 +417,14 @@ public:
void disableDerivedAbiTags() { DisableDerivedAbiTags = true; }
static bool shouldHaveAbiTags(ItaniumMangleContextImpl &C, const VarDecl *VD);
- void mangle(const NamedDecl *D);
+ void mangle(GlobalDecl GD);
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
void mangleNumber(const llvm::APSInt &I);
void mangleNumber(int64_t Number);
void mangleFloat(const llvm::APFloat &F);
- void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleFunctionEncoding(GlobalDecl GD);
void mangleSeqID(unsigned SeqID);
- void mangleName(const NamedDecl *ND);
+ void mangleName(GlobalDecl GD);
void mangleType(QualType T);
void mangleNameOrStandardSubstitution(const NamedDecl *ND);
void mangleLambdaSig(const CXXRecordDecl *Lambda);
@@ -461,38 +461,39 @@ private:
void mangleFunctionEncodingBareType(const FunctionDecl *FD);
- void mangleNameWithAbiTags(const NamedDecl *ND,
+ void mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleModuleName(const Module *M);
void mangleModuleNamePrefix(StringRef Name);
void mangleTemplateName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleUnqualifiedName(const NamedDecl *ND,
+ void mangleUnqualifiedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
- mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity,
+ mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), UnknownArity,
AdditionalAbiTags);
}
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name,
unsigned KnownArity,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedName(const NamedDecl *ND,
+ void mangleUnscopedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedTemplateName(const TemplateDecl *ND,
+ void mangleUnscopedTemplateName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleUnscopedTemplateName(TemplateName,
const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
void mangleRegCallName(const IdentifierInfo *II);
+ void mangleDeviceStubName(const IdentifierInfo *II);
void mangleSourceNameWithAbiTags(
const NamedDecl *ND, const AbiTagList *AdditionalAbiTags = nullptr);
- void mangleLocalName(const Decl *D,
+ void mangleLocalName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleBlockForPrefix(const BlockDecl *Block);
void mangleUnqualifiedBlock(const BlockDecl *Block);
void mangleTemplateParamDecl(const NamedDecl *Decl);
void mangleLambda(const CXXRecordDecl *Lambda);
- void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ void mangleNestedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
bool NoFunction=false);
void mangleNestedName(const TemplateDecl *TD,
@@ -501,7 +502,7 @@ private:
void manglePrefix(NestedNameSpecifier *qualifier);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void manglePrefix(QualType type);
- void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false);
+ void mangleTemplatePrefix(GlobalDecl GD, bool NoFunction=false);
void mangleTemplatePrefix(TemplateName Template);
bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType,
StringRef Prefix = "");
@@ -640,34 +641,40 @@ void CXXNameMangler::mangleSourceNameWithAbiTags(
writeAbiTags(ND, AdditionalAbiTags);
}
-void CXXNameMangler::mangle(const NamedDecl *D) {
+void CXXNameMangler::mangle(GlobalDecl GD) {
// <mangled-name> ::= _Z <encoding>
// ::= <data name>
// ::= <special-name>
Out << "_Z";
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (isa<FunctionDecl>(GD.getDecl()))
+ mangleFunctionEncoding(GD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(GD.getDecl()))
mangleName(VD);
- else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
+ else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(GD.getDecl()))
mangleName(IFD->getAnonField());
+ else if (const FieldDecl *FD = dyn_cast<FieldDecl>(GD.getDecl()))
+ mangleName(FD);
+ else if (const MSGuidDecl *GuidD = dyn_cast<MSGuidDecl>(GD.getDecl()))
+ mangleName(GuidD);
else
- mangleName(cast<FieldDecl>(D));
+ llvm_unreachable("unexpected kind of global decl");
}
-void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// <encoding> ::= <function name> <bare-function-type>
// Don't mangle in the type if this isn't a decl we should typically mangle.
if (!Context.shouldMangleDeclName(FD)) {
- mangleName(FD);
+ mangleName(GD);
return;
}
AbiTagList ReturnTypeAbiTags = makeFunctionReturnTypeTags(FD);
if (ReturnTypeAbiTags.empty()) {
// There are no tags for return type, the simplest case.
- mangleName(FD);
+ mangleName(GD);
mangleFunctionEncodingBareType(FD);
return;
}
@@ -787,13 +794,14 @@ static bool isStdNamespace(const DeclContext *DC) {
return isStd(cast<NamespaceDecl>(DC));
}
-static const TemplateDecl *
-isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+static const GlobalDecl
+isTemplate(GlobalDecl GD, const TemplateArgumentList *&TemplateArgs) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// Check if we have a function template.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
TemplateArgs = FD->getTemplateSpecializationArgs();
- return TD;
+ return GD.getWithDecl(TD);
}
}
@@ -801,20 +809,21 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
if (const ClassTemplateSpecializationDecl *Spec =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
// Check if we have a variable template.
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
- return nullptr;
+ return GlobalDecl();
}
-void CXXNameMangler::mangleName(const NamedDecl *ND) {
+void CXXNameMangler::mangleName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// Variables should have implicit tags from its type.
AbiTagList VariableTypeAbiTags = makeVariableTypeTags(VD);
@@ -843,12 +852,13 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
// Output name with implicit tags.
mangleNameWithAbiTags(VD, &AdditionalAbiTags);
} else {
- mangleNameWithAbiTags(ND, nullptr);
+ mangleNameWithAbiTags(GD, nullptr);
}
}
-void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
+void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <name> ::= [<module-name>] <nested-name>
// ::= [<module-name>] <unscoped-name>
// ::= [<module-name>] <unscoped-template-name> <template-args>
@@ -864,14 +874,14 @@ void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = getEffectiveParentContext(DC);
else if (GetLocalClassDecl(ND)) {
- mangleLocalName(ND, AdditionalAbiTags);
+ mangleLocalName(GD, AdditionalAbiTags);
return;
}
DC = IgnoreLinkageSpecDecls(DC);
if (isLocalContainerContext(DC)) {
- mangleLocalName(ND, AdditionalAbiTags);
+ mangleLocalName(GD, AdditionalAbiTags);
return;
}
@@ -886,17 +896,17 @@ void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleUnscopedTemplateName(TD, AdditionalAbiTags);
mangleTemplateArgs(*TemplateArgs);
return;
}
- mangleUnscopedName(ND, AdditionalAbiTags);
+ mangleUnscopedName(GD, AdditionalAbiTags);
return;
}
- mangleNestedName(ND, DC, AdditionalAbiTags);
+ mangleNestedName(GD, DC, AdditionalAbiTags);
}
void CXXNameMangler::mangleModuleName(const Module *M) {
@@ -947,19 +957,21 @@ void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
}
}
-void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND,
+void CXXNameMangler::mangleUnscopedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
Out << "St";
- mangleUnqualifiedName(ND, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
void CXXNameMangler::mangleUnscopedTemplateName(
- const TemplateDecl *ND, const AbiTagList *AdditionalAbiTags) {
+ GlobalDecl GD, const AbiTagList *AdditionalAbiTags) {
+ const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <unscoped-template-name> ::= <unscoped-name>
// ::= <substitution>
if (mangleSubstitution(ND))
@@ -971,9 +983,9 @@ void CXXNameMangler::mangleUnscopedTemplateName(
"template template param cannot have abi tags");
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) {
- mangleUnscopedName(ND, AdditionalAbiTags);
+ mangleUnscopedName(GD, AdditionalAbiTags);
} else {
- mangleUnscopedName(ND->getTemplatedDecl(), AdditionalAbiTags);
+ mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), AdditionalAbiTags);
}
addSubstitution(ND);
@@ -1250,10 +1262,11 @@ void CXXNameMangler::mangleUnresolvedName(
mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
}
-void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
DeclarationName Name,
unsigned KnownArity,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast_or_null<NamedDecl>(GD.getDecl());
unsigned Arity = KnownArity;
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
@@ -1279,6 +1292,16 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(ND)) {
+ // We follow MSVC in mangling GUID declarations as if they were variables
+ // with a particular reserved name. Continue the pretense here.
+ SmallString<sizeof("_GUID_12345678_1234_1234_1234_1234567890ab")> GUID;
+ llvm::raw_svector_ostream GUIDOS(GUID);
+ Context.mangleMSGuidDecl(GD, GUIDOS);
+ Out << GUID.size() << GUID;
+ break;
+ }
+
if (II) {
// Match GCC's naming convention for internal linkage symbols, for
// symbols that are not actually visible outside of this TU. GCC
@@ -1303,7 +1326,12 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
bool IsRegCall = FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() ==
clang::CC_X86RegCall;
- if (IsRegCall)
+ bool IsDeviceStub =
+ FD && FD->hasAttr<CUDAGlobalAttr>() &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub;
+ if (IsDeviceStub)
+ mangleDeviceStubName(II);
+ else if (IsRegCall)
mangleRegCallName(II);
else
mangleSourceName(II);
@@ -1381,7 +1409,8 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// <lambda-sig> ::= <template-param-decl>* <parameter-type>+
// # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
- if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ if (Record->isLambda() && (Record->getLambdaManglingNumber() ||
+ Context.isUniqueNameMangler())) {
assert(!AdditionalAbiTags &&
"Lambda type cannot have additional abi tags");
mangleLambda(Record);
@@ -1492,6 +1521,14 @@ void CXXNameMangler::mangleRegCallName(const IdentifierInfo *II) {
<< II->getName();
}
+void CXXNameMangler::mangleDeviceStubName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> __device_stub__ <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() + sizeof("__device_stub__") - 1 << "__device_stub__"
+ << II->getName();
+}
+
void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
// <source-name> ::= <positive length number> <identifier>
// <number> ::= [n] <non-negative decimal integer>
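For illustration only (the kernel name is made up, not taken from this patch): with the rule above, a CUDA kernel's host-side stub gets "__device_stub__" spliced into its <source-name>, while the device-side symbol is unchanged.

// Hypothetical kernel used only to show the shape of the stub mangling.
__global__ void foo();
// Host-side stub symbol:   _Z18__device_stub__foov   (18 = 15 + strlen("foo"))
// Device-side kernel name: _Z3foov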
@@ -1499,10 +1536,11 @@ void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
Out << II->getLength() << II->getName();
}
-void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+void CXXNameMangler::mangleNestedName(GlobalDecl GD,
const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
bool NoFunction) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <nested-name>
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
@@ -1520,13 +1558,13 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(*TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
- mangleUnqualifiedName(ND, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
Out << 'E';
@@ -1544,8 +1582,24 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'E';
}
-void CXXNameMangler::mangleLocalName(const Decl *D,
+static GlobalDecl getParentOfLocalEntity(const DeclContext *DC) {
+ GlobalDecl GD;
+ // The Itanium spec says:
+ // For entities in constructors and destructors, the mangling of the
+ // complete object constructor or destructor is used as the base function
+ // name, i.e. the C1 or D1 version.
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Complete);
+ else if (auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Complete);
+ else
+ GD = GlobalDecl(cast<FunctionDecl>(DC));
+ return GD;
+}
+
+void CXXNameMangler::mangleLocalName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const Decl *D = GD.getDecl();
// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
// := Z <function encoding> E s [<discriminator>]
// <local-name> := Z <function encoding> E d [ <parameter number> ]
@@ -1565,7 +1619,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
mangleBlockForPrefix(BD);
else
- mangleFunctionEncoding(cast<FunctionDecl>(DC));
+ mangleFunctionEncoding(getParentOfLocalEntity(DC));
// Implicit ABI tags (from namespace) are not available in the following
// entity; reset to actually emitted tags, which are available.
@@ -1608,7 +1662,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
mangleUnqualifiedBlock(BD);
} else {
const NamedDecl *ND = cast<NamedDecl>(D);
- mangleNestedName(ND, getEffectiveDeclContext(ND), AdditionalAbiTags,
+ mangleNestedName(GD, getEffectiveDeclContext(ND), AdditionalAbiTags,
true /*NoFunction*/);
}
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
@@ -1629,7 +1683,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
- mangleUnqualifiedName(cast<NamedDecl>(D), AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
@@ -1731,6 +1785,37 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
}
}
+// Handles the __builtin_unique_stable_name feature for lambdas. Instead of
+// using the lambda's ordinal in the mangling, this uses its line and column to
+// identify the lambda uniquely and reliably. Macro expansion locations are
+// encoded as well, so lambdas produced by macro expansion do not collide.
+static void mangleUniqueNameLambda(CXXNameMangler &Mangler, SourceManager &SM,
+ raw_ostream &Out,
+ const CXXRecordDecl *Lambda) {
+ SourceLocation Loc = Lambda->getLocation();
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ Mangler.mangleNumber(PLoc.getLine());
+ Out << "_";
+ Mangler.mangleNumber(PLoc.getColumn());
+
+  while (Loc.isMacroID()) {
+ SourceLocation SLToPrint = Loc;
+ if (SM.isMacroArgExpansion(Loc))
+ SLToPrint = SM.getImmediateExpansionRange(Loc).getBegin();
+
+ PLoc = SM.getPresumedLoc(SM.getSpellingLoc(SLToPrint));
+ Out << "m";
+ Mangler.mangleNumber(PLoc.getLine());
+ Out << "_";
+ Mangler.mangleNumber(PLoc.getColumn());
+
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ if (Loc.isFileID())
+ Loc = SM.getImmediateMacroCallerLoc(SLToPrint);
+ }
+}
+
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// If the context of a closure type is an initializer for a class member
// (static or nonstatic), it is encoded in a qualified name with a final
@@ -1761,6 +1846,12 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
mangleLambdaSig(Lambda);
Out << "E";
+ if (Context.isUniqueNameMangler()) {
+ mangleUniqueNameLambda(
+ *this, Context.getASTContext().getSourceManager(), Out, Lambda);
+ return;
+ }
+
// The number is omitted for the first closure type with a given
// <lambda-sig> in a given context; it is n-2 for the nth closure type
// (in lexical order) with that same <lambda-sig> and context.
@@ -1776,8 +1867,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) {
for (auto *D : Lambda->getLambdaExplicitTemplateParameters())
mangleTemplateParamDecl(D);
- const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
- getAs<FunctionProtoType>();
+ auto *Proto =
+ Lambda->getLambdaTypeInfo()->getType()->castAs<FunctionProtoType>();
mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
Lambda->getLambdaStaticInvoker());
}
@@ -1840,7 +1931,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
mangleTemplateArgs(*TemplateArgs);
} else {
@@ -1863,7 +1954,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
if (OverloadedTemplateStorage *Overloaded
= Template.getAsOverloadedTemplate()) {
- mangleUnqualifiedName(nullptr, (*Overloaded->begin())->getDeclName(),
+ mangleUnqualifiedName(GlobalDecl(), (*Overloaded->begin())->getDeclName(),
UnknownArity, nullptr);
return;
}
@@ -1875,8 +1966,9 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
mangleUnscopedTemplateName(Template, /* AdditionalAbiTags */ nullptr);
}
-void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
+void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
bool NoFunction) {
+ const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <template-prefix> ::= <prefix> <template unqualified-name>
// ::= <template-param>
// ::= <substitution>
@@ -1892,9 +1984,9 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
} else {
manglePrefix(getEffectiveDeclContext(ND), NoFunction);
if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND))
- mangleUnqualifiedName(ND, nullptr);
+ mangleUnqualifiedName(GD, nullptr);
else
- mangleUnqualifiedName(ND->getTemplatedDecl(), nullptr);
+ mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), nullptr);
}
addSubstitution(ND);
@@ -1988,6 +2080,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Paren:
@@ -2002,6 +2096,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Atomic:
case Type::Pipe:
case Type::MacroQualified:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2669,6 +2765,11 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << TI->getFloat128Mangling();
break;
}
+ case BuiltinType::BFloat16: {
+ const TargetInfo *TI = &getASTContext().getTargetInfo();
+ Out << TI->getBFloat16Mangling();
+ break;
+ }
case BuiltinType::NullPtr:
Out << "Dn";
break;
@@ -2720,10 +2821,18 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// The SVE types are effectively target-specific. The mangling scheme
// is defined in the appendices to the Procedure Call Standard for the
// Arm Architecture.
-#define SVE_TYPE(Name, Id, SingletonId) \
- case BuiltinType::Id: \
- type_name = Name; \
- Out << 'u' << type_name.size() << type_name; \
+#define SVE_VECTOR_TYPE(InternalName, MangledName, Id, SingletonId, NumEls, \
+ ElBits, IsSigned, IsFP, IsBF) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
+ break;
+#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
}
@@ -3067,6 +3176,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
case BuiltinType::UShort:
EltName = "poly16_t";
break;
+ case BuiltinType::LongLong:
case BuiltinType::ULongLong:
EltName = "poly64_t";
break;
@@ -3084,7 +3194,8 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
case BuiltinType::ULongLong: EltName = "uint64_t"; break;
case BuiltinType::Double: EltName = "float64_t"; break;
case BuiltinType::Float: EltName = "float32_t"; break;
- case BuiltinType::Half: EltName = "float16_t";break;
+ case BuiltinType::Half: EltName = "float16_t"; break;
+ case BuiltinType::BFloat16: EltName = "bfloat16_t"; break;
default:
llvm_unreachable("unexpected Neon vector element type");
}
@@ -3136,6 +3247,8 @@ static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
return "Float32";
case BuiltinType::Double:
return "Float64";
+ case BuiltinType::BFloat16:
+ return "BFloat16";
default:
llvm_unreachable("Unexpected vector element base type");
}
@@ -3250,6 +3363,31 @@ void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
mangleType(T->getElementType());
}
+void CXXNameMangler::mangleType(const ConstantMatrixType *T) {
+ // Mangle matrix types using a vendor extended type qualifier:
+ // U<Len>matrix_type<Rows><Columns><element type>
+ StringRef VendorQualifier = "matrix_type";
+ Out << "U" << VendorQualifier.size() << VendorQualifier;
+ auto &ASTCtx = getASTContext();
+ unsigned BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ llvm::APSInt Rows(BitWidth);
+ Rows = T->getNumRows();
+ mangleIntegerLiteral(ASTCtx.getSizeType(), Rows);
+ llvm::APSInt Columns(BitWidth);
+ Columns = T->getNumColumns();
+ mangleIntegerLiteral(ASTCtx.getSizeType(), Columns);
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const DependentSizedMatrixType *T) {
+ // U<Len>matrix_type<row expr><column expr><element type>
+ StringRef VendorQualifier = "matrix_type";
+ Out << "U" << VendorQualifier.size() << VendorQualifier;
+ mangleTemplateArg(T->getRowExpr());
+ mangleTemplateArg(T->getColumnExpr());
+ mangleType(T->getElementType());
+}
+
void CXXNameMangler::mangleType(const DependentAddressSpaceType *T) {
SplitQualType split = T->getPointeeType().split();
mangleQualifiers(split.Quals, T);
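The vendor-qualifier grammar above applies to Clang's matrix extension types; a hedged example of the kind of declaration that reaches this code (the typedef name is made up and -fenable-matrix is assumed):

// Hypothetical 4x4 float matrix type; its element type, rows and columns feed
// the U11matrix_type vendor qualifier emitted above.
typedef float float4x4 __attribute__((matrix_type(4, 4)));

void transpose(float4x4 &M); // the parameter's mangling carries the qualifier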
@@ -3460,6 +3598,28 @@ void CXXNameMangler::mangleType(const PipeType *T) {
Out << "8ocl_pipe";
}
+void CXXNameMangler::mangleType(const ExtIntType *T) {
+ Out << "U7_ExtInt";
+ llvm::APSInt BW(32, true);
+ BW = T->getNumBits();
+ TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy);
+ mangleTemplateArgs(&TA, 1);
+ if (T->isUnsigned())
+ Out << "j";
+ else
+ Out << "i";
+}
+
+void CXXNameMangler::mangleType(const DependentExtIntType *T) {
+ Out << "U7_ExtInt";
+ TemplateArgument TA(T->getNumBitsExpr());
+ mangleTemplateArgs(&TA, 1);
+ if (T->isUnsigned())
+ Out << "j";
+ else
+ Out << "i";
+}
+
void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
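The _ExtInt mangling above encodes the bit width as a template argument inside the vendor qualifier; a hedged example declaration (variable name made up):

// Hypothetical declaration reaching the ExtIntType mangler above: the width 33
// becomes a template argument after U7_ExtInt, followed by 'j' for unsigned.
unsigned _ExtInt(33) Checksum;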
@@ -3634,8 +3794,11 @@ recurse:
case Expr::LambdaExprClass:
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
- case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::RecoveryExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::CXXInheritedCtorInitExprClass:
llvm_unreachable("unexpected statement kind");
@@ -4089,6 +4252,15 @@ recurse:
break;
}
+ case Expr::MatrixSubscriptExprClass: {
+ const MatrixSubscriptExpr *ME = cast<MatrixSubscriptExpr>(E);
+ Out << "ixix";
+ mangleExpression(ME->getBase());
+ mangleExpression(ME->getRowIdx());
+ mangleExpression(ME->getColumnIdx());
+ break;
+ }
+
case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
@@ -4172,6 +4344,9 @@ recurse:
case Expr::CXXConstCastExprClass:
mangleCastExpression(E, "cc");
break;
+ case Expr::CXXAddrspaceCastExprClass:
+ mangleCastExpression(E, "ac");
+ break;
case Expr::CXXOperatorCallExprClass: {
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
@@ -4943,45 +5118,42 @@ bool CXXNameMangler::shouldHaveAbiTags(ItaniumMangleContextImpl &C,
/// and this routine will return false. In this case, the caller should just
/// emit the identifier of the declaration (\c D->getIdentifier()) as its
/// name.
-void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D,
+void ItaniumMangleContextImpl::mangleCXXName(GlobalDecl GD,
raw_ostream &Out) {
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
"Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
"Mangling declaration");
- CXXNameMangler Mangler(*this, Out, D);
- Mangler.mangle(D);
-}
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ auto Type = GD.getCtorType();
+ CXXNameMangler Mangler(*this, Out, CD, Type);
+ return Mangler.mangle(GlobalDecl(CD, Type));
+ }
-void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- raw_ostream &Out) {
- CXXNameMangler Mangler(*this, Out, D, Type);
- Mangler.mangle(D);
-}
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ auto Type = GD.getDtorType();
+ CXXNameMangler Mangler(*this, Out, DD, Type);
+ return Mangler.mangle(GlobalDecl(DD, Type));
+ }
-void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- raw_ostream &Out) {
- CXXNameMangler Mangler(*this, Out, D, Type);
- Mangler.mangle(D);
+ CXXNameMangler Mangler(*this, Out, D);
+ Mangler.mangle(GD);
}
void ItaniumMangleContextImpl::mangleCXXCtorComdat(const CXXConstructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Ctor_Comdat);
- Mangler.mangle(D);
+ Mangler.mangle(GlobalDecl(D, Ctor_Comdat));
}
void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Dtor_Comdat);
- Mangler.mangle(D);
+ Mangler.mangle(GlobalDecl(D, Dtor_Comdat));
}
void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
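Because mangleCXXName now takes a GlobalDecl, the constructor/destructor variant travels with the declaration instead of through the removed mangleCXXCtor/mangleCXXDtor entry points. A hedged usage sketch (all names hypothetical):

#include "clang/AST/DeclCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "llvm/Support/raw_ostream.h"

// Hedged sketch: mangle the complete-object constructor through the unified
// GlobalDecl-based entry point.
static void mangleCompleteCtor(clang::MangleContext &MC,
                               const clang::CXXConstructorDecl *Ctor,
                               llvm::raw_ostream &OS) {
  MC.mangleName(clang::GlobalDecl(Ctor, clang::Ctor_Complete), OS);
}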
@@ -5025,7 +5197,7 @@ void ItaniumMangleContextImpl::mangleCXXDtorThunk(
Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
ThisAdjustment.Virtual.Itanium.VCallOffsetOffset);
- Mangler.mangleFunctionEncoding(DD);
+ Mangler.mangleFunctionEncoding(GlobalDecl(DD, Type));
}
/// Returns the mangled name for a guard variable for the passed in VarDecl.
@@ -5059,6 +5231,18 @@ void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
Mangler.getStream() << D->getName();
}
+void ItaniumMangleContextImpl::mangleDynamicStermFinalizer(const VarDecl *D,
+ raw_ostream &Out) {
+ // Clang generates these internal-linkage functions as part of its
+ // implementation of the XL ABI.
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "__finalize_";
+ if (shouldMangleDeclName(D))
+ Mangler.mangle(D);
+ else
+ Mangler.getStream() << D->getName();
+}
+
void ItaniumMangleContextImpl::mangleSEHFilterExpression(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
@@ -5167,7 +5351,8 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
-ItaniumMangleContext *
-ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
- return new ItaniumMangleContextImpl(Context, Diags);
+ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
+ DiagnosticsEngine &Diags,
+ bool IsUniqueNameMangler) {
+ return new ItaniumMangleContextImpl(Context, Diags, IsUniqueNameMangler);
}
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index c30b07137edc..4bd00ece86ab 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -1,4 +1,6 @@
#include "clang/AST/JSONNodeDumper.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/StringSwitch.h"
@@ -72,6 +74,7 @@ void JSONNodeDumper::Visit(const Type *T) {
JOS.attribute("kind", (llvm::Twine(T->getTypeClassName()) + "Type").str());
JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar*/ false));
+ attributeOnlyIfTrue("containsErrors", T->containsErrors());
attributeOnlyIfTrue("isDependent", T->isDependentType());
attributeOnlyIfTrue("isInstantiationDependent",
T->isInstantiationDependentType());
@@ -109,7 +112,7 @@ void JSONNodeDumper::Visit(const Decl *D) {
JOS.attribute("isReferenced", true);
if (const auto *ND = dyn_cast<NamedDecl>(D))
- attributeOnlyIfTrue("isHidden", ND->isHidden());
+ attributeOnlyIfTrue("isHidden", !ND->isUnconditionallyVisible());
if (D->getLexicalDeclContext() != D->getDeclContext()) {
// Because of multiple inheritance, a DeclContext pointer does not produce
@@ -180,6 +183,13 @@ void JSONNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
attributeOnlyIfTrue("selected", A.isSelected());
}
+void JSONNodeDumper::Visit(const APValue &Value, QualType Ty) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ Value.printPretty(OS, Ctx, Ty);
+ JOS.attribute("value", OS.str());
+}
+
void JSONNodeDumper::writeIncludeStack(PresumedLoc Loc, bool JustFirst) {
if (Loc.isInvalid())
return;
@@ -384,6 +394,7 @@ static llvm::json::Object
createCopyAssignmentDefinitionData(const CXXRecordDecl *RD) {
llvm::json::Object Ret;
+ FIELD2("simple", hasSimpleCopyAssignment);
FIELD2("trivial", hasTrivialCopyAssignment);
FIELD2("nonTrivial", hasNonTrivialCopyAssignment);
FIELD2("hasConstParam", hasCopyAssignmentWithConstParam);
@@ -464,13 +475,10 @@ JSONNodeDumper::createCXXRecordDefinitionData(const CXXRecordDecl *RD) {
#undef FIELD2
std::string JSONNodeDumper::createAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none: return "none";
- case AS_private: return "private";
- case AS_protected: return "protected";
- case AS_public: return "public";
- }
- llvm_unreachable("Unknown access specifier");
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ return "none";
+ return AccessSpelling.str();
}
llvm::json::Object
@@ -997,32 +1005,33 @@ void JSONNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
case ObjCPropertyDecl::Required: JOS.attribute("control", "required"); break;
case ObjCPropertyDecl::Optional: JOS.attribute("control", "optional"); break;
}
-
- ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
- if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+
+ ObjCPropertyAttribute::Kind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyAttribute::kind_noattr) {
+ if (Attrs & ObjCPropertyAttribute::kind_getter)
JOS.attribute("getter", createBareDeclRef(D->getGetterMethodDecl()));
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ if (Attrs & ObjCPropertyAttribute::kind_setter)
JOS.attribute("setter", createBareDeclRef(D->getSetterMethodDecl()));
- attributeOnlyIfTrue("readonly", Attrs & ObjCPropertyDecl::OBJC_PR_readonly);
- attributeOnlyIfTrue("assign", Attrs & ObjCPropertyDecl::OBJC_PR_assign);
+ attributeOnlyIfTrue("readonly",
+ Attrs & ObjCPropertyAttribute::kind_readonly);
+ attributeOnlyIfTrue("assign", Attrs & ObjCPropertyAttribute::kind_assign);
attributeOnlyIfTrue("readwrite",
- Attrs & ObjCPropertyDecl::OBJC_PR_readwrite);
- attributeOnlyIfTrue("retain", Attrs & ObjCPropertyDecl::OBJC_PR_retain);
- attributeOnlyIfTrue("copy", Attrs & ObjCPropertyDecl::OBJC_PR_copy);
+ Attrs & ObjCPropertyAttribute::kind_readwrite);
+ attributeOnlyIfTrue("retain", Attrs & ObjCPropertyAttribute::kind_retain);
+ attributeOnlyIfTrue("copy", Attrs & ObjCPropertyAttribute::kind_copy);
attributeOnlyIfTrue("nonatomic",
- Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic);
- attributeOnlyIfTrue("atomic", Attrs & ObjCPropertyDecl::OBJC_PR_atomic);
- attributeOnlyIfTrue("weak", Attrs & ObjCPropertyDecl::OBJC_PR_weak);
- attributeOnlyIfTrue("strong", Attrs & ObjCPropertyDecl::OBJC_PR_strong);
+ Attrs & ObjCPropertyAttribute::kind_nonatomic);
+ attributeOnlyIfTrue("atomic", Attrs & ObjCPropertyAttribute::kind_atomic);
+ attributeOnlyIfTrue("weak", Attrs & ObjCPropertyAttribute::kind_weak);
+ attributeOnlyIfTrue("strong", Attrs & ObjCPropertyAttribute::kind_strong);
attributeOnlyIfTrue("unsafe_unretained",
- Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
- attributeOnlyIfTrue("class", Attrs & ObjCPropertyDecl::OBJC_PR_class);
- attributeOnlyIfTrue("direct", Attrs & ObjCPropertyDecl::OBJC_PR_direct);
+ Attrs & ObjCPropertyAttribute::kind_unsafe_unretained);
+ attributeOnlyIfTrue("class", Attrs & ObjCPropertyAttribute::kind_class);
+ attributeOnlyIfTrue("direct", Attrs & ObjCPropertyAttribute::kind_direct);
attributeOnlyIfTrue("nullability",
- Attrs & ObjCPropertyDecl::OBJC_PR_nullability);
+ Attrs & ObjCPropertyAttribute::kind_nullability);
attributeOnlyIfTrue("null_resettable",
- Attrs & ObjCPropertyDecl::OBJC_PR_null_resettable);
+ Attrs & ObjCPropertyAttribute::kind_null_resettable);
}
}
@@ -1234,14 +1243,7 @@ void JSONNodeDumper::VisitCallExpr(const CallExpr *CE) {
void JSONNodeDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *TTE) {
- switch (TTE->getKind()) {
- case UETT_SizeOf: JOS.attribute("name", "sizeof"); break;
- case UETT_AlignOf: JOS.attribute("name", "alignof"); break;
- case UETT_VecStep: JOS.attribute("name", "vec_step"); break;
- case UETT_PreferredAlignOf: JOS.attribute("name", "__alignof"); break;
- case UETT_OpenMPRequiredSimdAlign:
- JOS.attribute("name", "__builtin_omp_required_simd_align"); break;
- }
+ JOS.attribute("name", getTraitSpelling(TTE->getKind()));
if (TTE->isArgumentType())
JOS.attribute("argType", createQualType(TTE->getArgumentType()));
}
@@ -1277,12 +1279,8 @@ void JSONNodeDumper::VisitCXXTypeidExpr(const CXXTypeidExpr *CTE) {
}
void JSONNodeDumper::VisitConstantExpr(const ConstantExpr *CE) {
- if (CE->getResultAPValueKind() != APValue::None) {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
- CE->getAPValueResult().printPretty(OS, Ctx, CE->getType());
- JOS.attribute("value", OS.str());
- }
+ if (CE->getResultAPValueKind() != APValue::None)
+ Visit(CE->getAPValueResult(), CE->getType());
}
void JSONNodeDumper::VisitInitListExpr(const InitListExpr *ILE) {
@@ -1333,7 +1331,16 @@ void JSONNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *EWC) {
if (EWC->getNumObjects()) {
JOS.attributeArray("cleanups", [this, EWC] {
for (const ExprWithCleanups::CleanupObject &CO : EWC->getObjects())
- JOS.value(createBareDeclRef(CO));
+ if (auto *BD = CO.dyn_cast<BlockDecl *>()) {
+ JOS.value(createBareDeclRef(BD));
+ } else if (auto *CLE = CO.dyn_cast<CompoundLiteralExpr *>()) {
+ llvm::json::Object Obj;
+ Obj["id"] = createPointerRepresentation(CLE);
+ Obj["kind"] = CLE->getStmtClassName();
+ JOS.value(std::move(Obj));
+ } else {
+ llvm_unreachable("unexpected cleanup object type");
+ }
});
}
}
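
[Note: the cleanups hunk above reflects ExprWithCleanups::CleanupObject now being a pointer
union of BlockDecl* and CompoundLiteralExpr*, so the dumper has to branch on which alternative
is stored. A minimal sketch of that llvm::PointerUnion dispatch; Block and Literal below are
stand-in payload types, not clang's.]

    #include "llvm/ADT/PointerUnion.h"
    #include "llvm/Support/ErrorHandling.h"
    #include "llvm/Support/raw_ostream.h"

    namespace {
    // Stand-ins for BlockDecl and CompoundLiteralExpr.
    struct Block { const char *Name; };
    struct Literal { int Value; };

    using CleanupObject = llvm::PointerUnion<Block *, Literal *>;

    // Branch on whichever pointer the union holds, the way VisitExprWithCleanups
    // does above; a null or unknown union is treated as a hard error.
    void dump(CleanupObject CO) {
      if (auto *B = CO.dyn_cast<Block *>())
        llvm::outs() << "block: " << B->Name << "\n";
      else if (auto *L = CO.dyn_cast<Literal *>())
        llvm::outs() << "literal: " << L->Value << "\n";
      else
        llvm_unreachable("unexpected cleanup object type");
    }
    } // namespace

    int main() {
      Block B{"capture-copy"};
      Literal L{42};
      dump(CleanupObject(&B)); // block: capture-copy
      dump(CleanupObject(&L)); // literal: 42
    }
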
diff --git a/contrib/llvm-project/clang/lib/AST/Linkage.h b/contrib/llvm-project/clang/lib/AST/Linkage.h
index 4e913540de86..5d8acf0016f4 100644
--- a/contrib/llvm-project/clang/lib/AST/Linkage.h
+++ b/contrib/llvm-project/clang/lib/AST/Linkage.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_LIB_AST_LINKAGE_H
#define LLVM_CLANG_LIB_AST_LINKAGE_H
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
index e106b31f59f0..a732325006c6 100644
--- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -50,19 +51,32 @@ enum CCMangling {
CCM_Fast,
CCM_RegCall,
CCM_Vector,
- CCM_Std
+ CCM_Std,
+ CCM_WasmMainArgcArgv
};
static bool isExternC(const NamedDecl *ND) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
return FD->isExternC();
- return cast<VarDecl>(ND)->isExternC();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return VD->isExternC();
+ return false;
}
static CCMangling getCallingConvMangling(const ASTContext &Context,
const NamedDecl *ND) {
const TargetInfo &TI = Context.getTargetInfo();
const llvm::Triple &Triple = TI.getTriple();
+
+ // On wasm, the argc/argv form of "main" is renamed so that the startup code
+ // can call it with the correct function signature.
+ // On Emscripten, users may be exporting "main" and expecting to call it
+ // themselves, so we can't mangle it.
+ if (Triple.isWasm() && !Triple.isOSEmscripten())
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ if (FD->isMain() && FD->hasPrototype() && FD->param_size() == 2)
+ return CCM_WasmMainArgcArgv;
+
if (!Triple.isOSWindows() || !Triple.isX86())
return CCM_Other;
@@ -111,10 +125,15 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
if (D->hasAttr<AsmLabelAttr>())
return true;
+ // Declarations that don't have identifier names always need to be mangled.
+ if (isa<MSGuidDecl>(D))
+ return true;
+
return shouldMangleCXXName(D);
}
-void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
+void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
// Any decl can be declared with __asm("foo") on it, and this takes precedence
// over all other naming in the .o file.
if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
@@ -141,15 +160,24 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
return;
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(D))
+ return mangleMSGuidDecl(GD, Out);
+
const ASTContext &ASTContext = getASTContext();
CCMangling CC = getCallingConvMangling(ASTContext, D);
+
+ if (CC == CCM_WasmMainArgcArgv) {
+ Out << "__main_argc_argv";
+ return;
+ }
+
bool MCXX = shouldMangleCXXName(D);
const TargetInfo &TI = Context.getTargetInfo();
if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
mangleObjCMethodName(OMD, Out);
else
- mangleCXXName(D, Out);
+ mangleCXXName(GD, Out);
return;
}
@@ -166,7 +194,7 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
mangleObjCMethodName(OMD, Out);
else
- mangleCXXName(D, Out);
+ mangleCXXName(GD, Out);
const FunctionDecl *FD = cast<FunctionDecl>(D);
const FunctionType *FT = FD->getType()->castAs<FunctionType>();
@@ -191,6 +219,20 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
}
+void MangleContext::mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream &Out) {
+ // For now, follow the MSVC naming convention for GUID objects on all
+ // targets.
+ MSGuidDecl::Parts P = GD->getParts();
+ Out << llvm::format("_GUID_%08" PRIx32 "_%04" PRIx32 "_%04" PRIx32 "_",
+ P.Part1, P.Part2, P.Part3);
+ unsigned I = 0;
+ for (uint8_t C : P.Part4And5) {
+ Out << llvm::format("%02" PRIx8, C);
+ if (++I == 2)
+ Out << "_";
+ }
+}
+
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
const NamedDecl *ID,
raw_ostream &Out) {
@@ -213,7 +255,7 @@ void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
raw_ostream &ResStream) {
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- mangleCXXCtor(CD, CT, Out);
+ mangleName(GlobalDecl(CD, CT), Out);
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -222,7 +264,7 @@ void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
raw_ostream &ResStream) {
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- mangleCXXDtor(DD, DT, Out);
+ mangleName(GlobalDecl(DD, DT), Out);
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -358,7 +400,7 @@ public:
SmallString<40> Mangled;
auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext());
llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL);
- return Mangled.str();
+ return std::string(Mangled.str());
};
return {
@@ -420,12 +462,16 @@ public:
private:
bool writeFuncOrVarName(const NamedDecl *D, raw_ostream &OS) {
if (MC->shouldMangleDeclName(D)) {
+ GlobalDecl GD;
if (const auto *CtorD = dyn_cast<CXXConstructorDecl>(D))
- MC->mangleCXXCtor(CtorD, Ctor_Complete, OS);
+ GD = GlobalDecl(CtorD, Ctor_Complete);
else if (const auto *DtorD = dyn_cast<CXXDestructorDecl>(D))
- MC->mangleCXXDtor(DtorD, Dtor_Complete, OS);
+ GD = GlobalDecl(DtorD, Dtor_Complete);
+ else if (D->hasAttr<CUDAGlobalAttr>())
+ GD = GlobalDecl(cast<FunctionDecl>(D));
else
- MC->mangleName(D, OS);
+ GD = GlobalDecl(D);
+ MC->mangleName(GD, OS);
return false;
} else {
IdentifierInfo *II = D->getIdentifier();
@@ -445,10 +491,12 @@ private:
std::string FrontendBuf;
llvm::raw_string_ostream FOS(FrontendBuf);
+ GlobalDecl GD;
if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND))
- MC->mangleCXXCtor(CD, static_cast<CXXCtorType>(StructorType), FOS);
+ GD = GlobalDecl(CD, static_cast<CXXCtorType>(StructorType));
else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND))
- MC->mangleCXXDtor(DD, static_cast<CXXDtorType>(StructorType), FOS);
+ GD = GlobalDecl(DD, static_cast<CXXDtorType>(StructorType));
+ MC->mangleName(GD, FOS);
std::string BackendBuf;
llvm::raw_string_ostream BOS(BackendBuf);
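
[Note: the new mangleMSGuidDecl() above names a GUID object "_GUID_" followed by the GUID
fields in lower-case hex, with an underscore after the first two bytes of Part4And5. A
self-contained sketch that reproduces just that string format; the Guid struct below is a
stand-in for MSGuidDecl::Parts.]

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    namespace {
    // Stand-in for MSGuidDecl::Parts.
    struct Guid {
      uint32_t Part1;
      uint16_t Part2, Part3;
      uint8_t Part4And5[8];
    };

    std::string mangledGuidName(const Guid &G) {
      char Buf[64];
      int Len = std::snprintf(Buf, sizeof(Buf),
                              "_GUID_%08" PRIx32 "_%04" PRIx16 "_%04" PRIx16 "_",
                              G.Part1, G.Part2, G.Part3);
      std::string Name(Buf, Len);
      // Two bytes, an underscore, then the remaining six bytes -- matching the
      // "%02x" loop in MangleContext::mangleMSGuidDecl above.
      for (unsigned I = 0; I != 8; ++I) {
        std::snprintf(Buf, sizeof(Buf), "%02" PRIx8, G.Part4And5[I]);
        Name += Buf;
        if (I == 1)
          Name += '_';
      }
      return Name;
    }
    } // namespace

    int main() {
      Guid G{0x12345678, 0x1234, 0x5678, {0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78}};
      // Prints: _GUID_12345678_1234_5678_9abc_def012345678
      std::puts(mangledGuidName(G).c_str());
    }
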
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index a286c5335584..529f301e4696 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
@@ -22,9 +21,12 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CRC.h"
@@ -135,7 +137,7 @@ public:
MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags);
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
- void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
+ void mangleCXXName(GlobalDecl GD, raw_ostream &Out) override;
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML,
raw_ostream &Out) override;
@@ -177,10 +179,6 @@ public:
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
void mangleTypeName(QualType T, raw_ostream &) override;
- void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &) override;
- void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &) override;
void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
@@ -464,7 +462,7 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (VD->isExternC())
return false;
- // Variables at global scope with non-internal linkage are not mangled.
+ // Variables at global scope with internal linkage are not mangled.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
@@ -499,6 +497,10 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
+ else if (isa<MSGuidDecl>(D))
+ // MSVC appears to mangle GUIDs as if they were variables of type
+ // 'const struct __s_GUID'.
+ Out << "3U__s_GUID@@B";
else
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
@@ -895,6 +897,16 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ if (const MSGuidDecl *GD = dyn_cast<MSGuidDecl>(ND)) {
+ // Mangle a GUID object as if it were a variable with the corresponding
+ // mangled name.
+ SmallString<sizeof("_GUID_12345678_1234_1234_1234_1234567890ab")> GUID;
+ llvm::raw_svector_ostream GUIDOS(GUID);
+ Context.mangleMSGuidDecl(GD, GUIDOS);
+ mangleSourceName(GUID);
+ break;
+ }
+
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
@@ -935,12 +947,12 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
mangleSourceName(Name);
- // If the context of a closure type is an initializer for a class
- // member (static or nonstatic), it is encoded in a qualified name.
+ // If the context is a variable or a class member and not a parameter,
+ // it is encoded in a qualified name.
if (LambdaManglingNumber && LambdaContextDecl) {
if ((isa<VarDecl>(LambdaContextDecl) ||
isa<FieldDecl>(LambdaContextDecl)) &&
- LambdaContextDecl->getDeclContext()->isRecord()) {
+ !isa<ParmVarDecl>(LambdaContextDecl)) {
mangleUnqualifiedName(cast<NamedDecl>(LambdaContextDecl));
}
}
@@ -1301,7 +1313,7 @@ void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
BackRefVec::iterator Found = llvm::find(NameBackReferences, Name);
if (Found == NameBackReferences.end()) {
if (NameBackReferences.size() < 10)
- NameBackReferences.push_back(Name);
+ NameBackReferences.push_back(std::string(Name));
Out << Name << '@';
} else {
Out << (Found - NameBackReferences.begin());
@@ -1366,45 +1378,6 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
return;
}
- // Look through no-op casts like template parameter substitutions.
- E = E->IgnoreParenNoopCasts(Context.getASTContext());
-
- const CXXUuidofExpr *UE = nullptr;
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_AddrOf)
- UE = dyn_cast<CXXUuidofExpr>(UO->getSubExpr());
- } else
- UE = dyn_cast<CXXUuidofExpr>(E);
-
- if (UE) {
- // If we had to peek through an address-of operator, treat this like we are
- // dealing with a pointer type. Otherwise, treat it like a const reference.
- //
- // N.B. This matches up with the handling of TemplateArgument::Declaration
- // in mangleTemplateArg
- if (UE == E)
- Out << "$E?";
- else
- Out << "$1?";
-
- // This CXXUuidofExpr is mangled as-if it were actually a VarDecl from
- // const __s_GUID _GUID_{lower case UUID with underscores}
- StringRef Uuid = UE->getUuidStr();
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
-
- mangleSourceName(Name);
- // Terminate the whole name with an '@'.
- Out << '@';
- // It's a global variable.
- Out << '3';
- // It's a struct called __s_GUID.
- mangleArtificialTagType(TTK_Struct, "__s_GUID");
- // It's const.
- Out << 'B';
- return;
- }
-
// As bad as this diagnostic is, it's better than crashing.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2141,6 +2114,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
case BuiltinType::Float128: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2757,6 +2731,23 @@ void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
<< Range;
}
+void MicrosoftCXXNameMangler::mangleType(const ConstantMatrixType *T,
+ Qualifiers quals, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "Cannot mangle this matrix type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedMatrixType *T,
+ Qualifiers quals, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Cannot mangle this dependent-sized matrix type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -2942,29 +2933,68 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
SourceRange Range) {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this OpenCL pipe type yet");
- Diags.Report(Range.getBegin(), DiagID)
- << Range;
+ QualType ElementType = T->getElementType();
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("ocl_pipe");
+ Extra.mangleType(ElementType, Range, QMM_Escape);
+ Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly()), true);
+
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
-void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
+void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
raw_ostream &Out) {
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
-
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
"Mangling declaration");
msvc_hashing_ostream MHO(Out);
+
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ auto Type = GD.getCtorType();
+ MicrosoftCXXNameMangler mangler(*this, MHO, CD, Type);
+ return mangler.mangle(D);
+ }
+
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ auto Type = GD.getDtorType();
+ MicrosoftCXXNameMangler mangler(*this, MHO, DD, Type);
+ return mangler.mangle(D);
+ }
+
MicrosoftCXXNameMangler Mangler(*this, MHO);
return Mangler.mangle(D);
}
+void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,
+ SourceRange Range) {
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ if (T->isUnsigned())
+ Extra.mangleSourceName("_UExtInt");
+ else
+ Extra.mangleSourceName("_ExtInt");
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()),
+ /*IsBoolean=*/false);
+
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentExtIntType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this DependentExtInt type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
// <this-adjustment> ::= <no-adjustment> | <static-adjustment> |
// <virtual-adjustment>
// <no-adjustment> ::= A # private near
@@ -3218,7 +3248,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
if (!OmitCopyCtor && CD) {
llvm::raw_svector_ostream Stream(CopyCtorMangling);
msvc_hashing_ostream MHO(Stream);
- mangleCXXCtor(CD, CT, MHO);
+ mangleCXXName(GlobalDecl(CD, CT), MHO);
}
Mangler.getStream() << CopyCtorMangling;
@@ -3321,22 +3351,6 @@ void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
Mangler.mangleType(T, SourceRange());
}
-void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- raw_ostream &Out) {
- msvc_hashing_ostream MHO(Out);
- MicrosoftCXXNameMangler mangler(*this, MHO, D, Type);
- mangler.mangle(D);
-}
-
-void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- raw_ostream &Out) {
- msvc_hashing_ostream MHO(Out);
- MicrosoftCXXNameMangler mangler(*this, MHO, D, Type);
- mangler.mangle(D);
-}
-
void MicrosoftMangleContextImpl::mangleReferenceTemporary(
const VarDecl *VD, unsigned ManglingNumber, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
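
[Note: for context on the mangleSourceName() hunk above: the Microsoft scheme caches up to
ten previously emitted source names and writes a repeated name as its single-digit table
index instead of spelling it out again. A standalone sketch of that back-referencing,
independent of clang, follows.]

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    namespace {
    // Mirrors the NameBackReferences logic in MicrosoftCXXNameMangler::mangleSourceName:
    // the first occurrence is written as "name@" (and remembered while fewer than ten
    // names are cached); later occurrences are written as their table index.
    class BackRefTable {
      std::vector<std::string> Names;

    public:
      void mangle(const std::string &Name, std::ostream &Out) {
        auto Found = std::find(Names.begin(), Names.end(), Name);
        if (Found == Names.end()) {
          if (Names.size() < 10)
            Names.push_back(Name);
          Out << Name << '@';
        } else {
          Out << (Found - Names.begin());
        }
      }
    };
    } // namespace

    int main() {
      BackRefTable T;
      // Prints "outer@inner@0": the second "outer" collapses to back-reference 0.
      T.mangle("outer", std::cout);
      T.mangle("inner", std::cout);
      T.mangle("outer", std::cout);
      std::cout << '\n';
    }
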
diff --git a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
index ae6ff04f5126..ace7f1ceebe7 100644
--- a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
@@ -482,7 +482,11 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::Half:
case BuiltinType::PseudoObject:
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
+ case BuiltinType::BFloat16:
break;
}
diff --git a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
index 137953fa8203..08e8819a4d69 100644
--- a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -197,75 +198,53 @@ CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// Whether this nested name specifier refers to a dependent
-/// type or not.
-bool NestedNameSpecifier::isDependent() const {
+NestedNameSpecifierDependence NestedNameSpecifier::getDependence() const {
switch (getKind()) {
- case Identifier:
+ case Identifier: {
// Identifier specifiers always represent dependent types
- return true;
+ auto F = NestedNameSpecifierDependence::Dependent |
+ NestedNameSpecifierDependence::Instantiation;
+ // Prefix can contain unexpanded template parameters.
+ if (getPrefix())
+ return F | getPrefix()->getDependence();
+ return F;
+ }
case Namespace:
case NamespaceAlias:
case Global:
- return false;
+ return NestedNameSpecifierDependence::None;
case Super: {
CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
for (const auto &Base : RD->bases())
if (Base.getType()->isDependentType())
- return true;
-
- return false;
+ // FIXME: must also be instantiation-dependent.
+ return NestedNameSpecifierDependence::Dependent;
+ return NestedNameSpecifierDependence::None;
}
case TypeSpec:
case TypeSpecWithTemplate:
- return getAsType()->isDependentType();
+ return toNestedNameSpecifierDependendence(getAsType()->getDependence());
}
-
llvm_unreachable("Invalid NNS Kind!");
}
-/// Whether this nested name specifier refers to a dependent
-/// type or not.
-bool NestedNameSpecifier::isInstantiationDependent() const {
- switch (getKind()) {
- case Identifier:
- // Identifier specifiers always represent dependent types
- return true;
-
- case Namespace:
- case NamespaceAlias:
- case Global:
- case Super:
- return false;
-
- case TypeSpec:
- case TypeSpecWithTemplate:
- return getAsType()->isInstantiationDependentType();
- }
+bool NestedNameSpecifier::isDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Dependent;
+}
- llvm_unreachable("Invalid NNS Kind!");
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Instantiation;
}
bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
- switch (getKind()) {
- case Identifier:
- return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
-
- case Namespace:
- case NamespaceAlias:
- case Global:
- case Super:
- return false;
-
- case TypeSpec:
- case TypeSpecWithTemplate:
- return getAsType()->containsUnexpandedParameterPack();
- }
+ return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
+}
- llvm_unreachable("Invalid NNS Kind!");
+bool NestedNameSpecifier::containsErrors() const {
+ return getDependence() & NestedNameSpecifierDependence::Error;
}
/// Print this nested name specifier to the given output
@@ -336,6 +315,14 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
// Print the template argument list.
printTemplateArgumentList(OS, SpecType->template_arguments(),
InnerPolicy);
+ } else if (const auto *DepSpecType =
+ dyn_cast<DependentTemplateSpecializationType>(T)) {
+ // Print the template name without its corresponding
+ // nested-name-specifier.
+ OS << DepSpecType->getIdentifier()->getName();
+ // Print the template argument list.
+ printTemplateArgumentList(OS, DepSpecType->template_arguments(),
+ InnerPolicy);
} else {
// Print the type normally
QualType(T, 0).print(OS, InnerPolicy);
@@ -481,12 +468,14 @@ static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
unsigned NewCapacity = std::max(
(unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
(unsigned)(BufferSize + (End - Start)));
- char *NewBuffer = static_cast<char *>(llvm::safe_malloc(NewCapacity));
- if (BufferCapacity) {
- memcpy(NewBuffer, Buffer, BufferSize);
- free(Buffer);
+ if (!BufferCapacity) {
+ char *NewBuffer = static_cast<char *>(llvm::safe_malloc(NewCapacity));
+ if (Buffer)
+ memcpy(NewBuffer, Buffer, BufferSize);
+ Buffer = NewBuffer;
+ } else {
+ Buffer = static_cast<char *>(llvm::safe_realloc(Buffer, NewCapacity));
}
- Buffer = NewBuffer;
BufferCapacity = NewCapacity;
}
assert(Buffer && Start && End && End > Start && "Illegal memory buffer copy");
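
[Note: the NestedNameSpecifier rework above folds the separate dependence walks into a single
getDependence() bitmask whose bits the old predicates now simply test. A minimal sketch of
that flag pattern under illustrative names; the real enumerators and bitmask-enum plumbing
live in clang/AST/DependenceFlags.h.]

    #include <cstdint>
    #include <iostream>

    namespace {
    // Illustrative flag set; clang's real NestedNameSpecifierDependence differs.
    enum class NNSDependence : uint8_t {
      None = 0,
      UnexpandedPack = 1,
      Instantiation = 2,
      Dependent = 4,
      Error = 8,
    };

    constexpr NNSDependence operator|(NNSDependence A, NNSDependence B) {
      return NNSDependence(uint8_t(A) | uint8_t(B));
    }
    constexpr bool operator&(NNSDependence A, NNSDependence B) {
      return uint8_t(A) & uint8_t(B);
    }

    // The mask is computed once per specifier; each predicate is then a single
    // bit test, exactly as in the rewritten NestedNameSpecifier methods above.
    struct Specifier {
      NNSDependence Dep = NNSDependence::None;
      bool isDependent() const { return Dep & NNSDependence::Dependent; }
      bool isInstantiationDependent() const { return Dep & NNSDependence::Instantiation; }
      bool containsErrors() const { return Dep & NNSDependence::Error; }
    };
    } // namespace

    int main() {
      Specifier S;
      S.Dep = NNSDependence::Dependent | NNSDependence::Instantiation;
      // Prints "110": dependent, instantiation-dependent, no errors.
      std::cout << S.isDependent() << S.isInstantiationDependent() << S.containsErrors() << '\n';
    }
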
diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
index 1f9ff9e407dc..735bcff8f113 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
@@ -440,7 +440,7 @@ public:
// Only allow a small portion of Decl's to be processed. Remove this once
// all Decl's can be handled.
-bool ODRHash::isWhitelistedDecl(const Decl *D, const DeclContext *Parent) {
+bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) {
if (D->isImplicit()) return false;
if (D->getDeclContext() != Parent) return false;
@@ -487,7 +487,7 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Record->decls()) {
- if (isWhitelistedDecl(SubDecl, Record)) {
+ if (isDeclToBeProcessed(SubDecl, Record)) {
Decls.push_back(SubDecl);
if (auto *Function = dyn_cast<FunctionDecl>(SubDecl)) {
// Compute/Preload ODRHash into FunctionDecl.
@@ -588,7 +588,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Function->decls()) {
- if (isWhitelistedDecl(SubDecl, Function)) {
+ if (isDeclToBeProcessed(SubDecl, Function)) {
Decls.push_back(SubDecl);
}
}
@@ -614,7 +614,7 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) {
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Enum->decls()) {
- if (isWhitelistedDecl(SubDecl, Enum)) {
+ if (isDeclToBeProcessed(SubDecl, Enum)) {
assert(isa<EnumConstantDecl>(SubDecl) && "Unexpected Decl");
Decls.push_back(SubDecl);
}
diff --git a/contrib/llvm-project/clang/lib/AST/OSLog.cpp b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
index df2f808728cf..094c0102854b 100644
--- a/contrib/llvm-project/clang/lib/AST/OSLog.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
@@ -55,9 +55,9 @@ public:
ArgsData.reserve(Args.size());
}
- virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *StartSpecifier,
- unsigned SpecifierLen) {
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen) override {
if (!FS.consumesDataArgument() &&
FS.getConversionSpecifier().getKind() !=
clang::analyze_format_string::ConversionSpecifier::PrintErrno)
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index 5ef82648c89d..a0b0dca55390 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/LLVM.h"
@@ -23,31 +24,35 @@
#include <cassert>
using namespace clang;
+using namespace llvm;
+using namespace omp;
OMPClause::child_range OMPClause::children() {
switch (getClauseKind()) {
default:
break;
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return static_cast<Class *>(this)->children();
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("unknown OMPClause");
}
OMPClause::child_range OMPClause::used_children() {
switch (getClauseKind()) {
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return static_cast<Class *>(this)->used_children();
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
case OMPC_threadprivate:
case OMPC_uniform:
case OMPC_device_type:
case OMPC_match:
case OMPC_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("unknown OMPClause");
}
@@ -111,11 +116,16 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -128,6 +138,7 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -137,6 +148,15 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
@@ -184,11 +204,16 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_device:
case OMPC_threads:
@@ -207,6 +232,7 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -216,6 +242,15 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
@@ -316,6 +351,39 @@ const Expr *OMPOrderedClause::getLoopCounter(unsigned NumLoop) const {
return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
}
+OMPUpdateClause *OMPUpdateClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (C) OMPUpdateClause(StartLoc, EndLoc, /*IsExtended=*/false);
+}
+
+OMPUpdateClause *
+OMPUpdateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ArgumentLoc,
+ OpenMPDependClauseKind DK, SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<SourceLocation, OpenMPDependClauseKind>(2, 1),
+ alignof(OMPUpdateClause));
+ auto *Clause =
+ new (Mem) OMPUpdateClause(StartLoc, EndLoc, /*IsExtended=*/true);
+ Clause->setLParenLoc(LParenLoc);
+ Clause->setArgumentLoc(ArgumentLoc);
+ Clause->setDependencyKind(DK);
+ return Clause;
+}
+
+OMPUpdateClause *OMPUpdateClause::CreateEmpty(const ASTContext &C,
+ bool IsExtended) {
+ if (!IsExtended)
+ return new (C) OMPUpdateClause(/*IsExtended=*/false);
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<SourceLocation, OpenMPDependClauseKind>(2, 1),
+ alignof(OMPUpdateClause));
+ auto *Clause = new (Mem) OMPUpdateClause(/*IsExtended=*/true);
+ Clause->IsExtended = true;
+ return Clause;
+}
+
void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
@@ -647,16 +715,46 @@ void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
}
+void OMPReductionClause::setInscanCopyOps(ArrayRef<Expr *> Ops) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(Ops.size() == varlist_size() && "Number of copy "
+ "expressions is not the same "
+ "as the preallocated buffer");
+ llvm::copy(Ops, getReductionOps().end());
+}
+
+void OMPReductionClause::setInscanCopyArrayTemps(
+ ArrayRef<Expr *> CopyArrayTemps) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(CopyArrayTemps.size() == varlist_size() &&
+ "Number of copy temp expressions is not the same as the preallocated "
+ "buffer");
+ llvm::copy(CopyArrayTemps, getInscanCopyOps().end());
+}
+
+void OMPReductionClause::setInscanCopyArrayElems(
+ ArrayRef<Expr *> CopyArrayElems) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(CopyArrayElems.size() == varlist_size() &&
+ "Number of copy temp expressions is not the same as the preallocated "
+ "buffer");
+ llvm::copy(CopyArrayElems, getInscanCopyArrayTemps().end());
+}
+
OMPReductionClause *OMPReductionClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ SourceLocation ModifierLoc, SourceLocation EndLoc, SourceLocation ColonLoc,
+ OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
- ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit,
- Expr *PostUpdate) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
- OMPReductionClause *Clause = new (Mem) OMPReductionClause(
- StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
+ ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps,
+ ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps,
+ ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(
+ (Modifier == OMPC_REDUCTION_inscan ? 8 : 5) * VL.size()));
+ auto *Clause = new (Mem)
+ OMPReductionClause(StartLoc, LParenLoc, ModifierLoc, EndLoc, ColonLoc,
+ Modifier, VL.size(), QualifierLoc, NameInfo);
Clause->setVarRefs(VL);
Clause->setPrivates(Privates);
Clause->setLHSExprs(LHSExprs);
@@ -664,13 +762,29 @@ OMPReductionClause *OMPReductionClause::Create(
Clause->setReductionOps(ReductionOps);
Clause->setPreInitStmt(PreInit);
Clause->setPostUpdateExpr(PostUpdate);
+ if (Modifier == OMPC_REDUCTION_inscan) {
+ Clause->setInscanCopyOps(CopyOps);
+ Clause->setInscanCopyArrayTemps(CopyArrayTemps);
+ Clause->setInscanCopyArrayElems(CopyArrayElems);
+ } else {
+ assert(CopyOps.empty() &&
+ "copy operations are expected in inscan reductions only.");
+ assert(CopyArrayTemps.empty() &&
+ "copy array temps are expected in inscan reductions only.");
+ assert(CopyArrayElems.empty() &&
+ "copy array temps are expected in inscan reductions only.");
+ }
return Clause;
}
-OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
- return new (Mem) OMPReductionClause(N);
+OMPReductionClause *
+OMPReductionClause::CreateEmpty(const ASTContext &C, unsigned N,
+ OpenMPReductionClauseModifier Modifier) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(
+ (Modifier == OMPC_REDUCTION_inscan ? 8 : 5) * N));
+ auto *Clause = new (Mem) OMPReductionClause(N);
+ Clause->setModifier(Modifier);
+ return Clause;
}
void OMPTaskReductionClause::setPrivates(ArrayRef<Expr *> Privates) {
@@ -825,19 +939,36 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPFlushClause(N);
}
+OMPDepobjClause *OMPDepobjClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ Expr *Depobj) {
+ auto *Clause = new (C) OMPDepobjClause(StartLoc, LParenLoc, RParenLoc);
+ Clause->setDepobj(Depobj);
+ return Clause;
+}
+
+OMPDepobjClause *OMPDepobjClause::CreateEmpty(const ASTContext &C) {
+ return new (C) OMPDepobjClause();
+}
+
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
- OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VL,
- unsigned NumLoops) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops));
+ Expr *DepModifier, OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VL, unsigned NumLoops) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *>(VL.size() + /*depend-modifier*/ 1 + NumLoops),
+ alignof(OMPDependClause));
OMPDependClause *Clause = new (Mem)
OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
Clause->setVarRefs(VL);
Clause->setDependencyKind(DepKind);
Clause->setDependencyLoc(DepLoc);
Clause->setColonLoc(ColonLoc);
+ Clause->setModifier(DepModifier);
for (unsigned I = 0 ; I < NumLoops; ++I)
Clause->setLoopData(I, nullptr);
return Clause;
@@ -845,7 +976,9 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops));
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<Expr *>(N + /*depend-modifier*/ 1 + NumLoops),
+ alignof(OMPDependClause));
return new (Mem) OMPDependClause(N, NumLoops);
}
@@ -855,7 +988,7 @@ void OMPDependClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ auto *It = std::next(getVarRefs().end(), NumLoop + 1);
*It = Cnt;
}
@@ -865,7 +998,7 @@ Expr *OMPDependClause::getLoopData(unsigned NumLoop) {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ auto *It = std::next(getVarRefs().end(), NumLoop + 1);
return *It;
}
@@ -875,10 +1008,15 @@ const Expr *OMPDependClause::getLoopData(unsigned NumLoop) const {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ const auto *It = std::next(getVarRefs().end(), NumLoop + 1);
return *It;
}
+void OMPDependClause::setModifier(Expr *DepModifier) {
+ *getVarRefs().end() = DepModifier;
+}
+Expr *OMPDependClause::getModifier() { return *getVarRefs().end(); }
+
unsigned OMPClauseMappableExprCommon::getComponentsTotalNumber(
MappableExprComponentListsRef ComponentLists) {
unsigned TotalNum = 0u;
@@ -1075,8 +1213,8 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
- // 3 x NumVars x Expr* - we have an original list expression for each clause
- // list entry and an equal number of private copies and inits.
+ // NumVars x Expr* - we have an original list expression for each clause
+ // list entry.
// NumUniqueDeclarations x ValueDecl* - unique base declarations associated
// with each component list.
// (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
@@ -1112,6 +1250,53 @@ OMPUseDevicePtrClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPUseDevicePtrClause(Sizes);
}
+OMPUseDeviceAddrClause *
+OMPUseDeviceAddrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // 3 x NumVars x Expr* - we have an original list expression for each clause
+ // list entry and an equal number of private copies and inits.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+
+ auto *Clause = new (Mem) OMPUseDeviceAddrClause(Locs, Sizes);
+
+ Clause->setVarRefs(Vars);
+ Clause->setClauseInfo(Declarations, ComponentLists);
+ return Clause;
+}
+
+OMPUseDeviceAddrClause *
+OMPUseDeviceAddrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPUseDeviceAddrClause(Sizes);
+}
+
OMPIsDevicePtrClause *
OMPIsDevicePtrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
@@ -1184,13 +1369,132 @@ void OMPNontemporalClause::setPrivateRefs(ArrayRef<Expr *> VL) {
std::copy(VL.begin(), VL.end(), varlist_end());
}
+OMPInclusiveClause *OMPInclusiveClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ auto *Clause =
+ new (Mem) OMPInclusiveClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPInclusiveClause *OMPInclusiveClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPInclusiveClause(N);
+}
+
+OMPExclusiveClause *OMPExclusiveClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ auto *Clause =
+ new (Mem) OMPExclusiveClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPExclusiveClause *OMPExclusiveClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPExclusiveClause(N);
+}
+
+void OMPUsesAllocatorsClause::setAllocatorsData(
+ ArrayRef<OMPUsesAllocatorsClause::Data> Data) {
+ assert(Data.size() == NumOfAllocators &&
+ "Size of allocators data is not the same as the preallocated buffer.");
+ for (unsigned I = 0, E = Data.size(); I < E; ++I) {
+ const OMPUsesAllocatorsClause::Data &D = Data[I];
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(ExprOffsets::Allocator)] =
+ D.Allocator;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(
+ ExprOffsets::AllocatorTraits)] =
+ D.AllocatorTraits;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::LParen)] =
+ D.LParenLoc;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::RParen)] =
+ D.RParenLoc;
+ }
+}
+
+OMPUsesAllocatorsClause::Data
+OMPUsesAllocatorsClause::getAllocatorData(unsigned I) const {
+ OMPUsesAllocatorsClause::Data Data;
+ Data.Allocator =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(ExprOffsets::Allocator)];
+ Data.AllocatorTraits =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(
+ ExprOffsets::AllocatorTraits)];
+ Data.LParenLoc = getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::LParen)];
+ Data.RParenLoc = getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::RParen)];
+ return Data;
+}
+
+OMPUsesAllocatorsClause *
+OMPUsesAllocatorsClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<OMPUsesAllocatorsClause::Data> Data) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *, SourceLocation>(
+ static_cast<int>(ExprOffsets::Total) * Data.size(),
+ static_cast<int>(ParenLocsOffsets::Total) * Data.size()));
+ auto *Clause = new (Mem)
+ OMPUsesAllocatorsClause(StartLoc, LParenLoc, EndLoc, Data.size());
+ Clause->setAllocatorsData(Data);
+ return Clause;
+}
+
+OMPUsesAllocatorsClause *
+OMPUsesAllocatorsClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *, SourceLocation>(
+ static_cast<int>(ExprOffsets::Total) * N,
+ static_cast<int>(ParenLocsOffsets::Total) * N));
+ return new (Mem) OMPUsesAllocatorsClause(N);
+}
+
+OMPAffinityClause *
+OMPAffinityClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Locators.size() + 1));
+ auto *Clause = new (Mem)
+ OMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc, Locators.size());
+ Clause->setModifier(Modifier);
+ Clause->setVarRefs(Locators);
+ return Clause;
+}
+
+OMPAffinityClause *OMPAffinityClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + 1));
+ return new (Mem) OMPAffinityClause(N);
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
OS << "if(";
- if (Node->getNameModifier() != llvm::omp::OMPD_unknown)
+ if (Node->getNameModifier() != OMPD_unknown)
OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
@@ -1232,9 +1536,16 @@ void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPDetachClause(OMPDetachClause *Node) {
+ OS << "detach(";
+ Node->getEventHandler()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
OS << "default("
- << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
+ << getOpenMPSimpleClauseTypeName(OMPC_default,
+ unsigned(Node->getDefaultKind()))
<< ")";
}
@@ -1320,8 +1631,14 @@ void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
-void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *Node) {
OS << "update";
+ if (Node->isExtended()) {
+ OS << "(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ OS << ")";
+ }
}
void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
@@ -1332,6 +1649,22 @@ void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
OS << "seq_cst";
}
+void OMPClausePrinter::VisitOMPAcqRelClause(OMPAcqRelClause *) {
+ OS << "acq_rel";
+}
+
+void OMPClausePrinter::VisitOMPAcquireClause(OMPAcquireClause *) {
+ OS << "acquire";
+}
+
+void OMPClausePrinter::VisitOMPReleaseClause(OMPReleaseClause *) {
+ OS << "release";
+}
+
+void OMPClausePrinter::VisitOMPRelaxedClause(OMPRelaxedClause *) {
+ OS << "relaxed";
+}
+
void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
OS << "threads";
}
@@ -1340,6 +1673,11 @@ void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
OS << "device(";
+ OpenMPDeviceClauseModifier Modifier = Node->getModifier();
+ if (Modifier != OMPC_DEVICE_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier)
+ << ": ";
+ }
Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
}
@@ -1380,6 +1718,10 @@ void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPDestroyClause(OMPDestroyClause *) {
+ OS << "destroy";
+}
+
template<typename T>
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
@@ -1453,6 +1795,9 @@ void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "reduction(";
+ if (Node->getModifierLoc().isValid())
+ OS << getOpenMPSimpleClauseTypeName(OMPC_reduction, Node->getModifier())
+ << ", ";
NestedNameSpecifier *QualifierLoc =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
@@ -1570,8 +1915,18 @@ void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPDepobjClause(OMPDepobjClause *Node) {
+ OS << "(";
+ Node->getDepobj()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
OS << "depend(";
+ if (Expr *DepModifier = Node->getModifier()) {
+ DepModifier->printPretty(OS, nullptr, Policy);
+ OS << ", ";
+ }
OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
Node->getDependencyKind());
if (!Node->varlist_empty()) {
@@ -1585,7 +1940,7 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (!Node->varlist_empty()) {
OS << "map(";
if (Node->getMapType() != OMPC_MAP_unknown) {
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
OS << getOpenMPSimpleClauseTypeName(OMPC_map,
Node->getMapTypeModifier(I));
@@ -1662,9 +2017,11 @@ void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
OS << "defaultmap(";
OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
Node->getDefaultmapModifier());
- OS << ": ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapKind());
+ if (Node->getDefaultmapKind() != OMPC_DEFAULTMAP_unknown) {
+ OS << ": ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapKind());
+ }
OS << ")";
}
@@ -1676,6 +2033,15 @@ void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPUseDeviceAddrClause(
+ OMPUseDeviceAddrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "use_device_addr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
if (!Node->varlist_empty()) {
OS << "is_device_ptr";
@@ -1691,3 +2057,226 @@ void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) {
OS << ")";
}
}
+
+void OMPClausePrinter::VisitOMPOrderClause(OMPOrderClause *Node) {
+ OS << "order(" << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPInclusiveClause(OMPInclusiveClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "inclusive";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPExclusiveClause(OMPExclusiveClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "exclusive";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPUsesAllocatorsClause(
+ OMPUsesAllocatorsClause *Node) {
+ if (Node->getNumberOfAllocators() == 0)
+ return;
+ OS << "uses_allocators(";
+ for (unsigned I = 0, E = Node->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data Data = Node->getAllocatorData(I);
+ Data.Allocator->printPretty(OS, nullptr, Policy);
+ if (Data.AllocatorTraits) {
+ OS << "(";
+ Data.AllocatorTraits->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ if (I < E - 1)
+ OS << ",";
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPAffinityClause(OMPAffinityClause *Node) {
+ if (Node->varlist_empty())
+ return;
+ OS << "affinity";
+ char StartSym = '(';
+ if (Expr *Modifier = Node->getModifier()) {
+ OS << "(";
+ Modifier->printPretty(OS, nullptr, Policy);
+ OS << " :";
+ StartSym = ' ';
+ }
+ VisitOMPClauseList(Node, StartSym);
+ OS << ")";
+}
+
+void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
+ VariantMatchInfo &VMI) const {
+ for (const OMPTraitSet &Set : Sets) {
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+
+ // User conditions are special as we evaluate the condition here.
+ if (Selector.Kind == TraitSelector::user_condition) {
+ assert(Selector.ScoreOrCondition &&
+ "Ill-formed user condition, expected condition expression!");
+ assert(Selector.Properties.size() == 1 &&
+ Selector.Properties.front().Kind ==
+ TraitProperty::user_condition_unknown &&
+ "Ill-formed user condition, expected unknown trait property!");
+
+ llvm::APSInt CondVal;
+ if (Selector.ScoreOrCondition->isIntegerConstantExpr(CondVal, ASTCtx))
+ VMI.addTrait(CondVal.isNullValue()
+ ? TraitProperty::user_condition_false
+ : TraitProperty::user_condition_true);
+ else
+ VMI.addTrait(TraitProperty::user_condition_false);
+ continue;
+ }
+
+ llvm::APSInt Score;
+ llvm::APInt *ScorePtr = nullptr;
+ if (Selector.ScoreOrCondition) {
+ if (Selector.ScoreOrCondition->isIntegerConstantExpr(Score, ASTCtx))
+ ScorePtr = &Score;
+ else
+ VMI.addTrait(TraitProperty::user_condition_false);
+ }
+
+ for (const OMPTraitProperty &Property : Selector.Properties)
+ VMI.addTrait(Set.Kind, Property.Kind, ScorePtr);
+
+ if (Set.Kind != TraitSet::construct)
+ continue;
+
+ // TODO: This might not hold once we implement SIMD properly.
+ assert(Selector.Properties.size() == 1 &&
+ Selector.Properties.front().Kind ==
+ getOpenMPContextTraitPropertyForSelector(
+ Selector.Kind) &&
+ "Ill-formed construct selector!");
+
+ VMI.ConstructTraits.push_back(Selector.Properties.front().Kind);
+ }
+ }
+}
+
+void OMPTraitInfo::print(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ bool FirstSet = true;
+ for (const OMPTraitSet &Set : Sets) {
+ if (!FirstSet)
+ OS << ", ";
+ FirstSet = false;
+ OS << getOpenMPContextTraitSetName(Set.Kind) << "={";
+
+ bool FirstSelector = true;
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+ if (!FirstSelector)
+ OS << ", ";
+ FirstSelector = false;
+ OS << getOpenMPContextTraitSelectorName(Selector.Kind);
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ Selector.Kind, Set.Kind, AllowsTraitScore, RequiresProperty);
+
+ if (!RequiresProperty)
+ continue;
+
+ OS << "(";
+ if (Selector.Kind == TraitSelector::user_condition) {
+ Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ } else {
+
+ if (Selector.ScoreOrCondition) {
+ OS << "score(";
+ Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ OS << "): ";
+ }
+
+ bool FirstProperty = true;
+ for (const OMPTraitProperty &Property : Selector.Properties) {
+ if (!FirstProperty)
+ OS << ", ";
+ FirstProperty = false;
+ OS << getOpenMPContextTraitPropertyName(Property.Kind);
+ }
+ }
+ OS << ")";
+ }
+ OS << "}";
+ }
+}
+
+std::string OMPTraitInfo::getMangledName() const {
+ std::string MangledName;
+ llvm::raw_string_ostream OS(MangledName);
+ for (const OMPTraitSet &Set : Sets) {
+ OS << '$' << 'S' << unsigned(Set.Kind);
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ Selector.Kind, Set.Kind, AllowsTraitScore, RequiresProperty);
+ OS << '$' << 's' << unsigned(Selector.Kind);
+
+ if (!RequiresProperty ||
+ Selector.Kind == TraitSelector::user_condition)
+ continue;
+
+ for (const OMPTraitProperty &Property : Selector.Properties)
+ OS << '$' << 'P' << getOpenMPContextTraitPropertyName(Property.Kind);
+ }
+ }
+ return OS.str();
+}
+
+OMPTraitInfo::OMPTraitInfo(StringRef MangledName) {
+ unsigned long U;
+ do {
+ if (!MangledName.consume_front("$S"))
+ break;
+ if (MangledName.consumeInteger(10, U))
+ break;
+ Sets.push_back(OMPTraitSet());
+ OMPTraitSet &Set = Sets.back();
+ Set.Kind = TraitSet(U);
+ do {
+ if (!MangledName.consume_front("$s"))
+ break;
+ if (MangledName.consumeInteger(10, U))
+ break;
+ Set.Selectors.push_back(OMPTraitSelector());
+ OMPTraitSelector &Selector = Set.Selectors.back();
+ Selector.Kind = TraitSelector(U);
+ do {
+ if (!MangledName.consume_front("$P"))
+ break;
+ Selector.Properties.push_back(OMPTraitProperty());
+ OMPTraitProperty &Property = Selector.Properties.back();
+ std::pair<StringRef, StringRef> PropRestPair = MangledName.split('$');
+ Property.Kind =
+ getOpenMPContextTraitPropertyKind(Set.Kind, PropRestPair.first);
+ MangledName = PropRestPair.second;
+ } while (true);
+ } while (true);
+ } while (true);
+}
+
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const OMPTraitInfo &TI) {
+ LangOptions LO;
+ PrintingPolicy Policy(LO);
+ TI.print(OS, Policy);
+ return OS;
+}
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const OMPTraitInfo *TI) {
+ return TI ? OS << *TI : OS;
+}
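
The mangled form produced and re-parsed above is a simple positional encoding: each trait set contributes a "$S<set-kind>" segment, each selector a "$s<selector-kind>" segment, and each property a "$P<property-name>" segment. A minimal sketch of the llvm::StringRef primitives that the parsing leans on (editorial illustration, not part of the patch; the sample string is made up and real kind numbers come from the TraitSet/TraitSelector enums):

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    // Illustration only: the StringRef operations used by the
    // OMPTraitInfo(StringRef) constructor above.  consume_front() strips a
    // matching prefix and reports success; consumeInteger() returns true on
    // *failure*; split() drops the separator it splits on.
    static void stringRefParsingSketch() {
      llvm::StringRef S("$S1$s2$Pfoo");
      unsigned long U = 0;
      assert(S.consume_front("$S") && !S.consumeInteger(10, U) && U == 1);
      assert(S.consume_front("$s") && !S.consumeInteger(10, U) && U == 2);
      assert(S.consume_front("$P"));
      auto PropRest = S.split('$');      // no '$' left, so ("foo", "")
      assert(PropRest.first == "foo" && PropRest.second.empty());
    }
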
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
new file mode 100644
index 000000000000..b73b32774b53
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
@@ -0,0 +1,321 @@
+//===- ParentMapContext.cpp - Map of parents using DynTypedNode -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Similar to ParentMap.cpp, but generalizes to non-Stmt nodes, which can have
+// multiple parents.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TemplateBase.h"
+
+using namespace clang;
+
+ParentMapContext::ParentMapContext(ASTContext &Ctx) : ASTCtx(Ctx) {}
+
+ParentMapContext::~ParentMapContext() = default;
+
+void ParentMapContext::clear() { Parents.reset(); }
+
+const Expr *ParentMapContext::traverseIgnored(const Expr *E) const {
+ return traverseIgnored(const_cast<Expr *>(E));
+}
+
+Expr *ParentMapContext::traverseIgnored(Expr *E) const {
+ if (!E)
+ return nullptr;
+
+ switch (Traversal) {
+ case TK_AsIs:
+ return E;
+ case TK_IgnoreImplicitCastsAndParentheses:
+ return E->IgnoreParenImpCasts();
+ case TK_IgnoreUnlessSpelledInSource:
+ return E->IgnoreUnlessSpelledInSource();
+ }
+ llvm_unreachable("Invalid Traversal type!");
+}
+
+DynTypedNode ParentMapContext::traverseIgnored(const DynTypedNode &N) const {
+ if (const auto *E = N.get<Expr>()) {
+ return DynTypedNode::create(*traverseIgnored(E));
+ }
+ return N;
+}
+
+class ParentMapContext::ParentMap {
+ /// Contains parents of a node.
+ using ParentVector = llvm::SmallVector<DynTypedNode, 2>;
+
+ /// Maps from a node to its parents. This is used for nodes that have
+ /// pointer identity only, which are more common and we can save space by
+ /// only storing a unique pointer to them.
+ using ParentMapPointers =
+ llvm::DenseMap<const void *,
+ llvm::PointerUnion<const Decl *, const Stmt *,
+ DynTypedNode *, ParentVector *>>;
+
+ /// Parent map for nodes without pointer identity. We store a full
+ /// DynTypedNode for all keys.
+ using ParentMapOtherNodes =
+ llvm::DenseMap<DynTypedNode,
+ llvm::PointerUnion<const Decl *, const Stmt *,
+ DynTypedNode *, ParentVector *>>;
+
+ ParentMapPointers PointerParents;
+ ParentMapOtherNodes OtherParents;
+ class ASTVisitor;
+
+ static DynTypedNode
+ getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return DynTypedNode::create(*S);
+ return *U.get<DynTypedNode *>();
+ }
+
+ template <typename NodeTy, typename MapTy>
+ static DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+ const MapTy &Map) {
+ auto I = Map.find(Node);
+ if (I == Map.end()) {
+ return llvm::ArrayRef<DynTypedNode>();
+ }
+ if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
+ return llvm::makeArrayRef(*V);
+ }
+ return getSingleDynTypedNodeFromParentMap(I->second);
+ }
+
+public:
+ ParentMap(ASTContext &Ctx);
+ ~ParentMap() {
+ for (const auto &Entry : PointerParents) {
+ if (Entry.second.is<DynTypedNode *>()) {
+ delete Entry.second.get<DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ for (const auto &Entry : OtherParents) {
+ if (Entry.second.is<DynTypedNode *>()) {
+ delete Entry.second.get<DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ }
+
+ DynTypedNodeList getParents(TraversalKind TK, const DynTypedNode &Node) {
+ if (Node.getNodeKind().hasPointerIdentity()) {
+ auto ParentList =
+ getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
+ if (ParentList.size() == 1 && TK == TK_IgnoreUnlessSpelledInSource) {
+ const auto *E = ParentList[0].get<Expr>();
+ const auto *Child = Node.get<Expr>();
+ if (E && Child)
+ return AscendIgnoreUnlessSpelledInSource(E, Child);
+ }
+ return ParentList;
+ }
+ return getDynNodeFromMap(Node, OtherParents);
+ }
+
+ DynTypedNodeList AscendIgnoreUnlessSpelledInSource(const Expr *E,
+ const Expr *Child) {
+
+ auto ShouldSkip = [](const Expr *E, const Expr *Child) {
+ if (isa<ImplicitCastExpr>(E))
+ return true;
+
+ if (isa<FullExpr>(E))
+ return true;
+
+ if (isa<MaterializeTemporaryExpr>(E))
+ return true;
+
+ if (isa<CXXBindTemporaryExpr>(E))
+ return true;
+
+ if (isa<ParenExpr>(E))
+ return true;
+
+ if (isa<ExprWithCleanups>(E))
+ return true;
+
+ auto SR = Child->getSourceRange();
+
+ if (const auto *C = dyn_cast<CXXConstructExpr>(E)) {
+ if (C->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
+ return true;
+ }
+
+ if (const auto *C = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (C->getSourceRange() == SR)
+ return true;
+ }
+
+ if (const auto *C = dyn_cast<MemberExpr>(E)) {
+ if (C->getSourceRange() == SR)
+ return true;
+ }
+ return false;
+ };
+
+ while (ShouldSkip(E, Child)) {
+ auto It = PointerParents.find(E);
+ if (It == PointerParents.end())
+ break;
+ const auto *S = It->second.dyn_cast<const Stmt *>();
+ if (!S) {
+ if (auto *Vec = It->second.dyn_cast<ParentVector *>())
+ return llvm::makeArrayRef(*Vec);
+ return getSingleDynTypedNodeFromParentMap(It->second);
+ }
+ const auto *P = dyn_cast<Expr>(S);
+ if (!P)
+ return DynTypedNode::create(*S);
+ Child = E;
+ E = P;
+ }
+ return DynTypedNode::create(*E);
+ }
+};
+
+/// Template specializations to abstract away from pointers and TypeLocs.
+/// @{
+template <typename T> static DynTypedNode createDynTypedNode(const T &Node) {
+ return DynTypedNode::create(*Node);
+}
+template <> DynTypedNode createDynTypedNode(const TypeLoc &Node) {
+ return DynTypedNode::create(Node);
+}
+template <>
+DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) {
+ return DynTypedNode::create(Node);
+}
+/// @}
+
+/// A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ParentMapContext::ParentMap::ASTVisitor
+ : public RecursiveASTVisitor<ASTVisitor> {
+public:
+ ASTVisitor(ParentMap &Map) : Map(Map) {}
+
+private:
+ friend class RecursiveASTVisitor<ASTVisitor>;
+
+ using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+
+ bool shouldVisitImplicitCode() const { return true; }
+
+ template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+ typename MapTy>
+ bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
+ MapTy *Parents) {
+ if (!Node)
+ return true;
+ if (ParentStack.size() > 0) {
+ // FIXME: Currently we add the same parent multiple times, but only
+ // when no memoization data is available for the type.
+ // For example when we visit all subexpressions of template
+ // instantiations; this is suboptimal, but benign: the only way to
+ // visit those is with hasAncestor / hasParent, and those do not create
+ // new matches.
+ // The plan is to enable DynTypedNode to be storable in a map or hash
+ // map. The main problem there is to implement hash functions /
+ // comparison operators for all types that DynTypedNode supports that
+ // do not have pointer identity.
+ auto &NodeOrVector = (*Parents)[MapNode];
+ if (NodeOrVector.isNull()) {
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector = new DynTypedNode(ParentStack.back());
+ } else {
+ if (!NodeOrVector.template is<ParentVector *>()) {
+ auto *Vector = new ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ delete NodeOrVector.template dyn_cast<DynTypedNode *>();
+ NodeOrVector = Vector;
+ }
+
+ auto *Vector = NodeOrVector.template get<ParentVector *>();
+ // Skip duplicates for types that have memoization data.
+ // We must check that the type has memoization data before calling
+ // std::find() because DynTypedNode::operator== can't compare all
+ // types.
+ bool Found = ParentStack.back().getMemoizationData() &&
+ std::find(Vector->begin(), Vector->end(),
+ ParentStack.back()) != Vector->end();
+ if (!Found)
+ Vector->push_back(ParentStack.back());
+ }
+ }
+ ParentStack.push_back(createDynTypedNode(Node));
+ bool Result = BaseTraverse();
+ ParentStack.pop_back();
+ return Result;
+ }
+
+ bool TraverseDecl(Decl *DeclNode) {
+ return TraverseNode(
+ DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
+ &Map.PointerParents);
+ }
+
+ bool TraverseStmt(Stmt *StmtNode) {
+ return TraverseNode(StmtNode, StmtNode,
+ [&] { return VisitorBase::TraverseStmt(StmtNode); },
+ &Map.PointerParents);
+ }
+
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ return TraverseNode(
+ TypeLocNode, DynTypedNode::create(TypeLocNode),
+ [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ &Map.OtherParents);
+ }
+
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+ return TraverseNode(
+ NNSLocNode, DynTypedNode::create(NNSLocNode),
+ [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
+ &Map.OtherParents);
+ }
+
+ ParentMap &Map;
+ llvm::SmallVector<DynTypedNode, 16> ParentStack;
+};
+
+ParentMapContext::ParentMap::ParentMap(ASTContext &Ctx) {
+ ASTVisitor(*this).TraverseAST(Ctx);
+}
+
+DynTypedNodeList ParentMapContext::getParents(const DynTypedNode &Node) {
+ if (!Parents)
+ // We build the parent map for the traversal scope (usually whole TU), as
+ // hasAncestor can escape any subtree.
+ Parents = std::make_unique<ParentMap>(ASTCtx);
+ return Parents->getParents(getTraversalKind(), Node);
+}
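
For context, the map above is what backs upward traversal in the AST: the first parent query builds it for the whole traversal scope and later queries reuse it. A rough usage sketch, assuming an ASTContext and an Expr obtained elsewhere (function name is illustrative, not from the patch):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Expr.h"
    #include "clang/AST/ParentMapContext.h"
    #include "llvm/Support/raw_ostream.h"

    // Walk one level up from an expression and report what kind of node each
    // parent is.  Most nodes have a single parent; template instantiations and
    // nodes without pointer identity can report several.
    static void listParentKinds(clang::ASTContext &Ctx, const clang::Expr *E) {
      for (const clang::DynTypedNode &Parent : Ctx.getParents(*E))
        llvm::errs() << Parent.getNodeKind().asStringRef() << "\n";
    }
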
diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
index dabc39c57ef7..f3ac181214ac 100644
--- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
@@ -11,10 +11,11 @@
//
//===----------------------------------------------------------------------===//
+#include "FormatStringParsing.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/OSLog.h"
-#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Regex.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
@@ -316,8 +317,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 'g': k = ConversionSpecifier::gArg; break;
case 'i': k = ConversionSpecifier::iArg; break;
case 'n':
- // Not handled, but reserved in OpenCL and FreeBSD kernel.
- if (!LO.OpenCL && !isFreeBSDKPrintf)
+ // Not handled, but reserved in OpenCL.
+ if (!LO.OpenCL)
k = ConversionSpecifier::nArg;
break;
case 'o': k = ConversionSpecifier::oArg; break;
@@ -751,6 +752,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::UInt128:
case BuiltinType::Int128:
case BuiltinType::Half:
+ case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::ShortAccum:
diff --git a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
index d7124156521c..a8d15036cab9 100644
--- a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Allocator.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 9a21732b63e3..d56c7e2ab8c0 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Format.h"
@@ -1186,11 +1187,10 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
// Query the external layout to see if it provides an offset.
bool HasExternalLayout = false;
if (UseExternalLayout) {
- // FIXME: This appears to be reversed.
if (Base->IsVirtual)
- HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
- else
HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset);
+ else
+ HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
}
// Clang <= 6 incorrectly applied the 'packed' attribute to base classes.
@@ -2107,7 +2107,7 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
if (MD->isImplicit())
continue;
- if (MD->isInlineSpecified())
+ if (MD->isInlineSpecified() || MD->isConstexpr())
continue;
if (MD->hasInlineBody())
@@ -2568,9 +2568,11 @@ MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
// information about the bases, such as required alignment and the presence of
// zero sized members.
const ASTRecordLayout *PreviousBaseLayout = nullptr;
+ bool HasPolymorphicBaseClass = false;
// Iterate through the bases and lay out the non-virtual ones.
for (const CXXBaseSpecifier &Base : RD->bases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ HasPolymorphicBaseClass |= BaseDecl->isPolymorphic();
const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
// Mark and skip virtual bases.
if (Base.isVirtual()) {
@@ -2594,11 +2596,23 @@ MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
layoutNonVirtualBase(RD, BaseDecl, BaseLayout, PreviousBaseLayout);
}
// Figure out if we need a fresh VFPtr for this class.
- if (!PrimaryBase && RD->isDynamicClass())
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end();
- !HasOwnVFPtr && i != e; ++i)
- HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
+ if (RD->isPolymorphic()) {
+ if (!HasPolymorphicBaseClass)
+ // This class introduces polymorphism, so we need a vftable to store the
+ // RTTI information.
+ HasOwnVFPtr = true;
+ else if (!PrimaryBase) {
+ // We have a polymorphic base class but can't extend its vftable. Add a
+ // new vfptr if we would use any vftable slots.
+ for (CXXMethodDecl *M : RD->methods()) {
+ if (MicrosoftVTableContext::hasVtableSlot(M) &&
+ M->size_overridden_methods() == 0) {
+ HasOwnVFPtr = true;
+ break;
+ }
+ }
+ }
+ }
// If we don't have a primary base then we have a leading object that could
// itself lead with a zero-sized object, something we track.
bool CheckLeadingLayout = !PrimaryBase;
@@ -2993,7 +3007,8 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
llvm::SmallPtrSet<const CXXRecordDecl *, 2> BasesWithOverriddenMethods;
// Seed the working set with our non-destructor, non-pure virtual methods.
for (const CXXMethodDecl *MD : RD->methods())
- if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD) && !MD->isPure())
+ if (MicrosoftVTableContext::hasVtableSlot(MD) &&
+ !isa<CXXDestructorDecl>(MD) && !MD->isPure())
Work.insert(MD);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
@@ -3222,7 +3237,8 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
if (D->hasExternalLexicalStorage() && !D->getDefinition())
getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
D = D->getDefinition();
- assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
+ assert(D && !D->isInvalidDecl() && D->isThisDeclarationADefinition() &&
+ "Invalid interface decl!");
// Look up this layout, if already laid out, return what we have.
const ObjCContainerDecl *Key =
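
The reworked vfptr logic above distinguishes three situations under the Microsoft ABI. A small sketch of classes that land in each bucket (editorial illustration; the layout claims are only what the branches above imply):

    struct A { virtual void f(); };          // polymorphic with no polymorphic
                                             // base: introduces the RTTI entry,
                                             // so it now always gets its own vfptr
    struct B : A { void f() override; };     // A becomes the primary base and
                                             // f() only overrides, so B extends
                                             // A's vftable instead of adding one
    struct C : virtual A { virtual void g(); };  // polymorphic base, but a virtual
                                             // base cannot serve as primary and
                                             // g() needs a fresh slot: own vfptr
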
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index 7409ae7ddc9e..25e685be3e9b 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -273,7 +273,6 @@ SourceRange Stmt::getSourceRange() const {
}
SourceLocation Stmt::getBeginLoc() const {
- // llvm::errs() << "getBeginLoc() for " << getStmtClassName() << "\n";
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
@@ -457,7 +456,7 @@ void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
}
AddrLabelExpr *GCCAsmStmt::getLabelExpr(unsigned i) const {
- return cast<AddrLabelExpr>(Exprs[i + NumInputs]);
+ return cast<AddrLabelExpr>(Exprs[i + NumOutputs + NumInputs]);
}
StringRef GCCAsmStmt::getLabelName(unsigned i) const {
@@ -523,7 +522,7 @@ int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
for (unsigned i = 0, e = getNumLabels(); i != e; ++i)
if (getLabelName(i) == SymbolicName)
- return i + getNumInputs();
+ return i + getNumOutputs() + getNumInputs();
// Not found.
return -1;
@@ -732,7 +731,7 @@ std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
/// Assemble final IR asm string (MS-style).
std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
// FIXME: This needs to be translated into the IR string representation.
- return AsmStr;
+ return std::string(AsmStr);
}
Expr *MSAsmStmt::getOutputExpr(unsigned i) {
@@ -1013,7 +1012,8 @@ void SwitchStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
}
WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
- Stmt *Body, SourceLocation WL)
+ Stmt *Body, SourceLocation WL, SourceLocation LParenLoc,
+ SourceLocation RParenLoc)
: Stmt(WhileStmtClass) {
bool HasVar = Var != nullptr;
WhileStmtBits.HasVar = HasVar;
@@ -1024,6 +1024,8 @@ WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
setConditionVariable(Ctx, Var);
setWhileLoc(WL);
+ setLParenLoc(LParenLoc);
+ setRParenLoc(RParenLoc);
}
WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
@@ -1032,12 +1034,14 @@ WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
}
WhileStmt *WhileStmt::Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
- Stmt *Body, SourceLocation WL) {
+ Stmt *Body, SourceLocation WL,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
bool HasVar = Var != nullptr;
void *Mem =
Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
alignof(WhileStmt));
- return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL);
+ return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL, LParenLoc, RParenLoc);
}
WhileStmt *WhileStmt::CreateEmpty(const ASTContext &Ctx, bool HasVar) {
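
The two indexing fixes above matter once an asm goto can also carry output constraints: the statement's operand array is laid out as outputs, then inputs, then labels, so label i lives at Exprs[i + NumOutputs + NumInputs]. A sketch, assuming a compiler that accepts outputs on asm goto (a GNU/Clang extension; the asm body is intentionally empty):

    int branchy(int x) {
      int y = 0;
      asm goto(""              // no real instructions needed for the sketch
               : "=r"(y)       // 1 output  -> Exprs[0]
               : "r"(x)        // 1 input   -> Exprs[1]
               :               // no clobbers
               : out);         // 1 label   -> Exprs[0 + NumOutputs + NumInputs]
      return y;
    out:
      return -1;
    }
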
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index da6d308ad15b..788fac789270 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -10,9 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/StmtOpenMP.h"
-
#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace llvm::omp;
@@ -162,7 +161,8 @@ void OMPLoopDirective::setFinalsConditions(ArrayRef<Expr *> A) {
OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
void *Mem =
@@ -171,6 +171,7 @@ OMPParallelDirective *OMPParallelDirective::Create(
new (Mem) OMPParallelDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -228,11 +229,10 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
}
-OMPForDirective *
-OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+OMPForDirective *OMPForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
@@ -265,6 +265,7 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -335,7 +336,8 @@ OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
void *Mem =
@@ -344,6 +346,7 @@ OMPSectionsDirective *OMPSectionsDirective::Create(
new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -450,7 +453,7 @@ OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
OMPParallelForDirective *OMPParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
@@ -484,6 +487,7 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -553,7 +557,7 @@ OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
OMPParallelMasterDirective *OMPParallelMasterDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelMasterDirective), alignof(OMPClause *));
void *Mem =
@@ -562,6 +566,7 @@ OMPParallelMasterDirective *OMPParallelMasterDirective::Create(
new (Mem) OMPParallelMasterDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
return Dir;
}
@@ -577,7 +582,8 @@ OMPParallelMasterDirective *OMPParallelMasterDirective::CreateEmpty(const ASTCon
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
void *Mem =
@@ -586,6 +592,7 @@ OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -759,6 +766,50 @@ OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPFlushDirective(NumClauses);
}
+OMPDepobjDirective *OMPDepobjDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
+ alignof(OMPDepobjDirective));
+ auto *Dir = new (Mem) OMPDepobjDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPDepobjDirective *OMPDepobjDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
+ alignof(OMPDepobjDirective));
+ return new (Mem) OMPDepobjDirective(NumClauses);
+}
+
+OMPScanDirective *OMPScanDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
+ alignof(OMPScanDirective));
+ auto *Dir = new (Mem) OMPScanDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPScanDirective *OMPScanDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
+ alignof(OMPScanDirective));
+ return new (Mem) OMPScanDirective(NumClauses);
+}
+
OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
@@ -844,7 +895,8 @@ OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
void *Mem =
@@ -853,6 +905,8 @@ OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
new (Mem) OMPTargetParallelDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -869,7 +923,7 @@ OMPTargetParallelDirective::CreateEmpty(const ASTContext &C,
OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -903,6 +957,7 @@ OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1014,7 +1069,7 @@ OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
void *Mem =
@@ -1048,6 +1103,7 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1117,7 +1173,7 @@ OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
OMPMasterTaskLoopDirective *OMPMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPMasterTaskLoopDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1151,6 +1207,7 @@ OMPMasterTaskLoopDirective *OMPMasterTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1223,7 +1280,7 @@ OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1258,6 +1315,7 @@ OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1410,7 +1468,7 @@ OMPTargetUpdateDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1459,6 +1517,7 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1885,7 +1944,7 @@ OMPTeamsDistributeParallelForDirective *
OMPTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1934,6 +1993,7 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -2037,7 +2097,7 @@ OMPTargetTeamsDistributeParallelForDirective *
OMPTargetTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
auto Size =
llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
alignof(OMPClause *));
@@ -2088,6 +2148,7 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
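
Among the plumbing above, OMPDepobjDirective and OMPScanDirective are new statement kinds for two OpenMP 5.0 constructs. A small sketch of the source forms they model (standard OpenMP 5.0 syntax, not code taken from the patch):

    #include <omp.h>

    void inclusive_scan(const int *in, int *out, int n) {
      int sum = 0;
      #pragma omp simd reduction(inscan, +: sum)
      for (int i = 0; i < n; ++i) {
        sum += in[i];
        #pragma omp scan inclusive(sum)        // -> OMPScanDirective
        out[i] = sum;
      }
    }

    void make_depobj(int &v) {
      omp_depend_t d;
      #pragma omp depobj(d) depend(inout: v)   // -> OMPDepobjDirective
    }
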
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index 45fd8ceae8d3..f797f5fe8e6d 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -75,14 +75,11 @@ namespace {
public:
StmtPrinter(raw_ostream &os, PrinterHelper *helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
- StringRef NL = "\n",
- const ASTContext *Context = nullptr)
+ StringRef NL = "\n", const ASTContext *Context = nullptr)
: OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy),
NL(NL), Context(Context) {}
- void PrintStmt(Stmt *S) {
- PrintStmt(S, Policy.Indentation);
- }
+ void PrintStmt(Stmt *S) { PrintStmt(S, Policy.Indentation); }
void PrintStmt(Stmt *S, int SubIndent) {
IndentLevel += SubIndent;
@@ -756,6 +753,16 @@ void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPDepobjDirective(OMPDepobjDirective *Node) {
+ Indent() << "#pragma omp depobj";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPScanDirective(OMPScanDirective *Node) {
+ Indent() << "#pragma omp scan";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
Indent() << "#pragma omp ordered";
PrintOMPExecutableDirective(Node, Node->hasClausesOfKind<OMPDependClause>());
@@ -1274,29 +1281,20 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
OS << ")";
}
-void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
- switch(Node->getKind()) {
- case UETT_SizeOf:
- OS << "sizeof";
- break;
- case UETT_AlignOf:
+void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *Node) {
+ const char *Spelling = getTraitSpelling(Node->getKind());
+ if (Node->getKind() == UETT_AlignOf) {
if (Policy.Alignof)
- OS << "alignof";
+ Spelling = "alignof";
else if (Policy.UnderscoreAlignof)
- OS << "_Alignof";
+ Spelling = "_Alignof";
else
- OS << "__alignof";
- break;
- case UETT_PreferredAlignOf:
- OS << "__alignof";
- break;
- case UETT_VecStep:
- OS << "vec_step";
- break;
- case UETT_OpenMPRequiredSimdAlign:
- OS << "__builtin_omp_required_simd_align";
- break;
+ Spelling = "__alignof";
}
+
+ OS << Spelling;
+
if (Node->isArgumentType()) {
OS << '(';
Node->getArgumentType().print(OS, Policy);
@@ -1330,19 +1328,65 @@ void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
OS << "]";
}
+void StmtPrinter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ PrintExpr(Node->getRowIdx());
+ OS << "]";
+ OS << "[";
+ PrintExpr(Node->getColumnIdx());
+ OS << "]";
+}
+
void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
PrintExpr(Node->getBase());
OS << "[";
if (Node->getLowerBound())
PrintExpr(Node->getLowerBound());
- if (Node->getColonLoc().isValid()) {
+ if (Node->getColonLocFirst().isValid()) {
OS << ":";
if (Node->getLength())
PrintExpr(Node->getLength());
}
+ if (Node->getColonLocSecond().isValid()) {
+ OS << ":";
+ if (Node->getStride())
+ PrintExpr(Node->getStride());
+ }
OS << "]";
}
+void StmtPrinter::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *Node) {
+ OS << "(";
+ for (Expr *E : Node->getDimensions()) {
+ OS << "[";
+ PrintExpr(E);
+ OS << "]";
+ }
+ OS << ")";
+ PrintExpr(Node->getBase());
+}
+
+void StmtPrinter::VisitOMPIteratorExpr(OMPIteratorExpr *Node) {
+ OS << "iterator(";
+ for (unsigned I = 0, E = Node->numOfIterators(); I < E; ++I) {
+ auto *VD = cast<ValueDecl>(Node->getIteratorDecl(I));
+ VD->getType().print(OS, Policy);
+ const OMPIteratorExpr::IteratorRange Range = Node->getIteratorRange(I);
+ OS << " " << VD->getName() << " = ";
+ PrintExpr(Range.Begin);
+ OS << ":";
+ PrintExpr(Range.End);
+ if (Range.Step) {
+ OS << ":";
+ PrintExpr(Range.Step);
+ }
+ if (I < E - 1)
+ OS << ", ";
+ }
+ OS << ")";
+}
+
void StmtPrinter::PrintCallArgs(CallExpr *Call) {
for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
@@ -1750,6 +1794,10 @@ void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
@@ -2008,7 +2056,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
if (Policy.TerseOutput)
OS << "{}";
else
- PrintRawCompoundStmt(Node->getBody());
+ PrintRawCompoundStmt(Node->getCompoundStmtBody());
}
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
@@ -2160,37 +2208,8 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
printTemplateArgumentList(OS, Node->template_arguments(), Policy);
}
-static const char *getTypeTraitName(TypeTrait TT) {
- switch (TT) {
-#define TYPE_TRAIT_1(Spelling, Name, Key) \
-case clang::UTT_##Name: return #Spelling;
-#define TYPE_TRAIT_2(Spelling, Name, Key) \
-case clang::BTT_##Name: return #Spelling;
-#define TYPE_TRAIT_N(Spelling, Name, Key) \
- case clang::TT_##Name: return #Spelling;
-#include "clang/Basic/TokenKinds.def"
- }
- llvm_unreachable("Type trait not covered by switch");
-}
-
-static const char *getTypeTraitName(ArrayTypeTrait ATT) {
- switch (ATT) {
- case ATT_ArrayRank: return "__array_rank";
- case ATT_ArrayExtent: return "__array_extent";
- }
- llvm_unreachable("Array type trait not covered by switch");
-}
-
-static const char *getExpressionTraitName(ExpressionTrait ET) {
- switch (ET) {
- case ET_IsLValueExpr: return "__is_lvalue_expr";
- case ET_IsRValueExpr: return "__is_rvalue_expr";
- }
- llvm_unreachable("Expression type trait not covered by switch");
-}
-
void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << "(";
+ OS << getTraitSpelling(E->getTrait()) << "(";
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
if (I > 0)
OS << ", ";
@@ -2200,13 +2219,13 @@ void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
}
void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << '(';
+ OS << getTraitSpelling(E->getTrait()) << '(';
E->getQueriedType().print(OS, Policy);
OS << ')';
}
void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- OS << getExpressionTraitName(E->getTrait()) << '(';
+ OS << getTraitSpelling(E->getTrait()) << '(';
PrintExpr(E->getQueriedExpression());
OS << ')';
}
@@ -2499,6 +2518,17 @@ void StmtPrinter::VisitTypoExpr(TypoExpr *Node) {
llvm_unreachable("Cannot print TypoExpr nodes");
}
+void StmtPrinter::VisitRecoveryExpr(RecoveryExpr *Node) {
+ OS << "<recovery-expr>(";
+ const char *Sep = "";
+ for (Expr *E : Node->subExpressions()) {
+ OS << Sep;
+ PrintExpr(E);
+ Sep = ", ";
+ }
+ OS << ')';
+}
+
void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
OS << "__builtin_astype(";
PrintExpr(Node->getSrcExpr());
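
The new VisitOMPIteratorExpr above prints iterator modifiers back in their source form, "iterator(type name = begin:end[:step], ...)", and the extended section printer adds the optional ":stride" part of an array section. A sketch of input the iterator printer round-trips (illustrative function, not from the patch):

    void uses_iterator(int *p, int n) {
      // The depend clause's iterator modifier is an OMPIteratorExpr; the
      // printer above emits it as "iterator(int i = 0:n)".
      #pragma omp task depend(iterator(int i = 0:n), in: p[i])
      { /* task body */ }
    }
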
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index 60dec50d53da..bf3b43b816f1 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ODRHash.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
@@ -413,9 +414,8 @@ class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
public:
OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
-#define OPENMP_CLAUSE(Name, Class) \
- void Visit##Class(const Class *C);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(const Class *C);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
void VistOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
void VistOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
@@ -471,6 +471,11 @@ void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
Profiler->VisitStmt(C->getNumForLoops());
}
+void OMPClauseProfiler::VisitOMPDetachClause(const OMPDetachClause *C) {
+ if (Expr *Evt = C->getEventHandler())
+ Profiler->VisitStmt(Evt);
+}
+
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
@@ -517,12 +522,22 @@ void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
+void OMPClauseProfiler::VisitOMPAcqRelClause(const OMPAcqRelClause *) {}
+
+void OMPClauseProfiler::VisitOMPAcquireClause(const OMPAcquireClause *) {}
+
+void OMPClauseProfiler::VisitOMPReleaseClause(const OMPReleaseClause *) {}
+
+void OMPClauseProfiler::VisitOMPRelaxedClause(const OMPRelaxedClause *) {}
+
void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {}
void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
void OMPClauseProfiler::VisitOMPNogroupClause(const OMPNogroupClause *) {}
+void OMPClauseProfiler::VisitOMPDestroyClause(const OMPDestroyClause *) {}
+
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
for (auto *E : Node->varlists()) {
@@ -594,6 +609,20 @@ void OMPClauseProfiler::VisitOMPReductionClause(
if (E)
Profiler->VisitStmt(E);
}
+ if (C->getModifier() == clang::OMPC_REDUCTION_inscan) {
+ for (auto *E : C->copy_ops()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->copy_array_temps()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->copy_array_elems()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ }
}
void OMPClauseProfiler::VisitOMPTaskReductionClause(
const OMPTaskReductionClause *C) {
@@ -710,6 +739,10 @@ OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {
void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPDepobjClause(const OMPDepobjClause *C) {
+ if (const Expr *Depobj = C->getDepobj())
+ Profiler->VisitStmt(Depobj);
+}
void OMPClauseProfiler::VisitOMPDependClause(const OMPDependClause *C) {
VisitOMPClauseList(C);
}
@@ -765,6 +798,10 @@ void OMPClauseProfiler::VisitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause *C) {
+ VisitOMPClauseList(C);
+}
void OMPClauseProfiler::VisitOMPIsDevicePtrClause(
const OMPIsDevicePtrClause *C) {
VisitOMPClauseList(C);
@@ -775,6 +812,28 @@ void OMPClauseProfiler::VisitOMPNontemporalClause(
for (auto *E : C->private_refs())
Profiler->VisitStmt(E);
}
+void OMPClauseProfiler::VisitOMPInclusiveClause(const OMPInclusiveClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPExclusiveClause(const OMPExclusiveClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPUsesAllocatorsClause(
+ const OMPUsesAllocatorsClause *C) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ Profiler->VisitStmt(D.Allocator);
+ if (D.AllocatorTraits)
+ Profiler->VisitStmt(D.AllocatorTraits);
+ }
+}
+void OMPClauseProfiler::VisitOMPAffinityClause(const OMPAffinityClause *C) {
+ if (const Expr *Modifier = C->getModifier())
+ Profiler->VisitStmt(Modifier);
+ for (const Expr *E : C->varlists())
+ Profiler->VisitStmt(E);
+}
+void OMPClauseProfiler::VisitOMPOrderClause(const OMPOrderClause *C) {}
} // namespace
void
@@ -875,6 +934,14 @@ void StmtProfiler::VisitOMPFlushDirective(const OMPFlushDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPDepobjDirective(const OMPDepobjDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPScanDirective(const OMPScanDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPOrderedDirective(const OMPOrderedDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -1155,10 +1222,24 @@ void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitMatrixSubscriptExpr(const MatrixSubscriptExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitOMPArrayShapingExpr(const OMPArrayShapingExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOMPIteratorExpr(const OMPIteratorExpr *S) {
+ VisitExpr(S);
+ for (unsigned I = 0, E = S->numOfIterators(); I < E; ++I)
+ VisitDecl(S->getIteratorDecl(I));
+}
+
void StmtProfiler::VisitCallExpr(const CallExpr *S) {
VisitExpr(S);
}
@@ -1377,7 +1458,7 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) {
ID.AddInteger(concepts::Requirement::RK_Nested);
auto *NestedReq = cast<concepts::NestedRequirement>(Req);
ID.AddBoolean(NestedReq->isSubstitutionFailure());
- if (!NestedReq->isSubstitutionFailure())
+ if (!NestedReq->isSubstitutionFailure())
Visit(NestedReq->getConstraintExpr());
}
}
@@ -1393,7 +1474,6 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Array_New:
case OO_Array_Delete:
case OO_Arrow:
- case OO_Call:
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Invalid operator call kind");
@@ -1567,6 +1647,9 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Subscript:
return Stmt::ArraySubscriptExprClass;
+ case OO_Call:
+ return Stmt::CallExprClass;
+
case OO_Coawait:
UnaryOp = UO_Coawait;
return Stmt::UnaryOperatorClass;
@@ -1607,7 +1690,7 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
SC == Stmt::CompoundAssignOperatorClass)
ID.AddInteger(BinaryOp);
else
- assert(SC == Stmt::ArraySubscriptExprClass);
+ assert(SC == Stmt::ArraySubscriptExprClass || SC == Stmt::CallExprClass);
return;
}
@@ -1670,6 +1753,10 @@ void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
VisitType(S->getTypeInfoAsWritten()->getType());
}
+void StmtProfiler::VisitCXXAddrspaceCastExpr(const CXXAddrspaceCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
VisitCallExpr(S);
}
@@ -1990,6 +2077,8 @@ void StmtProfiler::VisitSourceLocExpr(const SourceLocExpr *E) {
VisitExpr(E);
}
+void StmtProfiler::VisitRecoveryExpr(const RecoveryExpr *E) { VisitExpr(E); }
+
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
index 6f0ebf232e77..6a3d2b30e46e 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
@@ -111,84 +112,60 @@ TemplateArgument::CreatePackCopy(ASTContext &Context,
return TemplateArgument(Args.copy(Context));
}
-bool TemplateArgument::isDependent() const {
+TemplateArgumentDependence TemplateArgument::getDependence() const {
+ auto Deps = TemplateArgumentDependence::None;
switch (getKind()) {
case Null:
llvm_unreachable("Should not have a NULL template argument");
case Type:
- return getAsType()->isDependentType() ||
- isa<PackExpansionType>(getAsType());
+ Deps = toTemplateArgumentDependence(getAsType()->getDependence());
+ if (isa<PackExpansionType>(getAsType()))
+ Deps |= TemplateArgumentDependence::Dependent;
+ return Deps;
case Template:
- return getAsTemplate().isDependent();
+ return toTemplateArgumentDependence(getAsTemplate().getDependence());
case TemplateExpansion:
- return true;
+ return TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
- case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
+ case Declaration: {
+ auto *DC = dyn_cast<DeclContext>(getAsDecl());
+ if (!DC)
+ DC = getAsDecl()->getDeclContext();
+ if (DC->isDependentContext())
+ Deps = TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
+ return Deps;
+ }
case NullPtr:
- return false;
-
case Integral:
- // Never dependent
- return false;
+ return TemplateArgumentDependence::None;
case Expression:
- return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent() ||
- isa<PackExpansionExpr>(getAsExpr()));
+ Deps = toTemplateArgumentDependence(getAsExpr()->getDependence());
+ if (isa<PackExpansionExpr>(getAsExpr()))
+ Deps |= TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
+ return Deps;
case Pack:
for (const auto &P : pack_elements())
- if (P.isDependent())
- return true;
- return false;
+ Deps |= P.getDependence();
+ return Deps;
}
+ llvm_unreachable("unhandled ArgKind");
+}
- llvm_unreachable("Invalid TemplateArgument Kind!");
+bool TemplateArgument::isDependent() const {
+ return getDependence() & TemplateArgumentDependence::Dependent;
}
bool TemplateArgument::isInstantiationDependent() const {
- switch (getKind()) {
- case Null:
- llvm_unreachable("Should not have a NULL template argument");
-
- case Type:
- return getAsType()->isInstantiationDependentType();
-
- case Template:
- return getAsTemplate().isInstantiationDependent();
-
- case TemplateExpansion:
- return true;
-
- case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
-
- case NullPtr:
- return false;
-
- case Integral:
- // Never dependent
- return false;
-
- case Expression:
- return getAsExpr()->isInstantiationDependent();
-
- case Pack:
- for (const auto &P : pack_elements())
- if (P.isInstantiationDependent())
- return true;
- return false;
- }
-
- llvm_unreachable("Invalid TemplateArgument Kind!");
+ return getDependence() & TemplateArgumentDependence::Instantiation;
}
bool TemplateArgument::isPackExpansion() const {
@@ -215,38 +192,7 @@ bool TemplateArgument::isPackExpansion() const {
}
bool TemplateArgument::containsUnexpandedParameterPack() const {
- switch (getKind()) {
- case Null:
- case Declaration:
- case Integral:
- case TemplateExpansion:
- case NullPtr:
- break;
-
- case Type:
- if (getAsType()->containsUnexpandedParameterPack())
- return true;
- break;
-
- case Template:
- if (getAsTemplate().containsUnexpandedParameterPack())
- return true;
- break;
-
- case Expression:
- if (getAsExpr()->containsUnexpandedParameterPack())
- return true;
- break;
-
- case Pack:
- for (const auto &P : pack_elements())
- if (P.containsUnexpandedParameterPack())
- return true;
-
- break;
- }
-
- return false;
+ return getDependence() & TemplateArgumentDependence::UnexpandedPack;
}
Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
@@ -406,13 +352,9 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
case Declaration: {
NamedDecl *ND = getAsDecl();
- Out << '&';
- if (ND->getDeclName()) {
- // FIXME: distinguish between pointer and reference args?
- ND->printQualifiedName(Out);
- } else {
- Out << "(anonymous)";
- }
+ if (!getParamTypeForDecl()->isReferenceType())
+ Out << '&';
+ ND->printQualifiedName(Out);
break;
}
@@ -601,20 +543,14 @@ void ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
void ASTTemplateKWAndArgsInfo::initializeFrom(
SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
- TemplateArgumentLoc *OutArgArray, bool &Dependent,
- bool &InstantiationDependent, bool &ContainsUnexpandedParameterPack) {
+ TemplateArgumentLoc *OutArgArray, TemplateArgumentDependence &Deps) {
this->TemplateKWLoc = TemplateKWLoc;
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
for (unsigned i = 0; i != NumTemplateArgs; ++i) {
- Dependent = Dependent || Info[i].getArgument().isDependent();
- InstantiationDependent = InstantiationDependent ||
- Info[i].getArgument().isInstantiationDependent();
- ContainsUnexpandedParameterPack =
- ContainsUnexpandedParameterPack ||
- Info[i].getArgument().containsUnexpandedParameterPack();
+ Deps |= Info[i].getArgument().getDependence();
new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
}
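
TemplateArgument::getDependence() above folds the three separate queries into one bitmask. A sketch of arguments that exercise the different bits (the classic instantiation-dependent-but-not-dependent case is the nested sizeof):

    #include <array>

    template <typename T> struct Classify {
      std::array<int, 4> a;                  // <int, 4>: no dependence bits
      std::array<T, sizeof(T)> b;            // <T, sizeof(T)>: Dependent (and
                                             // therefore Instantiation) bits
      std::array<int, sizeof(sizeof(T))> c;  // not value-dependent, but still
                                             // instantiation-dependent
    };
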
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index 06e1dcec7449..40a8736ae1af 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -11,8 +11,10 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TemplateName.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
@@ -168,52 +170,54 @@ TemplateName TemplateName::getNameToSubstitute() const {
return TemplateName(Decl);
}
-bool TemplateName::isDependent() const {
+TemplateNameDependence TemplateName::getDependence() const {
+ auto D = TemplateNameDependence::None;
+ switch (getKind()) {
+ case TemplateName::NameKind::QualifiedTemplate:
+ D |= toTemplateNameDependence(
+ getAsQualifiedTemplateName()->getQualifier()->getDependence());
+ break;
+ case TemplateName::NameKind::DependentTemplate:
+ D |= toTemplateNameDependence(
+ getAsDependentTemplateName()->getQualifier()->getDependence());
+ break;
+ case TemplateName::NameKind::SubstTemplateTemplateParmPack:
+ D |= TemplateNameDependence::UnexpandedPack;
+ break;
+ case TemplateName::NameKind::OverloadedTemplate:
+ llvm_unreachable("overloaded templates shouldn't survive to here.");
+ default:
+ break;
+ }
if (TemplateDecl *Template = getAsTemplateDecl()) {
- if (isa<TemplateTemplateParmDecl>(Template))
- return true;
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) {
+ D |= TemplateNameDependence::DependentInstantiation;
+ if (TTP->isParameterPack())
+ D |= TemplateNameDependence::UnexpandedPack;
+ }
// FIXME: Hack, getDeclContext() can be null if Template is still
// initializing due to PCH reading, so we check it before using it.
// Should probably modify TemplateSpecializationType to allow constructing
// it without the isDependent() checking.
- return Template->getDeclContext() &&
- Template->getDeclContext()->isDependentContext();
+ if (Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext())
+ D |= TemplateNameDependence::DependentInstantiation;
+ } else {
+ D |= TemplateNameDependence::DependentInstantiation;
}
+ return D;
+}
- assert(!getAsOverloadedTemplate() &&
- "overloaded templates shouldn't survive to here");
-
- return true;
+bool TemplateName::isDependent() const {
+ return getDependence() & TemplateNameDependence::Dependent;
}
bool TemplateName::isInstantiationDependent() const {
- if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (QTN->getQualifier()->isInstantiationDependent())
- return true;
- }
-
- return isDependent();
+ return getDependence() & TemplateNameDependence::Instantiation;
}
bool TemplateName::containsUnexpandedParameterPack() const {
- if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (QTN->getQualifier()->containsUnexpandedParameterPack())
- return true;
- }
-
- if (TemplateDecl *Template = getAsTemplateDecl()) {
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(Template))
- return TTP->isParameterPack();
-
- return false;
- }
-
- if (DependentTemplateName *DTN = getAsDependentTemplateName())
- return DTN->getQualifier() &&
- DTN->getQualifier()->containsUnexpandedParameterPack();
-
- return getAsSubstTemplateTemplateParmPack() != nullptr;
+ return getDependence() & TemplateNameDependence::UnexpandedPack;
}
void
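
With getDependence() computing the whole mask once, isDependent(), isInstantiationDependent() and containsUnexpandedParameterPack() above reduce to single bit tests, and DependentInstantiation in the patch reads as shorthand for setting the Dependent and Instantiation bits together. A compact sketch of that query style, again with stand-in bits rather than clang's real enum:

#include <cstdint>

// Stand-in bits; clang's real flags live in clang/AST/DependenceFlags.h.
enum : uint8_t { UnexpandedPack = 1, Instantiation = 2, Dependent = 4 };

struct NameLike {
  uint8_t Bits = Dependent | Instantiation; // "DependentInstantiation"
  uint8_t getDependence() const { return Bits; }
  // Each legacy predicate is now a mask test on the one computed value.
  bool isDependent() const { return getDependence() & Dependent; }
  bool isInstantiationDependent() const {
    return getDependence() & Instantiation;
  }
  bool containsUnexpandedParameterPack() const {
    return getDependence() & UnexpandedPack;
  }
};

int main() { return NameLike().isDependent() ? 0 : 1; }
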
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index c9b571862c19..5b0a0ac392c0 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -11,10 +11,19 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TextNodeDumper.h"
+#include "clang/AST/APValue.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/LocInfoType.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TypeTraits.h"
+
+#include <algorithm>
+#include <utility>
using namespace clang;
@@ -47,12 +56,15 @@ static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
}
-TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors,
- const SourceManager *SM,
- const PrintingPolicy &PrintPolicy,
- const comments::CommandTraits *Traits)
- : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors), SM(SM),
- PrintPolicy(PrintPolicy), Traits(Traits) {}
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, const ASTContext &Context,
+ bool ShowColors)
+ : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors),
+ Context(&Context), SM(&Context.getSourceManager()),
+ PrintPolicy(Context.getPrintingPolicy()),
+ Traits(&Context.getCommentCommandTraits()) {}
+
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors)
+ : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors) {}
void TextNodeDumper::Visit(const comments::Comment *C,
const comments::FullComment *FC) {
@@ -121,12 +133,14 @@ void TextNodeDumper::Visit(const Stmt *Node) {
dumpPointer(Node);
dumpSourceRange(Node->getSourceRange());
- if (Node->isOMPStructuredBlock())
- OS << " openmp_structured_block";
-
if (const auto *E = dyn_cast<Expr>(Node)) {
dumpType(E->getType());
+ if (E->containsErrors()) {
+ ColorScope Color(OS, ShowColors, ErrorsColor);
+ OS << " contains-errors";
+ }
+
{
ColorScope Color(OS, ShowColors, ValueKindColor);
switch (E->getValueKind()) {
@@ -158,6 +172,9 @@ void TextNodeDumper::Visit(const Stmt *Node) {
case OK_VectorComponent:
OS << " vectorcomponent";
break;
+ case OK_MatrixComponent:
+ OS << " matrixcomponent";
+ break;
}
}
}
@@ -193,6 +210,11 @@ void TextNodeDumper::Visit(const Type *T) {
if (SingleStepDesugar != QualType(T, 0))
OS << " sugar";
+ if (T->containsErrors()) {
+ ColorScope Color(OS, ShowColors, ErrorsColor);
+ OS << " contains-errors";
+ }
+
if (T->isDependentType())
OS << " dependent";
else if (T->isInstantiationDependentType())
@@ -243,7 +265,7 @@ void TextNodeDumper::Visit(const Decl *D) {
const_cast<NamedDecl *>(ND)))
AddChild([=] { OS << "also in " << M->getFullModuleName(); });
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- if (ND->isHidden())
+ if (!ND->isUnconditionallyVisible())
OS << " hidden";
if (D->isImplicit())
OS << " implicit";
@@ -310,7 +332,7 @@ void TextNodeDumper::Visit(const OMPClause *C) {
}
{
ColorScope Color(OS, ShowColors, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ StringRef ClauseName(llvm::omp::getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
@@ -333,6 +355,218 @@ void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
OS << " selected";
}
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble(), llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+/// True if the \p APValue \p Value can be folded onto the current line.
+static bool isSimpleAPValue(const APValue &Value) {
+ switch (Value.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ case APValue::Int:
+ case APValue::Float:
+ case APValue::FixedPoint:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::LValue:
+ case APValue::MemberPointer:
+ case APValue::AddrLabelDiff:
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ return false;
+ case APValue::Union:
+ return isSimpleAPValue(Value.getUnionValue());
+ }
+ llvm_unreachable("unexpected APValue kind!");
+}
+
+/// Dump the children of the \p APValue \p Value.
+///
+/// \param[in] Value The \p APValue to visit
+/// \param[in] Ty The \p QualType passed to \p Visit
+///
+/// \param[in] IdxToChildFun A function mapping an \p APValue and an index
+/// to one of the children of the \p APValue
+///
+/// \param[in] NumChildren \p IdxToChildFun will be called on \p Value with
+/// the indices in the range \p [0,NumChildren)
+///
+/// \param[in] LabelSingular The label to use on a line with a single child
+/// \param[in] LabelPlurial The label to use on a line with multiple children
+void TextNodeDumper::dumpAPValueChildren(
+ const APValue &Value, QualType Ty,
+ const APValue &(*IdxToChildFun)(const APValue &, unsigned),
+ unsigned NumChildren, StringRef LabelSingular, StringRef LabelPlurial) {
+ // To save some vertical space we print up to MaxChildrenPerLine APValues
+ // considered to be simple (by isSimpleAPValue) on a single line.
+ constexpr unsigned MaxChildrenPerLine = 4;
+ unsigned I = 0;
+ while (I < NumChildren) {
+ unsigned J = I;
+ while (J < NumChildren) {
+ if (isSimpleAPValue(IdxToChildFun(Value, J)) &&
+ (J - I < MaxChildrenPerLine)) {
+ ++J;
+ continue;
+ }
+ break;
+ }
+
+ J = std::max(I + 1, J);
+
+ // Print [I,J) on a single line.
+ AddChild(J - I > 1 ? LabelPlurial : LabelSingular, [=]() {
+ for (unsigned X = I; X < J; ++X) {
+ Visit(IdxToChildFun(Value, X), Ty);
+ if (X + 1 != J)
+ OS << ", ";
+ }
+ });
+ I = J;
+ }
+}
+
+void TextNodeDumper::Visit(const APValue &Value, QualType Ty) {
+ ColorScope Color(OS, ShowColors, ValueKindColor);
+ switch (Value.getKind()) {
+ case APValue::None:
+ OS << "None";
+ return;
+ case APValue::Indeterminate:
+ OS << "Indeterminate";
+ return;
+ case APValue::Int:
+ OS << "Int ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getInt();
+ }
+ return;
+ case APValue::Float:
+ OS << "Float ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << GetApproxValue(Value.getFloat());
+ }
+ return;
+ case APValue::FixedPoint:
+ OS << "FixedPoint ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getFixedPoint();
+ }
+ return;
+ case APValue::Vector: {
+ unsigned VectorLength = Value.getVectorLength();
+ OS << "Vector length=" << VectorLength;
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getVectorElt(Index);
+ },
+ VectorLength, "element", "elements");
+ return;
+ }
+ case APValue::ComplexInt:
+ OS << "ComplexInt ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getComplexIntReal() << " + " << Value.getComplexIntImag()
+ << 'i';
+ }
+ return;
+ case APValue::ComplexFloat:
+ OS << "ComplexFloat ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << GetApproxValue(Value.getComplexFloatReal()) << " + "
+ << GetApproxValue(Value.getComplexFloatImag()) << 'i';
+ }
+ return;
+ case APValue::LValue:
+ (void)Context;
+ OS << "LValue <todo>";
+ return;
+ case APValue::Array: {
+ unsigned ArraySize = Value.getArraySize();
+ unsigned NumInitializedElements = Value.getArrayInitializedElts();
+ OS << "Array size=" << ArraySize;
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getArrayInitializedElt(Index);
+ },
+ NumInitializedElements, "element", "elements");
+
+ if (Value.hasArrayFiller()) {
+ AddChild("filler", [=] {
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << ArraySize - NumInitializedElements << " x ";
+ }
+ Visit(Value.getArrayFiller(), Ty);
+ });
+ }
+
+ return;
+ }
+ case APValue::Struct: {
+ OS << "Struct";
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getStructBase(Index);
+ },
+ Value.getStructNumBases(), "base", "bases");
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getStructField(Index);
+ },
+ Value.getStructNumFields(), "field", "fields");
+
+ return;
+ }
+ case APValue::Union: {
+ OS << "Union";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ if (const FieldDecl *FD = Value.getUnionField())
+ OS << " ." << *cast<NamedDecl>(FD);
+ }
+ // If the union value is considered to be simple, fold it into the
+ // current line to save some vertical space.
+ const APValue &UnionValue = Value.getUnionValue();
+ if (isSimpleAPValue(UnionValue)) {
+ OS << ' ';
+ Visit(UnionValue, Ty);
+ } else {
+ AddChild([=] { Visit(UnionValue, Ty); });
+ }
+
+ return;
+ }
+ case APValue::MemberPointer:
+ OS << "MemberPointer <todo>";
+ return;
+ case APValue::AddrLabelDiff:
+ OS << "AddrLabelDiff <todo>";
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
void TextNodeDumper::dumpPointer(const void *Ptr) {
ColorScope Color(OS, ShowColors, AddressColor);
OS << ' ' << Ptr;
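
dumpAPValueChildren above packs runs of consecutive "simple" values, at most MaxChildrenPerLine at a time, onto one dump line, while a non-simple value always opens its own child. The same grouping loop, extracted into a stand-alone sketch over plain ints (evenness stands in for isSimpleAPValue):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  constexpr unsigned MaxChildrenPerLine = 4;
  std::vector<int> Children = {2, 4, 6, 8, 10, 3, 2, 2};
  auto IsSimple = [](int C) { return C % 2 == 0; };

  unsigned I = 0;
  const unsigned N = static_cast<unsigned>(Children.size());
  while (I < N) {
    // Extend [I,J) while the children stay simple and the line is not full.
    unsigned J = I;
    while (J < N && IsSimple(Children[J]) && J - I < MaxChildrenPerLine)
      ++J;
    // A non-simple child still consumes one slot, so the loop always advances.
    J = std::max(I + 1u, J);

    std::printf("line:");
    for (unsigned X = I; X < J; ++X)
      std::printf(" %d", Children[X]);
    std::printf("\n");
    I = J;
  }
  return 0;
}

On the sample data this prints four groups: "2 4 6 8", then "10", then "3", then "2 2".
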
@@ -432,19 +666,27 @@ void TextNodeDumper::dumpName(const NamedDecl *ND) {
}
void TextNodeDumper::dumpAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none:
- break;
- case AS_public:
- OS << "public";
- break;
- case AS_protected:
- OS << "protected";
- break;
- case AS_private:
- OS << "private";
- break;
- }
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ return;
+ OS << AccessSpelling;
+}
+
+void TextNodeDumper::dumpCleanupObject(
+ const ExprWithCleanups::CleanupObject &C) {
+ if (auto *BD = C.dyn_cast<BlockDecl *>())
+ dumpDeclRef(BD, "cleanup");
+ else if (auto *CLE = C.dyn_cast<CompoundLiteralExpr *>())
+ AddChild([=] {
+ OS << "cleanup ";
+ {
+ ColorScope Color(OS, ShowColors, StmtColor);
+ OS << CLE->getStmtClassName();
+ }
+ dumpPointer(CLE);
+ });
+ else
+ llvm_unreachable("unexpected cleanup type");
}
void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
@@ -687,11 +929,9 @@ void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) {
}
void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) {
- if (Node->getResultAPValueKind() != APValue::None) {
- ColorScope Color(OS, ShowColors, ValueColor);
- OS << " ";
- Node->getAPValueResult().dump(OS);
- }
+ if (Node->hasAPValueResult())
+ AddChild("value",
+ [=] { Visit(Node->getAPValueResult(), Node->getType()); });
}
void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
@@ -699,6 +939,14 @@ void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
OS << " adl";
}
+void TextNodeDumper::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Node) {
+ const char *OperatorSpelling = clang::getOperatorSpelling(Node->getOperator());
+ if (OperatorSpelling)
+ OS << " '" << OperatorSpelling << "'";
+
+ VisitCallExpr(Node);
+}
+
void TextNodeDumper::VisitCastExpr(const CastExpr *Node) {
OS << " <";
{
@@ -809,23 +1057,8 @@ void TextNodeDumper::VisitUnaryOperator(const UnaryOperator *Node) {
void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *Node) {
- switch (Node->getKind()) {
- case UETT_SizeOf:
- OS << " sizeof";
- break;
- case UETT_AlignOf:
- OS << " alignof";
- break;
- case UETT_VecStep:
- OS << " vec_step";
- break;
- case UETT_OpenMPRequiredSimdAlign:
- OS << " __builtin_omp_required_simd_align";
- break;
- case UETT_PreferredAlignOf:
- OS << " __alignof";
- break;
- }
+ OS << " " << getTraitSpelling(Node->getKind());
+
if (Node->isArgumentType())
dumpType(Node->getArgumentType());
}
@@ -939,6 +1172,18 @@ void TextNodeDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
}
}
+void TextNodeDumper::VisitTypeTraitExpr(const TypeTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
+void TextNodeDumper::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
+void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
void TextNodeDumper::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *Node) {
if (const ValueDecl *VD = Node->getExtendingDecl()) {
@@ -949,7 +1194,7 @@ void TextNodeDumper::VisitMaterializeTemporaryExpr(
void TextNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
- dumpDeclRef(Node->getObject(i), "cleanup");
+ dumpCleanupObject(Node->getObject(i));
}
void TextNodeDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
@@ -1065,6 +1310,23 @@ void TextNodeDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
+void TextNodeDumper::VisitOMPIteratorExpr(const OMPIteratorExpr *Node) {
+ OS << " ";
+ for (unsigned I = 0, E = Node->numOfIterators(); I < E; ++I) {
+ Visit(Node->getIteratorDecl(I));
+ OS << " = ";
+ const OMPIteratorExpr::IteratorRange Range = Node->getIteratorRange(I);
+ OS << " begin ";
+ Visit(Range.Begin);
+ OS << " end ";
+ Visit(Range.End);
+ if (Range.Step) {
+ OS << " step ";
+ Visit(Range.Step);
+ }
+ }
+}
+
void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
if (T->isSpelledAsLValue())
OS << " written as lvalue reference";
@@ -1407,6 +1669,16 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
OS << " destroyed";
if (D->isParameterPack())
OS << " pack";
+
+ if (D->hasInit()) {
+ const Expr *E = D->getInit();
+ // Only dump the value of constexpr VarDecls for now.
+ if (E && !E->isValueDependent() && D->isConstexpr()) {
+ const APValue *Value = D->evaluateValue();
+ if (Value)
+ AddChild("value", [=] { Visit(*Value, E->getType()); });
+ }
+ }
}
void TextNodeDumper::VisitBindingDecl(const BindingDecl *D) {
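
The VisitVarDecl change above only evaluates initializers that are constexpr and not value-dependent. A tiny input that exercises both sides of that check (the dump output itself is not reproduced here):

// When this translation unit is AST-dumped, the evaluateValue() path above
// should attach a "value" child to kAnswer.
constexpr int kAnswer = 6 * 7;

// A value-dependent initializer is skipped by the isValueDependent() check,
// so the primary template's VarDecl gets no "value" child (requires C++14
// variable templates).
template <int N> constexpr int kScaled = N * 2;

int main() { return kAnswer - 42 + kScaled<0>; }
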
@@ -1496,7 +1768,8 @@ void TextNodeDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
}
{
ColorScope Color(OS, ShowColors, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ StringRef ClauseName(
+ llvm::omp::getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
@@ -1629,6 +1902,7 @@ void TextNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyAssignment";
}
+ FLAG(hasSimpleCopyAssignment, simple);
FLAG(hasTrivialCopyAssignment, trivial);
FLAG(hasNonTrivialCopyAssignment, non_trivial);
FLAG(hasCopyAssignmentWithConstParam, has_const_param);
@@ -1919,35 +2193,35 @@ void TextNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
OS << " optional";
- ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
- if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
+ ObjCPropertyAttribute::Kind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyAttribute::kind_noattr) {
+ if (Attrs & ObjCPropertyAttribute::kind_readonly)
OS << " readonly";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ if (Attrs & ObjCPropertyAttribute::kind_assign)
OS << " assign";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
+ if (Attrs & ObjCPropertyAttribute::kind_readwrite)
OS << " readwrite";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
+ if (Attrs & ObjCPropertyAttribute::kind_retain)
OS << " retain";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attrs & ObjCPropertyAttribute::kind_copy)
OS << " copy";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attrs & ObjCPropertyAttribute::kind_nonatomic)
OS << " nonatomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
+ if (Attrs & ObjCPropertyAttribute::kind_atomic)
OS << " atomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Attrs & ObjCPropertyAttribute::kind_weak)
OS << " weak";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
+ if (Attrs & ObjCPropertyAttribute::kind_strong)
OS << " strong";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
+ if (Attrs & ObjCPropertyAttribute::kind_unsafe_unretained)
OS << " unsafe_unretained";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_class)
+ if (Attrs & ObjCPropertyAttribute::kind_class)
OS << " class";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_direct)
+ if (Attrs & ObjCPropertyAttribute::kind_direct)
OS << " direct";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+ if (Attrs & ObjCPropertyAttribute::kind_getter)
dumpDeclRef(D->getGetterMethodDecl(), "getter");
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ if (Attrs & ObjCPropertyAttribute::kind_setter)
dumpDeclRef(D->getSetterMethodDecl(), "setter");
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index 5099494da5fd..10a6a2610130 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
@@ -123,14 +124,15 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
//
// template<int ...N> int arr[] = {N...};
: Type(tc, can,
- et->isDependentType() || (sz && sz->isValueDependent()) ||
- tc == DependentSizedArray,
- et->isInstantiationDependentType() ||
- (sz && sz->isInstantiationDependent()) ||
- tc == DependentSizedArray,
- (tc == VariableArray || et->isVariablyModifiedType()),
- et->containsUnexpandedParameterPack() ||
- (sz && sz->containsUnexpandedParameterPack())),
+ et->getDependence() |
+ (sz ? toTypeDependence(
+ turnValueToTypeDependence(sz->getDependence()))
+ : TypeDependence::None) |
+ (tc == VariableArray ? TypeDependence::VariablyModified
+ : TypeDependence::None) |
+ (tc == DependentSizedArray
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None)),
ElementType(et) {
ArrayTypeBits.IndexTypeQuals = tq;
ArrayTypeBits.SizeModifier = sm;
@@ -217,14 +219,16 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
E->Profile(ID, Context, true);
}
-DependentVectorType::DependentVectorType(
- const ASTContext &Context, QualType ElementType, QualType CanonType,
- Expr *SizeExpr, SourceLocation Loc, VectorType::VectorKind VecKind)
- : Type(DependentVector, CanonType, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- ElementType->isVariablyModifiedType(),
- ElementType->containsUnexpandedParameterPack() ||
- (SizeExpr && SizeExpr->containsUnexpandedParameterPack())),
+DependentVectorType::DependentVectorType(const ASTContext &Context,
+ QualType ElementType,
+ QualType CanonType, Expr *SizeExpr,
+ SourceLocation Loc,
+ VectorType::VectorKind VecKind)
+ : Type(DependentVector, CanonType,
+ TypeDependence::DependentInstantiation |
+ ElementType->getDependence() |
+ (SizeExpr ? toTypeDependence(SizeExpr->getDependence())
+ : TypeDependence::None)),
Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) {
VectorTypeBits.VecKind = VecKind;
}
@@ -238,19 +242,16 @@ void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID,
SizeExpr->Profile(ID, Context, true);
}
-DependentSizedExtVectorType::DependentSizedExtVectorType(const
- ASTContext &Context,
- QualType ElementType,
- QualType can,
- Expr *SizeExpr,
- SourceLocation loc)
- : Type(DependentSizedExtVector, can, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- ElementType->isVariablyModifiedType(),
- (ElementType->containsUnexpandedParameterPack() ||
- (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
- Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
- loc(loc) {}
+DependentSizedExtVectorType::DependentSizedExtVectorType(
+ const ASTContext &Context, QualType ElementType, QualType can,
+ Expr *SizeExpr, SourceLocation loc)
+ : Type(DependentSizedExtVector, can,
+ TypeDependence::DependentInstantiation |
+ ElementType->getDependence() |
+ (SizeExpr ? toTypeDependence(SizeExpr->getDependence())
+ : TypeDependence::None)),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {
+}
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
@@ -260,15 +261,16 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
SizeExpr->Profile(ID, Context, true);
}
-DependentAddressSpaceType::DependentAddressSpaceType(
- const ASTContext &Context, QualType PointeeType, QualType can,
- Expr *AddrSpaceExpr, SourceLocation loc)
- : Type(DependentAddressSpace, can, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- PointeeType->isVariablyModifiedType(),
- (PointeeType->containsUnexpandedParameterPack() ||
- (AddrSpaceExpr &&
- AddrSpaceExpr->containsUnexpandedParameterPack()))),
+DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context,
+ QualType PointeeType,
+ QualType can,
+ Expr *AddrSpaceExpr,
+ SourceLocation loc)
+ : Type(DependentAddressSpace, can,
+ TypeDependence::DependentInstantiation |
+ PointeeType->getDependence() |
+ (AddrSpaceExpr ? toTypeDependence(AddrSpaceExpr->getDependence())
+ : TypeDependence::None)),
Context(Context), AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType),
loc(loc) {}
@@ -280,21 +282,89 @@ void DependentAddressSpaceType::Profile(llvm::FoldingSetNodeID &ID,
AddrSpaceExpr->Profile(ID, Context, true);
}
+MatrixType::MatrixType(TypeClass tc, QualType matrixType, QualType canonType,
+ const Expr *RowExpr, const Expr *ColumnExpr)
+ : Type(tc, canonType,
+ (RowExpr ? (matrixType->getDependence() | TypeDependence::Dependent |
+ TypeDependence::Instantiation |
+ (matrixType->isVariablyModifiedType()
+ ? TypeDependence::VariablyModified
+ : TypeDependence::None) |
+ (matrixType->containsUnexpandedParameterPack() ||
+ (RowExpr &&
+ RowExpr->containsUnexpandedParameterPack()) ||
+ (ColumnExpr &&
+ ColumnExpr->containsUnexpandedParameterPack())
+ ? TypeDependence::UnexpandedPack
+ : TypeDependence::None))
+ : matrixType->getDependence())),
+ ElementType(matrixType) {}
+
+ConstantMatrixType::ConstantMatrixType(QualType matrixType, unsigned nRows,
+ unsigned nColumns, QualType canonType)
+ : ConstantMatrixType(ConstantMatrix, matrixType, nRows, nColumns,
+ canonType) {}
+
+ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType,
+ unsigned nRows, unsigned nColumns,
+ QualType canonType)
+ : MatrixType(tc, matrixType, canonType) {
+ ConstantMatrixTypeBits.NumRows = nRows;
+ ConstantMatrixTypeBits.NumColumns = nColumns;
+}
+
+DependentSizedMatrixType::DependentSizedMatrixType(
+ const ASTContext &CTX, QualType ElementType, QualType CanonicalType,
+ Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc)
+ : MatrixType(DependentSizedMatrix, ElementType, CanonicalType, RowExpr,
+ ColumnExpr),
+ Context(CTX), RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {}
+
+void DependentSizedMatrixType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &CTX,
+ QualType ElementType, Expr *RowExpr,
+ Expr *ColumnExpr) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ RowExpr->Profile(ID, CTX, true);
+ ColumnExpr->Profile(ID, CTX, true);
+}
+
VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorKind vecKind)
: VectorType(Vector, vecType, nElements, canonType, vecKind) {}
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
QualType canonType, VectorKind vecKind)
- : Type(tc, canonType, vecType->isDependentType(),
- vecType->isInstantiationDependentType(),
- vecType->isVariablyModifiedType(),
- vecType->containsUnexpandedParameterPack()),
- ElementType(vecType) {
+ : Type(tc, canonType, vecType->getDependence()), ElementType(vecType) {
VectorTypeBits.VecKind = vecKind;
VectorTypeBits.NumElements = nElements;
}
+ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits)
+ : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
+ NumBits(NumBits) {}
+
+DependentExtIntType::DependentExtIntType(const ASTContext &Context,
+ bool IsUnsigned, Expr *NumBitsExpr)
+ : Type(DependentExtInt, QualType{},
+ toTypeDependence(NumBitsExpr->getDependence())),
+ Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {}
+
+bool DependentExtIntType::isUnsigned() const {
+ return ExprAndUnsigned.getInt();
+}
+
+clang::Expr *DependentExtIntType::getNumBitsExpr() const {
+ return ExprAndUnsigned.getPointer();
+}
+
+void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, bool IsUnsigned,
+ Expr *NumBitsExpr) {
+ ID.AddBoolean(IsUnsigned);
+ NumBitsExpr->Profile(ID, Context, true);
+}
+
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
@@ -652,14 +722,11 @@ bool Type::isObjCClassOrClassKindOfType() const {
return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
}
-ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D,
- QualType can,
+ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can,
ArrayRef<ObjCProtocolDecl *> protocols)
- : Type(ObjCTypeParam, can, can->isDependentType(),
- can->isInstantiationDependentType(),
- can->isVariablyModifiedType(),
- /*ContainsUnexpandedParameterPack=*/false),
- OTPDecl(const_cast<ObjCTypeParamDecl*>(D)) {
+ : Type(ObjCTypeParam, can,
+ can->getDependence() & ~TypeDependence::UnexpandedPack),
+ OTPDecl(const_cast<ObjCTypeParamDecl *>(D)) {
initialize(protocols);
}
@@ -667,11 +734,7 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ArrayRef<QualType> typeArgs,
ArrayRef<ObjCProtocolDecl *> protocols,
bool isKindOf)
- : Type(ObjCObject, Canonical, Base->isDependentType(),
- Base->isInstantiationDependentType(),
- Base->isVariablyModifiedType(),
- Base->containsUnexpandedParameterPack()),
- BaseType(Base) {
+ : Type(ObjCObject, Canonical, Base->getDependence()), BaseType(Base) {
ObjCObjectTypeBits.IsKindOf = isKindOf;
ObjCObjectTypeBits.NumTypeArgs = typeArgs.size();
@@ -682,13 +745,7 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
typeArgs.size() * sizeof(QualType));
for (auto typeArg : typeArgs) {
- if (typeArg->isDependentType())
- setDependent();
- else if (typeArg->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (typeArg->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(typeArg->getDependence() & ~TypeDependence::VariablyModified);
}
// Initialize the protocol qualifiers. The protocol storage is known
// after we set number of type arguments.
@@ -953,6 +1010,17 @@ public:
return Ctx.getExtVectorType(elementType, T->getNumElements());
}
+ QualType VisitConstantMatrixType(const ConstantMatrixType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return {};
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getConstantMatrixType(elementType, T->getNumRows(),
+ T->getNumColumns());
+ }
+
QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
QualType returnType = recurse(T->getReturnType());
if (returnType.isNull())
@@ -1772,6 +1840,14 @@ namespace {
return Visit(T->getElementType());
}
+ Type *VisitDependentSizedMatrixType(const DependentSizedMatrixType *T) {
+ return Visit(T->getElementType());
+ }
+
+ Type *VisitConstantMatrixType(const ConstantMatrixType *T) {
+ return Visit(T->getElementType());
+ }
+
Type *VisitFunctionProtoType(const FunctionProtoType *T) {
if (Syntactic && T->hasTrailingReturn())
return const_cast<FunctionProtoType*>(T);
@@ -1851,13 +1927,17 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete();
- return false;
+ return isExtIntType();
}
bool Type::isIntegralOrUnscopedEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
+
+ if (isExtIntType())
+ return true;
+
return isUnscopedEnumerationType();
}
@@ -1938,6 +2018,9 @@ bool Type::isSignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isSigned();
+
return false;
}
@@ -1952,6 +2035,10 @@ bool Type::isSignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isSigned();
+
+
return false;
}
@@ -1978,6 +2065,9 @@ bool Type::isUnsignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isUnsigned();
+
return false;
}
@@ -1992,6 +2082,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isUnsigned();
+
return false;
}
@@ -2030,13 +2123,14 @@ bool Type::isRealType() const {
BT->getKind() <= BuiltinType::Float128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
- return false;
+ return isExtIntType();
}
bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Float128;
+ BT->getKind() <= BuiltinType::Float128 &&
+ BT->getKind() != BuiltinType::BFloat16;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
@@ -2045,7 +2139,7 @@ bool Type::isArithmeticType() const {
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
- return isa<ComplexType>(CanonicalType);
+ return isa<ComplexType>(CanonicalType) || isExtIntType();
}
Type::ScalarTypeKind Type::getScalarTypeKind() const {
@@ -2074,6 +2168,8 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (CT->getElementType()->isRealFloatingType())
return STK_FloatingComplex;
return STK_IntegralComplex;
+ } else if (isExtIntType()) {
+ return STK_Integral;
}
llvm_unreachable("unknown scalar type");
@@ -2182,6 +2278,22 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
}
}
+bool Type::isSizelessBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ // SVE Types
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
+
bool QualType::isPODType(const ASTContext &Context) const {
// C++11 has a more relaxed definition of POD.
if (Context.getLangOpts().CPlusPlus11)
@@ -2223,6 +2335,7 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ExtInt:
return true;
case Type::Enum:
@@ -2248,6 +2361,9 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
if ((*this)->isArrayType())
return Context.getBaseElementType(*this).isTrivialType(Context);
+ if ((*this)->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array
// types which are expressly allowed by the standard and thus our API.
if ((*this)->isIncompleteType())
@@ -2302,6 +2418,9 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if (CanonicalType->isDependentType())
return false;
+ if (CanonicalType->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array types
// which are expressly allowed by the standard and thus our API.
if (CanonicalType->isIncompleteType())
@@ -2495,6 +2614,9 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
const Type *BaseTy = ty->getBaseElementTypeUnsafe();
assert(BaseTy && "NULL element type");
+ if (BaseTy->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array
// types which are expressly allowed by the standard and thus our API.
if (BaseTy->isIncompleteType())
@@ -2699,21 +2821,20 @@ StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
}
DependentTemplateSpecializationType::DependentTemplateSpecializationType(
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, const IdentifierInfo *Name,
- ArrayRef<TemplateArgument> Args,
- QualType Canon)
- : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
- /*VariablyModified=*/false,
- NNS && NNS->containsUnexpandedParameterPack()),
- NNS(NNS), Name(Name) {
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args, QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon,
+ TypeDependence::DependentInstantiation |
+ (NNS ? toTypeDependence(NNS->getDependence())
+ : TypeDependence::None)),
+ NNS(NNS), Name(Name) {
DependentTemplateSpecializationTypeBits.NumArgs = Args.size();
assert((!NNS || NNS->isDependent()) &&
"DependentTemplateSpecializatonType requires dependent qualifier");
TemplateArgument *ArgBuffer = getArgBuffer();
for (const TemplateArgument &Arg : Args) {
- if (Arg.containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(toTypeDependence(Arg.getDependence() &
+ TemplateArgumentDependence::UnexpandedPack));
new (ArgBuffer++) TemplateArgument(Arg);
}
@@ -2794,6 +2915,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "unsigned __int128";
case Half:
return Policy.Half ? "half" : "__fp16";
+ case BFloat16:
+ return "__bf16";
case Float:
return "float";
case Double:
@@ -2897,8 +3020,14 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "queue_t";
case OCLReserveID:
return "reserve_id_t";
+ case IncompleteMatrixIdx:
+ return "<incomplete matrix index type>";
case OMPArraySection:
return "<OpenMP array section type>";
+ case OMPArrayShaping:
+ return "<OpenMP array shaping type>";
+ case OMPIterator:
+ return "<OpenMP iterator type>";
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
case Id: \
return #ExtType;
@@ -2912,6 +3041,13 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
llvm_unreachable("Invalid builtin type.");
}
+QualType QualType::getNonPackExpansionType() const {
+ // We never wrap type sugar around a PackExpansionType.
+ if (auto *PET = dyn_cast<PackExpansionType>(getTypePtr()))
+ return PET->getPattern();
+ return *this;
+}
+
QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
if (const auto *RefType = getTypePtr()->getAs<ReferenceType>())
return RefType->getPointeeType();
@@ -2956,10 +3092,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, canonical, result->isDependentType(),
- result->isInstantiationDependentType(),
- result->isVariablyModifiedType(),
- result->containsUnexpandedParameterPack(), epi.ExtInfo) {
+ : FunctionType(FunctionProto, result, canonical, result->getDependence(),
+ epi.ExtInfo) {
FunctionTypeBits.FastTypeQuals = epi.TypeQuals.getFastQualifiers();
FunctionTypeBits.RefQualifier = epi.RefQualifier;
FunctionTypeBits.NumParams = params.size();
@@ -2978,14 +3112,8 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Fill in the trailing argument array.
auto *argSlot = getTrailingObjects<QualType>();
for (unsigned i = 0; i != getNumParams(); ++i) {
- if (params[i]->isDependentType())
- setDependent();
- else if (params[i]->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (params[i]->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
-
+ addDependence(params[i]->getDependence() &
+ ~TypeDependence::VariablyModified);
argSlot[i] = params[i];
}
@@ -2999,11 +3127,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Note that, before C++17, a dependent exception specification does
// *not* make a type dependent; it's not even part of the C++ type
// system.
- if (ExceptionType->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (ExceptionType->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(
+ ExceptionType->getDependence() &
+ (TypeDependence::Instantiation | TypeDependence::UnexpandedPack));
exnSlot[I++] = ExceptionType;
}
@@ -3017,12 +3143,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Store the noexcept expression and context.
*getTrailingObjects<Expr *>() = epi.ExceptionSpec.NoexceptExpr;
- if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
- epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
- setInstantiationDependent();
-
- if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(
+ toTypeDependence(epi.ExceptionSpec.NoexceptExpr->getDependence()) &
+ (TypeDependence::Instantiation | TypeDependence::UnexpandedPack));
}
// Fill in the FunctionDecl * in the exception specification if present.
else if (getExceptionSpecType() == EST_Uninstantiated) {
@@ -3046,11 +3169,11 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
if (getExceptionSpecType() == EST_Dynamic ||
getExceptionSpecType() == EST_DependentNoexcept) {
assert(hasDependentExceptionSpec() && "type should not be canonical");
- setDependent();
+ addDependence(TypeDependence::DependentInstantiation);
}
} else if (getCanonicalTypeInternal()->isDependentType()) {
// Ask our canonical type whether our exception specification was dependent.
- setDependent();
+ addDependence(TypeDependence::DependentInstantiation);
}
// Fill in the extra parameter info if present.
@@ -3213,10 +3336,10 @@ QualType MacroQualifiedType::getModifiedType() const {
}
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
- : Type(TypeOfExpr, can, E->isTypeDependent(),
- E->isInstantiationDependent(),
- E->getType()->isVariablyModifiedType(),
- E->containsUnexpandedParameterPack()),
+ : Type(TypeOfExpr, can,
+ toTypeDependence(E->getDependence()) |
+ (E->getType()->getDependence() &
+ TypeDependence::VariablyModified)),
TOExpr(E) {}
bool TypeOfExprType::isSugared() const {
@@ -3236,13 +3359,15 @@ void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
- // C++11 [temp.type]p2: "If an expression e involves a template parameter,
- // decltype(e) denotes a unique dependent type." Hence a decltype type is
- // type-dependent even if its expression is only instantiation-dependent.
- : Type(Decltype, can, E->isInstantiationDependent(),
- E->isInstantiationDependent(),
- E->getType()->isVariablyModifiedType(),
- E->containsUnexpandedParameterPack()),
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can,
+ toTypeDependence(E->getDependence()) |
+ (E->isInstantiationDependent() ? TypeDependence::Dependent
+ : TypeDependence::None) |
+ (E->getType()->getDependence() &
+ TypeDependence::VariablyModified)),
E(E), UnderlyingType(underlyingType) {}
bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
@@ -3263,13 +3388,9 @@ void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
}
UnaryTransformType::UnaryTransformType(QualType BaseType,
- QualType UnderlyingType,
- UTTKind UKind,
+ QualType UnderlyingType, UTTKind UKind,
QualType CanonicalType)
- : Type(UnaryTransform, CanonicalType, BaseType->isDependentType(),
- BaseType->isInstantiationDependentType(),
- BaseType->isVariablyModifiedType(),
- BaseType->containsUnexpandedParameterPack()),
+ : Type(UnaryTransform, CanonicalType, BaseType->getDependence()),
BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) {}
DependentUnaryTransformType::DependentUnaryTransformType(const ASTContext &C,
@@ -3278,11 +3399,10 @@ DependentUnaryTransformType::DependentUnaryTransformType(const ASTContext &C,
: UnaryTransformType(BaseType, C.DependentTy, UKind, QualType()) {}
TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can, D->isDependentType(),
- /*InstantiationDependent=*/D->isDependentType(),
- /*VariablyModified=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- decl(const_cast<TagDecl*>(D)) {}
+ : Type(TC, can,
+ D->isDependentType() ? TypeDependence::DependentInstantiation
+ : TypeDependence::None),
+ decl(const_cast<TagDecl *>(D)) {}
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
for (auto I : decl->redecls()) {
@@ -3391,11 +3511,12 @@ IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
-SubstTemplateTypeParmPackType::
-SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
- QualType Canon,
- const TemplateArgument &ArgPack)
- : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
+SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Param, QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(SubstTemplateTypeParmPack, Canon,
+ TypeDependence::DependentInstantiation |
+ TypeDependence::UnexpandedPack),
Replaced(Param), Arguments(ArgPack.pack_begin()) {
SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size();
}
@@ -3439,16 +3560,17 @@ anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
return false;
}
-TemplateSpecializationType::
-TemplateSpecializationType(TemplateName T,
- ArrayRef<TemplateArgument> Args,
- QualType Canon, QualType AliasedType)
- : Type(TemplateSpecialization,
- Canon.isNull()? QualType(this, 0) : Canon,
- Canon.isNull()? true : Canon->isDependentType(),
- Canon.isNull()? true : Canon->isInstantiationDependentType(),
- false,
- T.containsUnexpandedParameterPack()), Template(T) {
+TemplateSpecializationType::TemplateSpecializationType(
+ TemplateName T, ArrayRef<TemplateArgument> Args, QualType Canon,
+ QualType AliasedType)
+ : Type(TemplateSpecialization, Canon.isNull() ? QualType(this, 0) : Canon,
+ (Canon.isNull()
+ ? TypeDependence::DependentInstantiation
+ : Canon->getDependence() & ~(TypeDependence::VariablyModified |
+ TypeDependence::UnexpandedPack)) |
+ (toTypeDependence(T.getDependence()) &
+ TypeDependence::UnexpandedPack)),
+ Template(T) {
TemplateSpecializationTypeBits.NumArgs = Args.size();
TemplateSpecializationTypeBits.TypeAlias = !AliasedType.isNull();
@@ -3461,7 +3583,7 @@ TemplateSpecializationType(TemplateName T,
auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
for (const TemplateArgument &Arg : Args) {
- // Update instantiation-dependent and variably-modified bits.
+ // Update instantiation-dependent, variably-modified, and error bits.
// If the canonical type exists and is non-dependent, the template
// specialization type can be non-dependent even if one of the type
// arguments is. Given:
@@ -3469,13 +3591,11 @@ TemplateSpecializationType(TemplateName T,
// U<T> is always non-dependent, irrespective of the type T.
// However, U<Ts> contains an unexpanded parameter pack, even though
// its expansion (and thus its desugared type) doesn't.
- if (Arg.isInstantiationDependent())
- setInstantiationDependent();
- if (Arg.getKind() == TemplateArgument::Type &&
- Arg.getAsType()->isVariablyModifiedType())
- setVariablyModified();
- if (Arg.containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(toTypeDependence(Arg.getDependence()) &
+ ~TypeDependence::Dependent);
+ if (Arg.getKind() == TemplateArgument::Type)
+ addDependence(Arg.getAsType()->getDependence() &
+ TypeDependence::VariablyModified);
new (TemplateArgs++) TemplateArgument(Arg);
}
@@ -3535,15 +3655,17 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID,
const ObjCTypeParamDecl *OTPDecl,
+ QualType CanonicalType,
ArrayRef<ObjCProtocolDecl *> protocols) {
ID.AddPointer(OTPDecl);
+ ID.AddPointer(CanonicalType.getAsOpaquePtr());
ID.AddInteger(protocols.size());
for (auto proto : protocols)
ID.AddPointer(proto);
}
void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getDecl(),
+ Profile(ID, getDecl(), getCanonicalTypeInternal(),
llvm::makeArrayRef(qual_begin(), getNumProtocols()));
}
@@ -3646,6 +3768,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
// here in error recovery.
return CachedProperties(ExternalLinkage, false);
+ case Type::ExtInt:
case Type::Builtin:
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
@@ -3691,6 +3814,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::Vector:
case Type::ExtVector:
return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::ConstantMatrix:
+ return Cache::get(cast<ConstantMatrixType>(T)->getElementType());
case Type::FunctionNoProto:
return Cache::get(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
@@ -3743,6 +3868,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
assert(T->isInstantiationDependentType());
return LinkageInfo::external();
+ case Type::ExtInt:
case Type::Builtin:
return LinkageInfo::external();
@@ -3776,6 +3902,9 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::Vector:
case Type::ExtVector:
return computeTypeLinkageInfo(cast<VectorType>(T)->getElementType());
+ case Type::ConstantMatrix:
+ return computeTypeLinkageInfo(
+ cast<ConstantMatrixType>(T)->getElementType());
case Type::FunctionNoProto:
return computeTypeLinkageInfo(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
@@ -3920,7 +4049,10 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/Basic/AArch64SVEACLETypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return false;
}
llvm_unreachable("unknown builtin type");
@@ -3937,6 +4069,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::DependentAddressSpace:
case Type::FunctionProto:
case Type::FunctionNoProto:
@@ -3949,6 +4083,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
return false;
}
llvm_unreachable("bad type kind!");
@@ -4100,6 +4236,20 @@ bool Type::isCARCBridgableType() const {
return Pointee->isVoidType() || Pointee->isRecordType();
}
+/// Check if the specified type is the CUDA device builtin surface type.
+bool Type::isCUDADeviceBuiltinSurfaceType() const {
+ if (const auto *RT = getAs<RecordType>())
+ return RT->getDecl()->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
+ return false;
+}
+
+/// Check if the specified type is the CUDA device builtin texture type.
+bool Type::isCUDADeviceBuiltinTextureType() const {
+ if (const auto *RT = getAs<RecordType>())
+ return RT->getDecl()->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
+ return false;
+}
+
bool Type::hasSizedVLAType() const {
if (!isVariablyModifiedType()) return false;
@@ -4162,19 +4312,18 @@ void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
}
AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
- bool IsDeducedAsDependent, bool IsDeducedAsPack,
+ TypeDependence ExtraDependence,
ConceptDecl *TypeConstraintConcept,
ArrayRef<TemplateArgument> TypeConstraintArgs)
- : DeducedType(Auto, DeducedAsType, IsDeducedAsDependent,
- IsDeducedAsDependent, IsDeducedAsPack) {
+ : DeducedType(Auto, DeducedAsType, ExtraDependence) {
AutoTypeBits.Keyword = (unsigned)Keyword;
AutoTypeBits.NumArgs = TypeConstraintArgs.size();
this->TypeConstraintConcept = TypeConstraintConcept;
if (TypeConstraintConcept) {
TemplateArgument *ArgBuffer = getArgBuffer();
for (const TemplateArgument &Arg : TypeConstraintArgs) {
- if (Arg.containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(toTypeDependence(
+ Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack));
new (ArgBuffer++) TemplateArgument(Arg);
}
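
Several Type.cpp hunks above add the matrix types whose written form is the matrix_type attribute printed by TypePrinter further down. A short source-level sketch of what ConstantMatrixType and DependentSizedMatrixType correspond to; it assumes a clang with the matrix extension enabled (the -fenable-matrix flag and the subscript syntax are assumptions based on the extension as generally documented, not something this diff shows):

// Assumed build line: clang++ -fenable-matrix -std=c++17 -c matrix_demo.cpp

// A concrete 4x4 float matrix; TypePrinter above would render the type as
// "float __attribute__((matrix_type(4, 4)))" (a ConstantMatrixType).
typedef float m4x4_t __attribute__((matrix_type(4, 4)));

// Dependent dimensions are what DependentSizedMatrixType models; they become
// constant once the alias is instantiated.
template <typename T, unsigned R, unsigned C>
using matrix_t = T __attribute__((matrix_type(R, C)));

using d2x3_t = matrix_t<double, 2, 3>; // instantiates to matrix_type(2, 3)

float trace(const m4x4_t &M) {
  float S = 0;
  for (unsigned I = 0; I < 4; ++I)
    S += M[I][I]; // element access via the extension's subscript syntax
  return S;
}
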
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index 665a86f2c143..57c11ca5571d 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -375,6 +375,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
llvm_unreachable("Builtin type needs extra local data!");
// Fall through, if the impossible happens.
@@ -403,7 +404,10 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return TST_unspecified;
}
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index 4a7e765a2bd8..6f6932e65214 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -227,6 +227,8 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
CanPrefixQualifiers = true;
break;
@@ -254,6 +256,8 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Paren:
@@ -718,6 +722,38 @@ void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
OS << ")))";
}
+void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T,
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+ OS << " __attribute__((matrix_type(";
+ OS << T->getNumRows() << ", " << T->getNumColumns();
+ OS << ")))";
+}
+
+void TypePrinter::printConstantMatrixAfter(const ConstantMatrixType *T,
+ raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printDependentSizedMatrixBefore(
+ const DependentSizedMatrixType *T, raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+ OS << " __attribute__((matrix_type(";
+ if (T->getRowExpr()) {
+ T->getRowExpr()->printPretty(OS, nullptr, Policy);
+ }
+ OS << ", ";
+ if (T->getColumnExpr()) {
+ T->getColumnExpr()->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")))";
+}
+
+void TypePrinter::printDependentSizedMatrixAfter(
+ const DependentSizedMatrixType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
void
FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
const PrintingPolicy &Policy)
@@ -909,6 +945,8 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
if (Info.getNoReturn())
OS << " __attribute__((noreturn))";
+ if (Info.getCmseNSCall())
+ OS << " __attribute__((cmse_nonsecure_call))";
if (Info.getProducesResult())
OS << " __attribute__((ns_returns_retained))";
if (Info.getRegParm())
@@ -1112,6 +1150,28 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {}
+void TypePrinter::printExtIntBefore(const ExtIntType *T, raw_ostream &OS) {
+ if (T->isUnsigned())
+ OS << "unsigned ";
+ OS << "_ExtInt(" << T->getNumBits() << ")";
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printExtIntAfter(const ExtIntType *T, raw_ostream &OS) {}
+
+void TypePrinter::printDependentExtIntBefore(const DependentExtIntType *T,
+ raw_ostream &OS) {
+ if (T->isUnsigned())
+ OS << "unsigned ";
+ OS << "_ExtInt(";
+ T->getNumBitsExpr()->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T,
+ raw_ostream &OS) {}
+
/// Appends the given scope to the end of a string.
void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
if (DC->isTranslationUnit()) return;
@@ -1303,7 +1363,12 @@ void TypePrinter::printTemplateSpecializationAfter(
void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
raw_ostream &OS) {
- printTemplateSpecializationBefore(T->getInjectedTST(), OS);
+ if (Policy.PrintInjectedClassNameWithArguments)
+ return printTemplateSpecializationBefore(T->getInjectedTST(), OS);
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName().print(OS, Policy);
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
@@ -1386,7 +1451,7 @@ void TypePrinter::printDependentTemplateSpecializationBefore(
if (T->getQualifier())
T->getQualifier()->print(OS, Policy);
- OS << T->getIdentifier()->getName();
+ OS << "template " << T->getIdentifier()->getName();
printTemplateArgumentList(OS, T->template_arguments(), Policy);
spaceBeforePlaceHolder(OS);
}
@@ -1519,6 +1584,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::SPtr:
case attr::UPtr:
case attr::AddressSpace:
+ case attr::CmseNSCall:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -1563,6 +1629,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::AcquireHandle:
OS << "acquire_handle";
break;
+ case attr::ArmMveStrictPolymorphism:
+ OS << "__clang_arm_mve_strict_polymorphism";
+ break;
}
OS << "))";
}
@@ -1716,13 +1785,13 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
OS << ArgString;
- NeedSpace = (!ArgString.empty() && ArgString.back() == '>');
+ // If the last character of our string is '>', add another space to
+ // keep the two '>''s separate tokens.
+ NeedSpace = Policy.SplitTemplateClosers && !ArgString.empty() &&
+ ArgString.back() == '>';
FirstArg = false;
}
- // If the last character of our string is '>', add another space to
- // keep the two '>''s separate tokens. We don't *have* to do this in
- // C++0x, but it's still good hygiene.
if (NeedSpace)
OS << ' ';
@@ -1760,7 +1829,7 @@ std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
SmallString<64> Buf;
llvm::raw_svector_ostream StrOS(Buf);
print(StrOS, Policy);
- return StrOS.str();
+ return std::string(StrOS.str());
}
bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
@@ -1918,6 +1987,6 @@ void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
SmallString<256> Buf;
llvm::raw_svector_ostream StrOS(Buf);
TypePrinter(policy).print(ty, qs, StrOS, buffer);
- std::string str = StrOS.str();
+ std::string str = std::string(StrOS.str());
buffer.swap(str);
}
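
printExtIntBefore and printDependentExtIntBefore above reproduce the written form of the extension, so a round trip through the printer yields the spelling used in source. A short sketch of that spelling as this patch knows it; later compilers renamed the feature, which is outside this diff, and the template use below is an assumption about what the dependent form accepts:

// ExtIntType: fixed bit-width; printed back as "unsigned _ExtInt(24)".
typedef unsigned _ExtInt(24) u24;

// DependentExtIntType: the bit-width is a dependent expression until the
// template is instantiated.
template <int N>
using uint_n = unsigned _ExtInt(N);

u24 add(u24 a, u24 b) { return a + b; }
uint_n<10> ten_bits = 0;
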
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index 2b5b74be5961..f5865ce96b64 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -408,7 +408,7 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
// Now dump the overriders for this base subobject.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!VTableContextBase::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -486,8 +486,8 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
const CXXMethodDecl *RHS) {
- assert(LHS->isVirtual() && "LHS must be virtual!");
- assert(RHS->isVirtual() && "LHS must be virtual!");
+ assert(VTableContextBase::hasVtableSlot(LHS) && "LHS must be virtual!");
+ assert(VTableContextBase::hasVtableSlot(RHS) && "LHS must be virtual!");
// A destructor can share a vcall offset with another destructor.
if (isa<CXXDestructorDecl>(LHS))
@@ -535,6 +535,8 @@ public:
VBaseOffsetOffsetsMapTy;
private:
+ const ItaniumVTableContext &VTables;
+
/// MostDerivedClass - The most derived class for which we're building vcall
/// and vbase offsets.
const CXXRecordDecl *MostDerivedClass;
@@ -583,13 +585,15 @@ private:
CharUnits getCurrentOffsetOffset() const;
public:
- VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ VCallAndVBaseOffsetBuilder(const ItaniumVTableContext &VTables,
+ const CXXRecordDecl *MostDerivedClass,
const CXXRecordDecl *LayoutClass,
const FinalOverriders *Overriders,
BaseSubobject Base, bool BaseIsVirtual,
CharUnits OffsetInLayoutClass)
- : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
- Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(Overriders) {
// Add vcall and vbase offsets.
AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
@@ -662,9 +666,13 @@ CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
// vcall offset itself).
int64_t OffsetIndex = -(int64_t)(3 + Components.size());
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+ // Under the relative ABI, the offset widths are 32-bit ints instead of
+ // pointer widths.
+ CharUnits OffsetWidth = Context.toCharUnitsFromBits(
+ VTables.isRelativeLayout() ? 32
+ : Context.getTargetInfo().getPointerWidth(0));
+ CharUnits OffsetOffset = OffsetWidth * OffsetIndex;
+
return OffsetOffset;
}
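Worked arithmetic for the expression above, assuming two components are already in the vector:

    int64_t OffsetIndex = -(int64_t)(3 + 2);   // == -5
    // Classic Itanium ABI, 64-bit pointers: 8 bytes per slot,
    //   OffsetOffset = 8 * -5 = -40 (CharUnits).
    // Relative vtable ABI: every entry is a 32-bit int, 4 bytes per slot,
    //   OffsetOffset = 4 * -5 = -20 (CharUnits).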
@@ -689,7 +697,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Add the vcall offsets.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!VTableContextBase::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -1077,7 +1085,7 @@ typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
template <class VisitorTy>
static void
visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
- assert(MD->isVirtual() && "Method is not virtual!");
+ assert(VTableContextBase::hasVtableSlot(MD) && "Method is not virtual!");
for (const CXXMethodDecl *OverriddenMD : MD->overridden_methods()) {
if (!Visitor(OverriddenMD))
@@ -1271,13 +1279,13 @@ ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
if (VCallOffsets.empty()) {
// We don't have vcall offsets for this virtual base, go ahead and
// build them.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
- /*Overriders=*/nullptr,
- BaseSubobject(Offset.VirtualBase,
- CharUnits::Zero()),
- /*BaseIsVirtual=*/true,
- /*OffsetInLayoutClass=*/
- CharUnits::Zero());
+ VCallAndVBaseOffsetBuilder Builder(
+ VTables, MostDerivedClass, MostDerivedClass,
+ /*Overriders=*/nullptr,
+ BaseSubobject(Offset.VirtualBase, CharUnits::Zero()),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
VCallOffsets = Builder.getVCallOffsets();
}
@@ -1474,14 +1482,14 @@ void ItaniumVTableBuilder::AddMethods(
llvm_unreachable("Found a duplicate primary base!");
}
- const CXXDestructorDecl *ImplicitVirtualDtor = nullptr;
-
typedef llvm::SmallVector<const CXXMethodDecl *, 8> NewVirtualFunctionsTy;
NewVirtualFunctionsTy NewVirtualFunctions;
+ llvm::SmallVector<const CXXMethodDecl*, 4> NewImplicitVirtualFunctions;
+
// Now go through all virtual member functions and add them.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!ItaniumVTableContext::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -1542,24 +1550,30 @@ void ItaniumVTableBuilder::AddMethods(
}
}
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- if (MD->isImplicit()) {
- // Itanium C++ ABI 2.5.2:
- // If a class has an implicitly-defined virtual destructor,
- // its entries come after the declared virtual function pointers.
-
- assert(!ImplicitVirtualDtor &&
- "Did already see an implicit virtual dtor!");
- ImplicitVirtualDtor = DD;
- continue;
- }
- }
-
- NewVirtualFunctions.push_back(MD);
- }
-
- if (ImplicitVirtualDtor)
- NewVirtualFunctions.push_back(ImplicitVirtualDtor);
+ if (MD->isImplicit())
+ NewImplicitVirtualFunctions.push_back(MD);
+ else
+ NewVirtualFunctions.push_back(MD);
+ }
+
+ std::stable_sort(
+ NewImplicitVirtualFunctions.begin(), NewImplicitVirtualFunctions.end(),
+ [](const CXXMethodDecl *A, const CXXMethodDecl *B) {
+ if (A->isCopyAssignmentOperator() != B->isCopyAssignmentOperator())
+ return A->isCopyAssignmentOperator();
+ if (A->isMoveAssignmentOperator() != B->isMoveAssignmentOperator())
+ return A->isMoveAssignmentOperator();
+ if (isa<CXXDestructorDecl>(A) != isa<CXXDestructorDecl>(B))
+ return isa<CXXDestructorDecl>(A);
+ assert(A->getOverloadedOperator() == OO_EqualEqual &&
+ B->getOverloadedOperator() == OO_EqualEqual &&
+ "unexpected or duplicate implicit virtual function");
+ // We rely on Sema to have declared the operator== members in the
+ // same order as the corresponding operator<=> members.
+ return false;
+ });
+ NewVirtualFunctions.append(NewImplicitVirtualFunctions.begin(),
+ NewImplicitVirtualFunctions.end());
for (const CXXMethodDecl *MD : NewVirtualFunctions) {
// Get the final overrider.
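The comparator above is a priority sort written as pairwise tie-breakers; a minimal standalone sketch of the same pattern (the Fn struct and field names are invented for illustration):

    #include <algorithm>
    #include <vector>
    struct Fn { bool Copy = false, Move = false, Dtor = false; };  // else operator==
    std::vector<Fn> Implicit;  // the gathered implicit virtual members
    std::stable_sort(Implicit.begin(), Implicit.end(),
                     [](const Fn &A, const Fn &B) {
                       if (A.Copy != B.Copy) return A.Copy;  // copy assignment first
                       if (A.Move != B.Move) return A.Move;  // then move assignment
                       if (A.Dtor != B.Dtor) return A.Dtor;  // then the destructor
                       return false;  // operator== entries keep declaration order
                     });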
@@ -1629,9 +1643,9 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
VTableIndices.push_back(VTableIndex);
// Add vcall and vbase offsets for this vtable.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
- Base, BaseIsVirtualInLayoutClass,
- OffsetInLayoutClass);
+ VCallAndVBaseOffsetBuilder Builder(
+ VTables, MostDerivedClass, LayoutClass, &Overriders, Base,
+ BaseIsVirtualInLayoutClass, OffsetInLayoutClass);
Components.append(Builder.components_begin(), Builder.components_end());
// Check if we need to add these vcall offsets.
@@ -2155,7 +2169,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
for (const auto *MD : MostDerivedClass->methods()) {
// We only want virtual member functions.
- if (!MD->isVirtual())
+ if (!ItaniumVTableContext::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -2194,12 +2208,40 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
}
}
+static VTableLayout::AddressPointsIndexMapTy
+MakeAddressPointIndices(const VTableLayout::AddressPointsMapTy &addressPoints,
+ unsigned numVTables) {
+ VTableLayout::AddressPointsIndexMapTy indexMap(numVTables);
+
+ for (auto it = addressPoints.begin(); it != addressPoints.end(); ++it) {
+ const auto &addressPointLoc = it->second;
+ unsigned vtableIndex = addressPointLoc.VTableIndex;
+ unsigned addressPoint = addressPointLoc.AddressPointIndex;
+ if (indexMap[vtableIndex]) {
+ // Multiple BaseSubobjects can map to the same AddressPointLocation, but
+ // every vtable index should have a unique address point.
+ assert(indexMap[vtableIndex] == addressPoint &&
+ "Every vtable index should have a unique address point. Found a "
+ "vtable that has two different address points.");
+ } else {
+ indexMap[vtableIndex] = addressPoint;
+ }
+ }
+
+ // Note that by this point, not all the address points may be initialized if
+ // the AddressPoints map is empty. This is OK if the map isn't needed. See
+ // MicrosoftVTableContext::computeVTableRelatedInformation(), which uses an
+ // empty map.

+ return indexMap;
+}
+
VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
ArrayRef<VTableComponent> VTableComponents,
ArrayRef<VTableThunkTy> VTableThunks,
const AddressPointsMapTy &AddressPoints)
: VTableComponents(VTableComponents), VTableThunks(VTableThunks),
- AddressPoints(AddressPoints) {
+ AddressPoints(AddressPoints), AddressPointIndices(MakeAddressPointIndices(
+ AddressPoints, VTableIndices.size())) {
if (VTableIndices.size() <= 1)
assert(VTableIndices.size() == 1 && VTableIndices[0] == 0);
else
@@ -2215,8 +2257,13 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
VTableLayout::~VTableLayout() { }
-ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
- : VTableContextBase(/*MS=*/false) {}
+bool VTableContextBase::hasVtableSlot(const CXXMethodDecl *MD) {
+ return MD->isVirtual() && !MD->isConsteval();
+}
+
+ItaniumVTableContext::ItaniumVTableContext(
+ ASTContext &Context, VTableComponentLayout ComponentLayout)
+ : VTableContextBase(/*MS=*/false), ComponentLayout(ComponentLayout) {}
ItaniumVTableContext::~ItaniumVTableContext() {}
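hasVtableSlot() above is why consteval virtual functions never receive vtable entries; a minimal C++20 sketch (names invented):

    struct S {
      virtual void live();                       // isVirtual() && !isConsteval(): gets a slot
      virtual consteval int tag() { return 1; }  // consteval: resolved at compile time, no slot
    };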
@@ -2245,7 +2292,7 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
if (I != VirtualBaseClassOffsetOffsets.end())
return I->second;
- VCallAndVBaseOffsetBuilder Builder(RD, RD, /*Overriders=*/nullptr,
+ VCallAndVBaseOffsetBuilder Builder(*this, RD, RD, /*Overriders=*/nullptr,
BaseSubobject(RD, CharUnits::Zero()),
/*BaseIsVirtual=*/false,
/*OffsetInLayoutClass=*/CharUnits::Zero());
@@ -2494,8 +2541,9 @@ private:
BasesSetVectorTy VisitedBases;
AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, nullptr,
VisitedBases);
- assert((HasRTTIComponent ? Components.size() - 1 : Components.size()) &&
- "vftable can't be empty");
+ // Note that it is possible for the vftable to contain only an RTTI
+ // pointer, if all virtual functions are consteval.
+ assert(!Components.empty() && "vftable can't be empty");
assert(MethodVFTableLocations.empty());
for (const auto &I : MethodInfoMap) {
@@ -2874,7 +2922,7 @@ static void GroupNewVirtualOverloads(
if (Inserted)
Groups.push_back(MethodGroup());
if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isVirtual())
+ if (MicrosoftVTableContext::hasVtableSlot(MD))
Groups[J->second].push_back(MD->getCanonicalDecl());
}
@@ -3470,7 +3518,7 @@ static const FullPathTy *selectBestPath(ASTContext &Context,
getOffsetOfFullPath(Context, TopLevelRD, SpecificPath);
FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD);
for (const CXXMethodDecl *MD : Info.IntroducingObject->methods()) {
- if (!MD->isVirtual())
+ if (!MicrosoftVTableContext::hasVtableSlot(MD))
continue;
FinalOverriders::OverriderInfo OI =
Overriders.getOverrider(MD->getCanonicalDecl(), BaseOffset);
@@ -3609,7 +3657,7 @@ void MicrosoftVTableContext::dumpMethodLocations(
for (const auto &I : NewMethods) {
const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I.first.getDecl());
- assert(MD->isVirtual());
+ assert(hasVtableSlot(MD));
std::string MethodName = PredefinedExpr::ComputeName(
PredefinedExpr::PrettyFunctionNoVirtual, MD);
@@ -3729,7 +3777,7 @@ MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD,
MethodVFTableLocation
MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
- assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() &&
+ assert(hasVtableSlot(cast<CXXMethodDecl>(GD.getDecl())) &&
"Only use this method for virtual methods or dtors");
if (isa<CXXDestructorDecl>(GD.getDecl()))
assert(GD.getDtorType() == Dtor_Deleting);
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 0d1f713db8d3..e88da16dd3d4 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -43,6 +43,13 @@ typedef MatchFinder::MatchCallback MatchCallback;
// optimize this on.
static const unsigned MaxMemoizationEntries = 10000;
+enum class MatchType {
+ Ancestors,
+
+ Descendants,
+ Child,
+};
+
// We use memoization to avoid running the same matcher on the same
// AST node twice. This struct is the key for looking up match
// result. It consists of an ID of the MatcherInterface (for
@@ -57,14 +64,15 @@ static const unsigned MaxMemoizationEntries = 10000;
// provides enough benefit for the additional amount of code.
struct MatchKey {
DynTypedMatcher::MatcherIDType MatcherID;
- ast_type_traits::DynTypedNode Node;
+ DynTypedNode Node;
BoundNodesTreeBuilder BoundNodes;
- ast_type_traits::TraversalKind Traversal = ast_type_traits::TK_AsIs;
+ TraversalKind Traversal = TK_AsIs;
+ MatchType Type;
bool operator<(const MatchKey &Other) const {
- return std::tie(MatcherID, Node, BoundNodes, Traversal) <
- std::tie(Other.MatcherID, Other.Node, Other.BoundNodes,
- Other.Traversal);
+ return std::tie(Traversal, Type, MatcherID, Node, BoundNodes) <
+ std::tie(Other.Traversal, Other.Type, Other.MatcherID, Other.Node,
+ Other.BoundNodes);
}
};
@@ -87,8 +95,7 @@ public:
// matching the descendants.
MatchChildASTVisitor(const DynTypedMatcher *Matcher, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- ASTMatchFinder::BindKind Bind)
+ TraversalKind Traversal, ASTMatchFinder::BindKind Bind)
: Matcher(Matcher), Finder(Finder), Builder(Builder), CurrentDepth(0),
MaxDepth(MaxDepth), Traversal(Traversal), Bind(Bind), Matches(false) {}
@@ -103,7 +110,7 @@ public:
// Traverse*(c) for each child c of 'node'.
// - Traverse*(c) in turn calls Traverse(c), completing the
// recursion.
- bool findMatch(const ast_type_traits::DynTypedNode &DynNode) {
+ bool findMatch(const DynTypedNode &DynNode) {
reset();
if (const Decl *D = DynNode.get<Decl>())
traverse(*D);
@@ -143,14 +150,16 @@ public:
Stmt *StmtToTraverse = StmtNode;
if (auto *ExprNode = dyn_cast_or_null<Expr>(StmtNode)) {
auto *LambdaNode = dyn_cast_or_null<LambdaExpr>(StmtNode);
- if (LambdaNode && Finder->getASTContext().getTraversalKind() ==
- ast_type_traits::TK_IgnoreUnlessSpelledInSource)
+ if (LambdaNode &&
+ Finder->getASTContext().getParentMapContext().getTraversalKind() ==
+ TK_IgnoreUnlessSpelledInSource)
StmtToTraverse = LambdaNode;
else
- StmtToTraverse = Finder->getASTContext().traverseIgnored(ExprNode);
+ StmtToTraverse =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(
+ ExprNode);
}
- if (Traversal ==
- ast_type_traits::TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
+ if (Traversal == TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
if (Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode))
StmtToTraverse = ExprNode->IgnoreParenImpCasts();
}
@@ -216,8 +225,8 @@ public:
return traverse(*CtorInit);
}
bool TraverseLambdaExpr(LambdaExpr *Node) {
- if (Finder->getASTContext().getTraversalKind() !=
- ast_type_traits::TK_IgnoreUnlessSpelledInSource)
+ if (Finder->getASTContext().getParentMapContext().getTraversalKind() !=
+ TK_IgnoreUnlessSpelledInSource)
return VisitorBase::TraverseLambdaExpr(Node);
if (!Node)
return true;
@@ -308,7 +317,7 @@ private:
}
if (Bind != ASTMatchFinder::BK_All) {
BoundNodesTreeBuilder RecursiveBuilder(*Builder);
- if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ if (Matcher->matches(DynTypedNode::create(Node), Finder,
&RecursiveBuilder)) {
Matches = true;
ResultBindings.addMatch(RecursiveBuilder);
@@ -316,7 +325,7 @@ private:
}
} else {
BoundNodesTreeBuilder RecursiveBuilder(*Builder);
- if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ if (Matcher->matches(DynTypedNode::create(Node), Finder,
&RecursiveBuilder)) {
// After the first match the matcher succeeds.
Matches = true;
@@ -343,7 +352,7 @@ private:
BoundNodesTreeBuilder ResultBindings;
int CurrentDepth;
const int MaxDepth;
- const ast_type_traits::TraversalKind Traversal;
+ const TraversalKind Traversal;
const ASTMatchFinder::BindKind Bind;
bool Matches;
};
@@ -440,12 +449,10 @@ public:
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ bool memoizedMatchesRecursively(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- BindKind Bind) {
+ TraversalKind Traversal, BindKind Bind) {
// For AST-nodes that don't have an identity, we can't memoize.
if (!Node.getMemoizationData() || !Builder->isComparable())
return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
@@ -456,8 +463,9 @@ public:
Key.Node = Node;
// Note that we key on the bindings *before* the match.
Key.BoundNodes = *Builder;
- Key.Traversal = Ctx.getTraversalKind();
-
+ Key.Traversal = Ctx.getParentMapContext().getTraversalKind();
+ // Memoize the result even when doing a single-level match; it might be expensive.
+ Key.Type = MaxDepth == 1 ? MatchType::Child : MatchType::Descendants;
MemoizationMap::iterator I = ResultCache.find(Key);
if (I != ResultCache.end()) {
*Builder = I->second.Nodes;
@@ -477,11 +485,10 @@ public:
}
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- bool matchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ bool matchesRecursively(const DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- BindKind Bind) {
+ TraversalKind Traversal, BindKind Bind) {
MatchChildASTVisitor Visitor(
&Matcher, this, Builder, MaxDepth, Traversal, Bind);
return Visitor.findMatch(Node);
@@ -498,10 +505,9 @@ public:
bool Directly) override;
// Implements ASTMatchFinder::matchesChildOf.
- bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder,
- ast_type_traits::TraversalKind Traversal,
+ bool matchesChildOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder, TraversalKind Traversal,
BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
@@ -509,19 +515,18 @@ public:
Bind);
}
// Implements ASTMatchFinder::matchesDescendantOf.
- bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
+ bool matchesDescendantOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
return memoizedMatchesRecursively(Node, Ctx, Matcher, Builder, INT_MAX,
- ast_type_traits::TraversalKind::TK_AsIs,
- Bind);
+ TraversalKind::TK_AsIs, Bind);
}
// Implements ASTMatchFinder::matchesAncestorOf.
- bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
+ bool matchesAncestorOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) override {
// Reset the cache outside of the recursive call to make sure we
@@ -534,7 +539,7 @@ public:
// Matches all registered matchers on the given node and calls the
// result callback for every node that matches.
- void match(const ast_type_traits::DynTypedNode &Node) {
+ void match(const DynTypedNode &Node) {
// FIXME: Improve this with a switch or a visitor pattern.
if (auto *N = Node.get<Decl>()) {
match(*N);
@@ -612,7 +617,7 @@ private:
}
}
- void matchWithFilter(const ast_type_traits::DynTypedNode &DynNode) {
+ void matchWithFilter(const DynTypedNode &DynNode) {
auto Kind = DynNode.getNodeKind();
auto it = MatcherFiltersMap.find(Kind);
const auto &Filter =
@@ -636,8 +641,7 @@ private:
}
}
- const std::vector<unsigned short> &
- getFilterForKind(ast_type_traits::ASTNodeKind Kind) {
+ const std::vector<unsigned short> &getFilterForKind(ASTNodeKind Kind) {
auto &Filter = MatcherFiltersMap[Kind];
auto &Matchers = this->Matchers->DeclOrStmt;
assert((Matchers.size() < USHRT_MAX) && "Too many matchers.");
@@ -652,10 +656,10 @@ private:
/// @{
/// Overloads to pair the different node types to their matchers.
void matchDispatch(const Decl *Node) {
- return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ return matchWithFilter(DynTypedNode::create(*Node));
}
void matchDispatch(const Stmt *Node) {
- return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ return matchWithFilter(DynTypedNode::create(*Node));
}
void matchDispatch(const Type *Node) {
@@ -692,12 +696,16 @@ private:
// Once there are multiple parents, the breadth first search order does not
// allow simple memoization on the ancestors. Thus, we only memoize as long
// as there is a single parent.
- bool memoizedMatchesAncestorOfRecursively(
- const ast_type_traits::DynTypedNode &Node, ASTContext &Ctx,
- const DynTypedMatcher &Matcher, BoundNodesTreeBuilder *Builder,
- AncestorMatchMode MatchMode) {
+ bool memoizedMatchesAncestorOfRecursively(const DynTypedNode &Node,
+ ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) {
// For AST-nodes that don't have an identity, we can't memoize.
- if (!Builder->isComparable())
+ // When doing a single-level match, we don't need to memoize because
+ // ParentMap (in ASTContext) already memoizes the result.
+ if (!Builder->isComparable() ||
+ MatchMode == AncestorMatchMode::AMM_ParentOnly)
return matchesAncestorOfRecursively(Node, Ctx, Matcher, Builder,
MatchMode);
@@ -705,7 +713,8 @@ private:
Key.MatcherID = Matcher.getID();
Key.Node = Node;
Key.BoundNodes = *Builder;
- Key.Traversal = Ctx.getTraversalKind();
+ Key.Traversal = Ctx.getParentMapContext().getTraversalKind();
+ Key.Type = MatchType::Ancestors;
// Note that we cannot use insert and reuse the iterator, as recursive
// calls to match might invalidate the result cache iterators.
@@ -727,8 +736,7 @@ private:
return CachedResult.ResultOfMatch;
}
- bool matchesAncestorOfRecursively(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ bool matchesAncestorOfRecursively(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) {
@@ -747,7 +755,7 @@ private:
return D->getKind() == Decl::TranslationUnit;
})) {
llvm::errs() << "Tried to match orphan node:\n";
- Node.dump(llvm::errs(), ActiveASTContext->getSourceManager());
+ Node.dump(llvm::errs(), *ActiveASTContext);
llvm_unreachable("Parent map should be complete!");
}
#endif
@@ -755,7 +763,7 @@ private:
}
if (Parents.size() == 1) {
// Only one parent - do recursive memoization.
- const ast_type_traits::DynTypedNode Parent = Parents[0];
+ const DynTypedNode Parent = Parents[0];
BoundNodesTreeBuilder BuilderCopy = *Builder;
if (Matcher.matches(Parent, this, &BuilderCopy)) {
*Builder = std::move(BuilderCopy);
@@ -770,8 +778,7 @@ private:
} else {
// Multiple parents - BFS over the rest of the nodes.
llvm::DenseSet<const void *> Visited;
- std::deque<ast_type_traits::DynTypedNode> Queue(Parents.begin(),
- Parents.end());
+ std::deque<DynTypedNode> Queue(Parents.begin(), Parents.end());
while (!Queue.empty()) {
BoundNodesTreeBuilder BuilderCopy = *Builder;
if (Matcher.matches(Queue.front(), this, &BuilderCopy)) {
@@ -861,8 +868,7 @@ private:
/// kind (and derived kinds) so it is a waste to try every matcher on every
/// node.
/// We precalculate a list of matchers that pass the toplevel restrict check.
- llvm::DenseMap<ast_type_traits::ASTNodeKind, std::vector<unsigned short>>
- MatcherFiltersMap;
+ llvm::DenseMap<ASTNodeKind, std::vector<unsigned short>> MatcherFiltersMap;
const MatchFinder::MatchFinderOptions &Options;
ASTContext *ActiveASTContext;
@@ -923,9 +929,8 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
if (!ClassDecl)
continue;
if (ClassDecl == Declaration) {
- // This can happen for recursive template definitions; if the
- // current declaration did not match, we can safely return false.
- return false;
+ // This can happen for recursive template definitions.
+ continue;
}
BoundNodesTreeBuilder Result(*Builder);
if (Base.matches(*ClassDecl, this, &Result)) {
@@ -1137,8 +1142,7 @@ std::unique_ptr<ASTConsumer> MatchFinder::newASTConsumer() {
return std::make_unique<internal::MatchASTConsumer>(this, ParsingDone);
}
-void MatchFinder::match(const clang::ast_type_traits::DynTypedNode &Node,
- ASTContext &Context) {
+void MatchFinder::match(const clang::DynTypedNode &Node, ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.match(Node);
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index ae127d775846..4b9baf7a0e75 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -15,9 +15,11 @@
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Lex/Lexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/None.h"
@@ -27,6 +29,8 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -40,39 +44,56 @@ namespace ast_matchers {
AST_MATCHER_P(ObjCMessageExpr, hasAnySelectorMatcher, std::vector<std::string>,
Matches) {
- std::string SelString = Node.getSelector().getAsString();
- for (const std::string &S : Matches)
- if (S == SelString)
- return true;
- return false;
+ return llvm::is_contained(Matches, Node.getSelector().getAsString());
}
namespace internal {
-bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool OptionallyVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
+bool matchesAnyBase(const CXXRecordDecl &Node,
+ const Matcher<CXXBaseSpecifier> &BaseSpecMatcher,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder) {
+ if (!Node.hasDefinition())
+ return false;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(&Node);
+
+ const auto basePredicate =
+ [Finder, Builder, &BaseSpecMatcher](const CXXBaseSpecifier *BaseSpec,
+ CXXBasePath &IgnoredParam) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (BaseSpecMatcher.matches(*BaseSpec, Finder, Builder)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ return false;
+ };
+
+ return Node.lookupInBases(basePredicate, Paths,
+ /*LookupInDependent =*/true);
+}
+
void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
if (Bindings.empty())
Bindings.push_back(BoundNodesMap());
@@ -84,7 +105,7 @@ void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
namespace {
using VariadicOperatorFunction = bool (*)(
- const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder, ArrayRef<DynTypedMatcher> InnerMatchers);
template <VariadicOperatorFunction Func>
@@ -93,8 +114,7 @@ public:
VariadicMatcher(std::vector<DynTypedMatcher> InnerMatchers)
: InnerMatchers(std::move(InnerMatchers)) {}
- bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
return Func(DynNode, Finder, Builder, InnerMatchers);
}
@@ -109,16 +129,14 @@ public:
IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher)
: ID(ID), InnerMatcher(std::move(InnerMatcher)) {}
- bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
bool Result = InnerMatcher->dynMatches(DynNode, Finder, Builder);
if (Result) Builder->setBinding(ID, DynNode);
return Result;
}
- llvm::Optional<ast_type_traits::TraversalKind>
- TraversalKind() const override {
+ llvm::Optional<clang::TraversalKind> TraversalKind() const override {
return InnerMatcher->TraversalKind();
}
@@ -138,20 +156,45 @@ public:
Retain(); // Reference count will never become zero.
}
- bool dynMatches(const ast_type_traits::DynTypedNode &, ASTMatchFinder *,
+ bool dynMatches(const DynTypedNode &, ASTMatchFinder *,
BoundNodesTreeBuilder *) const override {
return true;
}
};
+/// A matcher that specifies a particular \c TraversalKind.
+///
+/// The kind provided to the constructor overrides any kind that may be
+/// specified by the `InnerMatcher`.
+class DynTraversalMatcherImpl : public DynMatcherInterface {
+public:
+ explicit DynTraversalMatcherImpl(
+ clang::TraversalKind TK,
+ IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher)
+ : TK(TK), InnerMatcher(std::move(InnerMatcher)) {}
+
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ return this->InnerMatcher->dynMatches(DynNode, Finder, Builder);
+ }
+
+ llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ return TK;
+ }
+
+private:
+ clang::TraversalKind TK;
+ IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
+};
+
} // namespace
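A hypothetical usage sketch of the wrapper above, via DynTypedMatcher::withTraversalKind (the matcher chosen is arbitrary):

    DynTypedMatcher M = ast_matchers::returnStmt();
    DynTypedMatcher Spelled = M.withTraversalKind(TK_IgnoreUnlessSpelledInSource);
    // Spelled matches the same nodes as M, but traversal skips nodes not
    // spelled in the source, regardless of what M's inner matcher requested.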
static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
-DynTypedMatcher DynTypedMatcher::constructVariadic(
- DynTypedMatcher::VariadicOperator Op,
- ast_type_traits::ASTNodeKind SupportedKind,
- std::vector<DynTypedMatcher> InnerMatchers) {
+DynTypedMatcher
+DynTypedMatcher::constructVariadic(DynTypedMatcher::VariadicOperator Op,
+ ASTNodeKind SupportedKind,
+ std::vector<DynTypedMatcher> InnerMatchers) {
assert(!InnerMatchers.empty() && "Array must not be empty.");
assert(llvm::all_of(InnerMatchers,
[SupportedKind](const DynTypedMatcher &M) {
@@ -172,8 +215,8 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
// invalid types earlier and we can elide the kind checks inside the
// matcher.
for (auto &IM : InnerMatchers) {
- RestrictKind = ast_type_traits::ASTNodeKind::getMostDerivedType(
- RestrictKind, IM.RestrictKind);
+ RestrictKind =
+ ASTNodeKind::getMostDerivedType(RestrictKind, IM.RestrictKind);
}
return DynTypedMatcher(
SupportedKind, RestrictKind,
@@ -204,40 +247,45 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
llvm_unreachable("Invalid Op value.");
}
-DynTypedMatcher DynTypedMatcher::constructRestrictedWrapper(
- const DynTypedMatcher &InnerMatcher,
- ast_type_traits::ASTNodeKind RestrictKind) {
+DynTypedMatcher
+DynTypedMatcher::constructRestrictedWrapper(const DynTypedMatcher &InnerMatcher,
+ ASTNodeKind RestrictKind) {
DynTypedMatcher Copy = InnerMatcher;
Copy.RestrictKind = RestrictKind;
return Copy;
}
-DynTypedMatcher DynTypedMatcher::trueMatcher(
- ast_type_traits::ASTNodeKind NodeKind) {
+DynTypedMatcher
+DynTypedMatcher::withTraversalKind(ast_type_traits::TraversalKind TK) {
+ auto Copy = *this;
+ Copy.Implementation =
+ new DynTraversalMatcherImpl(TK, std::move(Copy.Implementation));
+ return Copy;
+}
+
+DynTypedMatcher DynTypedMatcher::trueMatcher(ASTNodeKind NodeKind) {
return DynTypedMatcher(NodeKind, NodeKind, &*TrueMatcherInstance);
}
-bool DynTypedMatcher::canMatchNodesOfKind(
- ast_type_traits::ASTNodeKind Kind) const {
+bool DynTypedMatcher::canMatchNodesOfKind(ASTNodeKind Kind) const {
return RestrictKind.isBaseOf(Kind);
}
-DynTypedMatcher DynTypedMatcher::dynCastTo(
- const ast_type_traits::ASTNodeKind Kind) const {
+DynTypedMatcher DynTypedMatcher::dynCastTo(const ASTNodeKind Kind) const {
auto Copy = *this;
Copy.SupportedKind = Kind;
- Copy.RestrictKind =
- ast_type_traits::ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
+ Copy.RestrictKind = ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
return Copy;
}
-bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
+bool DynTypedMatcher::matches(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
TraversalKindScope RAII(Finder->getASTContext(),
Implementation->TraversalKind());
- auto N = Finder->getASTContext().traverseIgnored(DynNode);
+ auto N =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
if (RestrictKind.isBaseOf(N.getNodeKind()) &&
Implementation->dynMatches(N, Finder, Builder)) {
@@ -250,13 +298,14 @@ bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
return false;
}
-bool DynTypedMatcher::matchesNoKindCheck(
- const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
+bool DynTypedMatcher::matchesNoKindCheck(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
TraversalKindScope raii(Finder->getASTContext(),
Implementation->TraversalKind());
- auto N = Finder->getASTContext().traverseIgnored(DynNode);
+ auto N =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
assert(RestrictKind.isBaseOf(N.getNodeKind()));
if (Implementation->dynMatches(N, Finder, Builder)) {
@@ -277,10 +326,10 @@ llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
return std::move(Result);
}
-bool DynTypedMatcher::canConvertTo(ast_type_traits::ASTNodeKind To) const {
+bool DynTypedMatcher::canConvertTo(ASTNodeKind To) const {
const auto From = getSupportedKind();
- auto QualKind = ast_type_traits::ASTNodeKind::getFromNodeKind<QualType>();
- auto TypeKind = ast_type_traits::ASTNodeKind::getFromNodeKind<Type>();
+ auto QualKind = ASTNodeKind::getFromNodeKind<QualType>();
+ auto TypeKind = ASTNodeKind::getFromNodeKind<Type>();
/// Mimic the implicit conversions of Matcher<>.
/// - From Matcher<Type> to Matcher<QualType>
if (From.isSame(TypeKind) && To.isSame(QualKind)) return true;
@@ -292,8 +341,8 @@ void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
Bindings.append(Other.Bindings.begin(), Other.Bindings.end());
}
-bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
if (InnerMatchers.size() != 1)
return false;
@@ -312,22 +361,18 @@ bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
return !InnerMatchers[0].matches(DynNode, Finder, &Discard);
}
-bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
// allOf leads to one matcher for each alternative in the first
// matcher combined with each alternative in the second matcher.
// Thus, we can reuse the same Builder.
- for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
- if (!InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder))
- return false;
- }
- return true;
+ return llvm::all_of(InnerMatchers, [&](const DynTypedMatcher &InnerMatcher) {
+ return InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder);
+ });
}
-bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
BoundNodesTreeBuilder Result;
@@ -343,8 +388,7 @@ bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
return Matched;
}
-bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
@@ -357,31 +401,31 @@ bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
return false;
}
-bool OptionallyVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
- BoundNodesTreeBuilder Result;
- for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
- BoundNodesTreeBuilder BuilderInner(*Builder);
- if (InnerMatcher.matches(DynNode, Finder, &BuilderInner))
- Result.addMatch(BuilderInner);
- }
- *Builder = std::move(Result);
+ if (InnerMatchers.size() != 1)
+ return false;
+
+ BoundNodesTreeBuilder Result(*Builder);
+ if (InnerMatchers[0].matches(DynNode, Finder, &Result))
+ *Builder = std::move(Result);
return true;
}
inline static
std::vector<std::string> vectorFromRefs(ArrayRef<const StringRef *> NameRefs) {
std::vector<std::string> Names;
+ Names.reserve(NameRefs.size());
for (auto *Name : NameRefs)
Names.emplace_back(*Name);
return Names;
}
Matcher<NamedDecl> hasAnyNameFunc(ArrayRef<const StringRef *> NameRefs) {
- std::vector<std::string> Names = vectorFromRefs(NameRefs);
- return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
+ return internal::Matcher<NamedDecl>(
+ new internal::HasNameMatcher(vectorFromRefs(NameRefs)));
}
Matcher<ObjCMessageExpr> hasAnySelectorFunc(
@@ -389,10 +433,18 @@ Matcher<ObjCMessageExpr> hasAnySelectorFunc(
return hasAnySelectorMatcher(vectorFromRefs(NameRefs));
}
+HasOpNameMatcher hasAnyOperatorNameFunc(ArrayRef<const StringRef *> NameRefs) {
+ return HasOpNameMatcher(vectorFromRefs(NameRefs));
+}
+
+HasOverloadOpNameMatcher
+hasAnyOverloadedOperatorNameFunc(ArrayRef<const StringRef *> NameRefs) {
+ return HasOverloadOpNameMatcher(vectorFromRefs(NameRefs));
+}
+
HasNameMatcher::HasNameMatcher(std::vector<std::string> N)
- : UseUnqualifiedMatch(std::all_of(
- N.begin(), N.end(),
- [](StringRef Name) { return Name.find("::") == Name.npos; })),
+ : UseUnqualifiedMatch(llvm::all_of(
+ N, [](StringRef Name) { return Name.find("::") == Name.npos; })),
Names(std::move(N)) {
#ifndef NDEBUG
for (StringRef Name : Names)
@@ -450,6 +502,7 @@ namespace {
class PatternSet {
public:
PatternSet(ArrayRef<std::string> Names) {
+ Patterns.reserve(Names.size());
for (StringRef Name : Names)
Patterns.push_back({Name, Name.startswith("::")});
}
@@ -474,10 +527,10 @@ public:
/// A match will be a pattern that was fully consumed, that also matches the
/// 'fully qualified' requirement.
bool foundMatch(bool AllowFullyQualified) const {
- for (auto& P: Patterns)
- if (P.P.empty() && (AllowFullyQualified || !P.IsFullyQualified))
- return true;
- return false;
+ return llvm::any_of(Patterns, [&](const Pattern &Pattern) {
+ return Pattern.P.empty() &&
+ (AllowFullyQualified || !Pattern.IsFullyQualified);
+ });
}
private:
@@ -598,6 +651,52 @@ bool HasNameMatcher::matchesNode(const NamedDecl &Node) const {
return matchesNodeFullFast(Node);
}
+// Checks whether \p Loc points to a token with source text of \p Text.
+static bool isTokenAtLoc(const SourceManager &SM, const LangOptions &LangOpts,
+ StringRef Text, SourceLocation Loc) {
+ llvm::SmallString<16> Buffer;
+ bool Invalid = false;
+ // Since `Loc` may point into an expansion buffer, which has no corresponding
+ // source, we need to look at the spelling location to read the actual source.
+ StringRef TokenText = Lexer::getSpelling(SM.getSpellingLoc(Loc), Buffer, SM,
+ LangOpts, &Invalid);
+ return !Invalid && Text == TokenText;
+}
+
+llvm::Optional<SourceLocation>
+getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
+ const ASTContext &Context) {
+ auto &SM = Context.getSourceManager();
+ const LangOptions &LangOpts = Context.getLangOpts();
+ while (Loc.isMacroID()) {
+ SrcMgr::ExpansionInfo Expansion =
+ SM.getSLocEntry(SM.getFileID(Loc)).getExpansion();
+ if (Expansion.isMacroArgExpansion())
+ // Check macro argument for an expansion of the given macro. For example,
+ // `F(G(3))`, where `MacroName` is `G`.
+ if (llvm::Optional<SourceLocation> ArgLoc = getExpansionLocOfMacro(
+ MacroName, Expansion.getSpellingLoc(), Context))
+ return ArgLoc;
+ Loc = Expansion.getExpansionLocStart();
+ if (isTokenAtLoc(SM, LangOpts, MacroName, Loc))
+ return Loc;
+ }
+ return llvm::None;
+}
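A hypothetical call pattern for the helper above (the macro name and expression E are invented):

    // Was some part of E spelled inside an expansion of a macro named NULL?
    if (llvm::Optional<SourceLocation> MacroLoc =
            getExpansionLocOfMacro("NULL", E->getBeginLoc(), Context)) {
      // *MacroLoc is the location of the "NULL" token in the expansion chain.
    }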
+
+std::shared_ptr<llvm::Regex> createAndVerifyRegex(StringRef Regex,
+ llvm::Regex::RegexFlags Flags,
+ StringRef MatcherID) {
+ assert(!Regex.empty() && "Empty regex string");
+ auto SharedRegex = std::make_shared<llvm::Regex>(Regex, Flags);
+ std::string Error;
+ if (!SharedRegex->isValid(Error)) {
+ llvm::WithColor::error()
+ << "building matcher '" << MatcherID << "': " << Error << "\n";
+ llvm::WithColor::note() << " input was '" << Regex << "'\n";
+ }
+ return SharedRegex;
+}
} // end namespace internal
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt>
@@ -653,6 +752,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
@@ -733,6 +833,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr>
materializeTemporaryExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
+ cxxNoexceptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
@@ -774,6 +876,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral;
+const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
+ fixedPointLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
@@ -827,12 +931,18 @@ const internal::VariadicOperatorMatcherFunc<
const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf = {internal::DynTypedMatcher::VO_AllOf};
-const internal::VariadicOperatorMatcherFunc<
- 1, std::numeric_limits<unsigned>::max()>
- optionally = {internal::DynTypedMatcher::VO_Optionally};
+const internal::VariadicOperatorMatcherFunc<1, 1> optionally = {
+ internal::DynTypedMatcher::VO_Optionally};
const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName = {};
+
+const internal::VariadicFunction<internal::HasOpNameMatcher, StringRef,
+ internal::hasAnyOperatorNameFunc>
+ hasAnyOperatorName = {};
+const internal::VariadicFunction<internal::HasOverloadOpNameMatcher, StringRef,
+ internal::hasAnyOverloadedOperatorNameFunc>
+ hasAnyOverloadedOperatorName = {};
const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef,
internal::hasAnySelectorFunc>
hasAnySelector = {};
@@ -864,6 +974,8 @@ const AstTypeMatcher<BuiltinType> builtinType;
const AstTypeMatcher<ArrayType> arrayType;
const AstTypeMatcher<ComplexType> complexType;
const AstTypeMatcher<ConstantArrayType> constantArrayType;
+const AstTypeMatcher<DeducedTemplateSpecializationType>
+ deducedTemplateSpecializationType;
const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
const AstTypeMatcher<VariableArrayType> variableArrayType;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index 8656bca870ec..88c2279afb2e 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -1,4 +1,4 @@
-//===--- Diagnostics.cpp - Helper class for error diagnostics -----*- C++ -*-===//
+//===--- Diagnostics.cpp - Helper class for error diagnostics ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -98,6 +98,8 @@ static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) {
return "Ambiguous matcher overload.";
case Diagnostics::ET_RegistryValueNotFound:
return "Value not found: $0";
+ case Diagnostics::ET_RegistryUnknownEnumWithReplace:
+ return "Unknown value '$1' for arg $0; did you mean '$2'";
case Diagnostics::ET_ParserStringError:
return "Error parsing string token: <$0>";
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
new file mode 100644
index 000000000000..989ee0fa75cd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -0,0 +1,172 @@
+//===--- Marshallers.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Marshallers.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Regex.h"
+#include <string>
+
+static llvm::Optional<std::string>
+getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
+ llvm::StringRef DropPrefix = "", unsigned MaxEditDistance = 3) {
+ if (MaxEditDistance != ~0U)
+ ++MaxEditDistance;
+ llvm::StringRef Res;
+ for (const llvm::StringRef &Item : Allowed) {
+ if (Item.equals_lower(Search)) {
+ assert(!Item.equals(Search) && "This should be handled earlier on.");
+ MaxEditDistance = 1;
+ Res = Item;
+ continue;
+ }
+ unsigned Distance = Item.edit_distance(Search);
+ if (Distance < MaxEditDistance) {
+ MaxEditDistance = Distance;
+ Res = Item;
+ }
+ }
+ if (!Res.empty())
+ return Res.str();
+ if (!DropPrefix.empty()) {
+ --MaxEditDistance; // Treat dropping the prefix as 1 edit
+ for (const llvm::StringRef &Item : Allowed) {
+ auto NoPrefix = Item;
+ if (!NoPrefix.consume_front(DropPrefix))
+ continue;
+ if (NoPrefix.equals_lower(Search)) {
+ if (NoPrefix.equals(Search))
+ return Item.str();
+ MaxEditDistance = 1;
+ Res = Item;
+ continue;
+ }
+ unsigned Distance = NoPrefix.edit_distance(Search);
+ if (Distance < MaxEditDistance) {
+ MaxEditDistance = Distance;
+ Res = Item;
+ }
+ }
+ if (!Res.empty())
+ return Res.str();
+ }
+ return llvm::None;
+}
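Illustrative results for the helper above (inputs invented; spellings within three edits of an allowed value are suggested, optionally after dropping the prefix):

    static const llvm::StringRef Allowed[] = {"CK_BitCast", "CK_NoOp"};
    getBestGuess("CK_BitCats", Allowed, "CK_");     // -> "CK_BitCast"
    getBestGuess("BitCast", Allowed, "CK_");        // -> "CK_BitCast"
    getBestGuess("somethingelse", Allowed, "CK_");  // -> llvm::None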
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::attr::Kind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define ATTR(X) "attr::" #X,
+#include "clang/Basic/AttrList.inc"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "attr::");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::CastKind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define CAST_OPERATION(Name) "CK_" #Name,
+#include "clang/AST/OperationKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "CK_");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::OpenMPClauseKind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) #Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "OMPC_");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::UnaryExprOrTypeTrait>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) "UETT_" #Name,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) "UETT_" #Name,
+#include "clang/Basic/TokenKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "UETT_");
+ return llvm::None;
+}
+
+static constexpr std::pair<llvm::StringRef, llvm::Regex::RegexFlags>
+ RegexMap[] = {
+ {"NoFlags", llvm::Regex::RegexFlags::NoFlags},
+ {"IgnoreCase", llvm::Regex::RegexFlags::IgnoreCase},
+ {"Newline", llvm::Regex::RegexFlags::Newline},
+ {"BasicRegex", llvm::Regex::RegexFlags::BasicRegex},
+};
+
+llvm::Optional<llvm::Regex::RegexFlags> getRegexFlag(llvm::StringRef Flag) {
+ for (const auto &StringFlag : RegexMap) {
+ if (Flag == StringFlag.first)
+ return StringFlag.second;
+ }
+ return llvm::None;
+}
+
+llvm::Optional<llvm::StringRef> getCloseRegexMatch(llvm::StringRef Flag) {
+ for (const auto &StringFlag : RegexMap) {
+ if (Flag.edit_distance(StringFlag.first) < 3)
+ return StringFlag.first;
+ }
+ return llvm::None;
+}
+
+llvm::Optional<llvm::Regex::RegexFlags>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ llvm::Regex::RegexFlags>::getFlags(llvm::StringRef Flags) {
+ llvm::Optional<llvm::Regex::RegexFlags> Flag;
+ SmallVector<StringRef, 4> Split;
+ Flags.split(Split, '|', -1, false);
+ for (StringRef OrFlag : Split) {
+ if (llvm::Optional<llvm::Regex::RegexFlags> NextFlag =
+ getRegexFlag(OrFlag.trim()))
+ Flag = Flag.getValueOr(llvm::Regex::NoFlags) | *NextFlag;
+ else
+ return None;
+ }
+ return Flag;
+}
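Illustrative flag strings the parser above accepts (inputs invented):

    getFlags("IgnoreCase");             // -> RegexFlags::IgnoreCase
    getFlags("IgnoreCase | Newline");   // -> IgnoreCase | Newline
    getFlags("IgnoreCase | Nwline");    // -> llvm::None (unknown flag name)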
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ llvm::Regex::RegexFlags>::getBestGuess(const VariantValue &Value) {
+ if (!Value.isString())
+ return llvm::None;
+ SmallVector<StringRef, 4> Split;
+ llvm::StringRef(Value.getString()).split(Split, '|', -1, false);
+ for (llvm::StringRef &Flag : Split) {
+ if (llvm::Optional<llvm::StringRef> BestGuess =
+ getCloseRegexMatch(Flag.trim()))
+ Flag = *BestGuess;
+ else
+ return None;
+ }
+ if (Split.empty())
+ return None;
+ return llvm::join(Split, " | ");
+}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 9f46108d1848..33f6d1e4155c 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -27,12 +27,15 @@
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
@@ -64,6 +67,10 @@ template <> struct ArgTypeTraits<std::string> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <>
@@ -80,7 +87,11 @@ template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
}
static ArgKind getKind() {
- return ArgKind(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ return ArgKind(ASTNodeKind::getFromNodeKind<T>());
+ }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
}
};
@@ -94,6 +105,10 @@ template <> struct ArgTypeTraits<bool> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Boolean);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<double> {
@@ -106,6 +121,10 @@ template <> struct ArgTypeTraits<double> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Double);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<unsigned> {
@@ -118,6 +137,10 @@ template <> struct ArgTypeTraits<unsigned> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Unsigned);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<attr::Kind> {
@@ -141,13 +164,15 @@ public:
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<CastKind> {
private:
static Optional<CastKind> getCastKind(llvm::StringRef AttrKind) {
return llvm::StringSwitch<Optional<CastKind>>(AttrKind)
-#define CAST_OPERATION(Name) .Case( #Name, CK_##Name)
+#define CAST_OPERATION(Name) .Case("CK_" #Name, CK_##Name)
#include "clang/AST/OperationKinds.def"
.Default(llvm::None);
}
@@ -164,15 +189,34 @@ public:
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+};
+
+template <> struct ArgTypeTraits<llvm::Regex::RegexFlags> {
+private:
+ static Optional<llvm::Regex::RegexFlags> getFlags(llvm::StringRef Flags);
+
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() && getFlags(Value.getString());
+ }
+
+ static llvm::Regex::RegexFlags get(const VariantValue &Value) {
+ return *getFlags(Value.getString());
+ }
+
+ static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<OpenMPClauseKind> {
private:
static Optional<OpenMPClauseKind> getClauseKind(llvm::StringRef ClauseKind) {
return llvm::StringSwitch<Optional<OpenMPClauseKind>>(ClauseKind)
-#define OPENMP_CLAUSE(TextualSpelling, Class) \
- .Case("OMPC_" #TextualSpelling, OMPC_##TextualSpelling)
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) .Case(#Enum, llvm::omp::Clause::Enum)
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
.Default(llvm::None);
}
@@ -186,6 +230,35 @@ public:
}
static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+};
+
+template <> struct ArgTypeTraits<UnaryExprOrTypeTrait> {
+private:
+ static Optional<UnaryExprOrTypeTrait>
+ getUnaryOrTypeTraitKind(llvm::StringRef ClauseKind) {
+ return llvm::StringSwitch<Optional<UnaryExprOrTypeTrait>>(ClauseKind)
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
+ .Case("UETT_" #Name, UETT_##Name)
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
+ .Case("UETT_" #Name, UETT_##Name)
+#include "clang/Basic/TokenKinds.def"
+ .Default(llvm::None);
+ }
+
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() && getUnaryOrTypeTraitKind(Value.getString());
+ }
+
+ static UnaryExprOrTypeTrait get(const VariantValue &Value) {
+ return *getUnaryOrTypeTraitKind(Value.getString());
+ }
+
+ static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
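// Editor's note (not part of the upstream diff): this specialization is what
// lets the dynamic registry accept ofKind("UETT_SizeOf") and friends as plain
// strings (Registry.cpp below now registers ofKind). A hedged sketch of the
// equivalent statically written matcher:
#include "clang/ASTMatchers/ASTMatchers.h"
using namespace clang::ast_matchers;

// Matches the same nodes as the dynamically parsed form
// unaryExprOrTypeTraitExpr(ofKind("UETT_SizeOf")).
static const StatementMatcher SizeofExprs =
    unaryExprOrTypeTraitExpr(ofKind(clang::UETT_SizeOf));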
/// Matcher descriptor interface.
@@ -211,7 +284,7 @@ public:
/// set of argument types accepted for argument \p ArgNo to \p ArgKinds.
// FIXME: We should provide the ability to constrain the output of this
// function based on the types of other matcher arguments.
- virtual void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ virtual void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &ArgKinds) const = 0;
/// Returns whether this matcher is convertible to the given type. If it is
@@ -221,20 +294,19 @@ public:
/// same matcher overload. Zero specificity indicates that this conversion
/// would produce a trivial matcher that will either always or never match.
/// Such matchers are excluded from code completion results.
- virtual bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity = nullptr,
- ast_type_traits::ASTNodeKind *LeastDerivedKind = nullptr) const = 0;
+ virtual bool
+ isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity = nullptr,
+ ASTNodeKind *LeastDerivedKind = nullptr) const = 0;
/// Returns whether the matcher will, given a matcher of any type T, yield a
/// matcher of type T.
virtual bool isPolymorphic() const { return false; }
};
-inline bool isRetKindConvertibleTo(
- ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) {
- for (const ast_type_traits::ASTNodeKind &NodeKind : RetKinds) {
+inline bool isRetKindConvertibleTo(ArrayRef<ASTNodeKind> RetKinds,
+ ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) {
+ for (const ASTNodeKind &NodeKind : RetKinds) {
if (ArgKind(NodeKind).isConvertibleTo(Kind, Specificity)) {
if (LeastDerivedKind)
*LeastDerivedKind = NodeKind;
@@ -264,10 +336,10 @@ public:
/// \param RetKinds The list of matcher types to which the matcher is
/// convertible.
/// \param ArgKinds The types of the arguments this matcher takes.
- FixedArgCountMatcherDescriptor(
- MarshallerType Marshaller, void (*Func)(), StringRef MatcherName,
- ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
- ArrayRef<ArgKind> ArgKinds)
+ FixedArgCountMatcherDescriptor(MarshallerType Marshaller, void (*Func)(),
+ StringRef MatcherName,
+ ArrayRef<ASTNodeKind> RetKinds,
+ ArrayRef<ArgKind> ArgKinds)
: Marshaller(Marshaller), Func(Func), MatcherName(MatcherName),
RetKinds(RetKinds.begin(), RetKinds.end()),
ArgKinds(ArgKinds.begin(), ArgKinds.end()) {}
@@ -281,14 +353,13 @@ public:
bool isVariadic() const override { return false; }
unsigned getNumArgs() const override { return ArgKinds.size(); }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ArgKinds[ArgNo]);
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
LeastDerivedKind);
}
@@ -297,7 +368,7 @@ private:
const MarshallerType Marshaller;
void (* const Func)();
const std::string MatcherName;
- const std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ const std::vector<ASTNodeKind> RetKinds;
const std::vector<ArgKind> ArgKinds;
};
@@ -321,7 +392,7 @@ static void mergePolyMatchers(const PolyMatcher &Poly,
/// polymorphic matcher. For the former, we just construct the VariantMatcher.
/// For the latter, we instantiate all the possible Matcher<T> of the poly
/// matcher.
-static VariantMatcher outvalueToVariantMatcher(const DynTypedMatcher &Matcher) {
+inline VariantMatcher outvalueToVariantMatcher(const DynTypedMatcher &Matcher) {
return VariantMatcher::SingleMatcher(Matcher);
}
@@ -336,36 +407,35 @@ static VariantMatcher outvalueToVariantMatcher(const T &PolyMatcher,
}
template <typename T>
-inline void buildReturnTypeVectorFromTypeList(
- std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(
- ast_type_traits::ASTNodeKind::getFromNodeKind<typename T::head>());
+inline void
+buildReturnTypeVectorFromTypeList(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<typename T::head>());
buildReturnTypeVectorFromTypeList<typename T::tail>(RetTypes);
}
template <>
inline void
buildReturnTypeVectorFromTypeList<ast_matchers::internal::EmptyTypeList>(
- std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {}
+ std::vector<ASTNodeKind> &RetTypes) {}
template <typename T>
struct BuildReturnTypeVector {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
buildReturnTypeVectorFromTypeList<typename T::ReturnTypes>(RetTypes);
}
};
template <typename T>
struct BuildReturnTypeVector<ast_matchers::internal::Matcher<T>> {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<T>());
}
};
template <typename T>
struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T>> {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<T>());
}
};
@@ -439,14 +509,13 @@ public:
bool isVariadic() const override { return true; }
unsigned getNumArgs() const override { return 0; }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ArgsKind);
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
LeastDerivedKind);
}
@@ -454,7 +523,7 @@ public:
private:
const RunFunc Func;
const std::string MatcherName;
- std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ std::vector<ASTNodeKind> RetKinds;
const ArgKind ArgsKind;
};
@@ -466,12 +535,10 @@ public:
ast_matchers::internal::VariadicDynCastAllOfMatcher<BaseT, DerivedT> Func,
StringRef MatcherName)
: VariadicFuncMatcherDescriptor(Func, MatcherName),
- DerivedKind(ast_type_traits::ASTNodeKind::getFromNodeKind<DerivedT>()) {
- }
+ DerivedKind(ASTNodeKind::getFromNodeKind<DerivedT>()) {}
- bool
- isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
// If Kind is not a base of DerivedKind, either DerivedKind is a base of
// Kind (in which case the match will always succeed) or Kind and
// DerivedKind are unrelated (in which case it will always fail), so set
@@ -489,7 +556,7 @@ public:
}
private:
- const ast_type_traits::ASTNodeKind DerivedKind;
+ const ASTNodeKind DerivedKind;
};
/// Helper macros to check the arguments on all marshaller functions.
@@ -502,9 +569,16 @@ private:
#define CHECK_ARG_TYPE(index, type) \
if (!ArgTypeTraits<type>::is(Args[index].Value)) { \
- Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
- << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
- << Args[index].Value.getTypeAsString(); \
+ if (llvm::Optional<std::string> BestGuess = \
+ ArgTypeTraits<type>::getBestGuess(Args[index].Value)) { \
+ Error->addError(Args[index].Range, \
+ Error->ET_RegistryUnknownEnumWithReplace) \
+ << index + 1 << Args[index].Value.getString() << *BestGuess; \
+ } else { \
+ Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
+ << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
+ << Args[index].Value.getTypeAsString(); \
+ } \
return VariantMatcher(); \
}
@@ -635,7 +709,7 @@ public:
return Overload0NumArgs;
}
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
for (const auto &O : Overloads) {
if (O->isConvertibleTo(ThisKind))
@@ -643,9 +717,8 @@ public:
}
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
for (const auto &O : Overloads) {
if (O->isConvertibleTo(Kind, Specificity, LeastDerivedKind))
return true;
@@ -657,6 +730,71 @@ private:
std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
};
+template <typename ReturnType>
+class RegexMatcherDescriptor : public MatcherDescriptor {
+public:
+ RegexMatcherDescriptor(ReturnType (*WithFlags)(StringRef,
+ llvm::Regex::RegexFlags),
+ ReturnType (*NoFlags)(StringRef),
+ ArrayRef<ASTNodeKind> RetKinds)
+ : WithFlags(WithFlags), NoFlags(NoFlags),
+ RetKinds(RetKinds.begin(), RetKinds.end()) {}
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
+
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ assert(ArgNo < 2);
+ Kinds.push_back(ArgKind::AK_String);
+ }
+
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
+ return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+ LeastDerivedKind);
+ }
+
+ VariantMatcher create(SourceRange NameRange, ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ if (Args.size() < 1 || Args.size() > 2) {
+ Error->addError(NameRange, Diagnostics::ET_RegistryWrongArgCount)
+ << "1 or 2" << Args.size();
+ return VariantMatcher();
+ }
+ if (!ArgTypeTraits<StringRef>::is(Args[0].Value)) {
+ Error->addError(Args[0].Range, Error->ET_RegistryWrongArgType)
+ << 1 << ArgTypeTraits<StringRef>::getKind().asString()
+ << Args[0].Value.getTypeAsString();
+ return VariantMatcher();
+ }
+ if (Args.size() == 1) {
+ return outvalueToVariantMatcher(
+ NoFlags(ArgTypeTraits<StringRef>::get(Args[0].Value)));
+ }
+ if (!ArgTypeTraits<llvm::Regex::RegexFlags>::is(Args[1].Value)) {
+ if (llvm::Optional<std::string> BestGuess =
+ ArgTypeTraits<llvm::Regex::RegexFlags>::getBestGuess(
+ Args[1].Value)) {
+ Error->addError(Args[1].Range, Error->ET_RegistryUnknownEnumWithReplace)
+ << 2 << Args[1].Value.getString() << *BestGuess;
+ } else {
+ Error->addError(Args[1].Range, Error->ET_RegistryWrongArgType)
+ << 2 << ArgTypeTraits<llvm::Regex::RegexFlags>::getKind().asString()
+ << Args[1].Value.getTypeAsString();
+ }
+ return VariantMatcher();
+ }
+ return outvalueToVariantMatcher(
+ WithFlags(ArgTypeTraits<StringRef>::get(Args[0].Value),
+ ArgTypeTraits<llvm::Regex::RegexFlags>::get(Args[1].Value)));
+ }
+
+private:
+ ReturnType (*const WithFlags)(StringRef, llvm::Regex::RegexFlags);
+ ReturnType (*const NoFlags)(StringRef);
+ const std::vector<ASTNodeKind> RetKinds;
+};
+
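// Editor's note (not part of the upstream diff): RegexMatcherDescriptor
// accepts one or two string arguments and dispatches to the flag-less or the
// flag-taking overload of the underlying matcher. From the static (C++) side
// the same overload pair looks like this (hedged usage sketch):
#include "clang/ASTMatchers/ASTMatchers.h"
#include "llvm/Support/Regex.h"
using namespace clang::ast_matchers;

// Any NamedDecl whose qualified name matches "foo", ignoring case; the dynamic
// form passes the flags as a second string argument instead.
static const DeclarationMatcher FooDecls =
    namedDecl(matchesName("foo", llvm::Regex::IgnoreCase));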
/// Variadic operator marshaller function.
class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
public:
@@ -697,13 +835,13 @@ public:
bool isVariadic() const override { return true; }
unsigned getNumArgs() const override { return 0; }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ThisKind);
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
if (Specificity)
*Specificity = 1;
if (LeastDerivedKind)
@@ -727,7 +865,7 @@ private:
template <typename ReturnType>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
return std::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
@@ -738,7 +876,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
template <typename ReturnType, typename ArgType1>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1), StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AK = ArgTypeTraits<ArgType1>::getKind();
return std::make_unique<FixedArgCountMatcherDescriptor>(
@@ -751,7 +889,7 @@ template <typename ReturnType, typename ArgType1, typename ArgType2>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AKs[] = { ArgTypeTraits<ArgType1>::getKind(),
ArgTypeTraits<ArgType2>::getKind() };
@@ -760,6 +898,16 @@ makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
}
+template <typename ReturnType>
+std::unique_ptr<MatcherDescriptor> makeMatcherRegexMarshall(
+ ReturnType (*FuncFlags)(llvm::StringRef, llvm::Regex::RegexFlags),
+ ReturnType (*Func)(llvm::StringRef)) {
+ std::vector<ASTNodeKind> RetTypes;
+ BuildReturnTypeVector<ReturnType>::build(RetTypes);
+ return std::make_unique<RegexMatcherDescriptor<ReturnType>>(FuncFlags, Func,
+ RetTypes);
+}
+
/// Variadic overload.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 1c0930c5983a..ec2215804c09 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -90,14 +90,14 @@ void RegistryMaps::registerMatcher(
REGISTER_MATCHER_OVERLOAD(name); \
} while (false)
+#define REGISTER_REGEX_MATCHER(name) \
+ registerMatcher(#name, internal::makeMatcherRegexMarshall(name, name))
+
/// Generate a registry map with all the known matchers.
/// Please keep sorted alphabetically!
RegistryMaps::RegistryMaps() {
// TODO: Here is the list of the missing matchers, grouped by reason.
//
- // Need Variant/Parser fixes:
- // ofKind
- //
// Polymorphic + argument overload:
// findAll
//
@@ -124,6 +124,10 @@ RegistryMaps::RegistryMaps() {
};
REGISTER_MATCHER_OVERLOAD(equals);
+ REGISTER_REGEX_MATCHER(isExpansionInFileMatching);
+ REGISTER_REGEX_MATCHER(matchesName);
+ REGISTER_REGEX_MATCHER(matchesSelector);
+
REGISTER_MATCHER(accessSpecDecl);
REGISTER_MATCHER(addrLabelExpr);
REGISTER_MATCHER(alignOfExpr);
@@ -154,6 +158,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(characterLiteral);
REGISTER_MATCHER(chooseExpr);
REGISTER_MATCHER(classTemplateDecl);
+ REGISTER_MATCHER(classTemplatePartialSpecializationDecl);
REGISTER_MATCHER(classTemplateSpecializationDecl);
REGISTER_MATCHER(complexType);
REGISTER_MATCHER(compoundLiteralExpr);
@@ -183,6 +188,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxMemberCallExpr);
REGISTER_MATCHER(cxxMethodDecl);
REGISTER_MATCHER(cxxNewExpr);
+ REGISTER_MATCHER(cxxNoexceptExpr);
REGISTER_MATCHER(cxxNullPtrLiteralExpr);
REGISTER_MATCHER(cxxOperatorCallExpr);
REGISTER_MATCHER(cxxRecordDecl);
@@ -201,6 +207,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(declStmt);
REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(decltypeType);
+ REGISTER_MATCHER(deducedTemplateSpecializationType);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentSizedArrayType);
REGISTER_MATCHER(designatedInitExpr);
@@ -237,11 +244,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(has);
REGISTER_MATCHER(hasAncestor);
REGISTER_MATCHER(hasAnyArgument);
+ REGISTER_MATCHER(hasAnyBase);
REGISTER_MATCHER(hasAnyClause);
REGISTER_MATCHER(hasAnyConstructorInitializer);
REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
+ REGISTER_MATCHER(hasAnyOperatorName);
+ REGISTER_MATCHER(hasAnyOverloadedOperatorName);
REGISTER_MATCHER(hasAnyParameter);
+ REGISTER_MATCHER(hasAnyPlacementArg);
REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasAnySubstatement);
REGISTER_MATCHER(hasAnyTemplateArgument);
@@ -267,6 +278,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasDefinition);
REGISTER_MATCHER(hasDescendant);
REGISTER_MATCHER(hasDestinationType);
+ REGISTER_MATCHER(hasDirectBase);
REGISTER_MATCHER(hasDynamicExceptionSpec);
REGISTER_MATCHER(hasEitherOperand);
REGISTER_MATCHER(hasElementType);
@@ -292,6 +304,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasName);
REGISTER_MATCHER(hasNullSelector);
REGISTER_MATCHER(hasObjectExpression);
+ REGISTER_MATCHER(hasOperands);
REGISTER_MATCHER(hasOperatorName);
REGISTER_MATCHER(hasOverloadedOperatorName);
REGISTER_MATCHER(hasParameter);
@@ -303,6 +316,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasReceiverType);
REGISTER_MATCHER(hasReplacementType);
REGISTER_MATCHER(hasReturnValue);
+ REGISTER_MATCHER(hasPlacementArg);
REGISTER_MATCHER(hasSelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
@@ -347,12 +361,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isArray);
REGISTER_MATCHER(isArrow);
REGISTER_MATCHER(isAssignmentOperator);
+ REGISTER_MATCHER(isAtPosition);
REGISTER_MATCHER(isBaseInitializer);
REGISTER_MATCHER(isBitField);
REGISTER_MATCHER(isCatchAll);
REGISTER_MATCHER(isClass);
REGISTER_MATCHER(isClassMessage);
REGISTER_MATCHER(isClassMethod);
+ REGISTER_MATCHER(isComparisonOperator);
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
REGISTER_MATCHER(isConstexpr);
@@ -363,8 +379,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isDefinition);
REGISTER_MATCHER(isDelegatingConstructor);
REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isEnum);
REGISTER_MATCHER(isExceptionVariable);
- REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpandedFromMacro);
REGISTER_MATCHER(isExpansionInMainFile);
REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isExplicit);
@@ -372,6 +389,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isFirstPrivateKind);
REGISTER_MATCHER(isImplicit);
REGISTER_MATCHER(isInStdNamespace);
REGISTER_MATCHER(isInTemplateInstantiation);
@@ -391,7 +409,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isNoReturn);
REGISTER_MATCHER(isNoThrow);
REGISTER_MATCHER(isNoneKind);
- REGISTER_MATCHER(isOMPStructuredBlock);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
@@ -420,8 +437,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(labelStmt);
REGISTER_MATCHER(lambdaExpr);
REGISTER_MATCHER(linkageSpecDecl);
- REGISTER_MATCHER(matchesName);
- REGISTER_MATCHER(matchesSelector);
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
REGISTER_MATCHER(memberExpr);
@@ -452,6 +467,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(ofKind);
REGISTER_MATCHER(ompDefaultClause);
REGISTER_MATCHER(ompExecutableDirective);
REGISTER_MATCHER(on);
@@ -492,6 +508,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(substTemplateTypeParmType);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(tagDecl);
REGISTER_MATCHER(tagType);
REGISTER_MATCHER(templateArgument);
REGISTER_MATCHER(templateArgumentCountIs);
@@ -652,7 +669,7 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
OS << "...";
OS << ")";
- std::string TypedText = Name;
+ std::string TypedText = std::string(Name);
TypedText += "(";
if (ArgsKinds.empty())
TypedText += ")";
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 118ca2a41cb1..866e2d0e3491 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -1,4 +1,4 @@
-//===--- VariantValue.cpp - Polymorphic value type -*- C++ -*-===/
+//===--- VariantValue.cpp - Polymorphic value type --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -101,8 +101,7 @@ public:
return llvm::None;
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
return ArgKind(Matcher.getSupportedKind())
.isConvertibleTo(Kind, Specificity);
}
@@ -159,8 +158,7 @@ public:
return llvm::None;
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
unsigned MaxSpecificity = 0;
for (const DynTypedMatcher &Matcher : Matchers) {
unsigned ThisSpecificity;
@@ -202,8 +200,7 @@ public:
return Ops.constructVariadicOperator(Op, Args);
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
for (const VariantMatcher &Matcher : Args) {
if (!Matcher.isConvertibleTo(Kind, Specificity))
return false;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp
new file mode 100644
index 000000000000..c99fdf6c0fcd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp
@@ -0,0 +1,104 @@
+//===- GtestMatchers.cpp - AST Matchers for Gtest ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/GtestMatchers.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Timer.h"
+#include <deque>
+#include <memory>
+#include <set>
+
+namespace clang {
+namespace ast_matchers {
+
+static DeclarationMatcher getComparisonDecl(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return cxxMethodDecl(hasName("Compare"),
+ ofClass(cxxRecordDecl(isSameOrDerivedFrom(
+ hasName("::testing::internal::EqHelper")))));
+ case GtestCmp::Ne:
+ return functionDecl(hasName("::testing::internal::CmpHelperNE"));
+ case GtestCmp::Ge:
+ return functionDecl(hasName("::testing::internal::CmpHelperGE"));
+ case GtestCmp::Gt:
+ return functionDecl(hasName("::testing::internal::CmpHelperGT"));
+ case GtestCmp::Le:
+ return functionDecl(hasName("::testing::internal::CmpHelperLE"));
+ case GtestCmp::Lt:
+ return functionDecl(hasName("::testing::internal::CmpHelperLT"));
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+static llvm::StringRef getAssertMacro(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return "ASSERT_EQ";
+ case GtestCmp::Ne:
+ return "ASSERT_NE";
+ case GtestCmp::Ge:
+ return "ASSERT_GE";
+ case GtestCmp::Gt:
+ return "ASSERT_GT";
+ case GtestCmp::Le:
+ return "ASSERT_LE";
+ case GtestCmp::Lt:
+ return "ASSERT_LT";
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+static llvm::StringRef getExpectMacro(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return "EXPECT_EQ";
+ case GtestCmp::Ne:
+ return "EXPECT_NE";
+ case GtestCmp::Ge:
+ return "EXPECT_GE";
+ case GtestCmp::Gt:
+ return "EXPECT_GT";
+ case GtestCmp::Le:
+ return "EXPECT_LE";
+ case GtestCmp::Lt:
+ return "EXPECT_LT";
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+// In general, AST matchers cannot match calls to macros. However, we can
+// simulate such matches if the macro definition has identifiable elements that
+// themselves can be matched. In that case, we can match on those elements and
+// then check that the match occurs within an expansion of the desired
+// macro. The more uncommon the identified elements, the more efficient this
+// process will be.
+//
+// We use this approach to implement the derived matchers gtestAssert and
+// gtestExpect.
+internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right) {
+ return callExpr(callee(getComparisonDecl(Cmp)),
+ isExpandedFromMacro(getAssertMacro(Cmp)),
+ hasArgument(2, Left), hasArgument(3, Right));
+}
+
+internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right) {
+ return callExpr(callee(getComparisonDecl(Cmp)),
+ isExpandedFromMacro(getExpectMacro(Cmp)),
+ hasArgument(2, Left), hasArgument(3, Right));
+}
+
+} // end namespace ast_matchers
+} // end namespace clang
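// Editor's note (not part of the upstream diff): a hedged usage sketch of the
// new gtest matchers. gtestExpect/gtestAssert match the CmpHelper*/EqHelper
// calls that the EXPECT_*/ASSERT_* macros expand to, so the user-visible
// operands are arguments 2 and 3 of that call.
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/GtestMatchers.h"
using namespace clang::ast_matchers;

// Matches EXPECT_EQ(<anything>, 0) assertions whose right-hand side is the
// integer literal 0.
static const StatementMatcher ExpectEqZero =
    gtestExpect(GtestCmp::Eq, expr(), integerLiteral(equals(0)));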
diff --git a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
index 9f58b5079c76..783de6442645 100644
--- a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -50,18 +50,18 @@
using namespace clang;
-using ManagedAnalysisMap = llvm::DenseMap<const void *, ManagedAnalysis *>;
+using ManagedAnalysisMap = llvm::DenseMap<const void *, std::unique_ptr<ManagedAnalysis>>;
-AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d,
- const CFG::BuildOptions &buildOptions)
- : Manager(Mgr), D(d), cfgBuildOptions(buildOptions) {
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *ADCMgr,
+ const Decl *D,
+ const CFG::BuildOptions &Options)
+ : ADCMgr(ADCMgr), D(D), cfgBuildOptions(Options) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
-AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d)
- : Manager(Mgr), D(d) {
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *ADCMgr,
+ const Decl *D)
+ : ADCMgr(ADCMgr), D(D) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
@@ -96,8 +96,8 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
Stmt *Body = FD->getBody();
if (auto *CoroBody = dyn_cast_or_null<CoroutineBodyStmt>(Body))
Body = CoroBody->getBody();
- if (Manager && Manager->synthesizeBodies()) {
- Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(FD);
+ if (ADCMgr && ADCMgr->synthesizeBodies()) {
+ Stmt *SynthesizedBody = ADCMgr->getBodyFarm().getBody(FD);
if (SynthesizedBody) {
Body = SynthesizedBody;
IsAutosynthesized = true;
@@ -107,8 +107,8 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
Stmt *Body = MD->getBody();
- if (Manager && Manager->synthesizeBodies()) {
- Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(MD);
+ if (ADCMgr && ADCMgr->synthesizeBodies()) {
+ Stmt *SynthesizedBody = ADCMgr->getBodyFarm().getBody(MD);
if (SynthesizedBody) {
Body = SynthesizedBody;
IsAutosynthesized = true;
@@ -309,19 +309,17 @@ AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
BodyFarm &AnalysisDeclContextManager::getBodyFarm() { return FunctionBodyFarm; }
const StackFrameContext *
-AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
- const CFGBlock *Blk, unsigned BlockCount,
- unsigned Idx) {
- return getLocationContextManager().getStackFrame(this, Parent, S, Blk,
- BlockCount, Idx);
+AnalysisDeclContext::getStackFrame(const LocationContext *ParentLC,
+ const Stmt *S, const CFGBlock *Blk,
+ unsigned BlockCount, unsigned Index) {
+ return getLocationContextManager().getStackFrame(this, ParentLC, S, Blk,
+ BlockCount, Index);
}
-const BlockInvocationContext *
-AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData) {
- return getLocationContextManager().getBlockInvocationContext(this, parent,
- BD, ContextData);
+const BlockInvocationContext *AnalysisDeclContext::getBlockInvocationContext(
+ const LocationContext *ParentLC, const BlockDecl *BD, const void *Data) {
+ return getLocationContextManager().getBlockInvocationContext(this, ParentLC,
+ BD, Data);
}
bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
@@ -340,9 +338,10 @@ bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
}
LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
- assert(Manager &&
- "Cannot create LocationContexts without an AnalysisDeclContextManager!");
- return Manager->getLocationContextManager();
+ assert(
+ ADCMgr &&
+ "Cannot create LocationContexts without an AnalysisDeclContextManager!");
+ return ADCMgr->getLocationContextManager();
}
//===----------------------------------------------------------------------===//
@@ -365,36 +364,14 @@ void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
BlockCount, Index);
}
-void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
-}
-
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD, Data);
}
//===----------------------------------------------------------------------===//
// LocationContext creation.
//===----------------------------------------------------------------------===//
-template <typename LOC, typename DATA>
-const LOC*
-LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const DATA *d) {
- llvm::FoldingSetNodeID ID;
- LOC::Profile(ID, ctx, parent, d);
- void *InsertPos;
-
- LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
-
- if (!L) {
- L = new LOC(ctx, parent, d, ++NewID);
- Contexts.InsertNode(L, InsertPos);
- }
- return L;
-}
-
const StackFrameContext *LocationContextManager::getStackFrame(
AnalysisDeclContext *ctx, const LocationContext *parent, const Stmt *s,
const CFGBlock *blk, unsigned blockCount, unsigned idx) {
@@ -410,26 +387,17 @@ const StackFrameContext *LocationContextManager::getStackFrame(
return L;
}
-const ScopeContext *
-LocationContextManager::getScope(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const Stmt *s) {
- return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
-}
-
-const BlockInvocationContext *
-LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData) {
+const BlockInvocationContext *LocationContextManager::getBlockInvocationContext(
+ AnalysisDeclContext *ADC, const LocationContext *ParentLC,
+ const BlockDecl *BD, const void *Data) {
llvm::FoldingSetNodeID ID;
- BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
+ BlockInvocationContext::Profile(ID, ADC, ParentLC, BD, Data);
void *InsertPos;
auto *L =
cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
InsertPos));
if (!L) {
- L = new BlockInvocationContext(ctx, parent, BD, ContextData, ++NewID);
+ L = new BlockInvocationContext(ADC, ParentLC, BD, Data, ++NewID);
Contexts.InsertNode(L, InsertPos);
}
return L;
@@ -473,9 +441,7 @@ static void printLocation(raw_ostream &Out, const SourceManager &SM,
Loc.print(Out, SM);
}
-void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
- std::function<void(const LocationContext *)>
- printMoreInfoPerContext) const {
+void LocationContext::dumpStack(raw_ostream &Out) const {
ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
PrintingPolicy PP(Ctx.getLangOpts());
PP.TerseOutput = 1;
@@ -498,9 +464,6 @@ void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
printLocation(Out, SM, S->getBeginLoc());
}
break;
- case Scope:
- Out << "Entering scope";
- break;
case Block:
Out << "Invoking block";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
@@ -509,9 +472,7 @@ void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
}
break;
}
- Out << NL;
-
- printMoreInfoPerContext(LCtx);
+ Out << '\n';
}
}
@@ -548,9 +509,6 @@ void LocationContext::printJson(raw_ostream &Out, const char *NL,
Out << ", \"items\": ";
break;
- case Scope:
- Out << "Entering scope\" ";
- break;
case Block:
Out << "Invoking block\" ";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
@@ -659,7 +617,7 @@ AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
return llvm::make_range(V->begin(), V->end());
}
-ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
+std::unique_ptr<ManagedAnalysis> &AnalysisDeclContext::getAnalysisImpl(const void *tag) {
if (!ManagedAnalyses)
ManagedAnalyses = new ManagedAnalysisMap();
ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
@@ -675,12 +633,7 @@ ManagedAnalysis::~ManagedAnalysis() = default;
AnalysisDeclContext::~AnalysisDeclContext() {
delete forcedBlkExprs;
delete ReferencedBlockVars;
- // Release the managed analyses.
- if (ManagedAnalyses) {
- ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
- llvm::DeleteContainerSeconds(*M);
- delete M;
- }
+ delete (ManagedAnalysisMap*) ManagedAnalyses;
}
LocationContext::~LocationContext() = default;
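// Editor's note (not part of the upstream diff): managed analyses are now
// owned by std::unique_ptr entries in the per-context map, replacing the
// manual DeleteContainerSeconds cleanup. Client code keeps using the templated
// accessor; a hedged usage sketch:
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"
using namespace clang;

const PostOrderCFGView *getPostOrder(AnalysisDeclContext &AC) {
  // Built lazily on first request and cached; the context owns the result.
  return AC.getAnalysis<PostOrderCFGView>();
}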
diff --git a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
index 1a7891550542..f9f0553d28f0 100644
--- a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
@@ -114,21 +114,19 @@ private:
BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
QualType Ty) {
- return new (C) BinaryOperator(const_cast<Expr*>(LHS), const_cast<Expr*>(RHS),
- BO_Assign, Ty, VK_RValue,
- OK_Ordinary, SourceLocation(), FPOptions());
+ return BinaryOperator::Create(
+ C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), BO_Assign, Ty,
+ VK_RValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
}
BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
BinaryOperator::Opcode Op) {
assert(BinaryOperator::isLogicalOp(Op) ||
BinaryOperator::isComparisonOp(Op));
- return new (C) BinaryOperator(const_cast<Expr*>(LHS),
- const_cast<Expr*>(RHS),
- Op,
- C.getLogicalOperationType(),
- VK_RValue,
- OK_Ordinary, SourceLocation(), FPOptions());
+ return BinaryOperator::Create(
+ C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), Op,
+ C.getLogicalOperationType(), VK_RValue, OK_Ordinary, SourceLocation(),
+ FPOptionsOverride());
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
@@ -147,9 +145,9 @@ DeclRefExpr *ASTMaker::makeDeclRefExpr(
}
UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
- return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
+ return UnaryOperator::Create(C, const_cast<Expr *>(Arg), UO_Deref, Ty,
VK_LValue, OK_Ordinary, SourceLocation(),
- /*CanOverflow*/ false);
+ /*CanOverflow*/ false, FPOptionsOverride());
}
ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
@@ -296,7 +294,8 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/*Args=*/CallArgs,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation(), FPOptions());
+ /*SourceLocation=*/SourceLocation(),
+ /*FPFeatures=*/FPOptionsOverride());
}
/// Create a fake body for std::call_once.
@@ -447,15 +446,16 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
QualType DerefType = Deref->getType();
// Negation predicate.
- UnaryOperator *FlagCheck = new (C) UnaryOperator(
+ UnaryOperator *FlagCheck = UnaryOperator::Create(
+ C,
/* input=*/
M.makeImplicitCast(M.makeLvalueToRvalue(Deref, DerefType), DerefType,
CK_IntegralToBoolean),
- /* opc=*/ UO_LNot,
- /* QualType=*/ C.IntTy,
- /* ExprValueKind=*/ VK_RValue,
- /* ExprObjectKind=*/ OK_Ordinary, SourceLocation(),
- /* CanOverflow*/ false);
+ /* opc=*/UO_LNot,
+ /* QualType=*/C.IntTy,
+ /* ExprValueKind=*/VK_RValue,
+ /* ExprObjectKind=*/OK_Ordinary, SourceLocation(),
+ /* CanOverflow*/ false, FPOptionsOverride());
// Create assignment.
BinaryOperator *FlagAssignment = M.makeAssignment(
@@ -518,9 +518,9 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
// (2) Create the assignment to the predicate.
Expr *DoneValue =
- new (C) UnaryOperator(M.makeIntegerLiteral(0, C.LongTy), UO_Not, C.LongTy,
- VK_RValue, OK_Ordinary, SourceLocation(),
- /*CanOverflow*/false);
+ UnaryOperator::Create(C, M.makeIntegerLiteral(0, C.LongTy), UO_Not,
+ C.LongTy, VK_RValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/ false, FPOptionsOverride());
BinaryOperator *B =
M.makeAssignment(
@@ -762,7 +762,7 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
return nullptr;
// Ignore weak variables, which have special behavior.
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
return nullptr;
// Look to see if Sema has synthesized a body for us. This happens in
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index 4c1ea8995f9f..fc74226951a4 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -223,8 +223,6 @@ private:
///
class LocalScope {
public:
- friend class const_iterator;
-
using AutomaticVarsTy = BumpVector<VarDecl *>;
/// const_iterator - Iterates local scope backwards and jumps to previous
@@ -720,10 +718,10 @@ private:
// These sorts of call expressions don't have a common superclass,
// hence strict duck-typing.
template <typename CallLikeExpr,
- typename = typename std::enable_if<
- std::is_same<CallLikeExpr, CallExpr>::value ||
- std::is_same<CallLikeExpr, CXXConstructExpr>::value ||
- std::is_same<CallLikeExpr, ObjCMessageExpr>::value>>
+ typename = std::enable_if_t<
+ std::is_base_of<CallExpr, CallLikeExpr>::value ||
+ std::is_base_of<CXXConstructExpr, CallLikeExpr>::value ||
+ std::is_base_of<ObjCMessageExpr, CallLikeExpr>::value>>
void findConstructionContextsForArguments(CallLikeExpr *E) {
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Expr *Arg = E->getArg(i);
@@ -2839,11 +2837,30 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
/// DeclStmts and initializers in them.
CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
assert(DS->isSingleDecl() && "Can handle single declarations only.");
+
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(DS->getSingleDecl())) {
+ // If we encounter a VLA, process its size expressions.
+ const Type *T = TND->getUnderlyingType().getTypePtr();
+ if (!T->isVariablyModifiedType())
+ return Block;
+
+ autoCreateBlock();
+ appendStmt(Block, DS);
+
+ CFGBlock *LastBlock = Block;
+ for (const VariableArrayType *VA = FindVA(T); VA != nullptr;
+ VA = FindVA(VA->getElementType().getTypePtr())) {
+ if (CFGBlock *NewBlock = addStmt(VA->getSizeExpr()))
+ LastBlock = NewBlock;
+ }
+ return LastBlock;
+ }
+
VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
if (!VD) {
- // Of everything that can be declared in a DeclStmt, only VarDecls impact
- // runtime semantics.
+ // Of everything that can be declared in a DeclStmt, only VarDecls and the
+ // exceptions above impact runtime semantics.
return Block;
}
@@ -2905,6 +2922,8 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
}
// If the type of VD is a VLA, then we must process its size expressions.
+ // FIXME: This does not find the VLA if it is embedded in other types,
+ // like here: `int (*p_vla)[x];`
for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
VA != nullptr; VA = FindVA(VA->getElementType().getTypePtr())) {
if (CFGBlock *newBlock = addStmt(VA->getSizeExpr()))
@@ -3997,6 +4016,11 @@ CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
}
// VLA types have expressions that must be evaluated.
+ // Evaluation is done only for `sizeof`.
+
+ if (E->getKind() != UETT_SizeOf)
+ return Block;
+
CFGBlock *lastBlock = Block;
if (E->isArgumentType()) {
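// Editor's note (not part of the upstream diff): the two CFG changes above
// both concern variably-modified types. A small illustration of the cases
// they cover (VLAs are a C99 feature that clang also accepts as an extension
// in C++):
int bound();

void f() {
  typedef int VLA[bound()]; // DeclStmt holding only a TypedefNameDecl: the
                            // size expression bound() is now added to the CFG.
  (void)sizeof(VLA);        // of the UnaryExprOrTypeTrait kinds, only sizeof
                            // evaluates the VLA size expression, so only it
                            // extends the CFG.
}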
diff --git a/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
index 76be292dad8d..59cc939b6fd1 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
@@ -66,16 +66,16 @@ public:
return nullptr;
}
- void addCalledDecl(Decl *D) {
- if (G->includeInGraph(D)) {
+ void addCalledDecl(Decl *D, Expr *CallExpr) {
+ if (G->includeCalleeInGraph(D)) {
CallGraphNode *CalleeNode = G->getOrInsertNode(D);
- CallerNode->addCallee(CalleeNode);
+ CallerNode->addCallee({CalleeNode, CallExpr});
}
}
void VisitCallExpr(CallExpr *CE) {
if (Decl *D = getDeclFromCall(CE))
- addCalledDecl(D);
+ addCalledDecl(D, CE);
VisitChildren(CE);
}
@@ -89,14 +89,14 @@ public:
void VisitCXXNewExpr(CXXNewExpr *E) {
if (FunctionDecl *FD = E->getOperatorNew())
- addCalledDecl(FD);
+ addCalledDecl(FD, E);
VisitChildren(E);
}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
CXXConstructorDecl *Ctor = E->getConstructor();
if (FunctionDecl *Def = Ctor->getDefinition())
- addCalledDecl(Def);
+ addCalledDecl(Def, E);
VisitChildren(E);
}
@@ -122,7 +122,7 @@ public:
else
D = IDecl->lookupPrivateClassMethod(Sel);
if (D) {
- addCalledDecl(D);
+ addCalledDecl(D, ME);
NumObjCCallEdges++;
}
}
@@ -157,6 +157,10 @@ bool CallGraph::includeInGraph(const Decl *D) {
if (!D->hasBody())
return false;
+ return includeCalleeInGraph(D);
+}
+
+bool CallGraph::includeCalleeInGraph(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// We skip function template definitions, as their semantics is
// only determined when they are instantiated.
@@ -207,7 +211,7 @@ CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
Node = std::make_unique<CallGraphNode>(F);
// Make Root node a parent of all functions to make sure all are reachable.
if (F)
- Root->addCallee(Node.get());
+ Root->addCallee({Node.get(), /*Call=*/nullptr});
return Node.get();
}
@@ -230,8 +234,8 @@ void CallGraph::print(raw_ostream &OS) const {
OS << " calls: ";
for (CallGraphNode::const_iterator CI = N->begin(),
CE = N->end(); CI != CE; ++CI) {
- assert(*CI != Root && "No one can call the root node.");
- (*CI)->print(OS);
+ assert(CI->Callee != Root && "No one can call the root node.");
+ CI->Callee->print(OS);
OS << " ";
}
OS << '\n';
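// Editor's note (not part of the upstream diff): call-graph edges now carry
// the expression performing the call alongside the callee node. A hedged
// iteration sketch; the second record member is assumed here to be named
// CallExpr, matching the {CalleeNode, CallExpr} construction above.
#include "clang/Analysis/CallGraph.h"
#include "clang/Basic/SourceManager.h"
using namespace clang;

void dumpCallees(const CallGraphNode *Caller, const SourceManager &SM,
                 llvm::raw_ostream &OS) {
  for (const CallGraphNode::CallRecord &Edge : *Caller) {
    Edge.Callee->print(OS);             // the callee, as before
    if (const Expr *CE = Edge.CallExpr) // new: call site (null on Root edges)
      OS << " at " << CE->getBeginLoc().printToString(SM);
    OS << '\n';
  }
}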
diff --git a/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp b/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
index 5fb5840ce293..0a1122bd5a4a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DataCollection.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index fb5a139e82ab..2f80285f17b4 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -43,9 +43,6 @@ AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
return Node.isPotentiallyEvaluated();
}
-const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
- cxxNoexceptExpr;
-
const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt,
GenericSelectionExpr>
genericSelectionExpr;
@@ -76,10 +73,10 @@ const auto isMoveOnly = [] {
};
template <class T> struct NodeID;
-template <> struct NodeID<Expr> { static const std::string value; };
-template <> struct NodeID<Decl> { static const std::string value; };
-const std::string NodeID<Expr>::value = "expr";
-const std::string NodeID<Decl>::value = "decl";
+template <> struct NodeID<Expr> { static constexpr StringRef value = "expr"; };
+template <> struct NodeID<Decl> { static constexpr StringRef value = "decl"; };
+constexpr StringRef NodeID<Expr>::value;
+constexpr StringRef NodeID<Decl>::value;
template <class T, class F = const Stmt *(ExprMutationAnalyzer::*)(const T *)>
const Stmt *tryEachMatch(ArrayRef<ast_matchers::BoundNodes> Matches,
@@ -204,14 +201,15 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
- const auto AsAssignmentLhs =
- binaryOperator(isAssignmentOperator(),
- hasLHS(maybeEvalCommaExpr(equalsNode(Exp))));
+ const auto AsAssignmentLhs = binaryOperator(
+ isAssignmentOperator(),
+ hasLHS(maybeEvalCommaExpr(ignoringParenImpCasts(equalsNode(Exp)))));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
- hasUnaryOperand(maybeEvalCommaExpr(equalsNode(Exp))));
+ hasUnaryOperand(maybeEvalCommaExpr(
+ ignoringParenImpCasts(equalsNode(Exp)))));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
@@ -283,13 +281,15 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const auto AsNonConstRefReturn = returnStmt(hasReturnValue(
maybeEvalCommaExpr(equalsNode(Exp))));
- const auto Matches =
- match(findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
- AsAmpersandOperand, AsPointerFromArrayDecay,
- AsOperatorArrowThis, AsNonConstRefArg,
- AsLambdaRefCaptureInit, AsNonConstRefReturn))
- .bind("stmt")),
- Stm, Context);
+ const auto Matches = match(
+ traverse(
+ ast_type_traits::TK_AsIs,
+ findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
+ AsAmpersandOperand, AsPointerFromArrayDecay,
+ AsOperatorArrowThis, AsNonConstRefArg,
+ AsLambdaRefCaptureInit, AsNonConstRefReturn))
+ .bind("stmt"))),
+ Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
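// Editor's note (not part of the upstream diff): the matcher rewrites above
// pin the search to TK_AsIs traversal and look through parentheses and
// implicit casts when comparing operands against Exp. A hedged sketch of how
// the analyzer is typically driven:
#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
using namespace clang;

bool isMutatedWithin(const Expr *E, const Stmt *Scope, ASTContext &Ctx) {
  ExprMutationAnalyzer Analyzer(*Scope, Ctx);
  return Analyzer.isMutated(E); // true if any (possibly indirect) mutating use
}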
@@ -388,12 +388,15 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
const auto IsInstantiated = hasDeclaration(isInstantiated());
const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));
const auto Matches = match(
- findAll(expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
+ traverse(
+ ast_type_traits::TK_AsIs,
+ findAll(
+ expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
unless(callee(namedDecl(hasAnyName(
"::std::move", "::std::forward"))))),
cxxConstructExpr(NonConstRefParam, IsInstantiated,
FuncDecl)))
- .bind(NodeID<Expr>::value)),
+ .bind(NodeID<Expr>::value))),
Stm, Context);
for (const auto &Nodes : Matches) {
const auto *Exp = Nodes.getNodeAs<Expr>(NodeID<Expr>::value);
diff --git a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
index 2cd607d8a493..d24c40b457b4 100644
--- a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
@@ -13,12 +13,10 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <vector>
@@ -26,51 +24,6 @@
using namespace clang;
namespace {
-
-class DataflowWorklist {
- llvm::BitVector enqueuedBlocks;
- PostOrderCFGView *POV;
- llvm::PriorityQueue<const CFGBlock *, SmallVector<const CFGBlock *, 20>,
- PostOrderCFGView::BlockOrderCompare> worklist;
-
-public:
- DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
- : enqueuedBlocks(cfg.getNumBlockIDs()),
- POV(Ctx.getAnalysis<PostOrderCFGView>()),
- worklist(POV->getComparator()) {}
-
- void enqueueBlock(const CFGBlock *block);
- void enqueuePredecessors(const CFGBlock *block);
-
- const CFGBlock *dequeue();
-};
-
-}
-
-void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
- if (block && !enqueuedBlocks[block->getBlockID()]) {
- enqueuedBlocks[block->getBlockID()] = true;
- worklist.push(block);
- }
-}
-
-void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
- for (CFGBlock::const_pred_iterator I = block->pred_begin(),
- E = block->pred_end(); I != E; ++I) {
- enqueueBlock(*I);
- }
-}
-
-const CFGBlock *DataflowWorklist::dequeue() {
- if (worklist.empty())
- return nullptr;
- const CFGBlock *b = worklist.top();
- worklist.pop();
- enqueuedBlocks[b->getBlockID()] = false;
- return b;
-}
-
-namespace {
class LiveVariablesImpl {
public:
AnalysisDeclContext &analysisContext;
@@ -136,7 +89,7 @@ namespace {
}
return A;
}
-}
+} // namespace
void LiveVariables::Observer::anchor() { }
@@ -218,7 +171,7 @@ public:
void VisitUnaryOperator(UnaryOperator *UO);
void Visit(Stmt *S);
};
-}
+} // namespace
static const VariableArrayType *FindVA(QualType Ty) {
const Type *ty = Ty.getTypePtr();
@@ -537,9 +490,8 @@ LiveVariables::~LiveVariables() {
delete (LiveVariablesImpl*) impl;
}
-LiveVariables *
-LiveVariables::computeLiveness(AnalysisDeclContext &AC,
- bool killAtAssign) {
+std::unique_ptr<LiveVariables>
+LiveVariables::computeLiveness(AnalysisDeclContext &AC, bool killAtAssign) {
// No CFG? Bail out.
CFG *cfg = AC.getCFG();
@@ -555,7 +507,7 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
// Construct the dataflow worklist. Enqueue the exit block as the
// start of the analysis.
- DataflowWorklist worklist(*cfg, AC);
+ BackwardDataflowWorklist worklist(*cfg, AC);
llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
// FIXME: we should enqueue using post order.
@@ -612,7 +564,7 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
worklist.enqueuePredecessors(block);
}
- return new LiveVariables(LV);
+ return std::unique_ptr<LiveVariables>(new LiveVariables(LV));
}
void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
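// Editor's note (not part of the upstream diff): computeLiveness now hands
// ownership to the caller instead of returning a raw pointer. A hedged usage
// sketch:
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include <memory>
using namespace clang;

void analyzeLiveness(AnalysisDeclContext &AC) {
  if (std::unique_ptr<LiveVariables> LV =
          LiveVariables::computeLiveness(AC, /*killAtAssign=*/true)) {
    LV->dumpBlockLiveness(AC.getASTContext().getSourceManager());
  } // LV is destroyed here; no manual delete needed.
}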
diff --git a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
index 53235ba07699..c88e6c1e1535 100644
--- a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/AnalysisDeclContext.h"
@@ -909,7 +910,7 @@ static void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
Out << Prefix << '\'' << *D;
if (const auto T = dyn_cast<ClassTemplateSpecializationDecl>(D))
describeTemplateParameters(Out, T->getTemplateArgs().asArray(),
- D->getASTContext().getLangOpts(), "<", ">");
+ D->getLangOpts(), "<", ">");
Out << '\'';
}
@@ -975,8 +976,8 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
if (const auto FD = dyn_cast<FunctionDecl>(D))
if (const TemplateArgumentList *TAList =
FD->getTemplateSpecializationArgs())
- describeTemplateParameters(Out, TAList->asArray(),
- FD->getASTContext().getLangOpts(), "<", ">");
+ describeTemplateParameters(Out, TAList->asArray(), FD->getLangOpts(), "<",
+ ">");
Out << '\'';
return true;
diff --git a/contrib/llvm-project/clang/lib/Analysis/PostOrderCFGView.cpp b/contrib/llvm-project/clang/lib/Analysis/PostOrderCFGView.cpp
index f79d0007cb3d..0c09c0f97ff6 100644
--- a/contrib/llvm-project/clang/lib/Analysis/PostOrderCFGView.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/PostOrderCFGView.cpp
@@ -29,11 +29,12 @@ PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
}
}
-PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
+std::unique_ptr<PostOrderCFGView>
+PostOrderCFGView::create(AnalysisDeclContext &ctx) {
const CFG *cfg = ctx.getCFG();
if (!cfg)
return nullptr;
- return new PostOrderCFGView(cfg);
+ return std::make_unique<PostOrderCFGView>(cfg);
}
const void *PostOrderCFGView::getTag() { static int x; return &x; }
diff --git a/contrib/llvm-project/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm-project/clang/lib/Analysis/ProgramPoint.cpp
index 0783fbed5315..2a91749affd2 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ProgramPoint.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ProgramPoint.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/JsonSupport.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
index 369879ad65f5..221d137dadb8 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
@@ -138,10 +138,10 @@ static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
static SourceLocation getTopMostMacro(SourceLocation Loc, SourceManager &SM) {
assert(Loc.isMacroID());
SourceLocation Last;
- while (Loc.isMacroID()) {
+ do {
Last = Loc;
Loc = SM.getImmediateMacroCallerLoc(Loc);
- }
+ } while (Loc.isMacroID());
return Last;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
index 6f46917b2dfc..9f45a8efe546 100644
--- a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -140,12 +140,15 @@ RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
static bool isSubclass(const Decl *D,
StringRef ClassName) {
using namespace ast_matchers;
- DeclarationMatcher SubclassM = cxxRecordDecl(isSameOrDerivedFrom(ClassName));
+ DeclarationMatcher SubclassM =
+ cxxRecordDecl(isSameOrDerivedFrom(std::string(ClassName)));
return !(match(SubclassM, *D, D->getASTContext()).empty());
}
static bool isOSObjectSubclass(const Decl *D) {
- return D && isSubclass(D, "OSMetaClassBase");
+ // OSSymbols are particular OSObjects that are allocated globally
+ // and therefore aren't really refcounted, so we ignore them.
+ return D && isSubclass(D, "OSMetaClassBase") && !isSubclass(D, "OSSymbol");
}
static bool isOSObjectDynamicCast(StringRef S) {
@@ -662,6 +665,7 @@ RetainSummaryManager::getSummary(AnyCall C,
switch (C.getKind()) {
case AnyCall::Function:
case AnyCall::Constructor:
+ case AnyCall::InheritedConstructor:
case AnyCall::Allocator:
case AnyCall::Deallocator:
Summ = getFunctionSummary(cast_or_null<FunctionDecl>(C.getDecl()));
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
index 48f4106b6bae..1208eaf93e25 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
@@ -905,11 +905,7 @@ public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
: FactEntry(CE, LK_Exclusive, Loc, false) {}
- void addExclusiveLock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
- }
-
- void addSharedLock(const CapabilityExpr &M) {
+ void addLock(const CapabilityExpr &M) {
UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
}
@@ -999,7 +995,10 @@ private:
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
!Cp, LK_Exclusive, loc));
} else if (Handler) {
- Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc);
+ SourceLocation PrevLoc;
+ if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
+ PrevLoc = Neg->loc();
+ Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
}
}
};
@@ -1249,8 +1248,7 @@ static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
}
template <typename AttrTy>
-static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
- StringRef>::type
+static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
if (const ValueDecl *VD = getValueDecl(A->getArg()))
return ClassifyDiagnostic(VD);
@@ -1258,8 +1256,7 @@ ClassifyDiagnostic(const AttrTy *A) {
}
template <typename AttrTy>
-static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
- StringRef>::type
+static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
for (const auto *Arg : A->args()) {
if (const ValueDecl *VD = getValueDecl(Arg))
@@ -1328,7 +1325,10 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
const FactEntry *LDat = FSet.findLock(FactMan, Cp);
if (!LDat) {
- Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
+ SourceLocation PrevLoc;
+ if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
+ PrevLoc = Neg->loc();
+ Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
return;
}
@@ -1803,7 +1803,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
SourceLocation Loc = Exp->getExprLoc();
CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
- CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
+ CapExprSet ScopedReqsAndExcludes;
StringRef CapDiagKind = "mutex";
// Figure out if we're constructing an object of scoped lockable class
@@ -1894,19 +1894,20 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
POK_FunctionCall, ClassifyDiagnostic(A),
Exp->getExprLoc());
// use for adopting a lock
- if (isScopedVar) {
- Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
- : ScopedExclusiveReqs,
- A, Exp, D, VD);
- }
+ if (isScopedVar)
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
}
break;
}
case attr::LocksExcluded: {
const auto *A = cast<LocksExcludedAttr>(At);
- for (auto *Arg : A->args())
+ for (auto *Arg : A->args()) {
warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
+ // use for deferring a lock
+ if (isScopedVar)
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
+ }
break;
}
@@ -1946,13 +1947,11 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc);
for (const auto &M : ExclusiveLocksToAdd)
- ScopedEntry->addExclusiveLock(M);
- for (const auto &M : ScopedExclusiveReqs)
- ScopedEntry->addExclusiveLock(M);
+ ScopedEntry->addLock(M);
for (const auto &M : SharedLocksToAdd)
- ScopedEntry->addSharedLock(M);
- for (const auto &M : ScopedSharedReqs)
- ScopedEntry->addSharedLock(M);
+ ScopedEntry->addLock(M);
+ for (const auto &M : ScopedReqsAndExcludes)
+ ScopedEntry->addLock(M);
for (const auto &M : ExclusiveLocksToRemove)
ScopedEntry->addExclusiveUnlock(M);
for (const auto &M : SharedLocksToRemove)
@@ -2141,12 +2140,14 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
// handle constructors that involve temporaries
if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
- if (ICE->getCastKind() == CK_NoOp)
- E = ICE->getSubExpr();
+ E = EWC->getSubExpr()->IgnoreParens();
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_ConstructorConversion ||
+ CE->getCastKind() == CK_UserDefinedConversion)
+ E = CE->getSubExpr()->IgnoreParens();
if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
- E = BTE->getSubExpr();
+ E = BTE->getSubExpr()->IgnoreParens();
if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
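The ClassifyDiagnostic hunks above only shorten the SFINAE spelling: std::enable_if_t<B, T> (C++14) is an alias for typename std::enable_if<B, T>::type, so the overload set is unchanged. A self-contained sketch of the equivalence with a hypothetical overload pair:

#include <string>
#include <type_traits>

// Overloads selected by SFINAE, written with the C++14 alias template.
template <typename T>
std::enable_if_t<std::is_integral<T>::value, std::string> describe(T) {
  return "integral";
}

template <typename T>
std::enable_if_t<!std::is_integral<T>::value, std::string> describe(T) {
  return "non-integral";
}

// The alias names exactly the same type as the long form.
static_assert(std::is_same<std::enable_if_t<true, int>,
                           typename std::enable_if<true, int>::type>::value,
              "enable_if_t is only shorthand");

int main() { return describe(42) == "integral" ? 0 : 1; }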
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index 8a233d4a44f1..67cd39728c35 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -24,6 +24,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
@@ -213,68 +214,6 @@ ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
}
//------------------------------------------------------------------------====//
-// Worklist: worklist for dataflow analysis.
-//====------------------------------------------------------------------------//
-
-namespace {
-
-class DataflowWorklist {
- PostOrderCFGView::iterator PO_I, PO_E;
- SmallVector<const CFGBlock *, 20> worklist;
- llvm::BitVector enqueuedBlocks;
-
-public:
- DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
- : PO_I(view.begin()), PO_E(view.end()),
- enqueuedBlocks(cfg.getNumBlockIDs(), true) {
- // Treat the first block as already analyzed.
- if (PO_I != PO_E) {
- assert(*PO_I == &cfg.getEntry());
- enqueuedBlocks[(*PO_I)->getBlockID()] = false;
- ++PO_I;
- }
- }
-
- void enqueueSuccessors(const CFGBlock *block);
- const CFGBlock *dequeue();
-};
-
-} // namespace
-
-void DataflowWorklist::enqueueSuccessors(const CFGBlock *block) {
- for (CFGBlock::const_succ_iterator I = block->succ_begin(),
- E = block->succ_end(); I != E; ++I) {
- const CFGBlock *Successor = *I;
- if (!Successor || enqueuedBlocks[Successor->getBlockID()])
- continue;
- worklist.push_back(Successor);
- enqueuedBlocks[Successor->getBlockID()] = true;
- }
-}
-
-const CFGBlock *DataflowWorklist::dequeue() {
- const CFGBlock *B = nullptr;
-
- // First dequeue from the worklist. This can represent
- // updates along backedges that we want propagated as quickly as possible.
- if (!worklist.empty())
- B = worklist.pop_back_val();
-
- // Next dequeue from the initial reverse post order. This is the
- // theoretical ideal in the presence of no back edges.
- else if (PO_I != PO_E) {
- B = *PO_I;
- ++PO_I;
- }
- else
- return nullptr;
-
- assert(enqueuedBlocks[B->getBlockID()] == true);
- enqueuedBlocks[B->getBlockID()] = false;
- return B;
-}
-
-//------------------------------------------------------------------------====//
// Classification of DeclRefExprs as use or initialization.
//====------------------------------------------------------------------------//
@@ -329,6 +268,7 @@ public:
Init,
Use,
SelfInit,
+ ConstRefUse,
Ignore
};
@@ -465,6 +405,15 @@ static bool isPointerToConst(const QualType &QT) {
return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified();
}
+static bool hasTrivialBody(CallExpr *CE) {
+ if (FunctionDecl *FD = CE->getDirectCallee()) {
+ if (FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
+ return FTD->getTemplatedDecl()->hasTrivialBody();
+ return FD->hasTrivialBody();
+ }
+ return false;
+}
+
void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
// Classify arguments to std::move as used.
if (CE->isCallToStdMove()) {
@@ -473,15 +422,17 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
classify(CE->getArg(0), Use);
return;
}
-
- // If a value is passed by const pointer or by const reference to a function,
+ bool isTrivialBody = hasTrivialBody(CE);
+ // If a value is passed by const pointer to a function,
// we should not assume that it is initialized by the call, and we
// conservatively do not assume that it is used.
+ // If a value is passed by const reference to a function,
+ // it should already be initialized.
for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
I != E; ++I) {
if ((*I)->isGLValue()) {
if ((*I)->getType().isConstQualified())
- classify((*I), Ignore);
+ classify((*I), isTrivialBody ? Ignore : ConstRefUse);
} else if (isPointerToConst((*I)->getType())) {
const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
const auto *UO = dyn_cast<UnaryOperator>(Ex);
@@ -530,12 +481,14 @@ public:
handler(handler) {}
void reportUse(const Expr *ex, const VarDecl *vd);
+ void reportConstRefUse(const Expr *ex, const VarDecl *vd);
void VisitBinaryOperator(BinaryOperator *bo);
void VisitBlockExpr(BlockExpr *be);
void VisitCallExpr(CallExpr *ce);
void VisitDeclRefExpr(DeclRefExpr *dr);
void VisitDeclStmt(DeclStmt *ds);
+ void VisitGCCAsmStmt(GCCAsmStmt *as);
void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
void VisitObjCMessageExpr(ObjCMessageExpr *ME);
void VisitOMPExecutableDirective(OMPExecutableDirective *ED);
@@ -636,6 +589,28 @@ public:
continue;
}
+ if (AtPredExit == MayUninitialized) {
+ // If the predecessor's terminator is an "asm goto" that initializes
+ // the variable, then it won't be counted as "initialized" on the
+ // non-fallthrough paths.
+ CFGTerminator term = Pred->getTerminator();
+ if (const auto *as = dyn_cast_or_null<GCCAsmStmt>(term.getStmt())) {
+ const CFGBlock *fallthrough = *Pred->succ_begin();
+ if (as->isAsmGoto() &&
+ llvm::any_of(as->outputs(), [&](const Expr *output) {
+ return vd == findVar(output).getDecl() &&
+ llvm::any_of(as->labels(),
+ [&](const AddrLabelExpr *label) {
+ return label->getLabel()->getStmt() == B->Label &&
+ B != fallthrough;
+ });
+ })) {
+ Use.setUninitAfterDecl();
+ continue;
+ }
+ }
+ }
+
unsigned &SV = SuccsVisited[Pred->getBlockID()];
if (!SV) {
// When visiting the first successor of a block, mark all NULL
@@ -705,6 +680,12 @@ void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
handler.handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
}
+void TransferFunctions::reportConstRefUse(const Expr *ex, const VarDecl *vd) {
+ Value v = vals[vd];
+ if (isAlwaysUninit(v))
+ handler.handleConstRefUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
+}
+
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
if (const auto *DS = dyn_cast<DeclStmt>(FS->getElement())) {
@@ -772,7 +753,10 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
vals[cast<VarDecl>(dr->getDecl())] = Initialized;
break;
case ClassifyRefs::SelfInit:
- handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ break;
+ case ClassifyRefs::ConstRefUse:
+ reportConstRefUse(dr, cast<VarDecl>(dr->getDecl()));
break;
}
}
@@ -821,6 +805,20 @@ void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
}
}
+void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
+ // An "asm goto" statement is a terminator that may initialize some variables.
+ if (!as->isAsmGoto())
+ return;
+
+ for (const Expr *o : as->outputs())
+ if (const VarDecl *VD = findVar(o).getDecl())
+ if (vals[VD] != Initialized)
+ // If the variable isn't initialized by the time we get here, then we
+ // mark it as potentially uninitialized for those cases where it's used
+ // on an indirect path, where it's not guaranteed to be defined.
+ vals[VD] = MayUninitialized;
+}
+
void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
// If the Objective-C message expression is an implicit no-return that
// is not modeled in the CFG, set the tracked dataflow values to Unknown.
@@ -858,6 +856,10 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
if (Optional<CFGStmt> cs = I.getAs<CFGStmt>())
tf.Visit(const_cast<Stmt *>(cs->getStmt()));
}
+ CFGTerminator terminator = block->getTerminator();
+ if (auto *as = dyn_cast_or_null<GCCAsmStmt>(terminator.getStmt()))
+ if (as->isAsmGoto())
+ tf.Visit(as);
return vals.updateValueVectorWithScratch(block);
}
@@ -887,6 +889,12 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadAnyUse = true;
}
+ void handleConstRefUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ hadUse[currentBlock] = true;
+ hadAnyUse = true;
+ }
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
@@ -924,7 +932,7 @@ void clang::runUninitializedVariablesAnalysis(
}
  // Proceed with the worklist.
- DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
+ ForwardDataflowWorklist worklist(cfg, ac);
llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
worklist.enqueueSuccessors(&cfg.getEntry());
llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
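The UninitializedValues change above deletes the file-local DataflowWorklist and switches to the shared ForwardDataflowWorklist. A toy, self-contained sketch of the scheduling discipline both implement: blocks re-enqueued along back edges are drained first, otherwise blocks are visited in the precomputed reverse post order (the block IDs and the order below are made up):

#include <cstdio>
#include <utility>
#include <vector>

struct Worklist {
  std::vector<int> RPO;          // precomputed reverse post order of block IDs
  std::size_t RPOIndex = 0;
  std::vector<int> Pending;      // blocks re-enqueued along back edges
  std::vector<bool> Enqueued;    // true while a block still awaits a visit

  explicit Worklist(std::vector<int> Order)
      : RPO(std::move(Order)), Enqueued(RPO.size(), true) {}

  void enqueue(int Block) {      // called for successors whose input changed
    if (!Enqueued[Block]) {
      Enqueued[Block] = true;
      Pending.push_back(Block);
    }
  }

  int dequeue() {                // returns -1 once everything is drained
    int B;
    if (!Pending.empty()) {      // back-edge updates propagate first
      B = Pending.back();
      Pending.pop_back();
    } else if (RPOIndex < RPO.size()) {
      B = RPO[RPOIndex++];       // otherwise follow reverse post order
    } else {
      return -1;
    }
    Enqueued[B] = false;
    return B;
  }
};

int main() {
  Worklist WL({0, 1, 2, 3});
  for (int B = WL.dequeue(); B != -1; B = WL.dequeue()) {
    std::printf("visit %d\n", B);
    if (B == 2)
      WL.enqueue(1);             // pretend a back edge 2 -> 1 changed block 1
  }
  // Visits 0, 1, 2, then 1 again via the back edge, then 3.
}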
diff --git a/contrib/llvm-project/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp b/contrib/llvm-project/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
index 77de3630ae7e..32fba9c93752 100644
--- a/contrib/llvm-project/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
@@ -21,7 +21,7 @@ void registerMyChecker(CheckerManager &Mgr) {
<< '\n';
}
-bool shouldRegisterMyChecker(const LangOptions &LO) { return true; }
+bool shouldRegisterMyChecker(const CheckerManager &mgr) { return true; }
} // end anonymous namespace
diff --git a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
index 74cc3d1d03da..ff6dbf870fcf 100644
--- a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
@@ -36,10 +36,14 @@ const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
}
static StringRef
-normalizeAttrScopeName(StringRef ScopeName,
+normalizeAttrScopeName(const IdentifierInfo *Scope,
AttributeCommonInfo::Syntax SyntaxUsed) {
+ if (!Scope)
+ return "";
+
// Normalize the "__gnu__" scope name to be "gnu" and the "_Clang" scope name
// to be "clang".
+ StringRef ScopeName = Scope->getName();
if (SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
SyntaxUsed == AttributeCommonInfo::AS_C2x) {
if (ScopeName == "__gnu__")
@@ -50,7 +54,7 @@ normalizeAttrScopeName(StringRef ScopeName,
return ScopeName;
}
-static StringRef normalizeAttrName(StringRef AttrName,
+static StringRef normalizeAttrName(const IdentifierInfo *Name,
StringRef NormalizedScopeName,
AttributeCommonInfo::Syntax SyntaxUsed) {
// Normalize the attribute name, __foo__ becomes foo. This is only allowable
@@ -61,6 +65,7 @@ static StringRef normalizeAttrName(StringRef AttrName,
SyntaxUsed == AttributeCommonInfo::AS_C2x) &&
(NormalizedScopeName.empty() || NormalizedScopeName == "gnu" ||
NormalizedScopeName == "clang"));
+ StringRef AttrName = Name->getName();
if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
AttrName.endswith("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
@@ -74,35 +79,41 @@ bool AttributeCommonInfo::isGNUScope() const {
#include "clang/Sema/AttrParsedAttrKinds.inc"
+static SmallString<64> normalizeName(const IdentifierInfo *Name,
+ const IdentifierInfo *Scope,
+ AttributeCommonInfo::Syntax SyntaxUsed) {
+ StringRef ScopeName = normalizeAttrScopeName(Scope, SyntaxUsed);
+ StringRef AttrName = normalizeAttrName(Name, ScopeName, SyntaxUsed);
+
+ SmallString<64> FullName = ScopeName;
+ if (!ScopeName.empty()) {
+ assert(SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
+ SyntaxUsed == AttributeCommonInfo::AS_C2x);
+ FullName += "::";
+ }
+ FullName += AttrName;
+
+ return FullName;
+}
+
AttributeCommonInfo::Kind
AttributeCommonInfo::getParsedKind(const IdentifierInfo *Name,
const IdentifierInfo *ScopeName,
Syntax SyntaxUsed) {
- StringRef AttrName = Name->getName();
-
- SmallString<64> FullName;
- if (ScopeName)
- FullName += normalizeAttrScopeName(ScopeName->getName(), SyntaxUsed);
-
- AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
-
- // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
- // unscoped.
- if (ScopeName || SyntaxUsed == AS_CXX11 || SyntaxUsed == AS_C2x)
- FullName += "::";
- FullName += AttrName;
+ return ::getAttrKind(normalizeName(Name, ScopeName, SyntaxUsed), SyntaxUsed);
+}
- return ::getAttrKind(FullName, SyntaxUsed);
+std::string AttributeCommonInfo::getNormalizedFullName() const {
+ return static_cast<std::string>(
+ normalizeName(getAttrName(), getScopeName(), getSyntax()));
}
unsigned AttributeCommonInfo::calculateAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
auto Syntax = static_cast<AttributeCommonInfo::Syntax>(getSyntax());
- StringRef Scope =
- getScopeName() ? normalizeAttrScopeName(getScopeName()->getName(), Syntax)
- : "";
- StringRef Name = normalizeAttrName(getAttrName()->getName(), Scope, Syntax);
+ StringRef Scope = normalizeAttrScopeName(getScopeName(), Syntax);
+ StringRef Name = normalizeAttrName(getAttrName(), Scope, Syntax);
#include "clang/Sema/AttrSpellingListIndex.inc"
}
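The Attributes.cpp refactor above centralizes scope and name normalization in a single normalizeName() helper. A standalone sketch of the same string rules, using std::string instead of IdentifierInfo/SmallString and omitting the syntax/scope preconditions the real code asserts:

#include <iostream>
#include <string>

// "__gnu__" and "_Clang" scopes are canonicalized to "gnu" and "clang".
static std::string normalizeScope(const std::string &Scope) {
  if (Scope == "__gnu__")
    return "gnu";
  if (Scope == "_Clang")
    return "clang";
  return Scope;
}

// "__foo__" is normalized to "foo" (the real code only does this for the
// GNU/clang spellings; that check is omitted here).
static std::string normalizeAttr(std::string Name) {
  if (Name.size() >= 4 && Name.compare(0, 2, "__") == 0 &&
      Name.compare(Name.size() - 2, 2, "__") == 0)
    Name = Name.substr(2, Name.size() - 4);
  return Name;
}

// Scoped attributes are keyed as "scope::name", unscoped ones as "name".
static std::string normalizeFullName(const std::string &Name,
                                     const std::string &Scope) {
  std::string ScopeName = normalizeScope(Scope);
  std::string FullName = ScopeName;
  if (!ScopeName.empty())
    FullName += "::";
  return FullName + normalizeAttr(Name);
}

int main() {
  std::cout << normalizeFullName("__always_inline__", "__gnu__") << "\n";
  // prints "gnu::always_inline"
}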
diff --git a/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp b/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
index fa186380f109..4fc7a535c9eb 100644
--- a/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
@@ -17,7 +17,7 @@ CodeGenOptions::CodeGenOptions() {
#include "clang/Basic/CodeGenOptions.def"
RelocationModel = llvm::Reloc::PIC_;
- memcpy(CoverageVersion, "402*", 4);
+ memcpy(CoverageVersion, "408*", 4);
}
bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index e06d120c58bf..709185707bd9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -28,6 +28,10 @@ const char *CudaVersionToString(CudaVersion V) {
return "10.0";
case CudaVersion::CUDA_101:
return "10.1";
+ case CudaVersion::CUDA_102:
+ return "10.2";
+ case CudaVersion::CUDA_110:
+ return "11.0";
}
llvm_unreachable("invalid enum");
}
@@ -42,253 +46,87 @@ CudaVersion CudaStringToVersion(const llvm::Twine &S) {
.Case("9.2", CudaVersion::CUDA_92)
.Case("10.0", CudaVersion::CUDA_100)
.Case("10.1", CudaVersion::CUDA_101)
+ .Case("10.2", CudaVersion::CUDA_102)
+ .Case("11.0", CudaVersion::CUDA_110)
.Default(CudaVersion::UNKNOWN);
}
-const char *CudaArchToString(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
- return "unknown";
- case CudaArch::SM_20:
- return "sm_20";
- case CudaArch::SM_21:
- return "sm_21";
- case CudaArch::SM_30:
- return "sm_30";
- case CudaArch::SM_32:
- return "sm_32";
- case CudaArch::SM_35:
- return "sm_35";
- case CudaArch::SM_37:
- return "sm_37";
- case CudaArch::SM_50:
- return "sm_50";
- case CudaArch::SM_52:
- return "sm_52";
- case CudaArch::SM_53:
- return "sm_53";
- case CudaArch::SM_60:
- return "sm_60";
- case CudaArch::SM_61:
- return "sm_61";
- case CudaArch::SM_62:
- return "sm_62";
- case CudaArch::SM_70:
- return "sm_70";
- case CudaArch::SM_72:
- return "sm_72";
- case CudaArch::SM_75:
- return "sm_75";
- case CudaArch::GFX600: // tahiti
- return "gfx600";
- case CudaArch::GFX601: // pitcairn, verde, oland,hainan
- return "gfx601";
- case CudaArch::GFX700: // kaveri
- return "gfx700";
- case CudaArch::GFX701: // hawaii
- return "gfx701";
- case CudaArch::GFX702: // 290,290x,R390,R390x
- return "gfx702";
- case CudaArch::GFX703: // kabini mullins
- return "gfx703";
- case CudaArch::GFX704: // bonaire
- return "gfx704";
- case CudaArch::GFX801: // carrizo
- return "gfx801";
- case CudaArch::GFX802: // tonga,iceland
- return "gfx802";
- case CudaArch::GFX803: // fiji,polaris10
- return "gfx803";
- case CudaArch::GFX810: // stoney
- return "gfx810";
- case CudaArch::GFX900: // vega, instinct
- return "gfx900";
- case CudaArch::GFX902: // TBA
- return "gfx902";
- case CudaArch::GFX904: // TBA
- return "gfx904";
- case CudaArch::GFX906: // TBA
- return "gfx906";
- case CudaArch::GFX908: // TBA
- return "gfx908";
- case CudaArch::GFX909: // TBA
- return "gfx909";
- case CudaArch::GFX1010: // TBA
- return "gfx1010";
- case CudaArch::GFX1011: // TBA
- return "gfx1011";
- case CudaArch::GFX1012: // TBA
- return "gfx1012";
- }
- llvm_unreachable("invalid enum");
-}
+struct CudaArchToStringMap {
+ CudaArch arch;
+ const char *arch_name;
+ const char *virtual_arch_name;
+};
-CudaArch StringToCudaArch(llvm::StringRef S) {
- return llvm::StringSwitch<CudaArch>(S)
- .Case("sm_20", CudaArch::SM_20)
- .Case("sm_21", CudaArch::SM_21)
- .Case("sm_30", CudaArch::SM_30)
- .Case("sm_32", CudaArch::SM_32)
- .Case("sm_35", CudaArch::SM_35)
- .Case("sm_37", CudaArch::SM_37)
- .Case("sm_50", CudaArch::SM_50)
- .Case("sm_52", CudaArch::SM_52)
- .Case("sm_53", CudaArch::SM_53)
- .Case("sm_60", CudaArch::SM_60)
- .Case("sm_61", CudaArch::SM_61)
- .Case("sm_62", CudaArch::SM_62)
- .Case("sm_70", CudaArch::SM_70)
- .Case("sm_72", CudaArch::SM_72)
- .Case("sm_75", CudaArch::SM_75)
- .Case("gfx600", CudaArch::GFX600)
- .Case("gfx601", CudaArch::GFX601)
- .Case("gfx700", CudaArch::GFX700)
- .Case("gfx701", CudaArch::GFX701)
- .Case("gfx702", CudaArch::GFX702)
- .Case("gfx703", CudaArch::GFX703)
- .Case("gfx704", CudaArch::GFX704)
- .Case("gfx801", CudaArch::GFX801)
- .Case("gfx802", CudaArch::GFX802)
- .Case("gfx803", CudaArch::GFX803)
- .Case("gfx810", CudaArch::GFX810)
- .Case("gfx900", CudaArch::GFX900)
- .Case("gfx902", CudaArch::GFX902)
- .Case("gfx904", CudaArch::GFX904)
- .Case("gfx906", CudaArch::GFX906)
- .Case("gfx908", CudaArch::GFX908)
- .Case("gfx909", CudaArch::GFX909)
- .Case("gfx1010", CudaArch::GFX1010)
- .Case("gfx1011", CudaArch::GFX1011)
- .Case("gfx1012", CudaArch::GFX1012)
- .Default(CudaArch::UNKNOWN);
-}
+#define SM2(sm, ca) \
+ { CudaArch::SM_##sm, "sm_" #sm, ca }
+#define SM(sm) SM2(sm, "compute_" #sm)
+#define GFX(gpu) \
+ { CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
+CudaArchToStringMap arch_names[] = {
+ // clang-format off
+ SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
+ SM(30), SM(32), SM(35), SM(37), // Kepler
+ SM(50), SM(52), SM(53), // Maxwell
+ SM(60), SM(61), SM(62), // Pascal
+ SM(70), SM(72), // Volta
+ SM(75), // Turing
+ SM(80), // Ampere
+ GFX(600), // tahiti
+ GFX(601), // pitcairn, verde, oland,hainan
+ GFX(700), // kaveri
+ GFX(701), // hawaii
+ GFX(702), // 290,290x,R390,R390x
+ GFX(703), // kabini mullins
+ GFX(704), // bonaire
+ GFX(801), // carrizo
+ GFX(802), // tonga,iceland
+ GFX(803), // fiji,polaris10
+ GFX(810), // stoney
+ GFX(900), // vega, instinct
+ GFX(902), GFX(904), GFX(906), GFX(908), GFX(909),
+ GFX(1010), GFX(1011), GFX(1012),
+ // clang-format on
+};
+#undef SM
+#undef SM2
+#undef GFX
-const char *CudaVirtualArchToString(CudaVirtualArch A) {
- switch (A) {
- case CudaVirtualArch::UNKNOWN:
+const char *CudaArchToString(CudaArch A) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ if (result == std::end(arch_names))
return "unknown";
- case CudaVirtualArch::COMPUTE_20:
- return "compute_20";
- case CudaVirtualArch::COMPUTE_30:
- return "compute_30";
- case CudaVirtualArch::COMPUTE_32:
- return "compute_32";
- case CudaVirtualArch::COMPUTE_35:
- return "compute_35";
- case CudaVirtualArch::COMPUTE_37:
- return "compute_37";
- case CudaVirtualArch::COMPUTE_50:
- return "compute_50";
- case CudaVirtualArch::COMPUTE_52:
- return "compute_52";
- case CudaVirtualArch::COMPUTE_53:
- return "compute_53";
- case CudaVirtualArch::COMPUTE_60:
- return "compute_60";
- case CudaVirtualArch::COMPUTE_61:
- return "compute_61";
- case CudaVirtualArch::COMPUTE_62:
- return "compute_62";
- case CudaVirtualArch::COMPUTE_70:
- return "compute_70";
- case CudaVirtualArch::COMPUTE_72:
- return "compute_72";
- case CudaVirtualArch::COMPUTE_75:
- return "compute_75";
- case CudaVirtualArch::COMPUTE_AMDGCN:
- return "compute_amdgcn";
- }
- llvm_unreachable("invalid enum");
+ return result->arch_name;
}
-CudaVirtualArch StringToCudaVirtualArch(llvm::StringRef S) {
- return llvm::StringSwitch<CudaVirtualArch>(S)
- .Case("compute_20", CudaVirtualArch::COMPUTE_20)
- .Case("compute_30", CudaVirtualArch::COMPUTE_30)
- .Case("compute_32", CudaVirtualArch::COMPUTE_32)
- .Case("compute_35", CudaVirtualArch::COMPUTE_35)
- .Case("compute_37", CudaVirtualArch::COMPUTE_37)
- .Case("compute_50", CudaVirtualArch::COMPUTE_50)
- .Case("compute_52", CudaVirtualArch::COMPUTE_52)
- .Case("compute_53", CudaVirtualArch::COMPUTE_53)
- .Case("compute_60", CudaVirtualArch::COMPUTE_60)
- .Case("compute_61", CudaVirtualArch::COMPUTE_61)
- .Case("compute_62", CudaVirtualArch::COMPUTE_62)
- .Case("compute_70", CudaVirtualArch::COMPUTE_70)
- .Case("compute_72", CudaVirtualArch::COMPUTE_72)
- .Case("compute_75", CudaVirtualArch::COMPUTE_75)
- .Case("compute_amdgcn", CudaVirtualArch::COMPUTE_AMDGCN)
- .Default(CudaVirtualArch::UNKNOWN);
+const char *CudaArchToVirtualArchString(CudaArch A) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ if (result == std::end(arch_names))
+ return "unknown";
+ return result->virtual_arch_name;
}
-CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
- return CudaVirtualArch::UNKNOWN;
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- return CudaVirtualArch::COMPUTE_20;
- case CudaArch::SM_30:
- return CudaVirtualArch::COMPUTE_30;
- case CudaArch::SM_32:
- return CudaVirtualArch::COMPUTE_32;
- case CudaArch::SM_35:
- return CudaVirtualArch::COMPUTE_35;
- case CudaArch::SM_37:
- return CudaVirtualArch::COMPUTE_37;
- case CudaArch::SM_50:
- return CudaVirtualArch::COMPUTE_50;
- case CudaArch::SM_52:
- return CudaVirtualArch::COMPUTE_52;
- case CudaArch::SM_53:
- return CudaVirtualArch::COMPUTE_53;
- case CudaArch::SM_60:
- return CudaVirtualArch::COMPUTE_60;
- case CudaArch::SM_61:
- return CudaVirtualArch::COMPUTE_61;
- case CudaArch::SM_62:
- return CudaVirtualArch::COMPUTE_62;
- case CudaArch::SM_70:
- return CudaVirtualArch::COMPUTE_70;
- case CudaArch::SM_72:
- return CudaVirtualArch::COMPUTE_72;
- case CudaArch::SM_75:
- return CudaVirtualArch::COMPUTE_75;
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- return CudaVirtualArch::COMPUTE_AMDGCN;
- }
- llvm_unreachable("invalid enum");
+CudaArch StringToCudaArch(llvm::StringRef S) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [S](const CudaArchToStringMap &map) { return S == map.arch_name; });
+ if (result == std::end(arch_names))
+ return CudaArch::UNKNOWN;
+ return result->arch;
}
CudaVersion MinVersionForCudaArch(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
+ if (A == CudaArch::UNKNOWN)
return CudaVersion::UNKNOWN;
+
+ // AMD GPUs do not depend on CUDA versions.
+ if (IsAMDGpuArch(A))
+ return CudaVersion::CUDA_70;
+
+ switch (A) {
case CudaArch::SM_20:
case CudaArch::SM_21:
case CudaArch::SM_30:
@@ -309,60 +147,30 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_91;
case CudaArch::SM_75:
return CudaVersion::CUDA_100;
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- return CudaVersion::CUDA_70;
+ case CudaArch::SM_80:
+ return CudaVersion::CUDA_110;
+ default:
+ llvm_unreachable("invalid enum");
}
- llvm_unreachable("invalid enum");
}
CudaVersion MaxVersionForCudaArch(CudaArch A) {
+ // AMD GPUs do not depend on CUDA versions.
+ if (IsAMDGpuArch(A))
+ return CudaVersion::LATEST;
+
switch (A) {
case CudaArch::UNKNOWN:
return CudaVersion::UNKNOWN;
case CudaArch::SM_20:
case CudaArch::SM_21:
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
return CudaVersion::CUDA_80;
default:
return CudaVersion::LATEST;
}
}
-static CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
+CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
int IVer =
Version.getMajor() * 10 + Version.getMinor().getValueOr(0);
switch(IVer) {
@@ -382,6 +190,10 @@ static CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
return CudaVersion::CUDA_100;
case 101:
return CudaVersion::CUDA_101;
+ case 102:
+ return CudaVersion::CUDA_102;
+ case 110:
+ return CudaVersion::CUDA_110;
default:
return CudaVersion::UNKNOWN;
}
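The Cuda.cpp rewrite above replaces three hand-maintained switch statements with one table of arch/name/virtual-name rows searched by std::find_if. A toy version of the same table-driven mapping (the enum values and rows below are illustrative, not the real CudaArch list):

#include <algorithm>
#include <cstring>
#include <iostream>

enum class Arch { Unknown, SM_75, SM_80, GFX906 };   // toy subset

struct ArchRow {
  Arch A;
  const char *Name;
  const char *VirtualName;
};

// One row per architecture keeps the name, virtual-arch name, and enum value
// in sync in a single place.
static const ArchRow Rows[] = {
    {Arch::SM_75, "sm_75", "compute_75"},
    {Arch::SM_80, "sm_80", "compute_80"},
    {Arch::GFX906, "gfx906", "compute_amdgcn"},
};

static const char *archToString(Arch A) {
  auto It = std::find_if(std::begin(Rows), std::end(Rows),
                         [A](const ArchRow &R) { return R.A == A; });
  return It == std::end(Rows) ? "unknown" : It->Name;
}

static Arch stringToArch(const char *S) {
  auto It = std::find_if(std::begin(Rows), std::end(Rows), [S](const ArchRow &R) {
    return std::strcmp(S, R.Name) == 0;
  });
  return It == std::end(Rows) ? Arch::Unknown : It->A;
}

int main() {
  std::cout << archToString(Arch::SM_80) << "\n";             // sm_80
  std::cout << archToString(stringToArch("gfx906")) << "\n";  // gfx906
}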
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index f258b37f2fa6..661eabf9bc7c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -61,6 +61,12 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB;
}
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ llvm::Error &&E) {
+ DB.AddString(toString(std::move(E)));
+ return DB;
+}
+
static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
StringRef Modifier, StringRef Argument,
ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index e30e3753d193..8c7e63e06301 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -85,6 +85,7 @@ VALIDATE_DIAG_SIZE(LEX)
VALIDATE_DIAG_SIZE(PARSE)
VALIDATE_DIAG_SIZE(AST)
VALIDATE_DIAG_SIZE(COMMENT)
+VALIDATE_DIAG_SIZE(CROSSTU)
VALIDATE_DIAG_SIZE(SEMA)
VALIDATE_DIAG_SIZE(ANALYSIS)
VALIDATE_DIAG_SIZE(REFACTORING)
@@ -289,7 +290,7 @@ namespace clang {
unsigned getOrCreateDiagID(DiagnosticIDs::Level L, StringRef Message,
DiagnosticIDs &Diags) {
- DiagDesc D(L, Message);
+ DiagDesc D(L, std::string(Message));
// Check to see if it already exists.
std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
if (I != DiagIDs.end() && I->first == D)
diff --git a/contrib/llvm-project/clang/lib/Basic/ExpressionTraits.cpp b/contrib/llvm-project/clang/lib/Basic/ExpressionTraits.cpp
new file mode 100644
index 000000000000..5fde1940038f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/ExpressionTraits.cpp
@@ -0,0 +1,36 @@
+//===--- ExpressionTraits.cpp - Expression Traits Support -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the expression traits support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ExpressionTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+using namespace clang;
+
+static constexpr const char *ExpressionTraitNames[] = {
+#define EXPRESSION_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ExpressionTraitSpellings[] = {
+#define EXPRESSION_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+const char *clang::getTraitName(ExpressionTrait T) {
+ assert(T <= ET_Last && "invalid enum value!");
+ return ExpressionTraitNames[T];
+}
+
+const char *clang::getTraitSpelling(ExpressionTrait T) {
+ assert(T <= ET_Last && "invalid enum value!");
+ return ExpressionTraitSpellings[T];
+}
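The new ExpressionTraits.cpp above uses the .def/X-macro pattern: a single macro list (the EXPRESSION_TRAIT entries in TokenKinds.def) expands into parallel name and spelling tables. A self-contained sketch of the pattern with a made-up two-entry trait list standing in for the real .def file:

#include <cassert>
#include <iostream>

#define MY_TRAIT_LIST(X)                                                       \
  X(__is_lvalue_expr, IsLValueExpr)                                            \
  X(__is_rvalue_expr, IsRValueExpr)

// Unlike the clang file, ET_Last here is a count sentinel, hence '<' below.
enum ExpressionTrait {
#define MY_TRAIT(Spelling, Name) ET_##Name,
  MY_TRAIT_LIST(MY_TRAIT)
#undef MY_TRAIT
  ET_Last
};

static const char *TraitNames[] = {
#define MY_TRAIT(Spelling, Name) #Name,
    MY_TRAIT_LIST(MY_TRAIT)
#undef MY_TRAIT
};

static const char *TraitSpellings[] = {
#define MY_TRAIT(Spelling, Name) #Spelling,
    MY_TRAIT_LIST(MY_TRAIT)
#undef MY_TRAIT
};

const char *getTraitName(ExpressionTrait T) {
  assert(T < ET_Last && "invalid enum value!");
  return TraitNames[T];
}

int main() {
  std::cout << getTraitName(ET_IsRValueExpr) << "\n";   // IsRValueExpr
  std::cout << TraitSpellings[ET_IsLValueExpr] << "\n"; // __is_lvalue_expr
}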
diff --git a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
index 079a4bbfc82f..e92e9d5911c0 100644
--- a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
@@ -454,11 +454,12 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
// misleading. We need to clean up the interface here.
makeAbsolutePath(AbsPath);
llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
- UFE->RealPathName = AbsPath.str();
+ UFE->RealPathName = std::string(AbsPath.str());
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
-FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile) {
+FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
+ bool RequiresNullTerminator) {
uint64_t FileSize = Entry->getSize();
  // If there's a high enough chance that the file has changed since we
// got its size, force a stat before opening it.
@@ -468,28 +469,29 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile) {
StringRef Filename = Entry->getName();
// If the file is already open, use the open file descriptor.
if (Entry->File) {
- auto Result =
- Entry->File->getBuffer(Filename, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ auto Result = Entry->File->getBuffer(Filename, FileSize,
+ RequiresNullTerminator, isVolatile);
Entry->closeFile();
return Result;
}
// Otherwise, open the file.
- return getBufferForFileImpl(Filename, FileSize, isVolatile);
+ return getBufferForFileImpl(Filename, FileSize, isVolatile,
+ RequiresNullTerminator);
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFileImpl(StringRef Filename, int64_t FileSize,
- bool isVolatile) {
+ bool isVolatile,
+ bool RequiresNullTerminator) {
if (FileSystemOpts.WorkingDir.empty())
- return FS->getBufferForFile(Filename, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ return FS->getBufferForFile(Filename, FileSize, RequiresNullTerminator,
+ isVolatile);
SmallString<128> FilePath(Filename);
FixupRelativePath(FilePath);
- return FS->getBufferForFile(FilePath, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ return FS->getBufferForFile(FilePath, FileSize, RequiresNullTerminator,
+ isVolatile);
}
/// getStatValue - Get the 'stat' information for the specified path,
@@ -513,7 +515,7 @@ FileManager::getStatValue(StringRef Path, llvm::vfs::Status &Status,
StatCache.get(), *FS);
}
-std::error_code
+std::error_code
FileManager::getNoncachedStatValue(StringRef Path,
llvm::vfs::Status &Result) {
SmallString<128> FilePath(Path);
diff --git a/contrib/llvm-project/clang/lib/Basic/FixedPoint.cpp b/contrib/llvm-project/clang/lib/Basic/FixedPoint.cpp
index 05600dfc6d21..ed8b92c98fdb 100644
--- a/contrib/llvm-project/clang/lib/Basic/FixedPoint.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FixedPoint.cpp
@@ -173,6 +173,142 @@ APFixedPoint APFixedPoint::add(const APFixedPoint &Other,
return APFixedPoint(Result, CommonFXSema);
}
+APFixedPoint APFixedPoint::sub(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ llvm::APSInt Result;
+ if (CommonFXSema.isSaturated()) {
+ Result = CommonFXSema.isSigned() ? ThisVal.ssub_sat(OtherVal)
+ : ThisVal.usub_sat(OtherVal);
+ } else {
+ Result = ThisVal.isSigned() ? ThisVal.ssub_ov(OtherVal, Overflowed)
+ : ThisVal.usub_ov(OtherVal, Overflowed);
+ }
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result, CommonFXSema);
+}
+
+APFixedPoint APFixedPoint::mul(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ // Widen the LHS and RHS so we can perform a full multiplication.
+ unsigned Wide = CommonFXSema.getWidth() * 2;
+ if (CommonFXSema.isSigned()) {
+ ThisVal = ThisVal.sextOrSelf(Wide);
+ OtherVal = OtherVal.sextOrSelf(Wide);
+ } else {
+ ThisVal = ThisVal.zextOrSelf(Wide);
+ OtherVal = OtherVal.zextOrSelf(Wide);
+ }
+
+ // Perform the full multiplication and downscale to get the same scale.
+ //
+ // Note that the right shifts here perform an implicit downwards rounding.
+ // This rounding could discard bits that would technically place the result
+ // outside the representable range. We interpret the spec as allowing us to
+ // perform the rounding step first, avoiding the overflow case that would
+ // arise.
+ llvm::APSInt Result;
+ if (CommonFXSema.isSigned())
+ Result = ThisVal.smul_ov(OtherVal, Overflowed)
+ .ashr(CommonFXSema.getScale());
+ else
+ Result = ThisVal.umul_ov(OtherVal, Overflowed)
+ .lshr(CommonFXSema.getScale());
+ assert(!Overflowed && "Full multiplication cannot overflow!");
+ Result.setIsSigned(CommonFXSema.isSigned());
+
+ // If our result lies outside of the representative range of the common
+ // semantic, we either have overflow or saturation.
+ llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ if (CommonFXSema.isSaturated()) {
+ if (Result < Min)
+ Result = Min;
+ else if (Result > Max)
+ Result = Max;
+ } else
+ Overflowed = Result < Min || Result > Max;
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
+ CommonFXSema);
+}
+
+APFixedPoint APFixedPoint::div(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ // Widen the LHS and RHS so we can perform a full division.
+ unsigned Wide = CommonFXSema.getWidth() * 2;
+ if (CommonFXSema.isSigned()) {
+ ThisVal = ThisVal.sextOrSelf(Wide);
+ OtherVal = OtherVal.sextOrSelf(Wide);
+ } else {
+ ThisVal = ThisVal.zextOrSelf(Wide);
+ OtherVal = OtherVal.zextOrSelf(Wide);
+ }
+
+ // Upscale to compensate for the loss of precision from division, and
+ // perform the full division.
+ ThisVal = ThisVal.shl(CommonFXSema.getScale());
+ llvm::APSInt Result;
+ if (CommonFXSema.isSigned()) {
+ llvm::APInt Rem;
+ llvm::APInt::sdivrem(ThisVal, OtherVal, Result, Rem);
+ // If the quotient is negative and the remainder is nonzero, round
+ // towards negative infinity by subtracting epsilon from the result.
+ if (ThisVal.isNegative() != OtherVal.isNegative() && !Rem.isNullValue())
+ Result = Result - 1;
+ } else
+ Result = ThisVal.udiv(OtherVal);
+ Result.setIsSigned(CommonFXSema.isSigned());
+
+ // If our result lies outside of the representative range of the common
+ // semantic, we either have overflow or saturation.
+ llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ if (CommonFXSema.isSaturated()) {
+ if (Result < Min)
+ Result = Min;
+ else if (Result > Max)
+ Result = Max;
+ } else
+ Overflowed = Result < Min || Result > Max;
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
+ CommonFXSema);
+}
+
void APFixedPoint::toString(llvm::SmallVectorImpl<char> &Str) const {
llvm::APSInt Val = getValue();
unsigned Scale = getScale();
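APFixedPoint::mul above widens both operands, multiplies in the wider type, and shifts right by the scale before clamping back into range. A worked standalone sketch specialized to a signed Q16.16 format; the saturation/overflow bookkeeping of the real code is reduced to a simple clamp, and the code assumes arithmetic right shift of negative values, which mainstream compilers provide:

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr int Scale = 16;                       // fractional bits (Q16.16)

int32_t toFixed(double D) { return static_cast<int32_t>(D * (1 << Scale)); }
double fromFixed(int32_t F) { return static_cast<double>(F) / (1 << Scale); }

int32_t mulFixed(int32_t A, int32_t B) {
  // Widen to 64 bits so the full product fits, then shift right by the scale;
  // the arithmetic shift gives the same downward rounding as the real code.
  int64_t Wide = static_cast<int64_t>(A) * static_cast<int64_t>(B);
  int64_t Scaled = Wide >> Scale;
  // Clamp back into the 32-bit representable range (saturation).
  Scaled = std::min<int64_t>(std::max<int64_t>(Scaled, INT32_MIN), INT32_MAX);
  return static_cast<int32_t>(Scaled);
}

int main() {
  int32_t X = toFixed(1.5), Y = toFixed(-2.25);
  std::printf("%f\n", fromFixed(mulFixed(X, Y)));   // prints -3.375000
}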
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index ee25bd883caf..36b26d9b7c68 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/FoldingSet.h"
@@ -32,6 +33,12 @@
using namespace clang;
+// A check to make sure the ObjCOrBuiltinID has sufficient room to store the
+// largest possible target/aux-target combination. If we exceed this, we likely
+// need to just change the ObjCOrBuiltinIDBits value in IdentifierTable.h.
+static_assert(2 * LargestBuiltinID < (2 << (ObjCOrBuiltinIDBits - 1)),
+ "Insufficient ObjCOrBuiltinID Bits");
+
//===----------------------------------------------------------------------===//
// IdentifierTable Implementation
//===----------------------------------------------------------------------===//
@@ -97,10 +104,10 @@ namespace {
KEYZVECTOR = 0x40000,
KEYCOROUTINES = 0x80000,
KEYMODULES = 0x100000,
- KEYCXX2A = 0x200000,
+ KEYCXX20 = 0x200000,
KEYOPENCLCXX = 0x400000,
KEYMSCOMPAT = 0x800000,
- KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
+ KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
@@ -122,7 +129,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (Flags == KEYALL) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYCXX)) return KS_Enabled;
if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) return KS_Enabled;
- if (LangOpts.CPlusPlus2a && (Flags & KEYCXX2A)) return KS_Enabled;
+ if (LangOpts.CPlusPlus20 && (Flags & KEYCXX20)) return KS_Enabled;
if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
@@ -142,10 +149,12 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
// We treat bridge casts as objective-C keywords so we can warn on them
// in non-arc mode.
if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
- if (LangOpts.CPlusPlus2a && (Flags & KEYCONCEPTS)) return KS_Enabled;
+ if (LangOpts.CPlusPlus20 && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYALLCXX)) return KS_Future;
+ if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus20 && (Flags & CHAR8SUPPORT))
+ return KS_Future;
return KS_Disabled;
}
@@ -257,7 +266,7 @@ bool IdentifierInfo::isCPlusPlusKeyword(const LangOptions &LangOpts) const {
LangOptions LangOptsNoCPP = LangOpts;
LangOptsNoCPP.CPlusPlus = false;
LangOptsNoCPP.CPlusPlus11 = false;
- LangOptsNoCPP.CPlusPlus2a = false;
+ LangOptsNoCPP.CPlusPlus20 = false;
return !isKeyword(LangOptsNoCPP);
}
@@ -463,7 +472,7 @@ std::string MultiKeywordSelector::getName() const {
OS << ':';
}
- return OS.str();
+ return std::string(OS.str());
}
std::string Selector::getAsString() const {
@@ -476,7 +485,7 @@ std::string Selector::getAsString() const {
if (getNumArgs() == 0) {
assert(II && "If the number of arguments is 0 then II is guaranteed to "
"not be null.");
- return II->getName();
+ return std::string(II->getName());
}
if (!II)
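The static_assert added to IdentifierTable.cpp above is a compile-time capacity check: the largest builtin ID, doubled to leave room for aux-target builtins, must fit in the ObjCOrBuiltinID bit-field. A standalone sketch with stand-in constants instead of LargestBuiltinID and ObjCOrBuiltinIDBits:

#include <cstdint>

constexpr unsigned IDBits = 13;        // stand-in for ObjCOrBuiltinIDBits
constexpr unsigned LargestID = 3000;   // stand-in for LargestBuiltinID

// 2 << (IDBits - 1) equals 2^IDBits, the number of distinct values an
// IDBits-wide field can hold, so this fails to compile if IDs no longer fit.
static_assert(2 * LargestID < (2u << (IDBits - 1)),
              "Insufficient bits for target and aux-target builtin IDs");

struct Identifier {
  std::uint32_t ObjCOrBuiltinID : IDBits;       // the packed field being guarded
  std::uint32_t OtherBits : 32 - IDBits;
};

int main() { return sizeof(Identifier) == sizeof(std::uint32_t) ? 0 : 1; }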
diff --git a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
index 516b1ff1b7e2..c08670c87fb6 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
@@ -24,7 +24,7 @@ void LangOptions::resetNonModularOptions() {
#define LANGOPT(Name, Bits, Default, Description)
#define BENIGN_LANGOPT(Name, Bits, Default, Description) Name = Default;
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- Name = Default;
+ Name = static_cast<unsigned>(Default);
#include "clang/Basic/LangOptions.def"
// These options do not affect AST generation.
@@ -47,3 +47,23 @@ VersionTuple LangOptions::getOpenCLVersionTuple() const {
const int Ver = OpenCLCPlusPlus ? OpenCLCPlusPlusVersion : OpenCLVersion;
return VersionTuple(Ver / 100, (Ver % 100) / 10);
}
+
+FPOptions FPOptions::defaultWithoutTrailingStorage(const LangOptions &LO) {
+ FPOptions result(LO);
+ return result;
+}
+
+LLVM_DUMP_METHOD void FPOptions::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ llvm::errs() << "\n " #NAME " " << get##NAME();
+#include "clang/Basic/FPOptions.def"
+ llvm::errs() << "\n";
+}
+
+LLVM_DUMP_METHOD void FPOptionsOverride::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (has##NAME##Override()) \
+ llvm::errs() << "\n " #NAME " Override is " << get##NAME##Override();
+#include "clang/Basic/FPOptions.def"
+ llvm::errs() << "\n";
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Module.cpp b/contrib/llvm-project/clang/lib/Basic/Module.cpp
index 541431dbbe7d..b3daaa3a4442 100644
--- a/contrib/llvm-project/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Module.cpp
@@ -37,26 +37,21 @@ using namespace clang;
Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
bool IsFramework, bool IsExplicit, unsigned VisibilityID)
: Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
- VisibilityID(VisibilityID), IsMissingRequirement(false),
+ VisibilityID(VisibilityID), IsUnimportable(false),
HasIncompatibleModuleFile(false), IsAvailable(true),
IsFromModuleFile(false), IsFramework(IsFramework), IsExplicit(IsExplicit),
IsSystem(false), IsExternC(false), IsInferred(false),
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
NoUndeclaredIncludes(false), ModuleMapIsPrivate(false),
- NameVisibility(Hidden) {
+ HasUmbrellaDir(false), NameVisibility(Hidden) {
if (Parent) {
- if (!Parent->isAvailable())
- IsAvailable = false;
- if (Parent->IsSystem)
- IsSystem = true;
- if (Parent->IsExternC)
- IsExternC = true;
- if (Parent->NoUndeclaredIncludes)
- NoUndeclaredIncludes = true;
- if (Parent->ModuleMapIsPrivate)
- ModuleMapIsPrivate = true;
- IsMissingRequirement = Parent->IsMissingRequirement;
+ IsAvailable = Parent->isAvailable();
+ IsUnimportable = Parent->isUnimportable();
+ IsSystem = Parent->IsSystem;
+ IsExternC = Parent->IsExternC;
+ NoUndeclaredIncludes = Parent->NoUndeclaredIncludes;
+ ModuleMapIsPrivate = Parent->ModuleMapIsPrivate;
Parent->SubModuleIndex[Name] = Parent->SubModules.size();
Parent->SubModules.push_back(this);
@@ -132,25 +127,42 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
return HasFeature;
}
-bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
- Requirement &Req,
- UnresolvedHeaderDirective &MissingHeader,
- Module *&ShadowingModule) const {
- if (IsAvailable)
- return true;
+bool Module::isUnimportable(const LangOptions &LangOpts,
+ const TargetInfo &Target, Requirement &Req,
+ Module *&ShadowingModule) const {
+ if (!IsUnimportable)
+ return false;
for (const Module *Current = this; Current; Current = Current->Parent) {
if (Current->ShadowingModule) {
ShadowingModule = Current->ShadowingModule;
- return false;
+ return true;
}
for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
Current->Requirements[I].second) {
Req = Current->Requirements[I];
- return false;
+ return true;
}
}
+ }
+
+ llvm_unreachable("could not find a reason why module is unimportable");
+}
+
+bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ Requirement &Req,
+ UnresolvedHeaderDirective &MissingHeader,
+ Module *&ShadowingModule) const {
+ if (IsAvailable)
+ return true;
+
+ if (isUnimportable(LangOpts, Target, Req, ShadowingModule))
+ return false;
+
+ // FIXME: All missing headers are listed on the top-level module. Should we
+ // just look there?
+ for (const Module *Current = this; Current; Current = Current->Parent) {
if (!Current->MissingHeaders.empty()) {
MissingHeader = Current->MissingHeaders.front();
return false;
@@ -239,7 +251,12 @@ Module::DirectoryName Module::getUmbrellaDir() const {
if (Header U = getUmbrellaHeader())
return {"", U.Entry->getDir()};
- return {UmbrellaAsWritten, Umbrella.dyn_cast<const DirectoryEntry *>()};
+ return {UmbrellaAsWritten, static_cast<const DirectoryEntry *>(Umbrella)};
+}
+
+void Module::addTopHeader(const FileEntry *File) {
+ assert(File);
+ TopHeaders.insert(File);
}
ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
@@ -276,18 +293,18 @@ bool Module::directlyUses(const Module *Requested) const {
void Module::addRequirement(StringRef Feature, bool RequiredState,
const LangOptions &LangOpts,
const TargetInfo &Target) {
- Requirements.push_back(Requirement(Feature, RequiredState));
+ Requirements.push_back(Requirement(std::string(Feature), RequiredState));
// If this feature is currently available, we're done.
if (hasFeature(Feature, LangOpts, Target) == RequiredState)
return;
- markUnavailable(/*MissingRequirement*/true);
+ markUnavailable(/*Unimportable*/true);
}
-void Module::markUnavailable(bool MissingRequirement) {
- auto needUpdate = [MissingRequirement](Module *M) {
- return M->IsAvailable || (!M->IsMissingRequirement && MissingRequirement);
+void Module::markUnavailable(bool Unimportable) {
+ auto needUpdate = [Unimportable](Module *M) {
+ return M->IsAvailable || (!M->IsUnimportable && Unimportable);
};
if (!needUpdate(this))
@@ -303,7 +320,7 @@ void Module::markUnavailable(bool MissingRequirement) {
continue;
Current->IsAvailable = false;
- Current->IsMissingRequirement |= MissingRequirement;
+ Current->IsUnimportable |= Unimportable;
for (submodule_iterator Sub = Current->submodule_begin(),
SubEnd = Current->submodule_end();
Sub != SubEnd; ++Sub) {
@@ -637,8 +654,8 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
SmallVector<Module *, 16> Exports;
V.M->getExportedModules(Exports);
for (Module *E : Exports) {
- // Don't recurse to unavailable submodules.
- if (E->isAvailable())
+ // Don't import non-importable modules.
+ if (!E->isUnimportable())
VisitModule({E, &V});
}
@@ -653,3 +670,18 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
};
VisitModule({M, nullptr});
}
+
+ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
+ : Signature(M.Signature), ClangModule(&M) {
+ if (M.Directory)
+ Path = M.Directory->getName();
+ if (auto *File = M.getASTFile())
+ ASTFile = File->getName();
+}
+
+std::string ASTSourceDescriptor::getModuleName() const {
+ if (ClangModule)
+ return ClangModule->Name;
+ else
+ return std::string(PCHModuleName);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index 414ebb52c0c7..cae61ad4f2e3 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -20,94 +20,14 @@
using namespace clang;
using namespace llvm::omp;
-OpenMPContextSelectorSetKind
-clang::getOpenMPContextSelectorSet(llvm::StringRef Str) {
- return llvm::StringSwitch<OpenMPContextSelectorSetKind>(Str)
-#define OPENMP_CONTEXT_SELECTOR_SET(Name) .Case(#Name, OMP_CTX_SET_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMP_CTX_SET_unknown);
-}
-
-llvm::StringRef
-clang::getOpenMPContextSelectorSetName(OpenMPContextSelectorSetKind Kind) {
- switch (Kind) {
- case OMP_CTX_SET_unknown:
- return "unknown";
-#define OPENMP_CONTEXT_SELECTOR_SET(Name) \
- case OMP_CTX_SET_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- break;
- }
- llvm_unreachable("Invalid OpenMP context selector set kind");
-}
-
-OpenMPContextSelectorKind clang::getOpenMPContextSelector(llvm::StringRef Str) {
- return llvm::StringSwitch<OpenMPContextSelectorKind>(Str)
-#define OPENMP_CONTEXT_SELECTOR(Name) .Case(#Name, OMP_CTX_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMP_CTX_unknown);
-}
-
-llvm::StringRef
-clang::getOpenMPContextSelectorName(OpenMPContextSelectorKind Kind) {
- switch (Kind) {
- case OMP_CTX_unknown:
- return "unknown";
-#define OPENMP_CONTEXT_SELECTOR(Name) \
- case OMP_CTX_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- break;
- }
- llvm_unreachable("Invalid OpenMP context selector kind");
-}
-
-OpenMPClauseKind clang::getOpenMPClauseKind(StringRef Str) {
- // 'flush' clause cannot be specified explicitly, because this is an implicit
- // clause for 'flush' directive. If the 'flush' clause is explicitly specified
- // the Parser should generate a warning about extra tokens at the end of the
- // directive.
- if (Str == "flush")
- return OMPC_unknown;
- return llvm::StringSwitch<OpenMPClauseKind>(Str)
-#define OPENMP_CLAUSE(Name, Class) .Case(#Name, OMPC_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Case("uniform", OMPC_uniform)
- .Case("device_type", OMPC_device_type)
- .Case("match", OMPC_match)
- .Default(OMPC_unknown);
-}
-
-const char *clang::getOpenMPClauseName(OpenMPClauseKind Kind) {
- assert(Kind <= OMPC_unknown);
- switch (Kind) {
- case OMPC_unknown:
- return "unknown";
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- case OMPC_uniform:
- return "uniform";
- case OMPC_threadprivate:
- return "threadprivate or thread local";
- case OMPC_device_type:
- return "device_type";
- case OMPC_match:
- return "match";
- }
- llvm_unreachable("Invalid OpenMP clause kind");
-}
-
unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
StringRef Str) {
switch (Kind) {
case OMPC_default:
- return llvm::StringSwitch<OpenMPDefaultClauseKind>(Str)
-#define OPENMP_DEFAULT_KIND(Name) .Case(#Name, OMPC_DEFAULT_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_DEFAULT_unknown);
+ return llvm::StringSwitch<unsigned>(Str)
+#define OMP_DEFAULT_KIND(Enum, Name) .Case(Name, unsigned(Enum))
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ .Default(unsigned(llvm::omp::OMP_DEFAULT_unknown));
case OMPC_proc_bind:
return llvm::StringSwitch<unsigned>(Str)
#define OMP_PROC_BIND_KIND(Enum, Name, Value) .Case(Name, Value)
@@ -180,6 +100,26 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#define OPENMP_LASTPRIVATE_KIND(Name) .Case(#Name, OMPC_LASTPRIVATE_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_LASTPRIVATE_unknown);
+ case OMPC_order:
+ return llvm::StringSwitch<OpenMPOrderClauseKind>(Str)
+#define OPENMP_ORDER_KIND(Name) .Case(#Name, OMPC_ORDER_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_ORDER_unknown);
+ case OMPC_update:
+ return llvm::StringSwitch<OpenMPDependClauseKind>(Str)
+#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEPEND_unknown);
+ case OMPC_device:
+ return llvm::StringSwitch<OpenMPDeviceClauseModifier>(Str)
+#define OPENMP_DEVICE_MODIFIER(Name) .Case(#Name, OMPC_DEVICE_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEVICE_unknown);
+ case OMPC_reduction:
+ return llvm::StringSwitch<OpenMPReductionClauseModifier>(Str)
+#define OPENMP_REDUCTION_MODIFIER(Name) .Case(#Name, OMPC_REDUCTION_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_REDUCTION_unknown);
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -193,7 +133,6 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_private:
case OMPC_firstprivate:
case OMPC_shared:
- case OMPC_reduction:
case OMPC_task_reduction:
case OMPC_in_reduction:
case OMPC_aligned:
@@ -204,12 +143,15 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
- case OMPC_device:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_num_teams:
@@ -221,6 +163,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -228,6 +171,14 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -237,13 +188,11 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
unsigned Type) {
switch (Kind) {
case OMPC_default:
- switch (Type) {
- case OMPC_DEFAULT_unknown:
- return "unknown";
-#define OPENMP_DEFAULT_KIND(Name) \
- case OMPC_DEFAULT_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
+ switch (llvm::omp::DefaultKind(Type)) {
+#define OMP_DEFAULT_KIND(Enum, Name) \
+ case Enum: \
+ return Name;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'default' clause type");
case OMPC_proc_bind:
@@ -382,6 +331,46 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'lastprivate' clause type");
+ case OMPC_order:
+ switch (Type) {
+ case OMPC_ORDER_unknown:
+ return "unknown";
+#define OPENMP_ORDER_KIND(Name) \
+ case OMPC_ORDER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'order' clause type");
+ case OMPC_update:
+ switch (Type) {
+ case OMPC_DEPEND_unknown:
+ return "unknown";
+#define OPENMP_DEPEND_KIND(Name) \
+ case OMPC_DEPEND_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_device:
+ switch (Type) {
+ case OMPC_DEVICE_unknown:
+ return "unknown";
+#define OPENMP_DEVICE_MODIFIER(Name) \
+ case OMPC_DEVICE_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'device' clause modifier");
+ case OMPC_reduction:
+ switch (Type) {
+ case OMPC_REDUCTION_unknown:
+ return "unknown";
+#define OPENMP_REDUCTION_MODIFIER(Name) \
+ case OMPC_REDUCTION_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'reduction' clause modifier");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -395,7 +384,6 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_private:
case OMPC_firstprivate:
case OMPC_shared:
- case OMPC_reduction:
case OMPC_task_reduction:
case OMPC_in_reduction:
case OMPC_aligned:
@@ -406,12 +394,15 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
- case OMPC_device:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_num_teams:
@@ -423,6 +414,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -430,536 +422,17 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
break;
- }
- llvm_unreachable("Invalid OpenMP simple clause kind");
-}
-
-bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
- OpenMPClauseKind CKind,
- unsigned OpenMPVersion) {
- assert(unsigned(DKind) <= unsigned(OMPD_unknown));
- assert(CKind <= OMPC_unknown);
- // Nontemporal clause is not supported in OpenMP < 5.0.
- if (OpenMPVersion < 50 && CKind == OMPC_nontemporal)
- return false;
- switch (DKind) {
- case OMPD_parallel:
- switch (CKind) {
-#define OPENMP_PARALLEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_for:
- switch (CKind) {
-#define OPENMP_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_for_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_sections:
- switch (CKind) {
-#define OPENMP_SECTIONS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_single:
- switch (CKind) {
-#define OPENMP_SINGLE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_for:
- switch (CKind) {
-#define OPENMP_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_sections:
- switch (CKind) {
-#define OPENMP_PARALLEL_SECTIONS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_task:
- switch (CKind) {
-#define OPENMP_TASK_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_flush:
- return CKind == OMPC_flush;
- break;
- case OMPD_atomic:
- switch (CKind) {
-#define OPENMP_ATOMIC_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target:
- switch (CKind) {
-#define OPENMP_TARGET_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_requires:
- switch (CKind) {
-#define OPENMP_REQUIRES_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_data:
- switch (CKind) {
-#define OPENMP_TARGET_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_enter_data:
- switch (CKind) {
-#define OPENMP_TARGET_ENTER_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_exit_data:
- switch (CKind) {
-#define OPENMP_TARGET_EXIT_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel_for:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_update:
- switch (CKind) {
-#define OPENMP_TARGET_UPDATE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams:
- switch (CKind) {
-#define OPENMP_TEAMS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_cancel:
- switch (CKind) {
-#define OPENMP_CANCEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_ordered:
- switch (CKind) {
-#define OPENMP_ORDERED_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskloop:
- switch (CKind) {
-#define OPENMP_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskloop_simd:
- switch (CKind) {
-#define OPENMP_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_master_taskloop:
- switch (CKind) {
-#define OPENMP_MASTER_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_master_taskloop_simd:
- switch (CKind) {
-#define OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master_taskloop:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master_taskloop_simd:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_critical:
- switch (CKind) {
-#define OPENMP_CRITICAL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_simd:
- switch (CKind) {
-#define OPENMP_TARGET_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_simd:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskgroup:
- switch (CKind) {
-#define OPENMP_TASKGROUP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_mapper:
- switch (CKind) {
-#define OPENMP_DECLARE_MAPPER_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_allocate:
- switch (CKind) {
-#define OPENMP_ALLOCATE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_variant:
- switch (CKind) {
-#define OPENMP_DECLARE_VARIANT_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_unknown:
- case OMPD_threadprivate:
- case OMPD_section:
- case OMPD_master:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_declare_reduction:
- case OMPD_declare_simd:
+ default:
break;
}
- return false;
+ llvm_unreachable("Invalid OpenMP simple clause kind");
}
bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
@@ -1111,7 +584,7 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
void clang::getOpenMPCaptureRegions(
SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
OpenMPDirectiveKind DKind) {
- assert(DKind <= OMPD_unknown);
+ assert(unsigned(DKind) < llvm::omp::Directive_enumSize);
switch (DKind) {
case OMPD_parallel:
case OMPD_parallel_for:
@@ -1199,6 +672,8 @@ void clang::getOpenMPCaptureRegions(
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
@@ -1206,8 +681,11 @@ void clang::getOpenMPCaptureRegions(
case OMPD_end_declare_target:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
}
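
The rewritten clause-parsing code above keeps the StringSwitch-plus-X-macro shape while moving the 'default' kind table from clang/Basic/OpenMPKinds.def to the shared llvm/Frontend/OpenMP/OMPKinds.def. A minimal sketch of what such a .Case chain expands to, using made-up enumerators and strings rather than the real OMPKinds.def entries:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum ExampleKind { EK_none, EK_shared, EK_unknown };

    // Each OMP_*_KIND(Enum, Name) entry in the .def file becomes one .Case.
    static unsigned parseExampleKind(llvm::StringRef Str) {
      return llvm::StringSwitch<unsigned>(Str)
          .Case("none", unsigned(EK_none))     // e.g. OMP_EXAMPLE_KIND(EK_none, "none")
          .Case("shared", unsigned(EK_shared)) // e.g. OMP_EXAMPLE_KIND(EK_shared, "shared")
          .Default(unsigned(EK_unknown));
    }

In the real code the .Case list is produced by #including the .def file between the StringSwitch constructor and the .Default call, which is why each macro definition sits immediately before its #include in the hunks above.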
diff --git a/contrib/llvm-project/clang/lib/Basic/SanitizerBlacklist.cpp b/contrib/llvm-project/clang/lib/Basic/SanitizerBlacklist.cpp
index 4f71349350fd..feb7cbda39b7 100644
--- a/contrib/llvm-project/clang/lib/Basic/SanitizerBlacklist.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SanitizerBlacklist.cpp
@@ -10,7 +10,12 @@
// sanitizers.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/SanitizerBlacklist.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SanitizerSpecialCaseList.h"
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/SourceManager.h"
using namespace clang;
@@ -20,6 +25,8 @@ SanitizerBlacklist::SanitizerBlacklist(
BlacklistPaths, SM.getFileManager().getVirtualFileSystem())),
SM(SM) {}
+SanitizerBlacklist::~SanitizerBlacklist() = default;
+
bool SanitizerBlacklist::isBlacklistedGlobal(SanitizerMask Mask,
StringRef GlobalName,
StringRef Category) const {
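
The out-of-line `SanitizerBlacklist::~SanitizerBlacklist() = default;` added above is most likely needed because the class holds std::unique_ptr members whose pointee type is only forward-declared in the header (an inference from the headers newly included in the .cpp, not something the patch states). A generic sketch of that idiom, with illustrative names rather than clang's:

    // widget.h -- Impl is only forward-declared, so the destructor cannot be
    // generated inline without a complete type.
    #include <memory>
    class Impl;
    class Widget {
      std::unique_ptr<Impl> P;
    public:
      Widget();
      ~Widget();                     // declared here, defined in widget.cpp
    };

    // widget.cpp
    // #include "impl.h"             // Impl is complete from here on
    // Widget::~Widget() = default;  // unique_ptr<Impl> can now be destroyed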
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
index 73f2ae96d4a3..0a76c78cd44f 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
@@ -17,12 +17,12 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Compiler.h"
@@ -389,6 +389,14 @@ void SourceManager::clearIDTables() {
createExpansionLoc(SourceLocation(), SourceLocation(), SourceLocation(), 1);
}
+bool SourceManager::isMainFile(FileEntryRef SourceFile) {
+ assert(MainFileID.isValid() && "expected initialized SourceManager");
+ auto FE = getFileEntryRefForID(MainFileID);
+ if (!FE)
+ return false;
+ return FE->getUID() == SourceFile.getUID();
+}
+
void SourceManager::initializeForReplay(const SourceManager &Old) {
assert(MainFileID.isInvalid() && "expected uninitialized SourceManager");
@@ -560,6 +568,70 @@ FileID SourceManager::getNextFileID(FileID FID) const {
// Methods to create new FileID's and macro expansions.
//===----------------------------------------------------------------------===//
+/// Create a new FileID that represents the specified file
+/// being \#included from the specified IncludePosition.
+///
+/// This translates NULL into standard input.
+FileID SourceManager::createFileID(const FileEntry *SourceFile,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ assert(SourceFile && "Null source file!");
+ const SrcMgr::ContentCache *IR =
+ getOrCreateContentCache(SourceFile, isSystem(FileCharacter));
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return createFileID(IR, SourceFile->getName(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
+}
+
+FileID SourceManager::createFileID(FileEntryRef SourceFile,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(
+ &SourceFile.getFileEntry(), isSystem(FileCharacter));
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return createFileID(IR, SourceFile.getName(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
+}
+
+/// Create a new FileID that represents the specified memory buffer.
+///
+/// This does no caching of the buffer and takes ownership of the
+/// MemoryBuffer, so only pass a MemoryBuffer to this once.
+FileID SourceManager::createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset,
+ SourceLocation IncludeLoc) {
+ StringRef Name = Buffer->getBufferIdentifier();
+ return createFileID(
+ createMemBufferContentCache(Buffer.release(), /*DoNotFree*/ false),
+ Name, IncludeLoc, FileCharacter, LoadedID, LoadedOffset);
+}
+
+/// Create a new FileID that represents the specified memory buffer.
+///
+/// This does not take ownership of the MemoryBuffer. The memory buffer must
+/// outlive the SourceManager.
+FileID SourceManager::createFileID(UnownedTag, const llvm::MemoryBuffer *Buffer,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset,
+ SourceLocation IncludeLoc) {
+ return createFileID(createMemBufferContentCache(Buffer, /*DoNotFree*/ true),
+ Buffer->getBufferIdentifier(), IncludeLoc,
+ FileCharacter, LoadedID, LoadedOffset);
+}
+
+/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
+/// new FileID for the \p SourceFile.
+FileID
+SourceManager::getOrCreateFileID(const FileEntry *SourceFile,
+ SrcMgr::CharacteristicKind FileCharacter) {
+ FileID ID = translateFile(SourceFile);
+ return ID.isValid() ? ID : createFileID(SourceFile, SourceLocation(),
+ FileCharacter);
+}
+
/// createFileID - Create a new FileID for the specified ContentCache and
/// include position. This works regardless of whether the ContentCache
/// corresponds to a file or some other input source.
@@ -577,13 +649,15 @@ FileID SourceManager::createFileID(const ContentCache *File, StringRef Filename,
SLocEntryLoaded[Index] = true;
return FileID::get(LoadedID);
}
+ unsigned FileSize = File->getSize();
+ if (!(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
+ NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset)) {
+ Diag.Report(IncludePos, diag::err_include_too_large);
+ return FileID();
+ }
LocalSLocEntryTable.push_back(
SLocEntry::get(NextLocalOffset,
FileInfo::get(IncludePos, File, FileCharacter, Filename)));
- unsigned FileSize = File->getSize();
- assert(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
- NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset &&
- "Ran out of source locations!");
// We do a +1 here because we want a SourceLocation that means "the end of the
// file", e.g. for the "no newline at the end of the file" diagnostic.
NextLocalOffset += FileSize + 1;
@@ -699,6 +773,18 @@ void SourceManager::setFileIsTransient(const FileEntry *File) {
const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
}
+Optional<FileEntryRef> SourceManager::getFileEntryRefForID(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return None;
+
+ const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
+ if (!Content || !Content->OrigEntry)
+ return None;
+ return FileEntryRef(Entry.getFile().getName(), *Content->OrigEntry);
+}
+
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
bool MyInvalid = false;
const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
@@ -775,11 +861,8 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
--I;
if (I->getOffset() <= SLocOffset) {
FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));
-
- // If this isn't an expansion, remember it. We have good locality across
- // FileID lookups.
- if (!I->isExpansion())
- LastFileIDLookup = Res;
+ // Remember it. We have good locality across FileID lookups.
+ LastFileIDLookup = Res;
NumLinearScans += NumProbes+1;
return Res;
}
@@ -796,11 +879,8 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
unsigned LessIndex = 0;
NumProbes = 0;
while (true) {
- bool Invalid = false;
unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
- unsigned MidOffset = getLocalSLocEntry(MiddleIndex, &Invalid).getOffset();
- if (Invalid)
- return FileID::get(0);
+ unsigned MidOffset = getLocalSLocEntry(MiddleIndex).getOffset();
++NumProbes;
@@ -812,15 +892,12 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
}
// If the middle index contains the value, succeed and return.
- // FIXME: This could be made faster by using a function that's aware of
- // being in the local area.
- if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
+ if (MiddleIndex + 1 == LocalSLocEntryTable.size() ||
+ SLocOffset < getLocalSLocEntry(MiddleIndex + 1).getOffset()) {
FileID Res = FileID::get(MiddleIndex);
- // If this isn't a macro expansion, remember it. We have good locality
- // across FileID lookups.
- if (!LocalSLocEntryTable[MiddleIndex].isExpansion())
- LastFileIDLookup = Res;
+ // Remember it. We have good locality across FileID lookups.
+ LastFileIDLookup = Res;
NumBinaryProbes += NumProbes;
return Res;
}
@@ -858,9 +935,7 @@ FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
if (E.getOffset() <= SLocOffset) {
FileID Res = FileID::get(-int(I) - 2);
-
- if (!E.isExpansion())
- LastFileIDLookup = Res;
+ LastFileIDLookup = Res;
NumLinearScans += NumProbes + 1;
return Res;
}
@@ -893,8 +968,7 @@ FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
FileID Res = FileID::get(-int(MiddleIndex) - 2);
- if (!E.isExpansion())
- LastFileIDLookup = Res;
+ LastFileIDLookup = Res;
NumBinaryProbes += NumProbes;
return Res;
}
@@ -990,6 +1064,13 @@ SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
return Loc.getLocWithOffset(LocInfo.second);
}
+/// Return the filename of the file containing a SourceLocation.
+StringRef SourceManager::getFilename(SourceLocation SpellingLoc) const {
+ if (const FileEntry *F = getFileEntryForID(getFileID(SpellingLoc)))
+ return F->getName();
+ return StringRef();
+}
+
/// getImmediateExpansionRange - Loc is required to be an expansion location.
/// Return the start/end of the expansion information.
CharSourceRange
@@ -1602,11 +1683,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
// The location we're looking for isn't in the main file; look
// through all of the local source locations.
for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
- bool Invalid = false;
- const SLocEntry &SLoc = getLocalSLocEntry(I, &Invalid);
- if (Invalid)
- return FileID();
-
+ const SLocEntry &SLoc = getLocalSLocEntry(I);
if (SLoc.isFile() && SLoc.getFile().getContentCache() &&
SLoc.getFile().getContentCache()->OrigEntry == SourceFile)
return FileID::get(I);
@@ -1715,15 +1792,23 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap &MacroArgsCache,
return;
if (Entry.isFile()) {
SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
- if (IncludeLoc.isInvalid())
+ bool IncludedInFID =
+ (IncludeLoc.isValid() && isInFileID(IncludeLoc, FID)) ||
+ // The predefined header doesn't have a valid include location in the main
+ // file, but any files created by it should still be skipped when
+ // computing macro args expanded in the main file.
+ (FID == MainFileID && Entry.getFile().Filename == "<built-in>");
+ if (IncludedInFID) {
+ // Skip the files/macros of the #include'd file, we only care about
+ // macros that lexed macro arguments from our file.
+ if (Entry.getFile().NumCreatedFIDs)
+ ID += Entry.getFile().NumCreatedFIDs - 1 /*because of next ++ID*/;
continue;
- if (!isInFileID(IncludeLoc, FID))
- return; // No more files/macros that may be "contained" in this file.
-
- // Skip the files/macros of the #include'd file, we only care about macros
- // that lexed macro arguments from our file.
- if (Entry.getFile().NumCreatedFIDs)
- ID += Entry.getFile().NumCreatedFIDs - 1/*because of next ++ID*/;
+ } else if (IncludeLoc.isValid()) {
+ // If the file was included, but not from FID, there are no more files/macros
+ // that may be "contained" in this file.
+ return;
+ }
continue;
}
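
The block above adds SourceManager::createFileID overloads for FileEntry*, FileEntryRef, and both owned and unowned MemoryBuffers, plus getOrCreateFileID. A hypothetical caller of the owning-buffer overload (SM is an already-constructed SourceManager; the trailing LoadedID/LoadedOffset/IncludeLoc parameters are assumed to keep their usual defaults):

    #include "clang/Basic/SourceManager.h"
    #include "llvm/Support/MemoryBuffer.h"

    static clang::FileID addVirtualFile(clang::SourceManager &SM) {
      std::unique_ptr<llvm::MemoryBuffer> Buf =
          llvm::MemoryBuffer::getMemBufferCopy("int x;\n", "<virtual>");
      // Ownership of Buf moves into the SourceManager; the buffer is registered
      // as an ordinary user file and gets its own FileID.
      return SM.createFileID(std::move(Buf), clang::SrcMgr::C_User);
    }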
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 3a21a19e1f19..eccdc21d724a 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -36,6 +36,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
HasLegalHalfType = false;
HasFloat128 = false;
HasFloat16 = false;
+ HasBFloat16 = false;
+ HasStrictFP = false;
PointerWidth = PointerAlign = 32;
BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
@@ -113,6 +115,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
HasBuiltinMSVaList = false;
IsRenderScriptTarget = false;
HasAArch64SVETypes = false;
+ ARMCDECoprocMask = 0;
// Default to no types using fpret.
RealTypeUsesObjCFPRet = 0;
@@ -132,6 +135,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// Default to an unknown platform name.
PlatformName = "unknown";
PlatformMinVersion = VersionTuple();
+
+ MaxOpenCLWorkGroupSize = 1024;
}
// Out of line virtual dtor for TargetInfo.
@@ -262,7 +267,8 @@ TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth,
return NoInt;
}
-TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
+TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth,
+ bool ExplicitIEEE) const {
if (getFloatWidth() == BitWidth)
return Float;
if (getDoubleWidth() == BitWidth)
@@ -274,6 +280,10 @@ TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
return LongDouble;
break;
case 128:
+ // The caller explicitly asked for an IEEE-compliant type but we still
+ // have to check if the target supports it.
+ if (ExplicitIEEE)
+ return hasFloat128Type() ? Float128 : NoFloat;
if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble() ||
&getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
return LongDouble;
@@ -379,6 +389,20 @@ void TargetInfo::adjust(LangOptions &Opts) {
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
+ if (Opts.DoubleSize) {
+ if (Opts.DoubleSize == 32) {
+ DoubleWidth = 32;
+ LongDoubleWidth = 32;
+ DoubleFormat = &llvm::APFloat::IEEEsingle();
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+ } else if (Opts.DoubleSize == 64) {
+ DoubleWidth = 64;
+ LongDoubleWidth = 64;
+ DoubleFormat = &llvm::APFloat::IEEEdouble();
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+ }
+
if (Opts.LongDoubleSize) {
if (Opts.LongDoubleSize == DoubleWidth) {
LongDoubleWidth = DoubleWidth;
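
The new Opts.DoubleSize handling above narrows both double and long double to the requested format. Illustrative only: under a 32-bit double configuration (for example via a target-specific driver option such as AVR's -mdouble=32, an assumption about the spelling rather than part of this patch), user code would observe:

    static_assert(sizeof(double) == sizeof(float),
                  "double now uses the IEEEsingle format");
    static_assert(sizeof(long double) == sizeof(float),
                  "long double follows double in this mode");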
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
index c063f8ca4472..6bbcafa27dfe 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
@@ -33,6 +33,7 @@
#include "Targets/Sparc.h"
#include "Targets/SystemZ.h"
#include "Targets/TCE.h"
+#include "Targets/VE.h"
#include "Targets/WebAssembly.h"
#include "Targets/X86.h"
#include "Targets/XCore.h"
@@ -117,6 +118,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new XCoreTargetInfo(Triple, Opts);
case llvm::Triple::hexagon:
+ if (os == llvm::Triple::Linux &&
+ Triple.getEnvironment() == llvm::Triple::Musl)
+ return new LinuxTargetInfo<HexagonTargetInfo>(Triple, Opts);
return new HexagonTargetInfo(Triple, Opts);
case llvm::Triple::lanai:
@@ -477,6 +481,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new OpenBSDI386TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Minix:
@@ -608,6 +614,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
case llvm::Triple::renderscript64:
return new LinuxTargetInfo<RenderScript64TargetInfo>(Triple, Opts);
+
+ case llvm::Triple::ve:
+ return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
}
}
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index cba3e3ada7ea..25c02cb888c1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
@@ -28,6 +29,10 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#include "clang/Basic/BuiltinsSVE.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
{#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
@@ -65,6 +70,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+
// Make __builtin_ms_va_list available.
HasBuiltinMSVaList = true;
@@ -117,15 +125,15 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
return false;
BPI.SignReturnAddr =
- llvm::StringSwitch<CodeGenOptions::SignReturnAddressScope>(PBP.Scope)
- .Case("non-leaf", CodeGenOptions::SignReturnAddressScope::NonLeaf)
- .Case("all", CodeGenOptions::SignReturnAddressScope::All)
- .Default(CodeGenOptions::SignReturnAddressScope::None);
+ llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
+ .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
+ .Case("all", LangOptions::SignReturnAddressScopeKind::All)
+ .Default(LangOptions::SignReturnAddressScopeKind::None);
if (PBP.Key == "a_key")
- BPI.SignKey = CodeGenOptions::SignReturnAddressKeyValue::AKey;
+ BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
else
- BPI.SignKey = CodeGenOptions::SignReturnAddressKeyValue::BKey;
+ BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
return true;
@@ -147,6 +155,7 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ // FIXME: Armv8.1 makes __ARM_FEATURE_CRC32 mandatory. Handle it here.
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
@@ -167,17 +176,26 @@ void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the Armv8.3 defines
- // FIXME: Armv8.4 makes some extensions mandatory. Handle them here.
+ // FIXME: Armv8.4 makes __ARM_FEATURE_ATOMICS, defined in GCC, mandatory.
+ // Add and handle it here.
getTargetDefinesARMV83A(Opts, Builder);
}
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the Armv8.4 defines
- // FIXME: Armv8.5 makes some extensions mandatory. Handle them here.
getTargetDefinesARMV84A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.5 defines
+ // FIXME: Armv8.6 makes the following extensions mandatory:
+ // - __ARM_FEATURE_BF16
+ // - __ARM_FEATURE_MATMUL_INT8
+ // Handle them here.
+ getTargetDefinesARMV85A(Opts, Builder);
+}
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -194,6 +212,13 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__LP64__");
}
+ std::string CodeModel = getTargetOpts().CodeModel;
+ if (CodeModel == "default")
+ CodeModel = "small";
+ for (char &c : CodeModel)
+ c = toupper(c);
+ Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
+
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
Builder.defineMacro("__ARM_ARCH", "8");
@@ -235,6 +260,24 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_NEON_FP", "0xE");
}
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+
+ if (HasSVE2)
+ Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
+
+ if (HasSVE2 && HasSVE2AES)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
+
+ if (HasSVE2 && HasSVE2BitPerm)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
+
+ if (HasSVE2 && HasSVE2SHA3)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
+
+ if (HasSVE2 && HasSVE2SM4)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
+
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -258,9 +301,53 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasTME)
Builder.defineMacro("__ARM_FEATURE_TME", "1");
+ if (HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+
+ if (HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_BF16", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
+ Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
+ }
+
+ if ((FPU & SveMode) && HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
+ }
+
+ if ((FPU & SveMode) && HasMatmulFP64)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
+
+ if ((FPU & SveMode) && HasMatmulFP32)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
+
+ if ((FPU & SveMode) && HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
+
if ((FPU & NeonMode) && HasFP16FML)
Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
+ if (Opts.hasSignReturnAddress()) {
+ // Bitmask:
+ // 0: Protection using the A key
+ // 1: Protection using the B key
+ // 2: Protection including leaf functions
+ unsigned Value = 0;
+
+ if (Opts.isSignReturnAddressWithAKey())
+ Value |= (1 << 0);
+ else
+ Value |= (1 << 1);
+
+ if (Opts.isSignReturnAddressScopeAll())
+ Value |= (1 << 2);
+
+ Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
+ }
+
+ if (Opts.BranchTargetEnforcement)
+ Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+
switch (ArchKind) {
default:
break;
@@ -279,6 +366,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_5A:
getTargetDefinesARMV85A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_6A:
+ getTargetDefinesARMV86A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -296,7 +386,11 @@ ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
(Feature == "neon" && (FPU & NeonMode)) ||
- (Feature == "sve" && (FPU & SveMode));
+ ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
+ Feature == "sve2-aes" || Feature == "sve2-sha3" ||
+ Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
+ Feature == "i8mm" || Feature == "bf16") &&
+ (FPU & SveMode));
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
@@ -310,13 +404,62 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFP16FML = false;
HasMTE = false;
HasTME = false;
+ HasMatMul = false;
+ HasBFloat16 = false;
+ HasSVE2 = false;
+ HasSVE2AES = false;
+ HasSVE2SHA3 = false;
+ HasSVE2SM4 = false;
+ HasSVE2BitPerm = false;
+ HasMatmulFP64 = false;
+ HasMatmulFP32 = false;
+
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
if (Feature == "+neon")
FPU |= NeonMode;
- if (Feature == "+sve")
+ if (Feature == "+sve") {
FPU |= SveMode;
+ HasFullFP16 = 1;
+ }
+ if (Feature == "+sve2") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ }
+ if (Feature == "+sve2-aes") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2AES = 1;
+ }
+ if (Feature == "+sve2-sha3") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2SHA3 = 1;
+ }
+ if (Feature == "+sve2-sm4") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2SM4 = 1;
+ }
+ if (Feature == "+sve2-bitperm") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2BitPerm = 1;
+ }
+ if (Feature == "+f32mm") {
+ FPU |= SveMode;
+ HasMatmulFP32 = true;
+ }
+ if (Feature == "+f64mm") {
+ FPU |= SveMode;
+ HasMatmulFP64 = true;
+ }
if (Feature == "+crc")
HasCRC = true;
if (Feature == "+crypto")
@@ -333,6 +476,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
if (Feature == "+v8.5a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
+ if (Feature == "+v8.6a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
if (Feature == "+fullfp16")
HasFullFP16 = true;
if (Feature == "+dotprod")
@@ -343,6 +488,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMTE = true;
if (Feature == "+tme")
HasTME = true;
+ if (Feature == "+i8mm")
+ HasMatMul = true;
+ if (Feature == "+bf16")
+ HasBFloat16 = true;
}
setDataLayout();
@@ -479,17 +628,29 @@ bool AArch64TargetInfo::validateAsmConstraint(
Info.setAllowsRegister();
return true;
case 'U':
+ if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
+ // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
+ Info.setAllowsRegister();
+ Name += 2;
+ return true;
+ }
// Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
// Utf: A memory address suitable for ldp/stp in TF mode.
// Usa: An absolute symbolic address.
// Ush: The high part (bits 32:12) of a pc-relative symbolic address.
- llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
+
+ // Better to return an error saying that it's an unrecognised constraint
+ // even if this is a valid constraint in gcc.
+ return false;
case 'z': // Zero register, wzr or xzr
Info.setAllowsRegister();
return true;
case 'x': // Floating point and SIMD registers (V0-V15)
Info.setAllowsRegister();
return true;
+ case 'y': // SVE registers (V0-V7)
+ Info.setAllowsRegister();
+ return true;
}
return false;
}
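
The __ARM_FEATURE_PAC_DEFAULT value defined above is a bitmask: bit 0 for the A key, bit 1 for the B key, bit 2 when leaf functions are also signed. An illustrative consumer of that macro (not part of the patch):

    #if defined(__ARM_FEATURE_PAC_DEFAULT)
    #  if __ARM_FEATURE_PAC_DEFAULT & (1 << 0)
         /* return addresses are signed with the A key */
    #  elif __ARM_FEATURE_PAC_DEFAULT & (1 << 1)
         /* return addresses are signed with the B key */
    #  endif
    #  if __ARM_FEATURE_PAC_DEFAULT & (1 << 2)
         /* leaf functions are signed as well */
    #  endif
    #endif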
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 5e78237743c9..d1982897d84e 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -36,6 +36,14 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasFP16FML;
bool HasMTE;
bool HasTME;
+ bool HasMatMul;
+ bool HasSVE2;
+ bool HasSVE2AES;
+ bool HasSVE2SHA3;
+ bool HasSVE2SM4;
+ bool HasSVE2BitPerm;
+ bool HasMatmulFP64;
+ bool HasMatmulFP32;
llvm::AArch64::ArchKind ArchKind;
@@ -70,6 +78,8 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV86A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -87,6 +97,21 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+ std::string convertConstraint(const char *&Constraint) const override {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Three-character constraint; add "@3" hint for later parsing.
+ R = std::string("@3") + std::string(Constraint, 3);
+ Constraint += 2;
+ break;
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
+ }
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override;
bool
@@ -101,7 +126,10 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override;
+ const char *getBFloat16Mangling() const override { return "u6__bf16"; };
bool hasInt128Type() const override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
index 135ad3f97ce1..db7db8d36d03 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/DataLayout.h"
using namespace clang;
@@ -124,7 +125,36 @@ const char *const AMDGPUTargetInfo::GCCRegNames[] = {
"s113", "s114", "s115", "s116", "s117", "s118", "s119", "s120", "s121",
"s122", "s123", "s124", "s125", "s126", "s127", "exec", "vcc", "scc",
"m0", "flat_scratch", "exec_lo", "exec_hi", "vcc_lo", "vcc_hi",
- "flat_scratch_lo", "flat_scratch_hi"
+ "flat_scratch_lo", "flat_scratch_hi",
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8",
+ "a9", "a10", "a11", "a12", "a13", "a14", "a15", "a16", "a17",
+ "a18", "a19", "a20", "a21", "a22", "a23", "a24", "a25", "a26",
+ "a27", "a28", "a29", "a30", "a31", "a32", "a33", "a34", "a35",
+ "a36", "a37", "a38", "a39", "a40", "a41", "a42", "a43", "a44",
+ "a45", "a46", "a47", "a48", "a49", "a50", "a51", "a52", "a53",
+ "a54", "a55", "a56", "a57", "a58", "a59", "a60", "a61", "a62",
+ "a63", "a64", "a65", "a66", "a67", "a68", "a69", "a70", "a71",
+ "a72", "a73", "a74", "a75", "a76", "a77", "a78", "a79", "a80",
+ "a81", "a82", "a83", "a84", "a85", "a86", "a87", "a88", "a89",
+ "a90", "a91", "a92", "a93", "a94", "a95", "a96", "a97", "a98",
+ "a99", "a100", "a101", "a102", "a103", "a104", "a105", "a106", "a107",
+ "a108", "a109", "a110", "a111", "a112", "a113", "a114", "a115", "a116",
+ "a117", "a118", "a119", "a120", "a121", "a122", "a123", "a124", "a125",
+ "a126", "a127", "a128", "a129", "a130", "a131", "a132", "a133", "a134",
+ "a135", "a136", "a137", "a138", "a139", "a140", "a141", "a142", "a143",
+ "a144", "a145", "a146", "a147", "a148", "a149", "a150", "a151", "a152",
+ "a153", "a154", "a155", "a156", "a157", "a158", "a159", "a160", "a161",
+ "a162", "a163", "a164", "a165", "a166", "a167", "a168", "a169", "a170",
+ "a171", "a172", "a173", "a174", "a175", "a176", "a177", "a178", "a179",
+ "a180", "a181", "a182", "a183", "a184", "a185", "a186", "a187", "a188",
+ "a189", "a190", "a191", "a192", "a193", "a194", "a195", "a196", "a197",
+ "a198", "a199", "a200", "a201", "a202", "a203", "a204", "a205", "a206",
+ "a207", "a208", "a209", "a210", "a211", "a212", "a213", "a214", "a215",
+ "a216", "a217", "a218", "a219", "a220", "a221", "a222", "a223", "a224",
+ "a225", "a226", "a227", "a228", "a229", "a230", "a231", "a232", "a233",
+ "a234", "a235", "a236", "a237", "a238", "a239", "a240", "a241", "a242",
+ "a243", "a244", "a245", "a246", "a247", "a248", "a249", "a250", "a251",
+ "a252", "a253", "a254", "a255"
};
ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
@@ -140,6 +170,22 @@ bool AMDGPUTargetInfo::initFeatureMap(
// XXX - What does the member GPU mean if device name string passed here?
if (isAMDGCN(getTriple())) {
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1030:
+ Features["ci-insts"] = true;
+ Features["dot1-insts"] = true;
+ Features["dot2-insts"] = true;
+ Features["dot5-insts"] = true;
+ Features["dot6-insts"] = true;
+ Features["dl-insts"] = true;
+ Features["flat-address-space"] = true;
+ Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
+ Features["gfx8-insts"] = true;
+ Features["gfx9-insts"] = true;
+ Features["gfx10-insts"] = true;
+ Features["gfx10-3-insts"] = true;
+ Features["s-memrealtime"] = true;
+ break;
case GK_GFX1012:
case GK_GFX1011:
Features["dot1-insts"] = true;
@@ -163,6 +209,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dot4-insts"] = true;
Features["dot5-insts"] = true;
Features["dot6-insts"] = true;
+ Features["mai-insts"] = true;
LLVM_FALLTHROUGH;
case GK_GFX906:
Features["dl-insts"] = true;
@@ -232,27 +279,6 @@ bool AMDGPUTargetInfo::initFeatureMap(
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec);
}
-void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
- TargetOptions &TargetOpts) const {
- bool hasFP32Denormals = false;
- bool hasFP64Denormals = false;
-
- for (auto &I : TargetOpts.FeaturesAsWritten) {
- if (I == "+fp32-denormals" || I == "-fp32-denormals")
- hasFP32Denormals = true;
- if (I == "+fp64-fp16-denormals" || I == "-fp64-fp16-denormals")
- hasFP64Denormals = true;
- }
- if (!hasFP32Denormals)
- TargetOpts.Features.push_back(
- (Twine(hasFastFMAF() && hasFullRateDenormalsF32() && !CGOpts.FlushDenorm
- ? '+' : '-') + Twine("fp32-denormals"))
- .str());
- // Always do not flush fp64 or fp16 denorms.
- if (!hasFP64Denormals && hasFP64())
- TargetOpts.Features.push_back("+fp64-fp16-denormals");
-}
-
void AMDGPUTargetInfo::fillValidCPUList(
SmallVectorImpl<StringRef> &Values) const {
if (isAMDGCN(getTriple()))
@@ -277,6 +303,7 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
: DataLayoutStringR600);
assert(DataLayout->getAllocaAddrSpace() == Private);
+ GridValues = llvm::omp::AMDGPUGpuGridValues;
setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
!isAMDGCN(Triple));
@@ -354,4 +381,17 @@ void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
copyAuxTarget(Aux);
LongDoubleFormat = SaveLongDoubleFormat;
Float128Format = SaveFloat128Format;
+ // For certain builtin types supported on the host target, claim they are
+ // supported so that the host code still compiles during the device-side
+ // compilation.
+ // FIXME: As a side effect, we also accept `__float128` uses in the device
+ // code. To reject these builtin types supported in the host target but not
+ // in the device target, one approach would be a `device_builtin` attribute
+ // so that we could tell the device builtin types from the host ones. That
+ // would also solve the different representations of the same builtin type,
+ // such as `size_t` in the MSVC environment.
+ if (Aux->hasFloat128Type()) {
+ HasFloat128 = true;
+ Float128Format = DoubleFormat;
+ }
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 456cb2ebb8b5..d0394492cad6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -114,11 +114,14 @@ public:
/// Accepted register names: (n, m is unsigned integer, n < m)
/// v
/// s
+ /// a
/// {vn}, {v[n]}
/// {sn}, {s[n]}
+ /// {an}, {a[n]}
/// {S} , where S is a special register name
////{v[n:m]}
/// {s[n:m]}
+ /// {a[n:m]}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
static const ::llvm::StringSet<> SpecialRegs({
@@ -127,7 +130,30 @@ public:
"exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
});
+ switch (*Name) {
+ case 'I':
+ Info.setRequiresImmediate(-16, 64);
+ return true;
+ case 'J':
+ Info.setRequiresImmediate(-32768, 32767);
+ return true;
+ case 'A':
+ case 'B':
+ case 'C':
+ Info.setRequiresImmediate();
+ return true;
+ default:
+ break;
+ }
+
StringRef S(Name);
+
+ if (S == "DA" || S == "DB") {
+ Name++;
+ Info.setRequiresImmediate();
+ return true;
+ }
+
bool HasLeftParen = false;
if (S.front() == '{') {
HasLeftParen = true;
@@ -135,7 +161,7 @@ public:
}
if (S.empty())
return false;
- if (S.front() != 'v' && S.front() != 's') {
+ if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
if (!HasLeftParen)
return false;
auto E = S.find('}');
@@ -153,7 +179,7 @@ public:
if (!HasLeftParen) {
if (!S.empty())
return false;
- // Found s or v.
+ // Found s, v or a.
Info.setAllowsRegister();
Name = S.data() - 1;
return true;
@@ -184,7 +210,8 @@ public:
S = S.drop_front();
if (!S.empty())
return false;
- // Found {vn}, {sn}, {v[n]}, {s[n]}, {v[n:m]}, or {s[n:m]}.
+ // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
+ // or {a[n:m]}.
Info.setAllowsRegister();
Name = S.data() - 1;
return true;
@@ -194,6 +221,12 @@ public:
// the constraint. In practice, it won't be changed unless the
// constraint is longer than one character.
std::string convertConstraint(const char *&Constraint) const override {
+
+ StringRef S(Constraint);
+ if (S == "DA" || S == "DB") {
+ return std::string("^") + std::string(Constraint++, 2);
+ }
+
const char *Begin = Constraint;
TargetInfo::ConstraintInfo Info("", "");
if (validateAsmConstraint(Constraint, Info))
@@ -208,11 +241,10 @@ public:
StringRef CPU,
const std::vector<std::string> &FeatureVec) const override;
- void adjustTargetOptions(const CodeGenOptions &CGOpts,
- TargetOptions &TargetOpts) const override;
-
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+ bool useFP16ConversionIntrinsics() const override { return false; }
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -263,6 +295,7 @@ public:
Opts.support("cl_khr_int64_base_atomics");
Opts.support("cl_khr_int64_extended_atomics");
Opts.support("cl_khr_mipmap_image");
+ Opts.support("cl_khr_mipmap_image_writes");
Opts.support("cl_khr_subgroups");
Opts.support("cl_khr_3d_image_writes");
Opts.support("cl_amd_media_ops");
@@ -348,10 +381,14 @@ public:
// address space has value 0 but in private and local address space has
// value ~0.
uint64_t getNullPointerValue(LangAS AS) const override {
- return AS == LangAS::opencl_local ? ~0 : 0;
+ // FIXME: Also should handle region.
+ return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
+ ? ~0 : 0;
}
void setAuxTarget(const TargetInfo *Aux) override;
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
index c43a39984edb..b314c42be1e9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
@@ -65,6 +65,8 @@ public:
TargetInfo::ConstraintInfo &Info) const override {
return false;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
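
The hasExtIntType() override added above gates Clang's arbitrary-width integer extension, spelled _ExtInt in this generation of the compiler (later renamed _BitInt). A small usage sketch of the feature being enabled, assuming a Clang new enough to accept it on the chosen target:

    typedef unsigned _ExtInt(12) u12;   // exactly 12 value bits
    // _ExtInt operands are not promoted to int, so the addition stays 12-bit
    // and wraps modulo 2^12 in the unsigned case.
    static u12 wrapping_add(u12 a, u12 b) { return a + b; }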
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index be088e81cffe..21cfe0107bbb 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -25,6 +25,9 @@ void ARMTargetInfo::setABIAAPCS() {
IsAAPCS = true;
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+
const llvm::Triple &T = getTriple();
bool IsNetBSD = T.isOSNetBSD();
@@ -74,6 +77,8 @@ void ARMTargetInfo::setABIAPCS(bool IsAAPCS16) {
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
else
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
WCharType = SignedInt;
@@ -107,7 +112,7 @@ void ARMTargetInfo::setArchInfo() {
StringRef ArchName = getTriple().getArchName();
ArchISA = llvm::ARM::parseArchISA(ArchName);
- CPU = llvm::ARM::getDefaultCPU(ArchName);
+ CPU = std::string(llvm::ARM::getDefaultCPU(ArchName));
llvm::ARM::ArchKind AK = llvm::ARM::parseArch(ArchName);
if (AK != llvm::ARM::ArchKind::INVALID)
ArchKind = AK;
@@ -154,6 +159,8 @@ bool ARMTargetInfo::hasMVEFloat() const {
return hasMVE() && (MVE & MVE_FP);
}
+bool ARMTargetInfo::hasCDE() const { return getARMCDECoprocMask() != 0; }
+
bool ARMTargetInfo::isThumb() const {
return ArchISA == llvm::ARM::ISAKind::THUMB;
}
@@ -199,6 +206,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_4A";
case llvm::ARM::ArchKind::ARMV8_5A:
return "8_5A";
+ case llvm::ARM::ArchKind::ARMV8_6A:
+ return "8_6A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -310,7 +319,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
// Maximum alignment for ARM NEON data types should be 64-bits (AAPCS)
// as well the default alignment
- if (IsAAPCS && (Triple.getEnvironment() != llvm::Triple::Android))
+ if (IsAAPCS && !Triple.isAndroid())
DefaultAlignForAttributeAligned = MaxVectorAlign = 64;
// Do force alignment of members that follow zero length bitfields. If
@@ -372,7 +381,7 @@ bool ARMTargetInfo::initFeatureMap(
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
// get default Extension features
- unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
+ uint64_t Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
for (auto Feature : TargetFeatures)
@@ -421,7 +430,10 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
DotProd = 0;
+ HasMatMul = 0;
HasFloat16 = true;
+ ARMCDECoprocMask = 0;
+ HasBFloat16 = false;
// This does not diagnose illegal cases like having both
// "+vfpv2" and "+vfpv3" or having "+neon" and "-fp64".
@@ -480,14 +492,20 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
} else if (Feature == "+dotprod") {
DotProd = true;
} else if (Feature == "+mve") {
- DSP = 1;
MVE |= MVE_INT;
} else if (Feature == "+mve.fp") {
- DSP = 1;
HasLegalHalfType = true;
FPU |= FPARMV8;
MVE |= MVE_INT | MVE_FP;
HW_FP |= HW_FP_SP | HW_FP_HP;
+ } else if (Feature == "+i8mm") {
+ HasMatMul = 1;
+ } else if (Feature.size() == strlen("+cdecp0") && Feature >= "+cdecp0" &&
+ Feature <= "+cdecp7") {
+ unsigned Coproc = Feature.back() - '0';
+ ARMCDECoprocMask |= (1U << Coproc);
+ } else if (Feature == "+bf16") {
+ HasBFloat16 = true;
}
}
@@ -537,6 +555,10 @@ bool ARMTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
+bool ARMTargetInfo::hasBFloat16Type() const {
+ return HasBFloat16 && !SoftFloat;
+}
+
bool ARMTargetInfo::isValidCPUName(StringRef Name) const {
return Name == "generic" ||
llvm::ARM::parseCPUArch(Name) != llvm::ARM::ArchKind::INVALID;
@@ -760,6 +782,12 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_MVE", hasMVEFloat() ? "3" : "1");
}
+ if (hasCDE()) {
+ Builder.defineMacro("__ARM_FEATURE_CDE", "1");
+ Builder.defineMacro("__ARM_FEATURE_CDE_COPROC",
+ "0x" + Twine::utohexstr(getARMCDECoprocMask()));
+ }
+
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
Twine(Opts.WCharSize ? Opts.WCharSize : 4));
@@ -807,6 +835,15 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (DotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if (HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+
+ if (HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_BF16", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
+ Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
+ }
+
switch (ArchKind) {
default:
break;
@@ -819,6 +856,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV8_3A:
case llvm::ARM::ArchKind::ARMV8_4A:
case llvm::ARM::ArchKind::ARMV8_5A:
+ case llvm::ARM::ArchKind::ARMV8_6A:
getTargetDefinesARMV83A(Opts, Builder);
break;
}
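
Editor's note: in the handleTargetFeatures hunk above, the features "+cdecp0" through "+cdecp7" each set one bit of ARMCDECoprocMask, and the resulting mask is later emitted as __ARM_FEATURE_CDE_COPROC. A self-contained sketch of that bitmask construction, using a plain std::string vector in place of clang's feature list:

#include <cstring>
#include <string>
#include <vector>

// Builds the coprocessor bitmask the same way the hunk above does: "+cdecpN"
// with N in 0..7 sets bit N; anything else is ignored.
unsigned cdeCoprocMask(const std::vector<std::string> &Features) {
  unsigned Mask = 0;
  for (const std::string &F : Features) {
    if (F.size() == std::strlen("+cdecp0") && F >= "+cdecp0" &&
        F <= "+cdecp7") {
      unsigned Coproc = F.back() - '0';
      Mask |= (1U << Coproc);
    }
  }
  return Mask;
}

// For example, {"+cdecp0", "+cdecp3"} yields 0x9, which the macro block above
// would surface as __ARM_FEATURE_CDE_COPROC 0x9.
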
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index 9696a4404589..1e80f74d0766 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -75,6 +75,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned DSP : 1;
unsigned Unaligned : 1;
unsigned DotProd : 1;
+ unsigned HasMatMul : 1;
enum {
LDREX_B = (1 << 0), /// byte (8-bit)
@@ -108,6 +109,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
bool supportsThumb2() const;
bool hasMVE() const;
bool hasMVEFloat() const;
+ bool hasCDE() const;
StringRef getCPUAttr() const;
StringRef getCPUProfile() const;
@@ -135,6 +137,8 @@ public:
bool hasFeature(StringRef Feature) const override;
+ bool hasBFloat16Type() const override;
+
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
@@ -180,6 +184,10 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override;
bool hasSjLjLowering() const override;
+
+ bool hasExtIntType() const override { return true; }
+
+ const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
class LLVM_LIBRARY_VISIBILITY ARMleTargetInfo : public ARMTargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
index d865676700b5..bb215b4114ac 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
@@ -300,6 +300,7 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("AVR");
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
+ Builder.defineMacro("__ELF__");
if (!this->CPU.empty()) {
auto It = llvm::find_if(
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
index b2f1831e960e..43e55dfbfb2b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
@@ -35,9 +35,9 @@ public:
Int64Type = SignedLong;
RegParmMax = 5;
if (Triple.getArch() == llvm::Triple::bpfeb) {
- resetDataLayout("E-m:e-p:64:64-i64:64-n32:64-S128");
+ resetDataLayout("E-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
} else {
- resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
}
MaxAtomicPromoteWidth = 64;
MaxAtomicInlineWidth = 64;
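
Editor's note: the only change to the BPF layout strings is the added "i128:128" component. A short sketch, assuming the llvm::DataLayout API available in this import (getABITypeAlignment), showing how that component surfaces as a 16-byte ABI alignment for __int128:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

// Returns the ABI alignment (in bytes) of i128 under the new bpfel layout.
// With "i128:128" present this is 16; before the change, i128 fell back to
// the i64 entry and was only 8-byte aligned.
unsigned bpfInt128AbiAlign() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
  return DL.getABITypeAlignment(llvm::Type::getInt128Ty(Ctx));
}
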
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
index fcb94b93d69d..205601c359d0 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
@@ -24,6 +24,11 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
+ Builder.defineMacro("__ELF__");
+
+ // The macro __HVXDBL__ is deprecated.
+ bool DefineHvxDbl = false;
+
if (CPU == "hexagonv5") {
Builder.defineMacro("__HEXAGON_V5__");
Builder.defineMacro("__HEXAGON_ARCH__", "5");
@@ -37,19 +42,29 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__QDSP6_V55__");
Builder.defineMacro("__QDSP6_ARCH__", "55");
} else if (CPU == "hexagonv60") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V60__");
Builder.defineMacro("__HEXAGON_ARCH__", "60");
Builder.defineMacro("__QDSP6_V60__");
Builder.defineMacro("__QDSP6_ARCH__", "60");
} else if (CPU == "hexagonv62") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V62__");
Builder.defineMacro("__HEXAGON_ARCH__", "62");
} else if (CPU == "hexagonv65") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V65__");
Builder.defineMacro("__HEXAGON_ARCH__", "65");
} else if (CPU == "hexagonv66") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V66__");
Builder.defineMacro("__HEXAGON_ARCH__", "66");
+ } else if (CPU == "hexagonv67") {
+ Builder.defineMacro("__HEXAGON_V67__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "67");
+ } else if (CPU == "hexagonv67t") {
+ Builder.defineMacro("__HEXAGON_V67T__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "67");
}
if (hasFeature("hvx-length64b")) {
@@ -62,14 +77,29 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HVX__");
Builder.defineMacro("__HVX_ARCH__", HVXVersion);
Builder.defineMacro("__HVX_LENGTH__", "128");
- // FIXME: This macro is deprecated.
- Builder.defineMacro("__HVXDBL__");
+ if (DefineHvxDbl)
+ Builder.defineMacro("__HVXDBL__");
+ }
+
+ if (hasFeature("audio")) {
+ Builder.defineMacro("__HEXAGON_AUDIO__");
}
+
+ std::string NumPhySlots = isTinyCore() ? "3" : "4";
+ Builder.defineMacro("__HEXAGON_PHYSICAL_SLOTS__", NumPhySlots);
}
bool HexagonTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
+ if (isTinyCore())
+ Features["audio"] = true;
+
+ StringRef CPUFeature = CPU;
+ CPUFeature.consume_front("hexagon");
+ CPUFeature.consume_back("t");
+ Features[CPUFeature] = true;
+
Features["long-calls"] = false;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
@@ -91,6 +121,8 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
UseLongCalls = true;
else if (F == "-long-calls")
UseLongCalls = false;
+ else if (F == "+audio")
+ HasAudio = true;
}
return true;
}
@@ -125,6 +157,8 @@ const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#include "clang/Basic/BuiltinsHexagon.def"
};
@@ -139,6 +173,7 @@ bool HexagonTargetInfo::hasFeature(StringRef Feature) const {
.Case("hvx-length64b", HasHVX64B)
.Case("hvx-length128b", HasHVX128B)
.Case("long-calls", UseLongCalls)
+ .Case("audio", HasAudio)
.Default(false);
}
@@ -148,9 +183,10 @@ struct CPUSuffix {
};
static constexpr CPUSuffix Suffixes[] = {
- {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
- {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
- {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
+ {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
+ {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
+ {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
+ {{"hexagonv67"}, {"67"}}, {{"hexagonv67t"}, {"67t"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
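
Editor's note: the initFeatureMap hunk above derives a per-CPU feature key by stripping the "hexagon" prefix and the trailing tiny-core "t" from the CPU name. A standalone sketch of that derivation using llvm::StringRef:

#include "llvm/ADT/StringRef.h"
#include <string>

// Mirrors the CPU-name handling in HexagonTargetInfo::initFeatureMap:
// "hexagonv67t" -> "v67", "hexagonv66" -> "v66". The StringRef is taken by
// value, so the caller's string is not modified.
std::string hexagonCpuFeature(llvm::StringRef CPU) {
  CPU.consume_front("hexagon"); // drop the architecture prefix if present
  CPU.consume_back("t");        // drop the tiny-core suffix if present
  return CPU.str();
}
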
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
index 25a78c181580..d6c7da5f1e40 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
@@ -32,6 +32,7 @@ class LLVM_LIBRARY_VISIBILITY HexagonTargetInfo : public TargetInfo {
bool HasHVX = false;
bool HasHVX64B = false;
bool HasHVX128B = false;
+ bool HasAudio = false;
bool UseLongCalls = false;
public:
@@ -56,6 +57,13 @@ public:
LargeArrayAlign = 64;
UseBitFieldTypeAlignment = true;
ZeroLengthBitfieldBoundary = 32;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+
+ // These are the default values anyway, but explicitly make sure
+ // that the size of the boolean type is 8 bits. Bool vectors are used
+ // for modeling predicate registers in HVX, and the bool -> byte
+ // correspondence matches the HVX architecture.
+ BoolWidth = BoolAlign = 8;
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
@@ -96,6 +104,8 @@ public:
DiagnosticsEngine &Diags) override;
BuiltinVaListKind getBuiltinVaListKind() const override {
+ if (getTriple().isMusl())
+ return TargetInfo::HexagonBuiltinVaList;
return TargetInfo::CharPtrBuiltinVaList;
}
@@ -123,6 +133,13 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override {
return RegNo < 2 ? RegNo : -1;
}
+
+ bool isTinyCore() const {
+ // We can write stricter checks later.
+ return CPU.find('t') != std::string::npos;
+ }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
index e119606384c7..9af5427b81c4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
@@ -86,6 +86,8 @@ public:
}
const char *getClobbers() const override { return ""; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
index 620f12d2b8e3..9d42e4d4bb18 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
@@ -64,8 +64,14 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- // No aliases.
- return None;
+ // Make r0 - r3 recognizable by llc (e.g., in a clobber list)
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"r0"}, "pc"},
+ {{"r1"}, "sp"},
+ {{"r2"}, "sr"},
+ {{"r3"}, "cg"},
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
}
bool validateAsmConstraint(const char *&Name,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
index 224ec0783edf..b475c03889a1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
@@ -406,6 +406,7 @@ public:
unsigned getUnwindWordWidth() const override;
bool validateTarget(DiagnosticsEngine &Diags) const override;
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index f69e9d84c701..18c3c8370331 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
using namespace clang;
using namespace clang::targets;
@@ -44,6 +45,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (!Feature.startswith("+ptx"))
continue;
PTXVersion = llvm::StringSwitch<unsigned>(Feature)
+ .Case("+ptx70", 70)
+ .Case("+ptx65", 65)
.Case("+ptx64", 64)
.Case("+ptx63", 63)
.Case("+ptx61", 61)
@@ -60,6 +63,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
TLSSupported = false;
VLASupported = false;
AddrSpaceMap = &NVPTXAddrSpaceMap;
+ GridValues = llvm::omp::NVPTXGpuGridValues;
UseAddrSpaceMapMangling = true;
// Define available target features
@@ -196,6 +200,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::LAST:
break;
case CudaArch::UNKNOWN:
@@ -231,6 +236,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "720";
case CudaArch::SM_75:
return "750";
+ case CudaArch::SM_80:
+ return "800";
}
llvm_unreachable("unhandled CudaArch");
}();
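
Editor's note: the constructor hunk above extends the PTX version selection with "+ptx70" and "+ptx65". A trimmed sketch of the llvm::StringSwitch pattern it uses; the fallback value of 32 is an assumption for illustration, since the real default lies outside this hunk:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Maps a "+ptxNN" target feature to a numeric PTX version, newest cases first,
// in the same style as the constructor above.
unsigned ptxVersionFromFeature(llvm::StringRef Feature) {
  return llvm::StringSwitch<unsigned>(Feature)
      .Case("+ptx70", 70)
      .Case("+ptx65", 65)
      .Case("+ptx64", 64)
      .Case("+ptx63", 63)
      .Default(32); // assumed fallback; not shown in the hunk above
}
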
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
index 63780789c474..f57a0f18efa3 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
@@ -160,6 +160,8 @@ public:
return HostTarget->checkCallingConvention(CC);
return CCCR_Warning;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
index d4ffffc64ba8..15e475a31d64 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
@@ -25,7 +25,7 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("__APPLE_CC__", "6000");
Builder.defineMacro("__APPLE__");
Builder.defineMacro("__STDC_NO_THREADS__");
- Builder.defineMacro("OBJC_NEW_PROPERTIES");
+
// AddressSanitizer doesn't play well with source fortification, which is on
// by default on Darwin.
if (Opts.Sanitize.has(SanitizerKind::Address))
@@ -179,7 +179,7 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus2a)
+ if (Opts.CPlusPlus20)
Builder.defineMacro("_MSVC_LANG", "201705L");
else if (Opts.CPlusPlus17)
Builder.defineMacro("_MSVC_LANG", "201703L");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index 70fac030bc5d..cfa362bef1b1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -87,7 +87,7 @@ protected:
public:
DarwinTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
- // By default, no TLS, and we whitelist permitted architecture/OS
+ // By default, no TLS, and we list permitted architecture/OS
// combinations.
this->TLSSupported = false;
@@ -706,6 +706,8 @@ protected:
public:
AIXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
+ this->TheCXXABI.set(TargetCXXABI::XL);
+
if (this->PointerWidth == 64) {
this->WCharType = this->UnsignedInt;
} else {
@@ -819,7 +821,7 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyOSTargetInfo
: public OSTargetInfo<Target> {
protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const {
+ MacroBuilder &Builder) const override {
// A common platform macro.
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
index ab4abf9fc567..d5bfc369583f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
@@ -68,6 +68,8 @@ public:
}
const char *getClobbers() const override { return ""; }
+
+ bool hasExtIntType() const override { return true; }
};
// We attempt to use PNaCl (le32) frontend and Mips32EL backend.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index 1877d4a5ef70..f0de2bf070ea 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -54,6 +54,10 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFloat128 = true;
} else if (Feature == "+power9-vector") {
HasP9Vector = true;
+ } else if (Feature == "+power10-vector") {
+ HasP10Vector = true;
+ } else if (Feature == "+pcrelative-memops") {
+ HasPCRelativeMemops = true;
} else if (Feature == "+spe") {
HasSPE = true;
LongDoubleWidth = LongDoubleAlign = 64;
@@ -151,6 +155,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_ARCH_PWR8");
if (ArchDefs & ArchDefinePwr9)
Builder.defineMacro("_ARCH_PWR9");
+ if (ArchDefs & ArchDefinePwr10)
+ Builder.defineMacro("_ARCH_PWR10");
if (ArchDefs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
if (ArchDefs & ArchDefineA2q) {
@@ -189,6 +195,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__FLOAT128__");
if (HasP9Vector)
Builder.defineMacro("__POWER9_VECTOR__");
+ if (HasP10Vector)
+ Builder.defineMacro("__POWER10_VECTOR__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -223,38 +231,32 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// - direct-move
// - float128
// - power9-vector
+// - power10-vector
// then go ahead and error since the customer has expressed an incompatible
// set of options.
static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
- if (llvm::find(FeaturesVec, "-vsx") != FeaturesVec.end()) {
- if (llvm::find(FeaturesVec, "+power8-vector") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector"
- << "-mno-vsx";
- return false;
- }
-
- if (llvm::find(FeaturesVec, "+direct-move") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move"
- << "-mno-vsx";
- return false;
- }
+ // vsx was not explicitly turned off.
+ if (llvm::find(FeaturesVec, "-vsx") == FeaturesVec.end())
+ return true;
- if (llvm::find(FeaturesVec, "+float128") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128"
- << "-mno-vsx";
- return false;
+ auto FindVSXSubfeature = [&](StringRef Feature, StringRef Option) {
+ if (llvm::find(FeaturesVec, Feature) != FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << Option << "-mno-vsx";
+ return true;
}
+ return false;
+ };
- if (llvm::find(FeaturesVec, "+power9-vector") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower9-vector"
- << "-mno-vsx";
- return false;
- }
- }
+ bool Found = FindVSXSubfeature("+power8-vector", "-mpower8-vector");
+ Found |= FindVSXSubfeature("+direct-move", "-mdirect-move");
+ Found |= FindVSXSubfeature("+float128", "-mfloat128");
+ Found |= FindVSXSubfeature("+power9-vector", "-mpower9-vector");
+ Found |= FindVSXSubfeature("+power10-vector", "-mpower10-vector");
- return true;
+ // Return false if any VSX subfeature was found.
+ return !Found;
}
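
Editor's note: the rewrite above folds four copies of the same diagnostic into one lambda and adds the power10-vector case. A self-contained sketch of the same control flow, with a plain stderr message standing in for clang's DiagnosticsEngine:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Standalone sketch of the refactored ppcUserFeaturesCheck: if "-vsx" is not
// present the check trivially passes; otherwise every VSX-dependent
// subfeature that was explicitly enabled is reported, and the function
// returns false when any was found.
bool checkVsxSubfeatures(const std::vector<std::string> &FeaturesVec) {
  if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "-vsx") ==
      FeaturesVec.end())
    return true; // VSX was not explicitly turned off

  auto FindVSXSubfeature = [&](const std::string &Feature,
                               const std::string &Option) {
    if (std::find(FeaturesVec.begin(), FeaturesVec.end(), Feature) !=
        FeaturesVec.end()) {
      std::cerr << Option << " is not valid with -mno-vsx\n";
      return true;
    }
    return false;
  };

  bool Found = FindVSXSubfeature("+power8-vector", "-mpower8-vector");
  Found |= FindVSXSubfeature("+direct-move", "-mdirect-move");
  Found |= FindVSXSubfeature("+float128", "-mfloat128");
  Found |= FindVSXSubfeature("+power9-vector", "-mpower9-vector");
  Found |= FindVSXSubfeature("+power10-vector", "-mpower10-vector");

  return !Found;
}
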
bool PPCTargetInfo::initFeatureMap(
@@ -321,10 +323,17 @@ bool PPCTargetInfo::initFeatureMap(
.Case("e500", true)
.Default(false);
- // Future CPU should include all of the features of Power 9 as well as any
+ // Power10 includes all the same features as Power9 plus any features specific
+ // to the Power10 core.
+ if (CPU == "pwr10" || CPU == "power10") {
+ initFeatureMap(Features, Diags, "pwr9", FeaturesVec);
+ addP10SpecificFeatures(Features);
+ }
+
+ // Future CPU should include all of the features of Power 10 as well as any
// additional features (yet to be determined) specific to it.
if (CPU == "future") {
- initFeatureMap(Features, Diags, "pwr9", FeaturesVec);
+ initFeatureMap(Features, Diags, "pwr10", FeaturesVec);
addFutureSpecificFeatures(Features);
}
@@ -341,6 +350,15 @@ bool PPCTargetInfo::initFeatureMap(
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
+// Add any Power10 specific features.
+void PPCTargetInfo::addP10SpecificFeatures(
+ llvm::StringMap<bool> &Features) const {
+ Features["htm"] = false; // HTM was removed for P10.
+ Features["power10-vector"] = true;
+ Features["pcrelative-memops"] = true;
+ return;
+}
+
// Add features specific to the "Future" CPU.
void PPCTargetInfo::addFutureSpecificFeatures(
llvm::StringMap<bool> &Features) const {
@@ -361,6 +379,8 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("extdiv", HasExtDiv)
.Case("float128", HasFloat128)
.Case("power9-vector", HasP9Vector)
+ .Case("power10-vector", HasP10Vector)
+ .Case("pcrelative-memops", HasPCRelativeMemops)
.Case("spe", HasSPE)
.Default(false);
}
@@ -375,22 +395,34 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
.Case("direct-move", true)
.Case("power8-vector", true)
.Case("power9-vector", true)
+ .Case("power10-vector", true)
.Case("float128", true)
.Default(false);
if (FeatureHasVSX)
Features["vsx"] = Features["altivec"] = true;
if (Name == "power9-vector")
Features["power8-vector"] = true;
- Features[Name] = true;
+ else if (Name == "power10-vector")
+ Features["power8-vector"] = Features["power9-vector"] = true;
+ if (Name == "pcrel")
+ Features["pcrelative-memops"] = true;
+ else
+ Features[Name] = true;
} else {
// If we're disabling altivec or vsx go ahead and disable all of the vsx
// features.
if ((Name == "altivec") || (Name == "vsx"))
Features["vsx"] = Features["direct-move"] = Features["power8-vector"] =
- Features["float128"] = Features["power9-vector"] = false;
+ Features["float128"] = Features["power9-vector"] =
+ Features["power10-vector"] = false;
if (Name == "power8-vector")
- Features["power9-vector"] = false;
- Features[Name] = false;
+ Features["power9-vector"] = Features["power10-vector"] = false;
+ else if (Name == "power9-vector")
+ Features["power10-vector"] = false;
+ if (Name == "pcrel")
+ Features["pcrelative-memops"] = false;
+ else
+ Features[Name] = false;
}
}
@@ -471,18 +503,17 @@ ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
- {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
- {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
- {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
- {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
- {"g5"}, {"a2"}, {"a2q"}, {"e500"}, {"e500mc"},
- {"e5500"}, {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"},
- {"power5"}, {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"},
- {"pwr6"}, {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"},
- {"power8"}, {"pwr8"}, {"power9"}, {"pwr9"}, {"powerpc"},
- {"ppc"}, {"powerpc64"}, {"ppc64"}, {"powerpc64le"}, {"ppc64le"},
- {"future"}
-};
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
+ {"g5"}, {"a2"}, {"a2q"}, {"e500"}, {"e500mc"},
+ {"e5500"}, {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"},
+ {"power5"}, {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"},
+ {"pwr6"}, {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"},
+ {"power8"}, {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"},
+ {"pwr10"}, {"powerpc"}, {"ppc"}, {"powerpc64"}, {"ppc64"},
+ {"powerpc64le"}, {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index ef5c2264a0b0..858059bacb86 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -43,13 +43,13 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
ArchDefinePwr7 = 1 << 11,
ArchDefinePwr8 = 1 << 12,
ArchDefinePwr9 = 1 << 13,
- ArchDefineFuture = 1 << 14,
- ArchDefineA2 = 1 << 15,
- ArchDefineA2q = 1 << 16,
- ArchDefineE500 = 1 << 17
+ ArchDefinePwr10 = 1 << 14,
+ ArchDefineFuture = 1 << 15,
+ ArchDefineA2 = 1 << 16,
+ ArchDefineA2q = 1 << 17,
+ ArchDefineE500 = 1 << 18
} ArchDefineTypes;
-
ArchDefineTypes ArchDefs = ArchDefineNone;
static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
@@ -69,6 +69,8 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasExtDiv = false;
bool HasP9Vector = false;
bool HasSPE = false;
+ bool HasP10Vector = false;
+ bool HasPCRelativeMemops = false;
protected:
std::string ABI;
@@ -119,20 +121,20 @@ public:
.Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
.Cases("power3", "pwr3", ArchDefinePpcgr)
.Cases("power4", "pwr4",
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power5", "pwr5",
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Cases("power5x", "pwr5x",
- ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power6", "pwr6",
- ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power6x", "pwr6x",
- ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Cases("power7", "pwr7",
ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
@@ -146,11 +148,16 @@ public:
ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power10", "pwr10",
+ ArchDefinePwr10 | ArchDefinePwr9 | ArchDefinePwr8 |
+ ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Case("future",
- ArchDefineFuture | ArchDefinePwr9 | ArchDefinePwr8 |
- ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefineFuture | ArchDefinePwr10 | ArchDefinePwr9 |
+ ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("8548", "e500", ArchDefineE500)
.Default(ArchDefineNone);
}
@@ -171,6 +178,7 @@ public:
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override;
+ void addP10SpecificFeatures(llvm::StringMap<bool> &Features) const;
void addFutureSpecificFeatures(llvm::StringMap<bool> &Features) const;
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -333,13 +341,22 @@ public:
: "u9__ieee128";
}
const char *getFloat128Mangling() const override { return "u9__ieee128"; }
+
+ bool hasExtIntType() const override { return true; }
+
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("r1") || RegName.equals("x1");
+ }
};
class LLVM_LIBRARY_VISIBILITY PPC32TargetInfo : public PPCTargetInfo {
public:
PPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: PPCTargetInfo(Triple, Opts) {
- resetDataLayout("E-m:e-p:32:32-i64:64-n32");
+ if (Triple.isOSAIX())
+ resetDataLayout("E-m:a-p:32:32-i64:64-n32");
+ else
+ resetDataLayout("E-m:e-p:32:32-i64:64-n32");
switch (getTriple().getOS()) {
case llvm::Triple::Linux:
@@ -385,7 +402,11 @@ public:
IntMaxType = SignedLong;
Int64Type = SignedLong;
- if ((Triple.getArch() == llvm::Triple::ppc64le)) {
+ if (Triple.isOSAIX()) {
+ // TODO: Set appropriate ABI for AIX platform.
+ resetDataLayout("E-m:a-i64:64-n32:64");
+ SuitableAlign = 64;
+ } else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
resetDataLayout("e-m:e-i64:64-n32:64");
ABI = "elfv2";
} else {
@@ -393,9 +414,6 @@ public:
ABI = "elfv1";
}
- if (Triple.getOS() == llvm::Triple::AIX)
- SuitableAlign = 64;
-
if (Triple.isOSFreeBSD() || Triple.getOS() == llvm::Triple::AIX ||
Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index ab8272c034fd..522776437cd2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -125,6 +125,9 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasC)
Builder.defineMacro("__riscv_compressed");
+
+ if (HasB)
+ Builder.defineMacro("__riscv_bitmanip");
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
@@ -139,6 +142,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
.Case("f", HasF)
.Case("d", HasD)
.Case("c", HasC)
+ .Case("experimental-b", HasB)
.Default(false);
}
@@ -156,6 +160,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasD = true;
else if (Feature == "+c")
HasC = true;
+ else if (Feature == "+experimental-b")
+ HasB = true;
}
return true;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index 9118494a87ab..73652b409e9c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -30,11 +30,12 @@ protected:
bool HasF;
bool HasD;
bool HasC;
+ bool HasB;
public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), HasM(false), HasA(false), HasF(false),
- HasD(false), HasC(false) {
+ HasD(false), HasC(false), HasB(false) {
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -75,6 +76,8 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
index a9b815d13bc1..9b7aab85314a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
@@ -23,10 +23,12 @@ void SPIRTargetInfo::getTargetDefines(const LangOptions &Opts,
void SPIR32TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ SPIRTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIR32", Opts);
}
void SPIR64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ SPIRTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIR64", Opts);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index 279d1866a428..f625d4980e29 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -100,6 +100,8 @@ public:
// for SPIR since it is a generic target.
getSupportedOpenCLOpts().supportAll();
}
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
index 1f799565e99b..d24cf15d7cd6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
@@ -176,6 +176,7 @@ public:
MacroBuilder &Builder) const override;
bool hasSjLjLowering() const override { return true; }
+ bool hasExtIntType() const override { return true; }
};
// SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel.
@@ -227,6 +228,8 @@ public:
return false;
return getCPUGeneration(CPU) == CG_V9;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index e751806f4747..d7869e3754a8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -29,11 +29,12 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
int ISARevision;
bool HasTransactionalExecution;
bool HasVector;
+ bool SoftFloat;
public:
SystemZTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), CPU("z10"), ISARevision(8),
- HasTransactionalExecution(false), HasVector(false) {
+ HasTransactionalExecution(false), HasVector(false), SoftFloat(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
TLSSupported = true;
@@ -47,6 +48,7 @@ public:
MinGlobalAlign = 16;
resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64");
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ HasStrictFP = true;
}
void getTargetDefines(const LangOptions &Opts,
@@ -63,6 +65,10 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("r15");
+ }
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -109,12 +115,17 @@ public:
DiagnosticsEngine &Diags) override {
HasTransactionalExecution = false;
HasVector = false;
+ SoftFloat = false;
for (const auto &Feature : Features) {
if (Feature == "+transactional-execution")
HasTransactionalExecution = true;
else if (Feature == "+vector")
HasVector = true;
+ else if (Feature == "+soft-float")
+ SoftFloat = true;
}
+ HasVector &= !SoftFloat;
+
// If we use the vector ABI, vector types are 64-bit aligned.
if (HasVector) {
MaxVectorAlign = 64;
@@ -144,6 +155,8 @@ public:
}
const char *getLongDoubleMangling() const override { return "g"; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
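
Editor's note: handleTargetFeatures above now recognizes "+soft-float" and afterwards clears the vector facility with HasVector &= !SoftFloat, so soft-float wins when both are requested. A trimmed standalone sketch of that feature pass:

#include <string>
#include <vector>

// Trimmed sketch of the SystemZ feature handling above: soft-float overrides
// the vector facility, so HasVector is cleared when both appear.
struct SystemZFeatureState {
  bool HasVector = false;
  bool SoftFloat = false;
};

SystemZFeatureState
scanSystemZFeatures(const std::vector<std::string> &Features) {
  SystemZFeatureState S;
  for (const std::string &F : Features) {
    if (F == "+vector")
      S.HasVector = true;
    else if (F == "+soft-float")
      S.SoftFloat = true;
  }
  S.HasVector &= !S.SoftFloat;
  return S;
}
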
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
new file mode 100644
index 000000000000..22223654e8ad
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
@@ -0,0 +1,39 @@
+//===--- VE.cpp - Implement VE target feature support ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements VE TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void VETargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("_LP64", "1");
+ Builder.defineMacro("unix", "1");
+ Builder.defineMacro("__unix__", "1");
+ Builder.defineMacro("__linux__", "1");
+ Builder.defineMacro("__ve", "1");
+ Builder.defineMacro("__ve__", "1");
+ Builder.defineMacro("__STDC_HOSTED__", "1");
+ Builder.defineMacro("__STDC__", "1");
+ Builder.defineMacro("__NEC__", "1");
+ // FIXME: define __FAST_MATH__ 1 if -ffast-math is enabled
+ // FIXME: define __OPTIMIZE__ n if -On is enabled
+ // FIXME: define __VECTOR__ n 1 if automatic vectorization is enabled
+}
+
+ArrayRef<Builtin::Info> VETargetInfo::getTargetBuiltins() const {
+ return ArrayRef<Builtin::Info>();
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
new file mode 100644
index 000000000000..f863a0af0acb
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
@@ -0,0 +1,170 @@
+//===--- VE.h - Declare VE target feature support ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares VE TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+public:
+ VETargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ DoubleAlign = LongLongAlign = 64;
+ SuitableAlign = 64;
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
+ IntMaxType = SignedLong;
+ Int64Type = SignedLong;
+ RegParmMax = 8;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+
+ WCharType = UnsignedInt;
+ WIntType = UnsignedInt;
+ UseZeroLengthBitfieldAlignment = true;
+ resetDataLayout("e-m:e-i64:64-n32:64-S128");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool hasSjLjLowering() const override {
+ // TODO
+ return false;
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ return CCCR_OK;
+ }
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ // Regular registers
+ "sx0", "sx1", "sx2", "sx3", "sx4", "sx5", "sx6", "sx7",
+ "sx8", "sx9", "sx10", "sx11", "sx12", "sx13", "sx14", "sx15",
+ "sx16", "sx17", "sx18", "sx19", "sx20", "sx21", "sx22", "sx23",
+ "sx24", "sx25", "sx26", "sx27", "sx28", "sx29", "sx30", "sx31",
+ "sx32", "sx33", "sx34", "sx35", "sx36", "sx37", "sx38", "sx39",
+ "sx40", "sx41", "sx42", "sx43", "sx44", "sx45", "sx46", "sx47",
+ "sx48", "sx49", "sx50", "sx51", "sx52", "sx53", "sx54", "sx55",
+ "sx56", "sx57", "sx58", "sx59", "sx60", "sx61", "sx62", "sx63",
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"s0"}, "sx0"},
+ {{"s1"}, "sx1"},
+ {{"s2"}, "sx2"},
+ {{"s3"}, "sx3"},
+ {{"s4"}, "sx4"},
+ {{"s5"}, "sx5"},
+ {{"s6"}, "sx6"},
+ {{"s7"}, "sx7"},
+ {{"s8", "sl"}, "sx8"},
+ {{"s9", "fp"}, "sx9"},
+ {{"s10", "lr"}, "sx10"},
+ {{"s11", "sp"}, "sx11"},
+ {{"s12", "outer"}, "sx12"},
+ {{"s13"}, "sx13"},
+ {{"s14", "tp"}, "sx14"},
+ {{"s15", "got"}, "sx15"},
+ {{"s16", "plt"}, "sx16"},
+ {{"s17", "info"}, "sx17"},
+ {{"s18"}, "sx18"},
+ {{"s19"}, "sx19"},
+ {{"s20"}, "sx20"},
+ {{"s21"}, "sx21"},
+ {{"s22"}, "sx22"},
+ {{"s23"}, "sx23"},
+ {{"s24"}, "sx24"},
+ {{"s25"}, "sx25"},
+ {{"s26"}, "sx26"},
+ {{"s27"}, "sx27"},
+ {{"s28"}, "sx28"},
+ {{"s29"}, "sx29"},
+ {{"s30"}, "sx30"},
+ {{"s31"}, "sx31"},
+ {{"s32"}, "sx32"},
+ {{"s33"}, "sx33"},
+ {{"s34"}, "sx34"},
+ {{"s35"}, "sx35"},
+ {{"s36"}, "sx36"},
+ {{"s37"}, "sx37"},
+ {{"s38"}, "sx38"},
+ {{"s39"}, "sx39"},
+ {{"s40"}, "sx40"},
+ {{"s41"}, "sx41"},
+ {{"s42"}, "sx42"},
+ {{"s43"}, "sx43"},
+ {{"s44"}, "sx44"},
+ {{"s45"}, "sx45"},
+ {{"s46"}, "sx46"},
+ {{"s47"}, "sx47"},
+ {{"s48"}, "sx48"},
+ {{"s49"}, "sx49"},
+ {{"s50"}, "sx50"},
+ {{"s51"}, "sx51"},
+ {{"s52"}, "sx52"},
+ {{"s53"}, "sx53"},
+ {{"s54"}, "sx54"},
+ {{"s55"}, "sx55"},
+ {{"s56"}, "sx56"},
+ {{"s57"}, "sx57"},
+ {{"s58"}, "sx58"},
+ {{"s59"}, "sx59"},
+ {{"s60"}, "sx60"},
+ {{"s61"}, "sx61"},
+ {{"s62"}, "sx62"},
+ {{"s63"}, "sx63"},
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ bool allowsLargerPreferedTypeAlignment() const override { return false; }
+};
+} // namespace targets
+} // namespace clang
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
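
Editor's note: the alias table in the new VE.h maps every scalar register name "sN" (0..63) to its canonical "sxN" spelling, plus a handful of special names (sp, fp, lr, and so on). A hypothetical helper illustrating just the numeric part of that mapping; it is not the lookup clang performs:

#include <cctype>
#include <cstddef>
#include <optional>
#include <string>

// Returns the canonical "sxN" name for a numeric alias "sN" with N in 0..63,
// or nothing otherwise. Special aliases such as "sp" or "fp" from the table
// above are deliberately left out of this sketch.
std::optional<std::string> veCanonicalRegName(const std::string &Alias) {
  if (Alias.size() < 2 || Alias[0] != 's')
    return std::nullopt;
  int N = 0;
  for (std::size_t i = 1; i < Alias.size(); ++i) {
    if (!std::isdigit(static_cast<unsigned char>(Alias[i])))
      return std::nullopt;
    N = N * 10 + (Alias[i] - '0');
  }
  if (N > 63)
    return std::nullopt;
  return "sx" + std::to_string(N);
}
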
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
index b16442b99b62..6746768090f5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -33,6 +33,16 @@ const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"mvp"}, {"bleeding-edge"}, {"generic"}};
+StringRef WebAssemblyTargetInfo::getABI() const { return ABI; }
+
+bool WebAssemblyTargetInfo::setABI(const std::string &Name) {
+ if (Name != "mvp" && Name != "experimental-mv")
+ return false;
+
+ ABI = Name;
+ return true;
+}
+
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
@@ -45,6 +55,7 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("mutable-globals", HasMutableGlobals)
.Case("multivalue", HasMultivalue)
.Case("tail-call", HasTailCall)
+ .Case("reference-types", HasReferenceTypes)
.Default(false);
}
@@ -80,6 +91,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_multivalue__");
if (HasTailCall)
Builder.defineMacro("__wasm_tail_call__");
+ if (HasReferenceTypes)
+ Builder.defineMacro("__wasm_reference_types__");
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
@@ -102,8 +115,10 @@ bool WebAssemblyTargetInfo::initFeatureMap(
if (CPU == "bleeding-edge") {
Features["nontrapping-fptoint"] = true;
Features["sign-ext"] = true;
+ Features["bulk-memory"] = true;
Features["atomics"] = true;
Features["mutable-globals"] = true;
+ Features["tail-call"] = true;
setSIMDLevel(Features, SIMD128);
}
// Other targets do not consider user-configured features here, but while we
@@ -126,6 +141,8 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["multivalue"] = true;
if (HasTailCall)
Features["tail-call"] = true;
+ if (HasReferenceTypes)
+ Features["reference-types"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -213,6 +230,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasTailCall = false;
continue;
}
+ if (Feature == "+reference-types") {
+ HasReferenceTypes = true;
+ continue;
+ }
+ if (Feature == "-reference-types") {
+ HasReferenceTypes = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
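
Editor's note: the new reference-types handling follows the existing toggle pattern in handleTargetFeatures: every recognized feature has a "+name"/"-name" pair, and anything else reaches the diagnostic at the end. A sketch of that pattern for the single feature added here:

#include "llvm/ADT/StringRef.h"

// Sketch of the "+feature"/"-feature" toggle pattern used above, shown for
// reference-types only. Returns false for any string that is not one of the
// two recognized spellings; the real code emits a diagnostic in that case.
bool handleReferenceTypesFeature(llvm::StringRef Feature,
                                 bool &HasReferenceTypes) {
  if (Feature == "+reference-types") {
    HasReferenceTypes = true;
    return true;
  }
  if (Feature == "-reference-types") {
    HasReferenceTypes = false;
    return true;
  }
  return false;
}
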
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 9665156b143f..77a2fe9ae117 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -38,6 +38,9 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasMutableGlobals = false;
bool HasMultivalue = false;
bool HasTailCall = false;
+ bool HasReferenceTypes = false;
+
+ std::string ABI;
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
@@ -58,6 +61,9 @@ public:
IntPtrType = SignedLong;
}
+ StringRef getABI() const override;
+ bool setABI(const std::string &Name) override;
+
protected:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -114,7 +120,22 @@ private:
? (IsSigned ? SignedLongLong : UnsignedLongLong)
: TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
}
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ case CC_C:
+ case CC_Swift:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
+
+ bool hasExtIntType() const override { return true; }
+
+ bool hasProtectedVisibility() const override { return false; }
};
+
class LLVM_LIBRARY_VISIBILITY WebAssembly32TargetInfo
: public WebAssemblyTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index d099d3742f0b..543f232d2459 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/X86TargetParser.h"
namespace clang {
namespace targets {
@@ -62,6 +62,7 @@ static const char *const GCCRegNames[] = {
"cr0", "cr2", "cr3", "cr4", "cr8",
"dr0", "dr1", "dr2", "dr3", "dr6", "dr7",
"bnd0", "bnd1", "bnd2", "bnd3",
+ "tmm0", "tmm1", "tmm2", "tmm3", "tmm4", "tmm5", "tmm6", "tmm7",
};
const TargetInfo::AddlRegName AddlRegNames[] = {
@@ -107,339 +108,15 @@ bool X86TargetInfo::initFeatureMap(
// FIXME: This *really* should not be here.
// X86_64 always has SSE2.
if (getTriple().getArch() == llvm::Triple::x86_64)
- setFeatureEnabledImpl(Features, "sse2", true);
+ setFeatureEnabled(Features, "sse2", true);
- const CPUKind Kind = getCPUKind(CPU);
+ using namespace llvm::X86;
- // Enable X87 for all X86 processors but Lakemont.
- if (Kind != CK_Lakemont)
- setFeatureEnabledImpl(Features, "x87", true);
+ SmallVector<StringRef, 16> CPUFeatures;
+ getFeaturesForCPU(CPU, CPUFeatures);
+ for (auto &F : CPUFeatures)
+ setFeatureEnabled(Features, F, true);
- // Enable cmpxchg8 for i586 and greater CPUs. Include generic for backwards
- // compatibility.
- if (Kind >= CK_i586 || Kind == CK_Generic)
- setFeatureEnabledImpl(Features, "cx8", true);
-
- switch (Kind) {
- case CK_Generic:
- case CK_i386:
- case CK_i486:
- case CK_i586:
- case CK_Pentium:
- case CK_PentiumPro:
- case CK_i686:
- case CK_Lakemont:
- break;
-
- case CK_Cooperlake:
- // CPX inherits all CLX features plus AVX512BF16
- setFeatureEnabledImpl(Features, "avx512bf16", true);
- LLVM_FALLTHROUGH;
- case CK_Cascadelake:
- // CLX inherits all SKX features plus AVX512VNNI
- setFeatureEnabledImpl(Features, "avx512vnni", true);
- LLVM_FALLTHROUGH;
- case CK_SkylakeServer:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512dq", true);
- setFeatureEnabledImpl(Features, "avx512bw", true);
- setFeatureEnabledImpl(Features, "avx512vl", true);
- setFeatureEnabledImpl(Features, "clwb", true);
- setFeatureEnabledImpl(Features, "pku", true);
- // SkylakeServer cores inherits all SKL features, except SGX
- goto SkylakeCommon;
-
- case CK_Tigerlake:
- setFeatureEnabledImpl(Features, "avx512vp2intersect", true);
- setFeatureEnabledImpl(Features, "movdiri", true);
- setFeatureEnabledImpl(Features, "movdir64b", true);
- setFeatureEnabledImpl(Features, "shstk", true);
- // Tigerlake cores inherits IcelakeClient, except pconfig and wbnoinvd
- goto IcelakeCommon;
-
- case CK_IcelakeServer:
- setFeatureEnabledImpl(Features, "pconfig", true);
- setFeatureEnabledImpl(Features, "wbnoinvd", true);
- LLVM_FALLTHROUGH;
- case CK_IcelakeClient:
-IcelakeCommon:
- setFeatureEnabledImpl(Features, "vaes", true);
- setFeatureEnabledImpl(Features, "gfni", true);
- setFeatureEnabledImpl(Features, "vpclmulqdq", true);
- setFeatureEnabledImpl(Features, "avx512bitalg", true);
- setFeatureEnabledImpl(Features, "avx512vbmi2", true);
- setFeatureEnabledImpl(Features, "avx512vnni", true);
- setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "clwb", true);
- LLVM_FALLTHROUGH;
- case CK_Cannonlake:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512dq", true);
- setFeatureEnabledImpl(Features, "avx512bw", true);
- setFeatureEnabledImpl(Features, "avx512vl", true);
- setFeatureEnabledImpl(Features, "avx512ifma", true);
- setFeatureEnabledImpl(Features, "avx512vbmi", true);
- setFeatureEnabledImpl(Features, "pku", true);
- setFeatureEnabledImpl(Features, "sha", true);
- LLVM_FALLTHROUGH;
- case CK_SkylakeClient:
- setFeatureEnabledImpl(Features, "sgx", true);
- // SkylakeServer cores inherits all SKL features, except SGX
-SkylakeCommon:
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "aes", true);
- LLVM_FALLTHROUGH;
- case CK_Broadwell:
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- LLVM_FALLTHROUGH;
- case CK_Haswell:
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "invpcid", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- LLVM_FALLTHROUGH;
- case CK_IvyBridge:
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- LLVM_FALLTHROUGH;
- case CK_SandyBridge:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- LLVM_FALLTHROUGH;
- case CK_Westmere:
- setFeatureEnabledImpl(Features, "pclmul", true);
- LLVM_FALLTHROUGH;
- case CK_Nehalem:
- setFeatureEnabledImpl(Features, "sse4.2", true);
- LLVM_FALLTHROUGH;
- case CK_Penryn:
- setFeatureEnabledImpl(Features, "sse4.1", true);
- LLVM_FALLTHROUGH;
- case CK_Core2:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- LLVM_FALLTHROUGH;
- case CK_Nocona:
- setFeatureEnabledImpl(Features, "cx16", true);
- LLVM_FALLTHROUGH;
- case CK_Yonah:
- case CK_Prescott:
- setFeatureEnabledImpl(Features, "sse3", true);
- LLVM_FALLTHROUGH;
- case CK_PentiumM:
- case CK_Pentium4:
- case CK_x86_64:
- setFeatureEnabledImpl(Features, "sse2", true);
- LLVM_FALLTHROUGH;
- case CK_Pentium3:
- case CK_C3_2:
- setFeatureEnabledImpl(Features, "sse", true);
- LLVM_FALLTHROUGH;
- case CK_Pentium2:
- setFeatureEnabledImpl(Features, "fxsr", true);
- LLVM_FALLTHROUGH;
- case CK_PentiumMMX:
- case CK_K6:
- case CK_WinChipC6:
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_Tremont:
- setFeatureEnabledImpl(Features, "cldemote", true);
- setFeatureEnabledImpl(Features, "movdiri", true);
- setFeatureEnabledImpl(Features, "movdir64b", true);
- setFeatureEnabledImpl(Features, "gfni", true);
- setFeatureEnabledImpl(Features, "waitpkg", true);
- LLVM_FALLTHROUGH;
- case CK_GoldmontPlus:
- setFeatureEnabledImpl(Features, "ptwrite", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "sgx", true);
- LLVM_FALLTHROUGH;
- case CK_Goldmont:
- setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "aes", true);
- LLVM_FALLTHROUGH;
- case CK_Silvermont:
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "sse4.2", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- LLVM_FALLTHROUGH;
- case CK_Bonnell:
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_KNM:
- // TODO: Add avx5124fmaps/avx5124vnniw.
- setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
- LLVM_FALLTHROUGH;
- case CK_KNL:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512er", true);
- setFeatureEnabledImpl(Features, "avx512pf", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "prefetchwt1", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_K6_2:
- case CK_K6_3:
- case CK_WinChip2:
- case CK_C3:
- setFeatureEnabledImpl(Features, "3dnow", true);
- break;
-
- case CK_AMDFAM10:
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- LLVM_FALLTHROUGH;
- case CK_K8SSE3:
- setFeatureEnabledImpl(Features, "sse3", true);
- LLVM_FALLTHROUGH;
- case CK_K8:
- setFeatureEnabledImpl(Features, "sse2", true);
- LLVM_FALLTHROUGH;
- case CK_AthlonXP:
- setFeatureEnabledImpl(Features, "sse", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- LLVM_FALLTHROUGH;
- case CK_Athlon:
- case CK_Geode:
- setFeatureEnabledImpl(Features, "3dnowa", true);
- break;
-
- case CK_BTVER2:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- LLVM_FALLTHROUGH;
- case CK_BTVER1:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_ZNVER2:
- setFeatureEnabledImpl(Features, "clwb", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "wbnoinvd", true);
- LLVM_FALLTHROUGH;
- case CK_ZNVER1:
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "clzero", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- setFeatureEnabledImpl(Features, "mwaitx", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- break;
-
- case CK_BDVER4:
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "mwaitx", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER3:
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER2:
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "tbm", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER1:
- // xop implies avx, sse4a and fma4.
- setFeatureEnabledImpl(Features, "xop", true);
- setFeatureEnabledImpl(Features, "lwp", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
- }
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
return false;
@@ -452,12 +129,6 @@ SkylakeCommon:
llvm::find(FeaturesVec, "-popcnt") == FeaturesVec.end())
Features["popcnt"] = true;
- // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
- I = Features.find("3dnow");
- if (I != Features.end() && I->getValue() &&
- llvm::find(FeaturesVec, "-prfchw") == FeaturesVec.end())
- Features["prfchw"] = true;
-
// Additionally, if SSE is enabled and mmx is not explicitly disabled,
// then enable MMX.
I = Features.find("sse");
@@ -465,264 +136,34 @@ SkylakeCommon:
llvm::find(FeaturesVec, "-mmx") == FeaturesVec.end())
Features["mmx"] = true;
- return true;
-}
-
-void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
- X86SSEEnum Level, bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case AVX512F:
- Features["avx512f"] = true;
- Features["fma"] = true;
- Features["f16c"] = true;
- LLVM_FALLTHROUGH;
- case AVX2:
- Features["avx2"] = true;
- LLVM_FALLTHROUGH;
- case AVX:
- Features["avx"] = true;
- Features["xsave"] = true;
- LLVM_FALLTHROUGH;
- case SSE42:
- Features["sse4.2"] = true;
- LLVM_FALLTHROUGH;
- case SSE41:
- Features["sse4.1"] = true;
- LLVM_FALLTHROUGH;
- case SSSE3:
- Features["ssse3"] = true;
- LLVM_FALLTHROUGH;
- case SSE3:
- Features["sse3"] = true;
- LLVM_FALLTHROUGH;
- case SSE2:
- Features["sse2"] = true;
- LLVM_FALLTHROUGH;
- case SSE1:
- Features["sse"] = true;
- LLVM_FALLTHROUGH;
- case NoSSE:
- break;
- }
- return;
- }
-
- switch (Level) {
- case NoSSE:
- case SSE1:
- Features["sse"] = false;
- LLVM_FALLTHROUGH;
- case SSE2:
- Features["sse2"] = Features["pclmul"] = Features["aes"] = false;
- Features["sha"] = Features["gfni"] = false;
- LLVM_FALLTHROUGH;
- case SSE3:
- Features["sse3"] = false;
- setXOPLevel(Features, NoXOP, false);
- LLVM_FALLTHROUGH;
- case SSSE3:
- Features["ssse3"] = false;
- LLVM_FALLTHROUGH;
- case SSE41:
- Features["sse4.1"] = false;
- LLVM_FALLTHROUGH;
- case SSE42:
- Features["sse4.2"] = false;
- LLVM_FALLTHROUGH;
- case AVX:
- Features["fma"] = Features["avx"] = Features["f16c"] = false;
- Features["xsave"] = Features["xsaveopt"] = Features["vaes"] = false;
- Features["vpclmulqdq"] = false;
- setXOPLevel(Features, FMA4, false);
- LLVM_FALLTHROUGH;
- case AVX2:
- Features["avx2"] = false;
- LLVM_FALLTHROUGH;
- case AVX512F:
- Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] = false;
- Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] = false;
- Features["avx512vl"] = Features["avx512vbmi"] = false;
- Features["avx512ifma"] = Features["avx512vpopcntdq"] = false;
- Features["avx512bitalg"] = Features["avx512vnni"] = false;
- Features["avx512vbmi2"] = Features["avx512bf16"] = false;
- Features["avx512vp2intersect"] = false;
- break;
- }
-}
-
-void X86TargetInfo::setMMXLevel(llvm::StringMap<bool> &Features,
- MMX3DNowEnum Level, bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case AMD3DNowAthlon:
- Features["3dnowa"] = true;
- LLVM_FALLTHROUGH;
- case AMD3DNow:
- Features["3dnow"] = true;
- LLVM_FALLTHROUGH;
- case MMX:
- Features["mmx"] = true;
- LLVM_FALLTHROUGH;
- case NoMMX3DNow:
- break;
- }
- return;
- }
-
- switch (Level) {
- case NoMMX3DNow:
- case MMX:
- Features["mmx"] = false;
- LLVM_FALLTHROUGH;
- case AMD3DNow:
- Features["3dnow"] = false;
- LLVM_FALLTHROUGH;
- case AMD3DNowAthlon:
- Features["3dnowa"] = false;
- break;
- }
-}
-
-void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
- bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case XOP:
- Features["xop"] = true;
- LLVM_FALLTHROUGH;
- case FMA4:
- Features["fma4"] = true;
- setSSELevel(Features, AVX, true);
- LLVM_FALLTHROUGH;
- case SSE4A:
- Features["sse4a"] = true;
- setSSELevel(Features, SSE3, true);
- LLVM_FALLTHROUGH;
- case NoXOP:
- break;
- }
- return;
- }
+ // Enable xsave if avx is enabled and xsave is not explicitly disabled.
+ I = Features.find("avx");
+ if (I != Features.end() && I->getValue() &&
+ llvm::find(FeaturesVec, "-xsave") == FeaturesVec.end())
+ Features["xsave"] = true;
- switch (Level) {
- case NoXOP:
- case SSE4A:
- Features["sse4a"] = false;
- LLVM_FALLTHROUGH;
- case FMA4:
- Features["fma4"] = false;
- LLVM_FALLTHROUGH;
- case XOP:
- Features["xop"] = false;
- break;
- }
+ return true;
}
-void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
- StringRef Name, bool Enabled) {
- // This is a bit of a hack to deal with the sse4 target feature when used
- // as part of the target attribute. We handle sse4 correctly everywhere
- // else. See below for more information on how we handle the sse4 options.
- if (Name != "sse4")
- Features[Name] = Enabled;
-
- if (Name == "mmx") {
- setMMXLevel(Features, MMX, Enabled);
- } else if (Name == "sse") {
- setSSELevel(Features, SSE1, Enabled);
- } else if (Name == "sse2") {
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "sse3") {
- setSSELevel(Features, SSE3, Enabled);
- } else if (Name == "ssse3") {
- setSSELevel(Features, SSSE3, Enabled);
- } else if (Name == "sse4.2") {
- setSSELevel(Features, SSE42, Enabled);
- } else if (Name == "sse4.1") {
- setSSELevel(Features, SSE41, Enabled);
- } else if (Name == "3dnow") {
- setMMXLevel(Features, AMD3DNow, Enabled);
- } else if (Name == "3dnowa") {
- setMMXLevel(Features, AMD3DNowAthlon, Enabled);
- } else if (Name == "aes") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- else
- Features["vaes"] = false;
- } else if (Name == "vaes") {
- if (Enabled) {
- setSSELevel(Features, AVX, Enabled);
- Features["aes"] = true;
- }
- } else if (Name == "pclmul") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- else
- Features["vpclmulqdq"] = false;
- } else if (Name == "vpclmulqdq") {
- if (Enabled) {
- setSSELevel(Features, AVX, Enabled);
- Features["pclmul"] = true;
- }
- } else if (Name == "gfni") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "avx") {
- setSSELevel(Features, AVX, Enabled);
- } else if (Name == "avx2") {
- setSSELevel(Features, AVX2, Enabled);
- } else if (Name == "avx512f") {
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name.startswith("avx512")) {
- if (Enabled)
- setSSELevel(Features, AVX512F, Enabled);
- // Enable BWI instruction if certain features are being enabled.
- if ((Name == "avx512vbmi" || Name == "avx512vbmi2" ||
- Name == "avx512bitalg" || Name == "avx512bf16") && Enabled)
- Features["avx512bw"] = true;
- // Also disable some features if BWI is being disabled.
- if (Name == "avx512bw" && !Enabled) {
- Features["avx512vbmi"] = false;
- Features["avx512vbmi2"] = false;
- Features["avx512bitalg"] = false;
- Features["avx512bf16"] = false;
- }
- } else if (Name == "fma") {
- if (Enabled)
- setSSELevel(Features, AVX, Enabled);
- else
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "fma4") {
- setXOPLevel(Features, FMA4, Enabled);
- } else if (Name == "xop") {
- setXOPLevel(Features, XOP, Enabled);
- } else if (Name == "sse4a") {
- setXOPLevel(Features, SSE4A, Enabled);
- } else if (Name == "f16c") {
- if (Enabled)
- setSSELevel(Features, AVX, Enabled);
- else
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "sha") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "sse4") {
+void X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ if (Name == "sse4") {
// We can get here via the __target__ attribute since that's not controlled
// via the -msse4/-mno-sse4 command line alias. Handle this the same way
// here - turn on the sse4.2 if enabled, turn off the sse4.1 level if
// disabled.
if (Enabled)
- setSSELevel(Features, SSE42, Enabled);
+ Name = "sse4.2";
else
- setSSELevel(Features, SSE41, Enabled);
- } else if (Name == "xsave") {
- if (!Enabled)
- Features["xsaveopt"] = false;
- } else if (Name == "xsaveopt" || Name == "xsavec" || Name == "xsaves") {
- if (Enabled)
- Features["xsave"] = true;
+ Name = "sse4.1";
}
+
+ Features[Name] = Enabled;
+
+ SmallVector<StringRef, 8> ImpliedFeatures;
+ llvm::X86::getImpliedFeatures(Name, Enabled, ImpliedFeatures);
+ for (const auto &F : ImpliedFeatures)
+ Features[F] = Enabled;
}
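For orientation, the rewritten setFeatureEnabled above no longer walks the removed setSSELevel/setMMXLevel/setXOPLevel ladders; it hands all implication work to LLVM's X86 target parser. A minimal sketch of that helper used on its own, assuming the llvm/Support/X86TargetParser.h header added later in this diff (expandFeature is an illustrative name, not part of the patch):

    // Sketch: set one feature plus everything it implies (or, when disabling,
    // everything that depends on it) in a plain feature map.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/X86TargetParser.h"

    static void expandFeature(llvm::StringMap<bool> &Features,
                              llvm::StringRef Name, bool Enabled) {
      Features[Name] = Enabled;
      llvm::SmallVector<llvm::StringRef, 8> Implied;
      llvm::X86::getImpliedFeatures(Name, Enabled, Implied);
      for (llvm::StringRef F : Implied)
        Features[F] = Enabled; // e.g. enabling "avx2" also turns on avx, sse4.2, ...
    }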
/// handleTargetFeatures - Perform initialization based on the user
@@ -857,6 +298,16 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasINVPCID = true;
} else if (Feature == "+enqcmd") {
HasENQCMD = true;
+ } else if (Feature == "+amx-bf16") {
+ HasAMXBF16 = true;
+ } else if (Feature == "+amx-int8") {
+ HasAMXINT8 = true;
+ } else if (Feature == "+amx-tile") {
+ HasAMXTILE = true;
+ } else if (Feature == "+serialize") {
+ HasSERIALIZE = true;
+ } else if (Feature == "+tsxldtrk") {
+ HasTSXLDTRK = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -911,7 +362,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
CodeModel = "small";
- Builder.defineMacro("__code_model_" + CodeModel + "_");
+ Builder.defineMacro("__code_model_" + CodeModel + "__");
// Target identification.
if (getTriple().getArch() == llvm::Triple::x86_64) {
@@ -935,8 +386,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
// Subtarget options.
// FIXME: We are hard-coding the tune parameters based on the CPU, but they
// truly should be based on -mtune options.
+ using namespace llvm::X86;
switch (CPU) {
- case CK_Generic:
+ case CK_None:
break;
case CK_i386:
// The rest are coming from the i386 define above.
@@ -1247,6 +699,16 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__INVPCID__");
if (HasENQCMD)
Builder.defineMacro("__ENQCMD__");
+ if (HasAMXTILE)
+ Builder.defineMacro("__AMXTILE__");
+ if (HasAMXINT8)
+ Builder.defineMacro("__AMXINT8__");
+ if (HasAMXBF16)
+ Builder.defineMacro("__AMXBF16__");
+ if (HasSERIALIZE)
+ Builder.defineMacro("__SERIALIZE__");
+ if (HasTSXLDTRK)
+ Builder.defineMacro("__TSXLDTRK__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -1319,7 +781,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
}
- if (CPU >= CK_i486 || CPU == CK_Generic) {
+ if (CPU >= CK_i486 || CPU == CK_None) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
@@ -1339,6 +801,9 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("3dnowa", true)
.Case("adx", true)
.Case("aes", true)
+ .Case("amx-bf16", true)
+ .Case("amx-int8", true)
+ .Case("amx-tile", true)
.Case("avx", true)
.Case("avx2", true)
.Case("avx512f", true)
@@ -1390,6 +855,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("rdseed", true)
.Case("rtm", true)
.Case("sahf", true)
+ .Case("serialize", true)
.Case("sgx", true)
.Case("sha", true)
.Case("shstk", true)
@@ -1402,6 +868,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("sse4.2", true)
.Case("sse4a", true)
.Case("tbm", true)
+ .Case("tsxldtrk", true)
.Case("vaes", true)
.Case("vpclmulqdq", true)
.Case("wbnoinvd", true)
@@ -1419,6 +886,9 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("adx", HasADX)
.Case("aes", HasAES)
+ .Case("amx-bf16", HasAMXBF16)
+ .Case("amx-int8", HasAMXINT8)
+ .Case("amx-tile", HasAMXTILE)
.Case("avx", SSELevel >= AVX)
.Case("avx2", SSELevel >= AVX2)
.Case("avx512f", SSELevel >= AVX512F)
@@ -1474,6 +944,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("retpoline-external-thunk", HasRetpolineExternalThunk)
.Case("rtm", HasRTM)
.Case("sahf", HasLAHFSAHF)
+ .Case("serialize", HasSERIALIZE)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
.Case("shstk", HasSHSTK)
@@ -1485,6 +956,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
+ .Case("tsxldtrk", HasTSXLDTRK)
.Case("vaes", HasVAES)
.Case("vpclmulqdq", HasVPCLMULQDQ)
.Case("wbnoinvd", HasWBNOINVD)
@@ -1507,14 +979,14 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
// X86TargetInfo::hasFeature for a somewhat comprehensive list).
bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
return llvm::StringSwitch<bool>(FeatureStr)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, true)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, true)
#include "llvm/Support/X86TargetParser.def"
.Default(false);
}
static llvm::X86::ProcessorFeatures getFeature(StringRef Name) {
return llvm::StringSwitch<llvm::X86::ProcessorFeatures>(Name)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, llvm::X86::ENUM)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
#include "llvm/Support/X86TargetParser.def"
;
// Note, this function should only be used after ensuring the value is
@@ -1539,17 +1011,11 @@ static unsigned getFeaturePriority(llvm::X86::ProcessorFeatures Feat) {
unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
  // A valid CPU has a 'key feature'; the CPU's sort priority is placed just
  // above the priority of that key feature on its own.
- CPUKind Kind = getCPUKind(Name);
- if (Kind != CK_Generic) {
- switch (Kind) {
- default:
- llvm_unreachable(
- "CPU Type without a key feature used in 'target' attribute");
-#define PROC_WITH_FEAT(ENUM, STR, IS64, KEY_FEAT) \
- case CK_##ENUM: \
- return (getFeaturePriority(llvm::X86::KEY_FEAT) << 1) + 1;
-#include "clang/Basic/X86Target.def"
- }
+ using namespace llvm::X86;
+ CPUKind Kind = parseArchX86(Name);
+ if (Kind != CK_None) {
+ ProcessorFeatures KeyFeature = getKeyFeature(Kind);
+ return (getFeaturePriority(KeyFeature) << 1) + 1;
}
// Now we know we have a feature, so get its priority and shift it a few so
@@ -1596,10 +1062,9 @@ void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
bool X86TargetInfo::validateCpuIs(StringRef FeatureStr) const {
return llvm::StringSwitch<bool>(FeatureStr)
#define X86_VENDOR(ENUM, STRING) .Case(STRING, true)
-#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
- .Cases(STR, ALIAS, true)
-#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) .Case(STR, true)
-#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) .Case(STR, true)
+#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) .Case(ALIAS, true)
+#define X86_CPU_TYPE(ENUM, STR) .Case(STR, true)
+#define X86_CPU_SUBTYPE(ENUM, STR) .Case(STR, true)
#include "llvm/Support/X86TargetParser.def"
.Default(false);
}
@@ -1679,8 +1144,7 @@ bool X86TargetInfo::validateAsmConstraint(
switch (*Name) {
default:
return false;
- case 'z':
- case '0': // First SSE register.
+ case 'z': // First SSE register.
case '2':
case 't': // Any SSE register, when SSE2 is enabled.
case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
@@ -1731,6 +1195,121 @@ bool X86TargetInfo::validateAsmConstraint(
}
}
+// Below is based on the following information:
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+// | Processor Name | Cache Line Size (Bytes) | Source |
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+// | i386 | 64 | https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf |
+// | i486 | 16 | "four doublewords" (doubleword = 32 bits, 4 x 32 bits = 128 bits = 16 bytes) https://en.wikichip.org/w/images/d/d3/i486_MICROPROCESSOR_HARDWARE_REFERENCE_MANUAL_%281990%29.pdf and http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.126.4216&rep=rep1&type=pdf (page 29) |
+// | i586/Pentium MMX | 32 | https://www.7-cpu.com/cpu/P-MMX.html |
+// | i686/Pentium | 32 | https://www.7-cpu.com/cpu/P6.html |
+// | Netburst/Pentium4 | 64 | https://www.7-cpu.com/cpu/P4-180.html |
+// | Atom | 64 | https://www.7-cpu.com/cpu/Atom.html |
+// | Westmere | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/sandy_bridge_(client) "Cache Architecture" |
+// | Sandy Bridge | 64 | https://en.wikipedia.org/wiki/Sandy_Bridge and https://www.7-cpu.com/cpu/SandyBridge.html |
+// | Ivy Bridge | 64 | https://blog.stuffedcow.net/2013/01/ivb-cache-replacement/ and https://www.7-cpu.com/cpu/IvyBridge.html |
+// | Haswell | 64 | https://www.7-cpu.com/cpu/Haswell.html |
+// | Broadwell | 64 | https://www.7-cpu.com/cpu/Broadwell.html |
+// | Skylake (including skylake-avx512) | 64 | https://www.nas.nasa.gov/hecc/support/kb/skylake-processors_550.html "Cache Hierarchy" |
+// | Cascade Lake | 64 | https://www.nas.nasa.gov/hecc/support/kb/cascade-lake-processors_579.html "Cache Hierarchy" |
+// | Kaby Lake | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/kaby_lake "Memory Hierarchy" |
+// | Ice Lake | 64 | https://www.7-cpu.com/cpu/Ice_Lake.html |
+// | Knights Landing | 64 | https://software.intel.com/en-us/articles/intel-xeon-phi-processor-7200-family-memory-management-optimizations "The Intel® Xeon Phi™ Processor Architecture" |
+// | Knights Mill | 64 | https://software.intel.com/sites/default/files/managed/9e/bc/64-ia-32-architectures-optimization-manual.pdf?countrylabel=Colombia "2.5.5.2 L1 DCache " |
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
+ using namespace llvm::X86;
+ switch (CPU) {
+ // i386
+ case CK_i386:
+ // i486
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ // Lakemont
+ case CK_Lakemont:
+ return 16;
+
+ // i586
+ case CK_i586:
+ case CK_Pentium:
+ case CK_PentiumMMX:
+ // i686
+ case CK_PentiumPro:
+ case CK_i686:
+ case CK_Pentium2:
+ case CK_Pentium3:
+ case CK_PentiumM:
+ case CK_C3_2:
+ // K6
+ case CK_K6:
+ case CK_K6_2:
+ case CK_K6_3:
+ // Geode
+ case CK_Geode:
+ return 32;
+
+ // Netburst
+ case CK_Pentium4:
+ case CK_Prescott:
+ case CK_Nocona:
+ // Atom
+ case CK_Bonnell:
+ case CK_Silvermont:
+ case CK_Goldmont:
+ case CK_GoldmontPlus:
+ case CK_Tremont:
+
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ case CK_SkylakeClient:
+ case CK_SkylakeServer:
+ case CK_Cascadelake:
+ case CK_Nehalem:
+ case CK_Cooperlake:
+ case CK_Cannonlake:
+ case CK_Tigerlake:
+ case CK_IcelakeClient:
+ case CK_IcelakeServer:
+ case CK_KNL:
+ case CK_KNM:
+ // K7
+ case CK_Athlon:
+ case CK_AthlonXP:
+ // K8
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_AMDFAM10:
+ // Bobcat
+ case CK_BTVER1:
+ case CK_BTVER2:
+ // Bulldozer
+ case CK_BDVER1:
+ case CK_BDVER2:
+ case CK_BDVER3:
+ case CK_BDVER4:
+ // Zen
+ case CK_ZNVER1:
+ case CK_ZNVER2:
+ // Deprecated
+ case CK_x86_64:
+ case CK_Yonah:
+ case CK_Penryn:
+ case CK_Core2:
+ return 64;
+
+ // The following currently have unknown cache line sizes (but they are probably all 64):
+ // Core
+ case CK_None:
+ return None;
+ }
+ llvm_unreachable("Unknown CPU kind");
+}
+
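The new getCPUCacheLineSize hook above deliberately returns None when no CPU kind was selected, so callers must pick their own fallback. A hedged sketch of such a caller; the helper name and the 64-byte default are assumptions for illustration, not something the patch prescribes:

    // Sketch: query the target's cache line size, defaulting to 64 bytes.
    #include "clang/Basic/TargetInfo.h"
    #include "llvm/ADT/Optional.h"

    static unsigned cacheLineOrDefault(const clang::TargetInfo &TI) {
      llvm::Optional<unsigned> LineSize = TI.getCPUCacheLineSize();
      return LineSize.getValueOr(64); // unknown CPU kind: assume 64 bytes
    }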
bool X86TargetInfo::validateOutputSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint,
unsigned Size) const {
@@ -1771,9 +1350,14 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
case 'k':
return Size <= 64;
case 'z':
- case '0':
- // XMM0
- if (FeatureMap.lookup("sse"))
+ // XMM0/YMM/ZMM0
+ if (FeatureMap.lookup("avx512f"))
+ // ZMM0 can be used if target supports AVX512F.
+ return Size <= 512U;
+ else if (FeatureMap.lookup("avx"))
+ // YMM0 can be used if target supports AVX.
+ return Size <= 256U;
+ else if (FeatureMap.lookup("sse"))
return Size <= 128U;
return false;
case 'i':
@@ -1784,7 +1368,7 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
return false;
break;
}
- LLVM_FALLTHROUGH;
+ break;
case 'v':
case 'x':
if (FeatureMap.lookup("avx512f"))
@@ -1839,7 +1423,6 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
case 'i':
case 't':
case 'z':
- case '0':
case '2':
// "^" hints llvm that this is a 2 letter constraint.
// "Constraint++" is used to promote the string iterator
@@ -1852,38 +1435,9 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
}
}
-bool X86TargetInfo::checkCPUKind(CPUKind Kind) const {
- // Perform any per-CPU checks necessary to determine if this CPU is
- // acceptable.
- switch (Kind) {
- case CK_Generic:
- // No processor selected!
- return false;
-#define PROC(ENUM, STRING, IS64BIT) \
- case CK_##ENUM: \
- return IS64BIT || getTriple().getArch() == llvm::Triple::x86;
-#include "clang/Basic/X86Target.def"
- }
- llvm_unreachable("Unhandled CPU kind");
-}
-
void X86TargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
-#define PROC(ENUM, STRING, IS64BIT) \
- if (IS64BIT || getTriple().getArch() == llvm::Triple::x86) \
- Values.emplace_back(STRING);
- // For aliases we need to lookup the CPUKind to check get the 64-bit ness.
-#define PROC_ALIAS(ENUM, ALIAS) \
- if (checkCPUKind(CK_##ENUM)) \
- Values.emplace_back(ALIAS);
-#include "clang/Basic/X86Target.def"
-}
-
-X86TargetInfo::CPUKind X86TargetInfo::getCPUKind(StringRef CPU) const {
- return llvm::StringSwitch<CPUKind>(CPU)
-#define PROC(ENUM, STRING, IS64BIT) .Case(STRING, CK_##ENUM)
-#define PROC_ALIAS(ENUM, ALIAS) .Case(ALIAS, CK_##ENUM)
-#include "clang/Basic/X86Target.def"
- .Default(CK_Generic);
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ llvm::X86::fillValidCPUArchList(Values, Only64Bit);
}
ArrayRef<const char *> X86TargetInfo::getGCCRegNames() const {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index 5b5e284e5141..72a01d2514c2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/X86TargetParser.h"
namespace clang {
namespace targets {
@@ -124,21 +125,14 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPTWRITE = false;
bool HasINVPCID = false;
bool HasENQCMD = false;
+ bool HasAMXTILE = false;
+ bool HasAMXINT8 = false;
+ bool HasAMXBF16 = false;
+ bool HasSERIALIZE = false;
+ bool HasTSXLDTRK = false;
protected:
- /// Enumeration of all of the X86 CPUs supported by Clang.
- ///
- /// Each enumeration represents a particular CPU supported by Clang. These
- /// loosely correspond to the options passed to '-march' or '-mtune' flags.
- enum CPUKind {
- CK_Generic,
-#define PROC(ENUM, STRING, IS64BIT) CK_##ENUM,
-#include "clang/Basic/X86Target.def"
- } CPU = CK_Generic;
-
- bool checkCPUKind(CPUKind Kind) const;
-
- CPUKind getCPUKind(StringRef CPU) const;
+ llvm::X86::CPUKind CPU = llvm::X86::CK_None;
enum FPMathKind { FP_Default, FP_SSE, FP_387 } FPMath = FP_Default;
@@ -147,6 +141,7 @@ public:
: TargetInfo(Triple) {
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
+ HasStrictFP = true;
}
const char *getLongDoubleMangling() const override {
@@ -166,6 +161,10 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("esp") || RegName.equals("rsp");
+ }
+
bool validateCpuSupports(StringRef Name) const override;
bool validateCpuIs(StringRef Name) const override;
@@ -178,6 +177,8 @@ public:
StringRef Name,
llvm::SmallVectorImpl<StringRef> &Features) const override;
+ Optional<unsigned> getCPUCacheLineSize() const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -262,24 +263,8 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level,
- bool Enabled);
-
- static void setMMXLevel(llvm::StringMap<bool> &Features, MMX3DNowEnum Level,
- bool Enabled);
-
- static void setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
- bool Enabled);
-
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
- bool Enabled) const override {
- setFeatureEnabledImpl(Features, Name, Enabled);
- }
-
- // This exists purely to cut down on the number of virtual calls in
- // initFeatureMap which calls this repeatedly.
- static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
- StringRef Name, bool Enabled);
+ bool Enabled) const final;
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
@@ -288,7 +273,7 @@ public:
bool isValidFeatureName(StringRef Name) const override;
- bool hasFeature(StringRef Feature) const override;
+ bool hasFeature(StringRef Feature) const final;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
@@ -305,13 +290,16 @@ public:
}
bool isValidCPUName(StringRef Name) const override {
- return checkCPUKind(getCPUKind(Name));
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ return llvm::X86::parseArchX86(Name, Only64Bit) != llvm::X86::CK_None;
}
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
- return checkCPUKind(CPU = getCPUKind(Name));
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ CPU = llvm::X86::parseArchX86(Name, Only64Bit);
+ return CPU != llvm::X86::CK_None;
}
unsigned multiVersionSortPriority(StringRef Name) const override;
@@ -427,6 +415,8 @@ public:
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY NetBSDI386TargetInfo
@@ -729,6 +719,8 @@ public:
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ bool hasExtIntType() const override { return true; }
};
// x86-64 Windows target
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
index c94f93a99bca..c33766751aa1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
@@ -75,6 +75,8 @@ public:
}
bool allowsLargerPreferedTypeAlignment() const override { return false; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
new file mode 100644
index 000000000000..3b723afff70b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
@@ -0,0 +1,86 @@
+//===--- TypeTraits.cpp - Type Traits Support -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the type traits support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+using namespace clang;
+
+static constexpr const char *TypeTraitNames[] = {
+#define TYPE_TRAIT_1(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_2(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *TypeTraitSpellings[] = {
+#define TYPE_TRAIT_1(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_2(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ArrayTypeTraitNames[] = {
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ArrayTypeTraitSpellings[] = {
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *UnaryExprOrTypeTraitNames[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *UnaryExprOrTypeTraitSpellings[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+const char *clang::getTraitName(TypeTrait T) {
+ assert(T <= TT_Last && "invalid enum value!");
+ return TypeTraitNames[T];
+}
+
+const char *clang::getTraitName(ArrayTypeTrait T) {
+ assert(T <= ATT_Last && "invalid enum value!");
+ return ArrayTypeTraitNames[T];
+}
+
+const char *clang::getTraitName(UnaryExprOrTypeTrait T) {
+ assert(T <= UETT_Last && "invalid enum value!");
+ return UnaryExprOrTypeTraitNames[T];
+}
+
+const char *clang::getTraitSpelling(TypeTrait T) {
+ assert(T <= TT_Last && "invalid enum value!");
+ return TypeTraitSpellings[T];
+}
+
+const char *clang::getTraitSpelling(ArrayTypeTrait T) {
+ assert(T <= ATT_Last && "invalid enum value!");
+ return ArrayTypeTraitSpellings[T];
+}
+
+const char *clang::getTraitSpelling(UnaryExprOrTypeTrait T) {
+ assert(T <= UETT_Last && "invalid enum value!");
+ return UnaryExprOrTypeTraitSpellings[T];
+}
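The new TypeTraits.cpp above only materializes name/spelling lookup tables keyed by the trait enums; a short hedged example of how client code might use the accessors (dumpTrait is illustrative and not part of this patch):

    // Sketch: print a type trait's source spelling next to its internal name.
    #include "clang/Basic/TypeTraits.h"
    #include "llvm/Support/raw_ostream.h"

    static void dumpTrait(clang::TypeTrait T) {
      // For example: "__is_trivially_copyable (IsTriviallyCopyable)".
      llvm::errs() << clang::getTraitSpelling(T) << " ("
                   << clang::getTraitName(T) << ")\n";
    }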
diff --git a/contrib/llvm-project/clang/lib/Basic/Version.cpp b/contrib/llvm-project/clang/lib/Basic/Version.cpp
index c69d13b2f689..286107cab9d7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Version.cpp
@@ -28,46 +28,19 @@ std::string getClangRepositoryPath() {
return CLANG_REPOSITORY_STRING;
#else
#ifdef CLANG_REPOSITORY
- StringRef URL(CLANG_REPOSITORY);
+ return CLANG_REPOSITORY;
#else
- StringRef URL("");
+ return "";
#endif
-
- // If the CLANG_REPOSITORY is empty, try to use the SVN keyword. This helps us
- // pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL$");
- if (URL.empty()) {
- URL = SVNRepository.slice(SVNRepository.find(':'),
- SVNRepository.find("/lib/Basic"));
- }
-
- // Strip off version from a build from an integration branch.
- URL = URL.slice(0, URL.find("/src/tools/clang"));
-
- // Trim path prefix off, assuming path came from standard cfe path.
- size_t Start = URL.find("cfe/");
- if (Start != StringRef::npos)
- URL = URL.substr(Start + 4);
-
- return URL;
#endif
}
std::string getLLVMRepositoryPath() {
#ifdef LLVM_REPOSITORY
- StringRef URL(LLVM_REPOSITORY);
+ return LLVM_REPOSITORY;
#else
- StringRef URL("");
+ return "";
#endif
-
- // Trim path prefix off, assuming path came from standard llvm path.
- // Leave "llvm/" prefix to distinguish the following llvm revision from the
- // clang revision.
- size_t Start = URL.find("llvm/");
- if (Start != StringRef::npos)
- URL = URL.substr(Start);
-
- return URL;
}
std::string getClangRevision() {
@@ -124,8 +97,12 @@ std::string getClangToolFullVersion(StringRef ToolName) {
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << ToolName << " version " CLANG_VERSION_STRING " "
- << getClangFullRepositoryVersion();
+ OS << ToolName << " version " CLANG_VERSION_STRING;
+
+ std::string repo = getClangFullRepositoryVersion();
+ if (!repo.empty()) {
+ OS << " " << repo;
+ }
return OS.str();
}
@@ -138,7 +115,13 @@ std::string getClangFullCPPVersion() {
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion();
+ OS << "Clang " CLANG_VERSION_STRING;
+
+ std::string repo = getClangFullRepositoryVersion();
+ if (!repo.empty()) {
+ OS << " " << repo;
+ }
+
return OS.str();
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Warnings.cpp b/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
index 88ef2eaa6589..2c909d9510d4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
@@ -36,8 +36,9 @@ static void EmitUnknownDiagWarning(DiagnosticsEngine &Diags,
StringRef Opt) {
StringRef Suggestion = DiagnosticIDs::getNearestOption(Flavor, Opt);
Diags.Report(diag::warn_unknown_diag_option)
- << (Flavor == diag::Flavor::WarningOrError ? 0 : 1) << (Prefix.str() += Opt)
- << !Suggestion.empty() << (Prefix.str() += Suggestion);
+ << (Flavor == diag::Flavor::WarningOrError ? 0 : 1)
+ << (Prefix.str() += std::string(Opt)) << !Suggestion.empty()
+ << (Prefix.str() += std::string(Suggestion));
}
void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
diff --git a/contrib/llvm-project/clang/lib/Basic/XRayInstr.cpp b/contrib/llvm-project/clang/lib/Basic/XRayInstr.cpp
index ef2470f67200..79052e05860e 100644
--- a/contrib/llvm-project/clang/lib/Basic/XRayInstr.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/XRayInstr.cpp
@@ -16,13 +16,17 @@
namespace clang {
XRayInstrMask parseXRayInstrValue(StringRef Value) {
- XRayInstrMask ParsedKind = llvm::StringSwitch<XRayInstrMask>(Value)
- .Case("all", XRayInstrKind::All)
- .Case("custom", XRayInstrKind::Custom)
- .Case("function", XRayInstrKind::Function)
- .Case("typed", XRayInstrKind::Typed)
- .Case("none", XRayInstrKind::None)
- .Default(XRayInstrKind::None);
+ XRayInstrMask ParsedKind =
+ llvm::StringSwitch<XRayInstrMask>(Value)
+ .Case("all", XRayInstrKind::All)
+ .Case("custom", XRayInstrKind::Custom)
+ .Case("function",
+ XRayInstrKind::FunctionEntry | XRayInstrKind::FunctionExit)
+ .Case("function-entry", XRayInstrKind::FunctionEntry)
+ .Case("function-exit", XRayInstrKind::FunctionExit)
+ .Case("typed", XRayInstrKind::Typed)
+ .Case("none", XRayInstrKind::None)
+ .Default(XRayInstrKind::None);
return ParsedKind;
}
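Note that after the change above "function" is an alias for both new kinds, while "function-entry" and "function-exit" each select a single side. A hedged sketch of inspecting the parsed mask (the helper name is illustrative):

    // Sketch: check whether a bundle string asks for entry-only instrumentation.
    #include "clang/Basic/XRayInstr.h"
    #include "llvm/ADT/StringRef.h"

    static bool instrumentsEntryOnly(llvm::StringRef Value) {
      clang::XRayInstrMask Mask = clang::parseXRayInstrValue(Value);
      return (Mask & clang::XRayInstrKind::FunctionEntry) &&
             !(Mask & clang::XRayInstrKind::FunctionExit);
    }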
diff --git a/contrib/llvm-project/clang/lib/Basic/XRayLists.cpp b/contrib/llvm-project/clang/lib/Basic/XRayLists.cpp
index 222a28f79cc5..6d34617d4795 100644
--- a/contrib/llvm-project/clang/lib/Basic/XRayLists.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/XRayLists.cpp
@@ -9,7 +9,11 @@
// User-provided filters for always/never XRay instrumenting certain functions.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/XRayLists.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/SpecialCaseList.h"
using namespace clang;
@@ -25,6 +29,8 @@ XRayFunctionFilter::XRayFunctionFilter(
AttrListPaths, SM.getFileManager().getVirtualFileSystem())),
SM(SM) {}
+XRayFunctionFilter::~XRayFunctionFilter() = default;
+
XRayFunctionFilter::ImbueAttribute
XRayFunctionFilter::shouldImbueFunction(StringRef FunctionName) const {
// First apply the always instrument list, then if it isn't an "always" see
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index 0c3a076da0b5..bb40dace8a84 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -60,6 +60,8 @@ namespace swiftcall {
virtual bool supportsSwift() const { return false; }
+ virtual bool allowBFloatArgsAndRet() const { return false; }
+
CodeGen::CGCXXABI &getCXXABI() const;
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
@@ -102,6 +104,10 @@ namespace swiftcall {
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
+ // Implement Type::isPromotableIntegerType for ABI-specific needs. The
+ // only difference is that this considers _ExtInt as well.
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+
/// A convenience method to return an indirect ABIArgInfo with an
/// expected alignment equal to the ABI alignment of the given type.
CodeGen::ABIArgInfo
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index 0bfcab88a3a9..dce0940670a2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
@@ -31,6 +32,7 @@
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -45,12 +47,18 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Coroutines.h"
+#include "llvm/Transforms/Coroutines/CoroCleanup.h"
+#include "llvm/Transforms/Coroutines/CoroEarly.h"
+#include "llvm/Transforms/Coroutines/CoroElide.h"
+#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
@@ -71,6 +79,7 @@
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/UniqueInternalLinkageNames.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -216,6 +225,7 @@ getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
Opts.TracePCGuard = CGOpts.SanitizeCoverageTracePCGuard;
Opts.NoPrune = CGOpts.SanitizeCoverageNoPrune;
Opts.Inline8bitCounters = CGOpts.SanitizeCoverageInline8bitCounters;
+ Opts.InlineBoolFlag = CGOpts.SanitizeCoverageInlineBoolFlag;
Opts.PCTable = CGOpts.SanitizeCoveragePCTable;
Opts.StackDepth = CGOpts.SanitizeCoverageStackDepth;
return Opts;
@@ -227,7 +237,9 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
static_cast<const PassManagerBuilderWrapper &>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
auto Opts = getSancovOptsFromCGOpts(CGOpts);
- PM.add(createModuleSanitizerCoverageLegacyPassPass(Opts));
+ PM.add(createModuleSanitizerCoverageLegacyPassPass(
+ Opts, CGOpts.SanitizeCoverageAllowlistFiles,
+ CGOpts.SanitizeCoverageBlocklistFiles));
}
// Check if ASan should use GC-friendly instrumentation for globals.
@@ -350,7 +362,7 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
break;
case CodeGenOptions::MASSV:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV);
- break;
+ break;
case CodeGenOptions::SVML:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
break;
@@ -413,7 +425,8 @@ static CodeGenFileType getCodeGenFileType(BackendAction Action) {
}
}
-static void initTargetOptions(llvm::TargetOptions &Options,
+static void initTargetOptions(DiagnosticsEngine &Diags,
+ llvm::TargetOptions &Options,
const CodeGenOptions &CodeGenOpts,
const clang::TargetOptions &TargetOpts,
const LangOptions &LangOpts,
@@ -436,15 +449,15 @@ static void initTargetOptions(llvm::TargetOptions &Options,
// Set FP fusion mode.
switch (LangOpts.getDefaultFPContractMode()) {
- case LangOptions::FPC_Off:
+ case LangOptions::FPM_Off:
// Preserve any contraction performed by the front-end. (Strict performs
// splitting of the muladd intrinsic in the backend.)
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
- case LangOptions::FPC_On:
+ case LangOptions::FPM_On:
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
- case LangOptions::FPC_Fast:
+ case LangOptions::FPM_Fast:
Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
break;
}
@@ -466,22 +479,44 @@ static void initTargetOptions(llvm::TargetOptions &Options,
if (LangOpts.WasmExceptions)
Options.ExceptionModel = llvm::ExceptionHandling::Wasm;
- Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
- Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoInfsFPMath = LangOpts.NoHonorInfs;
+ Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+
+ Options.BBSections =
+ llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
+ .Case("all", llvm::BasicBlockSection::All)
+ .Case("labels", llvm::BasicBlockSection::Labels)
+ .StartsWith("list=", llvm::BasicBlockSection::List)
+ .Case("none", llvm::BasicBlockSection::None)
+ .Default(llvm::BasicBlockSection::None);
+
+ if (Options.BBSections == llvm::BasicBlockSection::List) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
+ MemoryBuffer::getFile(CodeGenOpts.BBSections.substr(5));
+ if (!MBOrErr)
+ Diags.Report(diag::err_fe_unable_to_load_basic_block_sections_file)
+ << MBOrErr.getError().message();
+ else
+ Options.BBSectionsFuncListBuf = std::move(*MBOrErr);
+ }
+
Options.FunctionSections = CodeGenOpts.FunctionSections;
Options.DataSections = CodeGenOpts.DataSections;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
+ Options.UniqueBasicBlockSectionNames =
+ CodeGenOpts.UniqueBasicBlockSectionNames;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
- Options.EnableDebugEntryValues = CodeGenOpts.EnableDebugEntryValues;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
+ Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
+ Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
@@ -502,6 +537,8 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Entry.Group == frontend::IncludeDirGroup::System))
Options.MCOptions.IASSearchPaths.push_back(
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
+ Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
+ Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
}
static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
if (CodeGenOpts.DisableGCov)
@@ -514,12 +551,9 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
Options.EmitData = CodeGenOpts.EmitGcovArcs;
llvm::copy(CodeGenOpts.CoverageVersion, std::begin(Options.Version));
- Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
- Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData;
Options.Filter = CodeGenOpts.ProfileFilterFiles;
Options.Exclude = CodeGenOpts.ProfileExcludeFiles;
- Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
return Options;
}
@@ -553,13 +587,24 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
std::unique_ptr<TargetLibraryInfoImpl> TLII(
createTLII(TargetTriple, CodeGenOpts));
+ // If we reached here with a non-empty index file name, then the index file
+ // was empty and we are not performing ThinLTO backend compilation (used in
+ // testing in a distributed build environment). Drop any type test
+ // assume sequences inserted for whole program vtables so that codegen doesn't
+ // complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ MPM.add(createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
+
PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts);
// At O0 and O1 we only run the always inliner which is more efficient. At
// higher optimization levels we run the normal inliner.
if (CodeGenOpts.OptimizationLevel <= 1) {
- bool InsertLifetimeIntrinsics = (CodeGenOpts.OptimizationLevel != 0 &&
- !CodeGenOpts.DisableLifetimeMarkers);
+ bool InsertLifetimeIntrinsics = ((CodeGenOpts.OptimizationLevel != 0 &&
+ !CodeGenOpts.DisableLifetimeMarkers) ||
+ LangOpts.Coroutines);
PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
} else {
// We do not want to inline hot callsites for SamplePGO module-summary build
@@ -575,6 +620,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ PMBuilder.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
// Loop interleaving in the loop vectorizer has historically been set to be
@@ -689,6 +737,12 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.RewriteMapFiles.empty())
addSymbolRewriterPass(CodeGenOpts, &MPM);
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage symbols
+ // with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.add(createUniqueInternalLinkageNamesPass());
+ }
+
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts)) {
MPM.add(createGCOVProfilerPass(*Options));
if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
@@ -718,7 +772,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = DefaultProfileGenName;
+ PMBuilder.PGOInstrGen = std::string(DefaultProfileGenName);
}
if (CodeGenOpts.hasProfileIRUse()) {
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
@@ -766,7 +820,7 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
llvm::TargetOptions Options;
- initTargetOptions(Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
+ initTargetOptions(Diags, Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
Options, RM, CM, OptLevel));
}
@@ -924,7 +978,7 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level!");
case 1:
- return PassBuilder::O1;
+ return PassBuilder::OptimizationLevel::O1;
case 2:
switch (Opts.OptimizeSize) {
@@ -932,24 +986,49 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level for size!");
case 0:
- return PassBuilder::O2;
+ return PassBuilder::OptimizationLevel::O2;
case 1:
- return PassBuilder::Os;
+ return PassBuilder::OptimizationLevel::Os;
case 2:
- return PassBuilder::Oz;
+ return PassBuilder::OptimizationLevel::Oz;
}
case 3:
- return PassBuilder::O3;
+ return PassBuilder::OptimizationLevel::O3;
}
}
+static void addCoroutinePassesAtO0(ModulePassManager &MPM,
+ const LangOptions &LangOpts,
+ const CodeGenOptions &CodeGenOpts) {
+ if (!LangOpts.Coroutines)
+ return;
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroEarlyPass()));
+
+ CGSCCPassManager CGPM(CodeGenOpts.DebugPassManager);
+ CGPM.addPass(CoroSplitPass());
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(CoroElidePass()));
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroCleanupPass()));
+}
+
static void addSanitizersAtO0(ModulePassManager &MPM,
const Triple &TargetTriple,
const LangOptions &LangOpts,
const CodeGenOptions &CodeGenOpts) {
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageBlocklistFiles));
+ }
+
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
@@ -970,8 +1049,11 @@ static void addSanitizersAtO0(ModulePassManager &MPM,
}
if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- MPM.addPass(MemorySanitizerPass({}));
- MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass({})));
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({TrackOrigins, Recover, false})));
}
if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
@@ -1013,7 +1095,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput,
"", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
CodeGenOpts.DebugInfoForProfiling);
@@ -1046,13 +1128,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
" pass");
PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
PGOOpt = PGOOptions("",
CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput,
"", PGOOptions::NoAction, PGOOptions::CSIRInstr,
CodeGenOpts.DebugInfoForProfiling);
@@ -1065,6 +1147,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PTO.LoopInterleaving = CodeGenOpts.UnrollLoops;
PTO.LoopVectorization = CodeGenOpts.VectorizeLoop;
PTO.SLPVectorization = CodeGenOpts.VectorizeSLP;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
+ PTO.Coroutines = LangOpts.Coroutines;
PassInstrumentationCallbacks PIC;
StandardInstrumentations SI;
@@ -1114,6 +1200,15 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
bool IsLTO = CodeGenOpts.PrepareForLTO;
if (CodeGenOpts.OptimizationLevel == 0) {
+ // If we reached here with a non-empty index file name, then the index
+ // file was empty and we are not performing ThinLTO backend compilation
+ // (used in testing in a distributed build environment). Drop any type
+ // test assume sequences inserted for whole program vtables so that
+ // codegen doesn't complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
MPM.addPass(GCOVProfilerPass(*Options));
if (Optional<InstrProfOptions> Options =
@@ -1124,7 +1219,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// which is just that always inlining occurs. Further, disable generating
// lifetime intrinsics to avoid enabling further optimizations during
// code generation.
- MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
+ // However, we need to insert lifetime intrinsics to avoid invalid access
+ // caused by multithreaded coroutines.
+ MPM.addPass(
+ AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/LangOpts.Coroutines));
// At -O0, we can still do PGO. Add all the requested passes for
// instrumentation PGO, if requested.
@@ -1140,6 +1238,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
MPM.addPass(createModuleToFunctionPassAdaptor(BoundsCheckingPass()));
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage
+ // symbols with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+ }
+
// Lastly, add semantically necessary passes for LTO.
if (IsLTO || IsThinLTO) {
MPM.addPass(CanonicalizeAliasesPass());
@@ -1150,6 +1254,18 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// configure the pipeline.
PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+ // If we reached here with a non-empty index file name, then the index
+ // file was empty and we are not performing ThinLTO backend compilation
+  // (used in testing in a distributed build environment). Drop any type
+ // test assume sequences inserted for whole program vtables so that
+ // codegen doesn't complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
+ });
+
PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/false)));
@@ -1157,50 +1273,60 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// Register callbacks to schedule sanitizer passes at the appropriate part of
// the pipeline.
- // FIXME: either handle asan/the remaining sanitizers or error out
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
PB.registerScalarOptimizerLateEPCallback(
[](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
+
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ PB.registerOptimizerLastEPCallback(
+ [this](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageBlocklistFiles));
+ });
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
- MPM.addPass(MemorySanitizerPass({}));
- });
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
PB.registerOptimizerLastEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- FPM.addPass(MemorySanitizerPass({}));
+ [TrackOrigins, Recover](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({TrackOrigins, Recover, false})));
});
}
if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM) { MPM.addPass(ThreadSanitizerPass()); });
PB.registerOptimizerLastEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- FPM.addPass(ThreadSanitizerPass());
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
});
}
if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- PB.registerPipelineStartEPCallback([&](ModulePassManager &MPM) {
- MPM.addPass(
- RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- });
bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Address);
bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
- PB.registerOptimizerLastEPCallback(
- [Recover, UseAfterScope](FunctionPassManager &FPM,
- PassBuilder::OptimizationLevel Level) {
- FPM.addPass(AddressSanitizerPass(
- /*CompileKernel=*/false, Recover, UseAfterScope));
- });
bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
- PB.registerPipelineStartEPCallback(
- [Recover, ModuleUseAfterScope,
- UseOdrIndicator](ModulePassManager &MPM) {
+ PB.registerOptimizerLastEPCallback(
+ [Recover, UseAfterScope, ModuleUseAfterScope, UseOdrIndicator](
+ ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(
+ RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, Recover, ModuleUseAfterScope,
UseOdrIndicator));
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(AddressSanitizerPass(
+ /*CompileKernel=*/false, Recover, UseAfterScope)));
});
}
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
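
For readers skimming the hunk above: the sanitizer registrations move from registerPipelineStartEPCallback to registerOptimizerLastEPCallback, and the callbacks now take a ModulePassManager, so the function-level sanitizer passes are wrapped in createModuleToFunctionPassAdaptor. A condensed sketch of that pattern, using the ThreadSanitizer case with the pass and callback names exactly as they appear in this diff (the PassBuilder callback signatures have changed again in later LLVM releases):

```cpp
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"

// Schedule TSan at the end of the optimization pipeline: the module-level
// run of the pass is added directly, then the function-level run is applied
// to every function through the module-to-function adaptor.
static void registerTSanCallbacks(llvm::PassBuilder &PB) {
  PB.registerOptimizerLastEPCallback(
      [](llvm::ModulePassManager &MPM, llvm::PassBuilder::OptimizationLevel) {
        MPM.addPass(llvm::ThreadSanitizerPass());
        MPM.addPass(
            llvm::createModuleToFunctionPassAdaptor(llvm::ThreadSanitizerPass()));
      });
}
```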
@@ -1213,6 +1339,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
MPM.addPass(InstrProfiling(*Options, false));
});
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage
+ // symbols with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+ }
+
if (IsThinLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(
Level, CodeGenOpts.DebugPassManager);
@@ -1229,13 +1361,6 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
}
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
- auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(SancovOpts));
- }
-
if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
MPM.addPass(HWAddressSanitizerPass(
@@ -1247,6 +1372,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
if (CodeGenOpts.OptimizationLevel == 0) {
+ addCoroutinePassesAtO0(MPM, LangOpts, CodeGenOpts);
addSanitizersAtO0(MPM, TargetTriple, LangOpts, CodeGenOpts);
}
}
@@ -1358,15 +1484,12 @@ BitcodeModule *clang::FindThinLTOModule(MutableArrayRef<BitcodeModule> BMs) {
return nullptr;
}
-static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
- const HeaderSearchOptions &HeaderOpts,
- const CodeGenOptions &CGOpts,
- const clang::TargetOptions &TOpts,
- const LangOptions &LOpts,
- std::unique_ptr<raw_pwrite_stream> OS,
- std::string SampleProfile,
- std::string ProfileRemapping,
- BackendAction Action) {
+static void runThinLTOBackend(
+ DiagnosticsEngine &Diags, ModuleSummaryIndex *CombinedIndex, Module *M,
+ const HeaderSearchOptions &HeaderOpts, const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts, const LangOptions &LOpts,
+ std::unique_ptr<raw_pwrite_stream> OS, std::string SampleProfile,
+ std::string ProfileRemapping, BackendAction Action) {
StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
@@ -1436,7 +1559,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.RelocModel = CGOpts.RelocationModel;
Conf.CGOptLevel = getCGOptLevel(CGOpts);
Conf.OptLevel = CGOpts.OptimizationLevel;
- initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
+ initTargetOptions(Diags, Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
Conf.PTO.LoopUnrolling = CGOpts.UnrollLoops;
// For historical reasons, loop interleaving is set to mirror setting for loop
@@ -1444,6 +1567,9 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.PTO.LoopInterleaving = CGOpts.UnrollLoops;
Conf.PTO.LoopVectorization = CGOpts.VectorizeLoop;
Conf.PTO.SLPVectorization = CGOpts.VectorizeSLP;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ Conf.PTO.CallGraphProfile = !CGOpts.DisableIntegratedAS;
// Context sensitive profile.
if (CGOpts.hasProfileCSIRInstr()) {
@@ -1525,8 +1651,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// of an error).
if (CombinedIndex) {
if (!CombinedIndex->skipModuleByDistributedBackend()) {
- runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
- LOpts, std::move(OS), CGOpts.SampleProfileFile,
+ runThinLTOBackend(Diags, CombinedIndex.get(), M, HeaderOpts, CGOpts,
+ TOpts, LOpts, std::move(OS), CGOpts.SampleProfileFile,
CGOpts.ProfileRemappingFile, Action);
return;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index 149982d82790..a58450ddd4c5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,8 +119,9 @@ namespace {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
- lvalue.getType(), lvalue.getExtVectorAddress()
- .getElementType()->getVectorNumElements());
+ lvalue.getType(), cast<llvm::VectorType>(
+ lvalue.getExtVectorAddress().getElementType())
+ ->getNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
@@ -1826,7 +1827,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -1908,7 +1909,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -2018,6 +2019,10 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+ if (AO == llvm::AtomicOrdering::Acquire)
+ AO = llvm::AtomicOrdering::Monotonic;
+ else if (AO == llvm::AtomicOrdering::AcquireRelease)
+ AO = llvm::AtomicOrdering::Release;
// Initializations don't need to be atomic.
if (!isInit)
store->setAtomic(AO);
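
The four added lines above carry no comment in the patch; the reason for them is that LLVM IR does not allow an atomic store to use an acquire (or acquire-release) ordering, so the requested ordering is weakened to the strongest one that is legal for a store before it is attached to the StoreInst. The same restriction exists at the C++ level, shown here as an illustrative snippet (not Clang code):

```cpp
#include <atomic>

// std::atomic<T>::store only accepts relaxed, release, or seq_cst; acquire
// and acq_rel apply to loads and read-modify-write operations. The hunk
// above performs the analogous downgrade on the IR ordering:
//   Acquire        -> Monotonic (relaxed)
//   AcquireRelease -> Release
void store_example(std::atomic<int> &A, int V) {
  A.store(V, std::memory_order_relaxed); // what an "acquire" store degrades to
  A.store(V, std::memory_order_release); // what an "acq_rel" store degrades to
}
```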
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 11f54d1f7fb2..615b78235041 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -36,7 +36,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
CapturesNonExternalType(false), LocalAddress(Address::invalid()),
- StructureType(nullptr), Block(block), DominatingIP(nullptr) {
+ StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -775,151 +775,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
-/// Enter the scope of a block. This should be run at the entrance to
-/// a full-expression so that the block's cleanups are pushed at the
-/// right place in the stack.
-static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
- assert(CGF.HaveInsertPoint());
-
- // Allocate the block info and place it at the head of the list.
- CGBlockInfo &blockInfo =
- *new CGBlockInfo(block, CGF.CurFn->getName());
- blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
- CGF.FirstBlockInfo = &blockInfo;
-
- // Compute information about the layout, etc., of this block,
- // pushing cleanups as necessary.
- computeBlockInfo(CGF.CGM, &CGF, blockInfo);
-
- // Nothing else to do if it can be global.
- if (blockInfo.CanBeGlobal) return;
-
- // Make the allocation for the block.
- blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
- blockInfo.BlockAlign, "block");
-
- // If there are cleanups to emit, enter them (but inactive).
- if (!blockInfo.NeedsCopyDispose) return;
-
- // Walk through the captures (in order) and find the ones not
- // captured by constant.
- for (const auto &CI : block->captures()) {
- // Ignore __block captures; there's nothing special in the
- // on-stack block that we need to do for them.
- if (CI.isByRef()) continue;
-
- // Ignore variables that are constant-captured.
- const VarDecl *variable = CI.getVariable();
- CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) continue;
-
- // Ignore objects that aren't destructed.
- QualType VT = getCaptureFieldType(CGF, CI);
- QualType::DestructionKind dtorKind = VT.isDestructedType();
- if (dtorKind == QualType::DK_none) continue;
-
- CodeGenFunction::Destroyer *destroyer;
-
- // Block captures count as local values and have imprecise semantics.
- // They also can't be arrays, so need to worry about that.
- //
- // For const-qualified captures, emit clang.arc.use to ensure the captured
- // object doesn't get released while we are still depending on its validity
- // within the block.
- if (VT.isConstQualified() &&
- VT.getObjCLifetime() == Qualifiers::OCL_Strong &&
- CGF.CGM.getCodeGenOpts().OptimizationLevel != 0) {
- assert(CGF.CGM.getLangOpts().ObjCAutoRefCount &&
- "expected ObjC ARC to be enabled");
- destroyer = CodeGenFunction::emitARCIntrinsicUse;
- } else if (dtorKind == QualType::DK_objc_strong_lifetime) {
- destroyer = CodeGenFunction::destroyARCStrongImprecise;
- } else {
- destroyer = CGF.getDestroyer(dtorKind);
- }
-
- // GEP down to the address.
- Address addr =
- CGF.Builder.CreateStructGEP(blockInfo.LocalAddress, capture.getIndex());
-
- // We can use that GEP as the dominating IP.
- if (!blockInfo.DominatingIP)
- blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
-
- CleanupKind cleanupKind = InactiveNormalCleanup;
- bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
- if (useArrayEHCleanup)
- cleanupKind = InactiveNormalAndEHCleanup;
-
- CGF.pushDestroy(cleanupKind, addr, VT,
- destroyer, useArrayEHCleanup);
-
- // Remember where that cleanup was.
- capture.setCleanup(CGF.EHStack.stable_begin());
- }
-}
-
-/// Enter a full-expression with a non-trivial number of objects to
-/// clean up. This is in this file because, at the moment, the only
-/// kind of cleanup object is a BlockDecl*.
-void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) {
- if (const auto EWC = dyn_cast<ExprWithCleanups>(E)) {
- assert(EWC->getNumObjects() != 0);
- for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects())
- enterBlockScope(*this, C);
- }
-}
-
-/// Find the layout for the given block in a linked list and remove it.
-static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
- const BlockDecl *block) {
- while (true) {
- assert(head && *head);
- CGBlockInfo *cur = *head;
-
- // If this is the block we're looking for, splice it out of the list.
- if (cur->getBlockDecl() == block) {
- *head = cur->NextBlockInfo;
- return cur;
- }
-
- head = &cur->NextBlockInfo;
- }
-}
-
-/// Destroy a chain of block layouts.
-void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
- assert(head && "destroying an empty chain");
- do {
- CGBlockInfo *cur = head;
- head = cur->NextBlockInfo;
- delete cur;
- } while (head != nullptr);
-}
-
/// Emit a block literal expression in the current function.
llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
- if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ if (!blockExpr->getBlockDecl()->hasCaptures())
// The block literal is emitted as a global variable, and the block invoke
// function has to be extracted from its initializer.
- if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) {
+ if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
return Block;
- }
- CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
- computeBlockInfo(CGM, this, blockInfo);
- blockInfo.BlockExpression = blockExpr;
- return EmitBlockLiteral(blockInfo);
- }
-
- // Find the block info for this block and take ownership of it.
- std::unique_ptr<CGBlockInfo> blockInfo;
- blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
- blockExpr->getBlockDecl()));
- blockInfo->BlockExpression = blockExpr;
- return EmitBlockLiteral(*blockInfo);
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ if (!blockInfo.CanBeGlobal)
+ blockInfo.LocalAddress = CreateTempAlloca(blockInfo.StructureType,
+ blockInfo.BlockAlign, "block");
+ return EmitBlockLiteral(blockInfo);
}
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
@@ -1161,12 +1033,64 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*captured by init*/ false);
}
- // Activate the cleanup if layout pushed one.
- if (!CI.isByRef()) {
- EHScopeStack::stable_iterator cleanup = capture.getCleanup();
- if (cleanup.isValid())
- ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
+ // Push a cleanup for the capture if necessary.
+ if (!blockInfo.NeedsCopyDispose)
+ continue;
+
+ // Ignore __block captures; there's nothing special in the on-stack block
+ // that we need to do for them.
+ if (CI.isByRef())
+ continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (dtorKind == QualType::DK_none)
+ continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+    // They also can't be arrays, so there's no need to worry about that.
+ //
+ // For const-qualified captures, emit clang.arc.use to ensure the captured
+ // object doesn't get released while we are still depending on its validity
+ // within the block.
+ if (type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ assert(CGM.getLangOpts().ObjCAutoRefCount &&
+ "expected ObjC ARC to be enabled");
+ destroyer = emitARCIntrinsicUse;
+ } else if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = destroyARCStrongImprecise;
+ } else {
+ destroyer = getDestroyer(dtorKind);
}
+
+ CleanupKind cleanupKind = NormalCleanup;
+ bool useArrayEHCleanup = needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = NormalAndEHCleanup;
+
+ // Extend the lifetime of the capture to the end of the scope enclosing the
+ // block expression except when the block decl is in the list of RetExpr's
+ // cleanup objects, in which case its lifetime ends after the full
+ // expression.
+ auto IsBlockDeclInRetExpr = [&]() {
+ auto *EWC = llvm::dyn_cast_or_null<ExprWithCleanups>(RetExpr);
+ if (EWC)
+ for (auto &C : EWC->getObjects())
+ if (auto *BD = C.dyn_cast<BlockDecl *>())
+ if (BD == blockDecl)
+ return true;
+ return false;
+ };
+
+ if (IsBlockDeclInRetExpr())
+ pushDestroy(cleanupKind, blockField, type, destroyer, useArrayEHCleanup);
+ else
+ pushLifetimeExtendedDestroy(cleanupKind, blockField, type, destroyer,
+ useArrayEHCleanup);
}
// Cast to the converted block-pointer type, which happens (somewhat
@@ -1449,7 +1373,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
Init));
b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(),
- b.CreateStructGEP(literal, 0), CGM.getPointerAlign().getQuantity());
+ b.CreateStructGEP(literal, 0),
+ CGM.getPointerAlign().getAsAlign());
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
// need to specify that this runs early in library initialisation.
@@ -2031,11 +1956,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
FunctionDecl *FD = FunctionDecl::Create(
C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
FunctionTy, nullptr, SC_Static, false, false);
-
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
+ // This is necessary to avoid inheriting the previous line number.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, args);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
Address src = GetAddrOfLocalVar(&SrcDecl);
@@ -2226,10 +2153,12 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
+ // This is necessary to avoid inheriting the previous line number.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, args);
markAsIgnoreThreadCheckingAtRuntime(Fn);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index c4bfde666154..698ecd3d926a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -257,10 +257,6 @@ public:
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
- /// An instruction which dominates the full-expression that the
- /// block is inside.
- llvm::Instruction *DominatingIP;
-
/// The next block in the block-info chain. Invalid if this block
/// info is not part of the CGF's block-info chain, which is true
/// if it corresponds to a global block or a block whose expression
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index 107c9275431c..38e96c0f4ee6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -22,16 +22,15 @@ class CodeGenFunction;
/// This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
-class CGBuilderInserter : protected llvm::IRBuilderDefaultInserter {
+class CGBuilderInserter final : public llvm::IRBuilderDefaultInserter {
public:
CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
-protected:
/// This forwards to CodeGenFunction::InsertHelper.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
- llvm::BasicBlock::iterator InsertPt) const;
+ llvm::BasicBlock::iterator InsertPt) const override;
private:
CodeGenFunction *CGF = nullptr;
};
@@ -68,38 +67,34 @@ public:
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- Name);
+ Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- Name);
+ Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- IsVolatile,
- Name);
+ return CreateAlignedLoad(
+ Addr.getPointer(), Addr.getAlignment().getAsAlign(), IsVolatile, Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const char *Name) {
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
assert(Addr->getType()->getPointerElementType() == Ty);
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
// Note that we intentionally hide the CreateStore APIs that don't
@@ -113,7 +108,7 @@ public:
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
CharUnits Align, bool IsVolatile = false) {
- return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
+ return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile);
}
// FIXME: these "default-aligned" APIs should be removed,
@@ -284,6 +279,13 @@ public:
IsVolatile);
}
+ using CGBuilderBaseTy::CreateMemCpyInline;
+ llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) {
+ return CreateMemCpyInline(
+ Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
+ Src.getAlignment().getAsAlign(), getInt64(Size));
+ }
+
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
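
The CGBuilder.h changes above are part of the LLVM-wide migration of alignment parameters from raw integers to the llvm::Align type, for which clang::CharUnits provides getAsAlign(). A minimal standalone sketch of the conversion (not taken from the patch):

```cpp
#include "clang/AST/CharUnits.h"
#include "llvm/Support/Alignment.h"

// CharUnits::getAsAlign() wraps the byte quantity in llvm::Align, whose
// constructor asserts that the value is a non-zero power of two, so bogus
// alignments are caught at the conversion point rather than deep inside
// IRBuilder.
llvm::Align toAlign(clang::CharUnits CU) {
  return CU.getAsAlign();
}
```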
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 2d20f92fbb3d..8994b939093e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -28,6 +28,7 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
@@ -43,9 +44,10 @@
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/X86TargetParser.h"
#include <sstream>
using namespace clang;
@@ -74,6 +76,8 @@ static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
break;
}
}
+ if (CGF.CGM.stopAutoInit())
+ return;
CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
}
@@ -215,8 +219,9 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
if (Invert)
- Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
- llvm::ConstantInt::get(IntType, -1));
+ Result =
+ CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
+ llvm::ConstantInt::getAllOnesValue(IntType));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
@@ -411,6 +416,25 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
}
+// Emit an intrinsic where all operands are of the same type as the result.
+// Depending on mode, this may be a constrained floating-point intrinsic.
+static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+ unsigned IntrinsicID,
+ unsigned ConstrainedIntrinsicID,
+ llvm::Type *Ty,
+ ArrayRef<Value *> Args) {
+ Function *F;
+ if (CGF.Builder.getIsFPConstrained())
+ F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
+ else
+ F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
+
+ if (CGF.Builder.getIsFPConstrained())
+ return CGF.Builder.CreateConstrainedFPCall(F, Args);
+ else
+ return CGF.Builder.CreateCall(F, Args);
+}
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
@@ -566,7 +590,9 @@ static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
- unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
+ unsigned Width = Type->isBooleanType() ? 1
+ : Type->isExtIntType() ? context.getIntWidth(Type)
+ : context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
}
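
The new isExtIntType() arm exists because the generic type-info width of an _ExtInt differs from its declared bit-width, and the checked-arithmetic builtins that call this helper need the declared width from getIntWidth(). A user-level illustration, assuming (as this helper change suggests) that the overflow builtins accept _ExtInt operands:

```cpp
// Illustrative only; _ExtInt was later renamed to _BitInt.
bool add_would_overflow(_ExtInt(37) a, _ExtInt(37) b) {
  _ExtInt(37) r;
  // The width reported by the helper above must be 37, not the padded
  // storage width, or the overflow check runs at the wrong precision.
  return __builtin_add_overflow(a, b, &r);
}
```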
@@ -1251,6 +1277,8 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
FuncionTy, nullptr, SC_PrivateExtern, false, false);
+ // Avoid generating debug location info for the function.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, Args);
@@ -1320,14 +1348,42 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
} else if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
- // Check if this is a retainable type.
- if (TheExpr->getType()->isObjCRetainableType()) {
+ // If a temporary object that requires destruction after the full
+ // expression is passed, push a lifetime-extended cleanup to extend its
+ // lifetime to the end of the enclosing block scope.
+ auto LifetimeExtendObject = [&](const Expr *E) {
+ E = E->IgnoreParenCasts();
+ // Extend lifetimes of objects returned by function calls and message
+ // sends.
+
+ // FIXME: We should do this in other cases in which temporaries are
+ // created including arguments of non-ARC types (e.g., C++
+ // temporaries).
+ if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
+ return true;
+ return false;
+ };
+
+ if (TheExpr->getType()->isObjCRetainableType() &&
+ getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
"Only scalar can be a ObjC retainable type");
- // Check if the object is constant, if not, save it in
- // RetainableOperands.
- if (!isa<Constant>(ArgVal))
- RetainableOperands.push_back(ArgVal);
+ if (!isa<Constant>(ArgVal)) {
+ CleanupKind Cleanup = getARCCleanupKind();
+ QualType Ty = TheExpr->getType();
+ Address Alloca = Address::invalid();
+ Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
+ ArgVal = EmitARCRetain(Ty, ArgVal);
+ Builder.CreateStore(ArgVal, Addr);
+ pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
+ CodeGenFunction::destroyARCStrongPrecise,
+ Cleanup & EHCleanup);
+
+ // Push a clang.arc.use call to ensure ARC optimizer knows that the
+ // argument has to be alive.
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0)
+ pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
+ }
}
} else {
ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
@@ -1349,18 +1405,6 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
-
- // Push a clang.arc.use cleanup for each object in RetainableOperands. The
- // cleanup will cause the use to appear after the final log call, keeping
- // the object valid while it’s held in the log buffer. Note that if there’s
- // a release cleanup on the object, it will already be active; since
- // cleanups are emitted in reverse order, the use will occur before the
- // object is released.
- if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
- CGM.getCodeGenOpts().OptimizationLevel != 0)
- for (llvm::Value *Object : RetainableOperands)
- pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);
-
return RValue::get(BufAddr.getPointer());
}
@@ -1521,8 +1565,7 @@ static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
// We check whether we are in a recursive type
if (CanonicalType->isRecordType()) {
- Value *TmpRes =
- dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
+ TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
Res = CGF.Builder.CreateAdd(TmpRes, Res);
continue;
}
@@ -1629,7 +1672,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ceilf:
case Builtin::BI__builtin_ceilf16:
case Builtin::BI__builtin_ceill:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::ceil,
Intrinsic::experimental_constrained_ceil));
@@ -1650,7 +1693,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_cosf:
case Builtin::BI__builtin_cosf16:
case Builtin::BI__builtin_cosl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::cos,
Intrinsic::experimental_constrained_cos));
@@ -1661,7 +1704,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_expf:
case Builtin::BI__builtin_expf16:
case Builtin::BI__builtin_expl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp,
Intrinsic::experimental_constrained_exp));
@@ -1672,7 +1715,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_exp2f:
case Builtin::BI__builtin_exp2f16:
case Builtin::BI__builtin_exp2l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp2,
Intrinsic::experimental_constrained_exp2));
@@ -1693,7 +1736,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_floorf:
case Builtin::BI__builtin_floorf16:
case Builtin::BI__builtin_floorl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::floor,
Intrinsic::experimental_constrained_floor));
@@ -1704,7 +1747,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmaf16:
case Builtin::BI__builtin_fmal:
- return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::fma,
Intrinsic::experimental_constrained_fma));
@@ -1715,7 +1758,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::maxnum,
Intrinsic::experimental_constrained_maxnum));
@@ -1726,7 +1769,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::minnum,
Intrinsic::experimental_constrained_minnum));
@@ -1751,7 +1794,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_logf:
case Builtin::BI__builtin_logf16:
case Builtin::BI__builtin_logl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log,
Intrinsic::experimental_constrained_log));
@@ -1762,7 +1805,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log10f:
case Builtin::BI__builtin_log10f16:
case Builtin::BI__builtin_log10l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log10,
Intrinsic::experimental_constrained_log10));
@@ -1773,7 +1816,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log2f:
case Builtin::BI__builtin_log2f16:
case Builtin::BI__builtin_log2l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log2,
Intrinsic::experimental_constrained_log2));
@@ -1783,7 +1826,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_nearbyint:
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::nearbyint,
Intrinsic::experimental_constrained_nearbyint));
@@ -1794,7 +1837,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powf16:
case Builtin::BI__builtin_powl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::pow,
Intrinsic::experimental_constrained_pow));
@@ -1805,7 +1848,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_rintf:
case Builtin::BI__builtin_rintf16:
case Builtin::BI__builtin_rintl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::rint,
Intrinsic::experimental_constrained_rint));
@@ -1816,7 +1859,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_roundf:
case Builtin::BI__builtin_roundf16:
case Builtin::BI__builtin_roundl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
@@ -1827,7 +1870,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sinf:
case Builtin::BI__builtin_sinf16:
case Builtin::BI__builtin_sinl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sin,
Intrinsic::experimental_constrained_sin));
@@ -1838,7 +1881,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt));
@@ -1849,7 +1892,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncf:
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::trunc,
Intrinsic::experimental_constrained_trunc));
@@ -2152,6 +2195,33 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_expect_with_probability: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+ llvm::APFloat Probability(0.0);
+ const Expr *ProbArg = E->getArg(2);
+ bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
+ assert(EvalSucceed && "probability should be able to evaluate as float");
+ (void)EvalSucceed;
+ bool LoseInfo = false;
+ Probability.convert(llvm::APFloat::IEEEdouble(),
+ llvm::RoundingMode::Dynamic, &LoseInfo);
+ llvm::Type *Ty = ConvertType(ProbArg->getType());
+ Constant *Confidence = ConstantFP::get(Ty, Probability);
+ // Don't generate llvm.expect.with.probability on -O0 as the backend
+ // won't use it for anything.
+ // Note, we still IRGen ExpectedValue because it could have side-effects.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return RValue::get(ArgValue);
+
+ Function *FnExpect =
+ CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
+ Value *Result = Builder.CreateCall(
+ FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
+ return RValue::get(Result);
+ }
case Builtin::BI__builtin_assume_aligned: {
const Expr *Ptr = E->getArg(0);
Value *PtrValue = EmitScalarExpr(Ptr);
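
The new case lowers __builtin_expect_with_probability to the llvm.expect.with.probability intrinsic (and, as the comment notes, skips it entirely at -O0). A short usage sketch with an illustrative probability value:

```cpp
int slow_path(int tag);

// A branch hint with an explicit likelihood instead of the implicit
// "almost always" of plain __builtin_expect. The third argument must be a
// constant in [0.0, 1.0]; 0.9 here is only an example.
int dispatch(int tag) {
  if (__builtin_expect_with_probability(tag == 0, 1, 0.9))
    return 0;            // expected path, ~90% likely per the hint
  return slow_path(tag); // cold path
}
```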
@@ -2164,7 +2234,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
llvm::Value::MaximumAlignment);
- EmitAlignmentAssumption(PtrValue, Ptr,
+ emitAlignmentAssumption(PtrValue, Ptr,
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, OffsetValue);
return RValue::get(PtrValue);
@@ -2336,6 +2406,53 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+ case Builtin::BI__builtin_matrix_transpose: {
+ const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ Value *MatValue = EmitScalarExpr(E->getArg(0));
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
+ MatrixTy->getNumColumns());
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_matrix_column_major_load: {
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ // Emit everything that isn't dependent on the first parameter type
+ Value *Stride = EmitScalarExpr(E->getArg(3));
+ const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
+ auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
+ assert(PtrTy && "arg0 must be of pointer type");
+ bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
+
+ Address Src = EmitPointerWithAlignment(E->getArg(0));
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Value *Result = MB.CreateColumnMajorLoad(
+ Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
+ IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
+ "matrix");
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_matrix_column_major_store: {
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ Value *Matrix = EmitScalarExpr(E->getArg(0));
+ Address Dst = EmitPointerWithAlignment(E->getArg(1));
+ Value *Stride = EmitScalarExpr(E->getArg(2));
+
+ const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
+ assert(PtrTy && "arg1 must be of pointer type");
+ bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
+
+ EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 0);
+ Value *Result = MB.CreateColumnMajorStore(
+ Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
+ Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
+ return RValue::get(Result);
+ }
+
case Builtin::BIfinite:
case Builtin::BI__finite:
case Builtin::BIfinitef:
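
These three cases are the codegen half of Clang's matrix extension. The corresponding source-level usage looks roughly like the following (requires -fenable-matrix; the type names are illustrative):

```cpp
#include <cstddef>

// matrix_type declares a fixed-size matrix value type; the builtins map
// onto MatrixBuilder's transpose, column-major load, and column-major
// store, as emitted in the cases above.
typedef float m4x3_t __attribute__((matrix_type(4, 3)));
typedef float m3x4_t __attribute__((matrix_type(3, 4)));

m3x4_t transpose(m4x3_t M) {
  return __builtin_matrix_transpose(M); // 4x3 -> 3x4
}

m4x3_t load_columns(float *Ptr, std::size_t Stride) {
  // Reads a 4x3 matrix stored column-major; Stride is the element distance
  // between the starts of consecutive columns (at least 4 here).
  return __builtin_matrix_column_major_load(Ptr, 4, 3, Stride);
}

void store_columns(m4x3_t M, float *Ptr, std::size_t Stride) {
  __builtin_matrix_column_major_store(M, Ptr, Stride);
}
```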
@@ -2518,6 +2635,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_memcpy_inline: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ uint64_t Size =
+ E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 1);
+ Builder.CreateMemCpyInline(Dest, Src, Size);
+ return RValue::get(nullptr);
+ }
+
case Builtin::BI__builtin_char_memchr:
BuiltinID = Builtin::BI__builtin_memchr;
break;
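
__builtin_memcpy_inline differs from __builtin_memcpy in that the size must be an integer constant expression (hence EvaluateKnownConstInt above) and the copy is guaranteed to be expanded inline rather than lowered to a libc memcpy call. A minimal usage sketch:

```cpp
// Copies exactly 16 bytes; the constant size is mandatory, and no call to
// memcpy is emitted even at -O0.
void copy_fixed_header(void *dst, const void *src) {
  __builtin_memcpy_inline(dst, src, 16);
}
```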
@@ -3930,7 +4060,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
- V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
+ V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
}
return std::tie(ElemPtr, TmpSize, TmpPtr);
};
@@ -3949,19 +4079,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
- std::vector<llvm::Value *> Args = {
- Queue, Flags, Range,
- Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
- ElemPtr};
- std::vector<llvm::Type *> ArgTys = {
+ llvm::Value *const Args[] = {Queue, Flags,
+ Range, Kernel,
+ Block, ConstantInt::get(IntTy, NumArgs - 4),
+ ElemPtr};
+ llvm::Type *const ArgTys[] = {
QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
GenericVoidPtrTy, IntTy, ElemPtr->getType()};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Call =
- RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
+ auto Call = RValue::get(
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
@@ -4117,6 +4245,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
+ if (getTarget().getTriple().getArch() == Triple::amdgcn &&
+ getLangOpts().HIP)
+ return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
@@ -4429,35 +4560,41 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
- bool HasLegalHalfType=true,
- bool V1Ty=false) {
+ bool HasLegalHalfType = true,
+ bool V1Ty = false,
+ bool AllowBFloatArgsAndRet = true) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
- return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ case NeonTypeFlags::BFloat16:
+ if (AllowBFloatArgsAndRet)
+ return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
+ else
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
if (HasLegalHalfType)
- return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
- return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Poly128:
// FIXME: i128 and f128 doesn't get fully support in Clang and llvm.
// There is a lot of i128 and f128 API missing.
// so we use v16i8 to represent poly128 and get pattern matched.
- return llvm::VectorType::get(CGF->Int8Ty, 16);
+ return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
case NeonTypeFlags::Float32:
- return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Float64:
- return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
llvm_unreachable("Unknown vector element type!");
}
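
The mechanical VectorType::get to FixedVectorType::get rewrites in this hunk (and the removal of the getVectorNumElements() call in the CGAtomic.cpp hunk earlier) follow LLVM's split of vector types into fixed-width and scalable variants. A standalone sketch of the fixed-width API, not taken from the patch:

```cpp
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Build a <4 x i16> type and read its element count back through
// FixedVectorType instead of the old Type-level vector accessors.
unsigned elementCountOfV4I16(llvm::LLVMContext &Ctx) {
  llvm::FixedVectorType *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getInt16Ty(Ctx), 4);
  return VecTy->getNumElements(); // 4
}
```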
@@ -4467,34 +4604,46 @@ static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
int IsQuad = IntTypeFlags.isQuad();
switch (IntTypeFlags.getEltType()) {
case NeonTypeFlags::Int16:
- return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
case NeonTypeFlags::Int64:
- return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
default:
llvm_unreachable("Type can't be converted to floating-point!");
}
}
-Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
- unsigned nElts = V->getType()->getVectorNumElements();
- Value* SV = llvm::ConstantVector::getSplat(nElts, C);
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
+ const ElementCount &Count) {
+ Value *SV = llvm::ConstantVector::getSplat(Count, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
+ return EmitNeonSplat(V, C, EC);
+}
+
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
- ai != ae; ++ai, ++j)
+ ai != ae; ++ai, ++j) {
+ if (F->isConstrainedFPIntrinsic())
+ if (ai->getType()->isMetadataTy())
+ continue;
if (shift > 0 && shift == j)
Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+ }
- return Builder.CreateCall(F, Ops, name);
+ if (F->isConstrainedFPIntrinsic())
+ return Builder.CreateConstrainedFPCall(F, Ops, name);
+ else
+ return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
@@ -4558,17 +4707,17 @@ enum {
};
namespace {
-struct NeonIntrinsicInfo {
+struct ARMVectorIntrinsicInfo {
const char *NameHint;
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
- unsigned TypeModifier;
+ uint64_t TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
- bool operator<(const NeonIntrinsicInfo &TE) const {
+ bool operator<(const ARMVectorIntrinsicInfo &TE) const {
return BuiltinID < TE.BuiltinID;
}
};
@@ -4586,7 +4735,12 @@ struct NeonIntrinsicInfo {
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
TypeModifier }
-static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
+static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
+ NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
@@ -4596,6 +4750,11 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
+ NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
@@ -4656,6 +4815,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
@@ -4754,6 +4914,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
@@ -4861,13 +5022,21 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_v, arm_neon_usdot, 0),
+ NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
NEONMAP0(vzipq_v)
};
-static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+ NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vaddhn_v),
@@ -4875,6 +5044,11 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
+ NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
@@ -4918,6 +5092,7 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
+ NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
@@ -4952,6 +5127,7 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
+ NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
@@ -4966,14 +5142,22 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
+ NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
@@ -5026,9 +5210,12 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
+ NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
};
-static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
+static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
@@ -5061,6 +5248,7 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
@@ -5258,24 +5446,42 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
#undef NEONMAP1
#undef NEONMAP2
+#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { \
+ #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
+ TypeModifier \
+ }
+
+#define SVEMAP2(NameBase, TypeModifier) \
+ { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
+static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
+#define GET_SVE_LLVM_INTRINSIC_MAP
+#include "clang/Basic/arm_sve_builtin_cg.inc"
+#undef GET_SVE_LLVM_INTRINSIC_MAP
+};
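+// Illustrative sketch of how the generated table is populated (the names
+// below are hypothetical, not taken from arm_sve_builtin_cg.inc): an entry
+//   SVEMAP1(svfoo_m, aarch64_sve_foo, SomeFlags)
+// expands to the ARMVectorIntrinsicInfo initializer
+//   { "svfoo_m", SVE::BI__builtin_sve_svfoo_m,
+//     Intrinsic::aarch64_sve_foo, 0, SomeFlags }
+// while SVEMAP2 leaves the LLVMIntrinsic field 0 for builtins that are
+// handled by custom codegen further down.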
+
+#undef SVEMAP1
+#undef SVEMAP2
+
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
+static bool AArch64SVEIntrinsicsProvenSorted = false;
-
-static const NeonIntrinsicInfo *
-findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
- unsigned BuiltinID, bool &MapProvenSorted) {
+static const ARMVectorIntrinsicInfo *
+findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
+ unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
if (!MapProvenSorted) {
- assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
+ assert(llvm::is_sorted(IntrinsicMap));
MapProvenSorted = true;
}
#endif
- const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
+ const ARMVectorIntrinsicInfo *Builtin =
+ llvm::lower_bound(IntrinsicMap, BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
@@ -5298,7 +5504,7 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
if (Modifier & AddRetType) {
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
if (Modifier & VectorizeRetType)
- Ty = llvm::VectorType::get(
+ Ty = llvm::FixedVectorType::get(
Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
Tys.push_back(Ty);
@@ -5307,7 +5513,7 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
// Arguments.
if (Modifier & VectorizeArgTypes) {
int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
- ArgType = llvm::VectorType::get(ArgType, Elts);
+ ArgType = llvm::FixedVectorType::get(ArgType, Elts);
}
if (Modifier & (Add1ArgType | Add2ArgTypes))
@@ -5322,10 +5528,9 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
return CGM.getIntrinsic(IntrinsicID, Tys);
}
-static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
- const NeonIntrinsicInfo &SISDInfo,
- SmallVectorImpl<Value *> &Ops,
- const CallExpr *E) {
+static Value *EmitCommonNeonSISDBuiltinExpr(
+ CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
+ SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
unsigned BuiltinID = SISDInfo.BuiltinID;
unsigned int Int = SISDInfo.LLVMIntrinsic;
unsigned Modifier = SISDInfo.TypeModifier;
@@ -5370,8 +5575,8 @@ static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
// The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
// it before inserting.
- Ops[j] =
- CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+ Ops[j] = CGF.Builder.CreateTruncOrBitCast(
+ Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
Ops[j] =
CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
}
@@ -5401,8 +5606,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
const bool HasLegalHalfType = getTarget().hasLegalHalfType();
+ const bool AllowBFloatArgsAndRet =
+ getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
- llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
+ llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType, false,
+ AllowBFloatArgsAndRet);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -5417,6 +5625,19 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
switch (BuiltinID) {
default: break;
+ case NEON::BI__builtin_neon_splat_lane_v:
+ case NEON::BI__builtin_neon_splat_laneq_v:
+ case NEON::BI__builtin_neon_splatq_lane_v:
+ case NEON::BI__builtin_neon_splatq_laneq_v: {
+ auto NumElements = VTy->getElementCount();
+ if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
+ NumElements = NumElements * 2;
+ if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
+ NumElements = NumElements / 2;
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
+ return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
+ }
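+  // Illustrative sketch (roughly what EmitNeonSplat produces here): a
+  // hypothetical splatq_lane call that broadcasts lane 1 of a <2 x i32>
+  // input uses the doubled element count, i.e.
+  //   shufflevector <2 x i32> %v, <2 x i32> undef,
+  //                 <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  // while splat_laneq halves the element count instead.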
case NEON::BI__builtin_neon_vpadd_v:
case NEON::BI__builtin_neon_vpaddq_v:
// We don't allow fp/int overloading of intrinsics.
@@ -5469,7 +5690,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ty = HalfTy;
break;
}
- llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
+ auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
@@ -5616,7 +5837,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(i+CV);
@@ -5626,13 +5847,14 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
@@ -5646,7 +5868,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
@@ -5728,8 +5950,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ auto *NarrowTy =
+ llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
@@ -5738,8 +5960,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ auto *NarrowTy =
+ llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
@@ -5751,6 +5973,29 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.resize(2);
return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
}
+ case NEON::BI__builtin_neon_vqdmulhq_lane_v:
+ case NEON::BI__builtin_neon_vqdmulh_lane_v:
+ case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
+ case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
+ auto *RTy = cast<llvm::VectorType>(Ty);
+ if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
+ BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
+ RTy = llvm::FixedVectorType::get(RTy->getElementType(),
+ RTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = {
+ RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
+ /*isQuad*/ false))};
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
+ case NEON::BI__builtin_neon_vqdmulh_laneq_v:
+ case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
+ case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
+ llvm::Type *Tys[2] = {
+ Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
+ /*isQuad*/ true))};
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+ }
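+  // Illustrative sketch (assumed mapping for a hypothetical
+  // vqdmulh_laneq_s32 call): the intrinsic is overloaded on both the result
+  // type and the vector the lane is read from, so Tys becomes
+  //   { <2 x i32>, <4 x i32> }
+  // i.e. a 64-bit result with the lane taken from a 128-bit vector; the
+  // _lane cases above do the same with a 64-bit lane operand.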
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
@@ -5767,7 +6012,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v:
- Int = Intrinsic::nearbyint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
@@ -5825,7 +6072,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
    // in AArch64 it comes last. We may want to stick to one or the other.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
@@ -5862,7 +6109,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
@@ -5890,7 +6137,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
@@ -5908,7 +6155,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
@@ -5921,40 +6168,91 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vdot_v:
case NEON::BI__builtin_neon_vdotq_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
case NEON::BI__builtin_neon_vfmlal_low_v:
case NEON::BI__builtin_neon_vfmlalq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
}
case NEON::BI__builtin_neon_vfmlsl_low_v:
case NEON::BI__builtin_neon_vfmlslq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
}
case NEON::BI__builtin_neon_vfmlal_high_v:
case NEON::BI__builtin_neon_vfmlalq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
}
case NEON::BI__builtin_neon_vfmlsl_high_v:
case NEON::BI__builtin_neon_vfmlslq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
}
+ case NEON::BI__builtin_neon_vmmlaq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
+ }
+ case NEON::BI__builtin_neon_vusmmlaq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
+ }
+ case NEON::BI__builtin_neon_vusdot_v:
+ case NEON::BI__builtin_neon_vusdotq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
+ }
+ case NEON::BI__builtin_neon_vbfdot_v:
+ case NEON::BI__builtin_neon_vbfdotq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
+ }
+ case NEON::BI__builtin_neon_vbfmmlaq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmmla");
+ }
+ case NEON::BI__builtin_neon_vbfmlalbq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalb");
+ }
+ case NEON::BI__builtin_neon_vbfmlaltq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalt");
+ }
+ case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
+ llvm::Type *Tys[1] = { Ty };
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvtfp2bf");
+ }
+
}
assert(Int && "Expected valid intrinsic number");
@@ -5999,7 +6297,7 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
TblOps.push_back(ExtOp);
// Build a vector containing sequential number like (0, 1, 2, ..., 15)
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(2*i);
@@ -6063,6 +6361,12 @@ Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
llvm::ConstantInt::get(Int32Ty, Value));
}
+enum SpecialRegisterAccessKind {
+ NormalRead,
+ VolatileRead,
+ Write,
+};
+
// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
@@ -6070,7 +6374,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType,
- bool IsRead,
+ SpecialRegisterAccessKind AccessKind,
StringRef SysReg = "") {
// write and register intrinsics only support 32 and 64 bit operations.
assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
@@ -6095,8 +6399,12 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
&& "Can't fit 64-bit value in 32-bit register");
- if (IsRead) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ if (AccessKind != Write) {
+ assert(AccessKind == NormalRead || AccessKind == VolatileRead);
+ llvm::Function *F = CGM.getIntrinsic(
+ AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
+ : llvm::Intrinsic::read_register,
+ Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
@@ -6134,21 +6442,27 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
+ case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
@@ -6156,6 +6470,7 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
case NEON::BI__builtin_neon_vsha1cq_u32:
case NEON::BI__builtin_neon_vsha1pq_u32:
case NEON::BI__builtin_neon_vsha1mq_u32:
+ case NEON::BI__builtin_neon_vcvth_bf16_f32:
case clang::ARM::BI_MoveToCoprocessor:
case clang::ARM::BI_MoveToCoprocessor2:
return false;
@@ -6468,9 +6783,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp;
+ SpecialRegisterAccessKind AccessKind = Write;
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp)
+ AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsrp;
@@ -6489,12 +6806,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
ValueType = RegisterType = Int32Ty;
}
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
+ AccessKind);
}
// Deal with MVE builtins
if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
+ // Handle CDE builtins
+ if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
+ return Result;
// Find out if any arguments are required to be integer constant
// expressions.
@@ -6591,12 +6912,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
+ case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
case NEON::BI__builtin_neon_vrndns_f32: {
@@ -6609,11 +6934,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
@@ -6630,6 +6957,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
"vsha1h");
+ case NEON::BI__builtin_neon_vcvth_bf16_f32: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
+ "vcvtbfp2bf");
+ }
+
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
case ARM::BI_MoveToCoprocessor:
@@ -6809,7 +7141,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool rightShift = false;
llvm::VectorType *VTy = GetNeonType(this, Type,
- getTarget().hasLegalHalfType());
+ getTarget().hasLegalHalfType(),
+ false,
+ getTarget().hasBFloat16Type());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -6817,7 +7151,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
@@ -6833,19 +7167,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+ int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
- Ty = llvm::VectorType::get(VTy->getElementType(), 1);
+ Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
- uint32_t Indices[] = {1 - Lane, Lane};
- SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
+ int Indices[] = {1 - Lane, Lane};
+ return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
@@ -6968,8 +7301,9 @@ static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
// equal to the lane size. In LLVM IR, an LShr with that parameter would be
// undefined behavior, but in MVE it's legal, so we must convert it to code
// that is not undefined in IR.
- unsigned LaneBits =
- V->getType()->getVectorElementType()->getPrimitiveSizeInBits();
+ unsigned LaneBits = cast<llvm::VectorType>(V->getType())
+ ->getElementType()
+ ->getPrimitiveSizeInBits();
if (Shift == LaneBits) {
// An unsigned shift of the full lane size always generates zero, so we can
// simply emit a zero vector. A signed shift of the full lane size does the
@@ -6990,6 +7324,86 @@ static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
return Builder.CreateVectorSplat(Elements, V);
}
+static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
+ CodeGenFunction *CGF,
+ llvm::Value *V,
+ llvm::Type *DestType) {
+ // Convert one MVE vector type into another by reinterpreting its in-register
+ // format.
+ //
+  // On a little-endian target, this is identical to a bitcast (which
+  // reinterprets the memory format). On a big-endian target they are not
+  // necessarily the same, because
+ // the register and memory formats map to each other differently depending on
+ // the lane size.
+ //
+ // We generate a bitcast whenever we can (if we're little-endian, or if the
+ // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
+ // that performs the different kind of reinterpretation.
+ if (CGF->getTarget().isBigEndian() &&
+ V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
+ return Builder.CreateCall(
+ CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
+ {DestType, V->getType()}),
+ V);
+ } else {
+ return Builder.CreateBitCast(V, DestType);
+ }
+}
+
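+// Illustrative sketch (approximate IR, not verified): on big-endian, a
+// reinterpret from <8 x i16> to <4 x i32> takes the intrinsic path, roughly
+//   %r = call <4 x i32> @llvm.arm.mve.vreinterpretq.v4i32.v8i16(<8 x i16> %v)
+// while on little-endian (or when the lane sizes match) it reduces to a
+// plain bitcast.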
+static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
+ // Make a shufflevector that extracts every other element of a vector (evens
+ // or odds, as desired).
+ SmallVector<int, 16> Indices;
+ unsigned InputElements =
+ cast<llvm::VectorType>(V->getType())->getNumElements();
+ for (unsigned i = 0; i < InputElements; i += 2)
+ Indices.push_back(i + Odd);
+ return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
+ Indices);
+}
+
+static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
+ llvm::Value *V1) {
+ // Make a shufflevector that interleaves two vectors element by element.
+ assert(V0->getType() == V1->getType() && "Can't zip different vector types");
+ SmallVector<int, 16> Indices;
+ unsigned InputElements =
+ cast<llvm::VectorType>(V0->getType())->getNumElements();
+ for (unsigned i = 0; i < InputElements; i++) {
+ Indices.push_back(i);
+ Indices.push_back(i + InputElements);
+ }
+ return Builder.CreateShuffleVector(V0, V1, Indices);
+}
+
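+// Illustrative sketch: zipping two <4 x i32> values with VectorZip builds
+// the interleaving mask {0, 4, 1, 5, 2, 6, 3, 7}, i.e. roughly
+//   shufflevector <4 x i32> %v0, <4 x i32> %v1,
+//                 <8 x i32> <i32 0, i32 4, i32 1, i32 5,
+//                            i32 2, i32 6, i32 3, i32 7>
+// and VectorUnzip above is the inverse, keeping every other element.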
+template<unsigned HighBit, unsigned OtherBits>
+static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
+ // MVE-specific helper function to make a vector splat of a constant such as
+ // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
+ llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
+ unsigned LaneBits = T->getPrimitiveSizeInBits();
+ uint32_t Value = HighBit << (LaneBits - 1);
+ if (OtherBits)
+ Value |= (1UL << (LaneBits - 1)) - 1;
+ llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
+ return ARMMVEVectorSplat(Builder, Lane);
+}
+
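+// Illustrative sketch: with i32 lanes, ARMMVEConstantSplat<1, 0> splats
+// 0x80000000 (INT_MIN), ARMMVEConstantSplat<0, 1> splats 0x7fffffff
+// (INT_MAX) and ARMMVEConstantSplat<1, 1> splats 0xffffffff (UINT_MAX),
+// since the top bit and the remaining bits are filled in independently.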
+static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
+ llvm::Value *V,
+ unsigned ReverseWidth) {
+ // MVE-specific helper function which reverses the elements of a
+ // vector within every (ReverseWidth)-bit collection of lanes.
+ SmallVector<int, 16> Indices;
+ unsigned LaneSize = V->getType()->getScalarSizeInBits();
+ unsigned Elements = 128 / LaneSize;
+ unsigned Mask = ReverseWidth / LaneSize - 1;
+ for (unsigned i = 0; i < Elements; i++)
+ Indices.push_back(i ^ Mask);
+ return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
+ Indices);
+}
+
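+// Illustrative sketch: reversing i16 lanes within every 64-bit chunk
+// (ReverseWidth = 64, LaneSize = 16) gives Mask = 3, so the 8-lane shuffle
+// uses indices i ^ 3, i.e. {3, 2, 1, 0, 7, 6, 5, 4}.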
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
@@ -7091,6 +7505,17 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unknown custom codegen type.");
}
+Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E,
+ ReturnValueSlot ReturnValue,
+ llvm::Triple::ArchType Arch) {
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+#include "clang/Basic/arm_cde_builtin_cg.inc"
+ }
+}
+
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
@@ -7240,7 +7665,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
}
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
- llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -7248,9 +7673,840 @@ Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
return Op;
}
+/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
+/// access builtin. Only required if it can't be inferred from the base pointer
+/// operand.
+llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getMemEltType()) {
+ case SVETypeFlags::MemEltTyDefault:
+ return getEltType(TypeFlags);
+ case SVETypeFlags::MemEltTyInt8:
+ return Builder.getInt8Ty();
+ case SVETypeFlags::MemEltTyInt16:
+ return Builder.getInt16Ty();
+ case SVETypeFlags::MemEltTyInt32:
+ return Builder.getInt32Ty();
+ case SVETypeFlags::MemEltTyInt64:
+ return Builder.getInt64Ty();
+ }
+ llvm_unreachable("Unknown MemEltType");
+}
+
+llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default:
+ llvm_unreachable("Invalid SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return Builder.getInt8Ty();
+ case SVETypeFlags::EltTyInt16:
+ return Builder.getInt16Ty();
+ case SVETypeFlags::EltTyInt32:
+ return Builder.getInt32Ty();
+ case SVETypeFlags::EltTyInt64:
+ return Builder.getInt64Ty();
+
+ case SVETypeFlags::EltTyFloat16:
+ return Builder.getHalfTy();
+ case SVETypeFlags::EltTyFloat32:
+ return Builder.getFloatTy();
+ case SVETypeFlags::EltTyFloat64:
+ return Builder.getDoubleTy();
+
+ case SVETypeFlags::EltTyBFloat16:
+ return Builder.getBFloatTy();
+
+ case SVETypeFlags::EltTyBool8:
+ case SVETypeFlags::EltTyBool16:
+ case SVETypeFlags::EltTyBool32:
+ case SVETypeFlags::EltTyBool64:
+ return Builder.getInt1Ty();
+ }
+}
+
+// Return the llvm predicate vector type corresponding to the specified element
+// TypeFlags.
+llvm::ScalableVectorType *
+CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default: llvm_unreachable("Unhandled SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyInt16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyInt32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyInt64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+
+ case SVETypeFlags::EltTyBFloat16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyFloat16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyFloat32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyFloat64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+
+ case SVETypeFlags::EltTyBool8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyBool16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyBool32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyBool64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+ }
+}
+
+// Return the llvm vector type corresponding to the specified element TypeFlags.
+llvm::ScalableVectorType *
+CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default:
+ llvm_unreachable("Invalid SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
+ case SVETypeFlags::EltTyInt16:
+ return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
+ case SVETypeFlags::EltTyInt32:
+ return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
+ case SVETypeFlags::EltTyInt64:
+ return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
+
+ case SVETypeFlags::EltTyFloat16:
+ return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
+ case SVETypeFlags::EltTyBFloat16:
+ return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
+ case SVETypeFlags::EltTyFloat32:
+ return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
+ case SVETypeFlags::EltTyFloat64:
+ return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
+
+ case SVETypeFlags::EltTyBool8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyBool16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyBool32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyBool64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
+ Function *Ptrue =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
+ return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
+}
+
+constexpr unsigned SVEBitsPerBlock = 128;
+
+static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
+ unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
+ return llvm::ScalableVectorType::get(EltTy, NumElts);
+}
+
+// Reinterpret the input predicate so that it can be used to correctly isolate
+// the elements of the specified datatype.
+Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
+ llvm::ScalableVectorType *VTy) {
+ auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
+ if (Pred->getType() == RTy)
+ return Pred;
+
+ unsigned IntID;
+ llvm::Type *IntrinsicTy;
+ switch (VTy->getMinNumElements()) {
+ default:
+ llvm_unreachable("unsupported element count!");
+ case 2:
+ case 4:
+ case 8:
+ IntID = Intrinsic::aarch64_sve_convert_from_svbool;
+ IntrinsicTy = RTy;
+ break;
+ case 16:
+ IntID = Intrinsic::aarch64_sve_convert_to_svbool;
+ IntrinsicTy = Pred->getType();
+ break;
+ }
+
+ Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
+ Value *C = Builder.CreateCall(F, Pred);
+ assert(C->getType() == RTy && "Unexpected return type!");
+ return C;
+}
+
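+// Illustrative sketch (approximate IR, not verified): narrowing an svbool_t
+// predicate (<vscale x 16 x i1>) to the <vscale x 2 x i1> needed for 64-bit
+// data is emitted roughly as
+//   %p = call <vscale x 2 x i1>
+//        @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+// and the opposite, widening direction uses convert.to.svbool instead.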
+Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *ResultTy = getSVEType(TypeFlags);
+ auto *OverloadedTy =
+ llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
+
+ // At the ACLE level there's only one predicate type, svbool_t, which is
+ // mapped to <n x 16 x i1>. However, this might be incompatible with the
+ // actual type being loaded. For example, when loading doubles (i64) the
+  // predicate should be <n x 2 x i1> instead. At the IR level the type of
+ // the predicate and the data being loaded must match. Cast accordingly.
+ Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
+
+ Function *F = nullptr;
+ if (Ops[1]->getType()->isVectorTy())
+ // This is the "vector base, scalar offset" case. In order to uniquely
+ // map this built-in to an LLVM IR intrinsic, we need both the return type
+ // and the type of the vector base.
+ F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
+ else
+    // This is the "scalar base, vector offset" case. The type of the offset
+ // is encoded in the name of the intrinsic. We only need to specify the
+ // return type in order to uniquely map this built-in to an LLVM IR
+ // intrinsic.
+ F = CGM.getIntrinsic(IntID, OverloadedTy);
+
+ // Pass 0 when the offset is missing. This can only be applied when using
+ // the "vector base" addressing mode for which ACLE allows no offset. The
+ // corresponding LLVM IR always requires an offset.
+ if (Ops.size() == 2) {
+ assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ }
+
+ // For "vector base, scalar index" scale the index so that it becomes a
+ // scalar offset.
+ if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
+ unsigned BytesPerElt =
+ OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ }
+
+ Value *Call = Builder.CreateCall(F, Ops);
+
+ // The following sext/zext is only needed when ResultTy != OverloadedTy. In
+ // other cases it's folded into a nop.
+ return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
+ : Builder.CreateSExt(Call, ResultTy);
+}
+
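+// Illustrative sketch: for a gather of 64-bit elements using the "vector
+// base, scalar index" form, the index in Ops[2] is scaled by
+// BytesPerElt = 8 above so the intrinsic always receives a byte offset,
+// and when ACLE supplies no offset at all a constant 0 is appended to
+// satisfy the intrinsic signature.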
+Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *SrcDataTy = getSVEType(TypeFlags);
+ auto *OverloadedTy =
+ llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
+
+ // In ACLE the source data is passed in the last argument, whereas in LLVM IR
+ // it's the first argument. Move it accordingly.
+ Ops.insert(Ops.begin(), Ops.pop_back_val());
+
+ Function *F = nullptr;
+ if (Ops[2]->getType()->isVectorTy())
+ // This is the "vector base, scalar offset" case. In order to uniquely
+ // map this built-in to an LLVM IR intrinsic, we need both the return type
+ // and the type of the vector base.
+ F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
+ else
+    // This is the "scalar base, vector offset" case. The type of the offset
+ // is encoded in the name of the intrinsic. We only need to specify the
+ // return type in order to uniquely map this built-in to an LLVM IR
+ // intrinsic.
+ F = CGM.getIntrinsic(IntID, OverloadedTy);
+
+ // Pass 0 when the offset is missing. This can only be applied when using
+ // the "vector base" addressing mode for which ACLE allows no offset. The
+ // corresponding LLVM IR always requires an offset.
+ if (Ops.size() == 3) {
+ assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ }
+
+ // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
+ // folded into a nop.
+ Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
+
+ // At the ACLE level there's only one predicate type, svbool_t, which is
+ // mapped to <n x 16 x i1>. However, this might be incompatible with the
+ // actual type being stored. For example, when storing doubles (i64) the
+  // predicate should be <n x 2 x i1> instead. At the IR level the type of
+ // the predicate and the data being stored must match. Cast accordingly.
+ Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
+
+ // For "vector base, scalar index" scale the index so that it becomes a
+ // scalar offset.
+ if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
+ unsigned BytesPerElt =
+ OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[3] = Builder.CreateMul(Ops[3], Scale);
+ }
+
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ // The gather prefetches are overloaded on the vector input - this can either
+ // be the vector of base addresses or vector of offsets.
+ auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
+ if (!OverloadedTy)
+ OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
+
+ // Cast the predicate from svbool_t to the right number of elements.
+ Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
+
+ // vector + imm addressing modes
+ if (Ops[1]->getType()->isVectorTy()) {
+ if (Ops.size() == 3) {
+ // Pass 0 for 'vector+imm' when the index is omitted.
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+
+ // The sv_prfop is the last operand in the builtin and IR intrinsic.
+ std::swap(Ops[2], Ops[3]);
+ } else {
+ // Index needs to be passed as scaled offset.
+ llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
+ unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ }
+ }
+
+ Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value*> &Ops,
+ unsigned IntID) {
+ llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
+ auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
+ auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
+
+ unsigned N;
+ switch (IntID) {
+ case Intrinsic::aarch64_sve_ld2:
+ N = 2;
+ break;
+ case Intrinsic::aarch64_sve_ld3:
+ N = 3;
+ break;
+ case Intrinsic::aarch64_sve_ld4:
+ N = 4;
+ break;
+ default:
+ llvm_unreachable("unknown intrinsic!");
+ }
+ auto RetTy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getElementCount() * N);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+ Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
+ Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
+ BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+
+ Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
+ return Builder.CreateCall(F, { Predicate, BasePtr });
+}
+
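+// Illustrative sketch (approximate IR, not verified): a hypothetical svld2
+// of doubles has VTy = <vscale x 2 x double> and N = 2, so the call above
+// is roughly
+//   call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(
+//            <vscale x 2 x i1> %pg, double* %base)
+// returning both part vectors as one wide tuple vector.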
+Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value*> &Ops,
+ unsigned IntID) {
+ llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
+ auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
+ auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
+
+ unsigned N;
+ switch (IntID) {
+ case Intrinsic::aarch64_sve_st2:
+ N = 2;
+ break;
+ case Intrinsic::aarch64_sve_st3:
+ N = 3;
+ break;
+ case Intrinsic::aarch64_sve_st4:
+ N = 4;
+ break;
+ default:
+ llvm_unreachable("unknown intrinsic!");
+ }
+ auto TupleTy =
+ llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
+ Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
+ Value *Val = Ops.back();
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
+ BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+
+ // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
+ // need to break up the tuple vector.
+ SmallVector<llvm::Value*, 5> Operands;
+ Function *FExtr =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
+ for (unsigned I = 0; I < N; ++I)
+ Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
+ Operands.append({Predicate, BasePtr});
+
+ Function *F = CGM.getIntrinsic(IntID, { VTy });
+ return Builder.CreateCall(F, Operands);
+}
+
+// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
+// svpmullt_pair intrinsics, with the exception that their results are bitcast
+// to a wider type.
+Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ // Splat scalar operand to vector (intrinsics with _n infix)
+ if (TypeFlags.hasSplatOperand()) {
+ unsigned OpNo = TypeFlags.getSplatOperand();
+ Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
+ }
+
+ // The pair-wise function has a narrower overloaded type.
+ Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
+ Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
+
+ // Now bitcast to the wider result type.
+ llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
+ return EmitSVEReinterpret(Call, Ty);
+}
+
+Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
+ ArrayRef<Value *> Ops, unsigned BuiltinID) {
+ llvm::Type *OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
+}
+
+Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
+ auto *VectorTy = getSVEVectorForElementType(MemEltTy);
+ auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Ops[1];
+
+ // Implement the index operand if not omitted.
+ if (Ops.size() > 3) {
+ BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
+ }
+
+  // Prefetch intrinsics always expect an i8*
+ BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
+ Value *PrfOp = Ops.back();
+
+ Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
+ return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
+}
+
+Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
+ llvm::Type *ReturnTy,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID,
+ bool IsZExtReturn) {
+ QualType LangPTy = E->getArg(1)->getType();
+ llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
+ LangPTy->getAs<PointerType>()->getPointeeType());
+
+ // The vector type that is returned may be different from the
+ // eventual type loaded from memory.
+ auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
+ auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+
+ BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
+ Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
+ Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
+
+ return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
+ : Builder.CreateSExt(Load, VectorTy);
+}
+
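+// Illustrative sketch: for an extending load in the style of svld1sb into a
+// 32-bit result (names hypothetical), ReturnTy is <vscale x 4 x i32> while
+// MemEltTy is i8, so the ld1 above is emitted on <vscale x 4 x i8> and the
+// result is then sign- or zero-extended (per IsZExtReturn) to the full
+// vector type.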
+Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ QualType LangPTy = E->getArg(1)->getType();
+ llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
+ LangPTy->getAs<PointerType>()->getPointeeType());
+
+ // The vector type that is stored may be different from the
+ // eventual type stored to memory.
+ auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
+ auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+
+ // Last value is always the data
+ llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
+
+ BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
+ Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
+ return Builder.CreateCall(F, {Val, Predicate, BasePtr});
+}
+
+// Limit the usage of scalable llvm IR generated by the ACLE by using the
+// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
+Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
+ auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
+ return Builder.CreateCall(F, Scalar);
+}
+
+Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
+ return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
+}
+
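+// Illustrative sketch (approximate IR, not verified): splatting an i32
+// scalar through the single-argument overload picks <vscale x 4 x i32> from
+// getSVEVectorForElementType (128 / 32 lanes per block) and emits roughly
+//   call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %s)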
+Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
+ // FIXME: For big endian this needs an additional REV, or needs a separate
+ // intrinsic that is code-generated as a no-op, because the LLVM bitcast
+ // instruction is defined as 'bitwise' equivalent from memory point of
+ // view (when storing/reloading), whereas the svreinterpret builtin
+ // implements bitwise equivalent cast from register point of view.
+ // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
+ return Builder.CreateBitCast(Val, Ty);
+}
+
+static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
+ SmallVectorImpl<Value *> &Ops) {
+ auto *SplatZero = Constant::getNullValue(Ty);
+ Ops.insert(Ops.begin(), SplatZero);
+}
+
+static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
+ SmallVectorImpl<Value *> &Ops) {
+ auto *SplatUndef = UndefValue::get(Ty);
+ Ops.insert(Ops.begin(), SplatUndef);
+}
+
+SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
+ SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
+ if (TypeFlags.isOverloadNone())
+ return {};
+
+ llvm::Type *DefaultType = getSVEType(TypeFlags);
+
+ if (TypeFlags.isOverloadWhile())
+ return {DefaultType, Ops[1]->getType()};
+
+ if (TypeFlags.isOverloadWhileRW())
+ return {getSVEPredType(TypeFlags), Ops[0]->getType()};
+
+ if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
+ return {Ops[0]->getType(), Ops.back()->getType()};
+
+ if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
+ return {ResultType, Ops[0]->getType()};
+
+ assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
+ return {DefaultType};
+}
+
+Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ llvm::Type *Ty = ConvertType(E->getType());
+ if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
+ BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ return EmitSVEReinterpret(Val, Ty);
+ }
+
+ llvm::SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ if ((ICEArguments & (1 << i)) == 0)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ else {
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ if (!E->getArg(i)->isIntegerConstantExpr(Result, getContext()))
+ llvm_unreachable("Expected argument to be a constant");
+
+ // Immediates for SVE llvm intrinsics are always 32bit. We can safely
+ // truncate because the immediate has been range checked and no valid
+ // immediate requires more than a handful of bits.
+ Result = Result.extOrTrunc(32);
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+ }
+
+ auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
+ AArch64SVEIntrinsicsProvenSorted);
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ if (TypeFlags.isLoad())
+ return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
+ TypeFlags.isZExtReturn());
+ else if (TypeFlags.isStore())
+ return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isGatherLoad())
+ return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isScatterStore())
+ return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isPrefetch())
+ return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isGatherPrefetch())
+ return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructLoad())
+ return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructStore())
+ return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isUndef())
+ return UndefValue::get(Ty);
+ else if (Builtin->LLVMIntrinsic != 0) {
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
+ InsertExplicitZeroOperand(Builder, Ty, Ops);
+
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
+ InsertExplicitUndefOperand(Builder, Ty, Ops);
+
+ // Some ACLE builtins leave out the argument to specify the predicate
+ // pattern, which is expected to be expanded to an SV_ALL pattern.
+ if (TypeFlags.isAppendSVALL())
+ Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
+ if (TypeFlags.isInsertOp1SVALL())
+ Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
+
+ // Predicates must match the main datatype.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
+ if (PredTy->getElementType()->isIntegerTy(1))
+ Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
+
+ // Splat scalar operand to vector (intrinsics with _n infix)
+ if (TypeFlags.hasSplatOperand()) {
+ unsigned OpNo = TypeFlags.getSplatOperand();
+ Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
+ }
+
+ if (TypeFlags.isReverseCompare())
+ std::swap(Ops[1], Ops[2]);
+
+ if (TypeFlags.isReverseUSDOT())
+ std::swap(Ops[1], Ops[2]);
+
+ // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
+ llvm::Type *OpndTy = Ops[1]->getType();
+ auto *SplatZero = Constant::getNullValue(OpndTy);
+ Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
+ Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
+ }
+
+ Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
+ getSVEOverloadTypes(TypeFlags, Ty, Ops));
+ Value *Call = Builder.CreateCall(F, Ops);
+
+ // Predicate results must be converted to svbool_t.
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
+ if (PredTy->getScalarType()->isIntegerTy(1))
+ Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
+
+ return Call;
+ }
+
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+
+ case SVE::BI__builtin_sve_svmov_b_z: {
+ // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
+ }
+
+ case SVE::BI__builtin_sve_svnot_b_z: {
+ // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
+ }
+
+ case SVE::BI__builtin_sve_svmovlb_u16:
+ case SVE::BI__builtin_sve_svmovlb_u32:
+ case SVE::BI__builtin_sve_svmovlb_u64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
+
+ case SVE::BI__builtin_sve_svmovlb_s16:
+ case SVE::BI__builtin_sve_svmovlb_s32:
+ case SVE::BI__builtin_sve_svmovlb_s64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
+
+ case SVE::BI__builtin_sve_svmovlt_u16:
+ case SVE::BI__builtin_sve_svmovlt_u32:
+ case SVE::BI__builtin_sve_svmovlt_u64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
+
+ case SVE::BI__builtin_sve_svmovlt_s16:
+ case SVE::BI__builtin_sve_svmovlt_s32:
+ case SVE::BI__builtin_sve_svmovlt_s64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
+
+ case SVE::BI__builtin_sve_svpmullt_u16:
+ case SVE::BI__builtin_sve_svpmullt_u64:
+ case SVE::BI__builtin_sve_svpmullt_n_u16:
+ case SVE::BI__builtin_sve_svpmullt_n_u64:
+ return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
+
+ case SVE::BI__builtin_sve_svpmullb_u16:
+ case SVE::BI__builtin_sve_svpmullb_u64:
+ case SVE::BI__builtin_sve_svpmullb_n_u16:
+ case SVE::BI__builtin_sve_svpmullb_n_u64:
+ return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
+
+ case SVE::BI__builtin_sve_svdup_n_b8:
+ case SVE::BI__builtin_sve_svdup_n_b16:
+ case SVE::BI__builtin_sve_svdup_n_b32:
+ case SVE::BI__builtin_sve_svdup_n_b64: {
+ Value *CmpNE =
+ Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
+ llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
+ Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
+ return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
+ }
+
+ case SVE::BI__builtin_sve_svdupq_n_b8:
+ case SVE::BI__builtin_sve_svdupq_n_b16:
+ case SVE::BI__builtin_sve_svdupq_n_b32:
+ case SVE::BI__builtin_sve_svdupq_n_b64:
+ case SVE::BI__builtin_sve_svdupq_n_u8:
+ case SVE::BI__builtin_sve_svdupq_n_s8:
+ case SVE::BI__builtin_sve_svdupq_n_u64:
+ case SVE::BI__builtin_sve_svdupq_n_f64:
+ case SVE::BI__builtin_sve_svdupq_n_s64:
+ case SVE::BI__builtin_sve_svdupq_n_u16:
+ case SVE::BI__builtin_sve_svdupq_n_f16:
+ case SVE::BI__builtin_sve_svdupq_n_bf16:
+ case SVE::BI__builtin_sve_svdupq_n_s16:
+ case SVE::BI__builtin_sve_svdupq_n_u32:
+ case SVE::BI__builtin_sve_svdupq_n_f32:
+ case SVE::BI__builtin_sve_svdupq_n_s32: {
+    // These builtins are implemented by storing each element to an array and
+    // using ld1rq to materialize a vector.
+ unsigned NumOpnds = Ops.size();
+
+ bool IsBoolTy =
+ cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
+
+    // For svdupq_n_b* the element type is an integer of width 128/numelts,
+ // so that the compare can use the width that is natural for the expected
+ // number of predicate lanes.
+ llvm::Type *EltTy = Ops[0]->getType();
+ if (IsBoolTy)
+ EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
+
+ Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
+ CharUnits::fromQuantity(16));
+ for (unsigned I = 0; I < NumOpnds; ++I)
+ Builder.CreateDefaultAlignedStore(
+ IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
+ Builder.CreateGEP(Alloca.getPointer(),
+ {Builder.getInt64(0), Builder.getInt64(I)}));
+
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ Value *Pred = EmitSVEAllTruePred(TypeFlags);
+
+ llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
+ Value *Alloca0 = Builder.CreateGEP(
+ Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
+ Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
+
+ if (!IsBoolTy)
+ return LD1RQ;
+
+ // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
+ F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
+ : Intrinsic::aarch64_sve_cmpne_wide,
+ OverloadedTy);
+ Value *Call =
+ Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
+ return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
+ }
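The bool variants above pack their arguments into a 128-bit block before the ld1rq, so each argument occupies 128/NumOpnds bits. A small standalone sketch of that element-width choice (the helper name is illustrative, and it assumes SVEBitsPerBlock above is the 128-bit SVE granule):

    #include "llvm/IR/DerivedTypes.h"

    // e.g. svdupq_n_b8 passes 16 args -> i8 lanes; svdupq_n_b64 passes 2 -> i64.
    llvm::IntegerType *dupqBoolEltType(llvm::LLVMContext &Ctx,
                                       unsigned NumOpnds) {
      return llvm::IntegerType::get(Ctx, 128 / NumOpnds);
    }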
+
+ case SVE::BI__builtin_sve_svpfalse_b:
+ return ConstantInt::getFalse(Ty);
+
+ case SVE::BI__builtin_sve_svlen_bf16:
+ case SVE::BI__builtin_sve_svlen_f16:
+ case SVE::BI__builtin_sve_svlen_f32:
+ case SVE::BI__builtin_sve_svlen_f64:
+ case SVE::BI__builtin_sve_svlen_s8:
+ case SVE::BI__builtin_sve_svlen_s16:
+ case SVE::BI__builtin_sve_svlen_s32:
+ case SVE::BI__builtin_sve_svlen_s64:
+ case SVE::BI__builtin_sve_svlen_u8:
+ case SVE::BI__builtin_sve_svlen_u16:
+ case SVE::BI__builtin_sve_svlen_u32:
+ case SVE::BI__builtin_sve_svlen_u64: {
+ SVETypeFlags TF(Builtin->TypeModifier);
+ auto VTy = cast<llvm::VectorType>(getSVEType(TF));
+ auto NumEls = llvm::ConstantInt::get(Ty, VTy->getElementCount().Min);
+
+ Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
+ return Builder.CreateMul(NumEls, Builder.CreateCall(F));
+ }
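The svlen cases above return the runtime lane count as the type's minimum element count multiplied by llvm.vscale. A minimal standalone sketch of the same computation (the helper name is illustrative; an IRBuilder and module are assumed to be at hand):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // lanes(<vscale x N x T>) = N * vscale; e.g. svlen_f32 on
    // <vscale x 4 x float> yields 4 * vscale.
    llvm::Value *emitSVELaneCount(llvm::IRBuilder<> &B, llvm::Module &M,
                                  llvm::ScalableVectorType *VTy,
                                  llvm::Type *RetTy) {
      llvm::Function *VScale =
          llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::vscale, RetTy);
      llvm::Value *MinElts =
          llvm::ConstantInt::get(RetTy, VTy->getMinNumElements());
      return B.CreateMul(MinElts, B.CreateCall(VScale));
    }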
+
+ case SVE::BI__builtin_sve_svtbl2_u8:
+ case SVE::BI__builtin_sve_svtbl2_s8:
+ case SVE::BI__builtin_sve_svtbl2_u16:
+ case SVE::BI__builtin_sve_svtbl2_s16:
+ case SVE::BI__builtin_sve_svtbl2_u32:
+ case SVE::BI__builtin_sve_svtbl2_s32:
+ case SVE::BI__builtin_sve_svtbl2_u64:
+ case SVE::BI__builtin_sve_svtbl2_s64:
+ case SVE::BI__builtin_sve_svtbl2_f16:
+ case SVE::BI__builtin_sve_svtbl2_bf16:
+ case SVE::BI__builtin_sve_svtbl2_f32:
+ case SVE::BI__builtin_sve_svtbl2_f64: {
+ SVETypeFlags TF(Builtin->TypeModifier);
+ auto VTy = cast<llvm::VectorType>(getSVEType(TF));
+ auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getElementCount() * 2);
+ Function *FExtr =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
+ Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
+ Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
+ return Builder.CreateCall(F, {V0, V1, Ops[1]});
+ }
+ }
+
+  // Should not happen.
+ return nullptr;
+}
+
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
+ if (BuiltinID >= AArch64::FirstSVEBuiltin &&
+ BuiltinID <= AArch64::LastSVEBuiltin)
+ return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -7591,9 +8847,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp;
+ SpecialRegisterAccessKind AccessKind = Write;
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp)
+ AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsrp;
@@ -7611,7 +8869,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ValueType = Int32Ty;
}
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
+ AccessKind);
}
if (BuiltinID == AArch64::BI_ReadStatusReg ||
@@ -7667,7 +8926,27 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::SmallVector<Value*, 4> Ops;
+ Address PtrOp0 = Address::invalid();
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld1_v:
+ case NEON::BI__builtin_neon_vld1q_v:
+ case NEON::BI__builtin_neon_vld1_dup_v:
+ case NEON::BI__builtin_neon_vld1q_dup_v:
+ case NEON::BI__builtin_neon_vld1_lane_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v:
+ case NEON::BI__builtin_neon_vst1_v:
+ case NEON::BI__builtin_neon_vst1q_v:
+ case NEON::BI__builtin_neon_vst1_lane_v:
+ case NEON::BI__builtin_neon_vst1q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(PtrOp0.getPointer());
+ continue;
+ }
+ }
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
@@ -7682,7 +8961,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
if (Builtin) {
@@ -7898,7 +9177,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
}
case NEON::BI__builtin_neon_vpaddd_s64: {
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
@@ -7910,8 +9189,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
- llvm::Type *Ty =
- llvm::VectorType::get(DoubleTy, 2);
+ auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
@@ -7923,8 +9201,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
- llvm::Type *Ty =
- llvm::VectorType::get(FloatTy, 2);
+ auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
@@ -8087,97 +9364,107 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
// The vector type needs a cast for the v1f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddh_f16:
@@ -8192,18 +9479,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vdivh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
- case NEON::BI__builtin_neon_vfmah_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ case NEON::BI__builtin_neon_vfmah_f16:
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F,
- {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
- }
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+ {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ // FIXME: This should be an fneg instruction:
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+ {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
@@ -8216,7 +9505,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -8313,7 +9602,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -8360,10 +9649,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
+ case NEON::BI__builtin_neon_vget_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
}
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vduph_laneq_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
@@ -8522,8 +9815,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Not all intrinsics handled by the common case work for AArch64 yet, so only
// defer to common code if it's been added to our special map.
- Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
- AArch64SIMDIntrinsicsProvenSorted);
+ Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
+ AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
@@ -8561,16 +9854,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Addend;
// Now adjust things to handle the lane access.
- llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
- llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
- VTy;
+ auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
+ ? llvm::FixedVectorType::get(VTy->getElementType(),
+ VTy->getNumElements() / 2)
+ : VTy;
llvm::Constant *cst = cast<Constant>(Ops[3]);
- Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
+ Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
Ops.pop_back();
- Int = Intrinsic::fma;
+ Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
+ : Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
@@ -8583,31 +9878,35 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
- Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ Value *Result;
+ Result = emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
+ DoubleTy, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
- VTy->getNumElements() * 2);
+ auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
+ VTy->getNumElements() * 2);
Ops[2] = Builder.CreateBitCast(Ops[2], STy);
- Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+ Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
cast<ConstantInt>(Ops[3]));
Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
@@ -8617,9 +9916,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
@@ -8659,8 +9959,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned ArgElts = VTy->getNumElements();
llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
unsigned BitWidth = EltTy->getBitWidth();
- llvm::Type *ArgTy = llvm::VectorType::get(
- llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
+ auto *ArgTy = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
llvm::Type* Tys[2] = { VTy, ArgTy };
Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
SmallVector<llvm::Value*, 1> TmpOps;
@@ -8728,27 +10028,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::round;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
- Int = Intrinsic::round;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndih_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::nearbyint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndmh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::floor;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
- Int = Intrinsic::floor;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndnh_f16: {
@@ -8768,32 +10078,44 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vrndph_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::ceil;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
- Int = Intrinsic::ceil;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::rint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_rint
+ : Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
- Int = Intrinsic::rint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_rint
+ : Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::trunc;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
- Int = Intrinsic::trunc;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vcvt_f64_v:
@@ -8944,12 +10266,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vsqrth_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::sqrt;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_sqrt
+ : Intrinsic::sqrt;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
- Int = Intrinsic::sqrt;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_sqrt
+ : Intrinsic::sqrt;
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
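The vrnd*, vsqrt* and fma changes above all follow one pattern: when the builder is in constrained floating-point mode (strict FP, e.g. -ffp-model=strict), the plain math intrinsic is replaced by its experimental constrained counterpart so rounding-mode and FP-exception behaviour stay explicit in the IR. A condensed sketch of that selection for one intrinsic (the function name is illustrative):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"

    // Pick the trunc flavour the vrnd/vrndq cases above would use.
    llvm::Intrinsic::ID pickTruncIntrinsic(llvm::IRBuilderBase &B) {
      return B.getIsFPConstrained()
                 ? llvm::Intrinsic::experimental_constrained_trunc
                 : llvm::Intrinsic::trunc;
    }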
@@ -8965,7 +10291,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -8977,7 +10303,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -8989,7 +10315,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -9001,7 +10327,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -9010,7 +10336,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9019,7 +10345,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9028,7 +10354,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9037,7 +10363,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9046,7 +10372,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9055,7 +10381,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9064,7 +10390,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9073,7 +10399,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9082,7 +10408,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9091,7 +10417,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9100,7 +10426,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9109,7 +10435,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9118,7 +10444,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9127,7 +10453,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9136,7 +10462,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9145,7 +10471,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9154,7 +10480,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9163,7 +10489,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9172,7 +10498,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9181,7 +10507,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9190,7 +10516,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxnmv_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
@@ -9199,7 +10525,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
@@ -9208,7 +10534,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmv_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
@@ -9217,7 +10543,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmvq_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
@@ -9231,7 +10557,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9240,7 +10566,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9248,7 +10574,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9257,7 +10583,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9265,7 +10591,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9274,7 +10600,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9282,7 +10608,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9291,7 +10617,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9327,24 +10653,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
- return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
+ return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
+ Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
+ PtrOp0.getAlignment());
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld1_dup_v:
@@ -9352,10 +10674,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
+ Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
+ PtrOp0.getAlignment());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
@@ -9365,8 +10685,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateDefaultAlignedStore(Ops[1],
- Builder.CreateBitCast(Ops[0], Ty));
+ return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
+ PtrOp0.getAlignment());
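The vld1/vst1 changes above stop assuming 8- or 16-byte alignment and instead use the alignment computed by EmitPointerWithAlignment for the pointer argument (PtrOp0). An illustrative source-level case where that matters (plain NEON, <arm_neon.h> assumed):

    #include <arm_neon.h>

    // p is only guaranteed to be 1-byte aligned; with the old hard-coded
    // alignment the load would have been emitted as 16-byte aligned.
    uint8x16_t load_bytes(const uint8_t *p) {
      return vld1q_u8(p);
    }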
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
@@ -9540,7 +10860,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
@@ -9559,7 +10879,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
@@ -9577,7 +10897,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
@@ -9635,33 +10955,103 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
- "unexpected ARM builtin");
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id) &&
+ "unexpected BPF builtin");
- const Expr *Arg = E->getArg(0);
- bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unexpected BPF builtin");
+ case BPF::BI__builtin_preserve_field_info: {
+ const Expr *Arg = E->getArg(0);
+ bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
- if (!getDebugInfo()) {
- CGM.Error(E->getExprLoc(), "using builtin_preserve_field_info() without -g");
- return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
- }
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(),
+ "using __builtin_preserve_field_info() without -g");
+ return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
+ : EmitLValue(Arg).getPointer(*this);
+ }
- // Enable underlying preserve_*_access_index() generation.
- bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
- IsInPreservedAIRegion = true;
- Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
- IsInPreservedAIRegion = OldIsInPreservedAIRegion;
+ // Enable underlying preserve_*_access_index() generation.
+ bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
+ IsInPreservedAIRegion = true;
+ Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
+ : EmitLValue(Arg).getPointer(*this);
+ IsInPreservedAIRegion = OldIsInPreservedAIRegion;
+
+ ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
+
+    // Build the IR for the preserve_field_info intrinsic.
+ llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
+ {FieldAddr->getType()});
+ return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
+ }
+ case BPF::BI__builtin_btf_type_id: {
+ Value *FieldVal = nullptr;
+
+    // The LValue cannot be converted to a Value to be used directly as the
+    // function parameter. If it is a structure, the "alloca" result of the
+    // LValue (a pointer) is used as the parameter. If it is a simple type,
+    // the value will be loaded from its corresponding "alloca" and used as
+    // the parameter. In our case, let us just take a pointer to the LValue,
+    // since we do not really use the parameter. The purpose of the parameter
+    // is to prevent the generated llvm.bpf.btf.type.id intrinsic call, which
+    // carries metadata, from being changed.
+ bool IsLValue = E->getArg(0)->isLValue();
+ if (IsLValue)
+ FieldVal = EmitLValue(E->getArg(0)).getPointer(*this);
+ else
+ FieldVal = EmitScalarExpr(E->getArg(0));
+
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(), "using __builtin_btf_type_id() without -g");
+ return nullptr;
+ }
- ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
- Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
+ // Generate debuginfo type for the first argument.
+ llvm::DIType *DbgInfo =
+ getDebugInfo()->getOrCreateStandaloneType(E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc());
- // Built the IR for the preserve_field_info intrinsic.
- llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
- {FieldAddr->getType()});
- return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
+ ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
+
+    // Build the IR for the btf_type_id intrinsic.
+    //
+    // In the above, we converted the LValue argument to a pointer to the
+    // LValue. For example, the following
+    //   int v;
+    //   C1: __builtin_btf_type_id(v, flag);
+    // will be converted to
+    //   L1: llvm.bpf.btf.type.id(&v, flag)
+    // This makes it hard to differentiate from
+    //   C2: __builtin_btf_type_id(&v, flag);
+    // which is converted to
+    //   L2: llvm.bpf.btf.type.id(&v, flag)
+    //
+    // If both C1 and C2 are present in the code, LLVM may later perform CSE
+    // on L1 and L2, which would result in incorrectly tagged types.
+    //
+    // The C1->L1 transformation only happens if the argument of
+    // __builtin_btf_type_id() is an LValue. So let us record whether the
+    // argument is an LValue or not in the generated IR. This should prevent
+    // potential CSE from causing debuginfo type loss.
+ //
+ // The generated IR intrinsics will hence look like
+ // L1: llvm.bpf.btf.type.id(&v, 1, flag) !di_type_for_{v};
+ // L2: llvm.bpf.btf.type.id(&v, 0, flag) !di_type_for_{&v};
+ Constant *CV = ConstantInt::get(IntTy, IsLValue);
+ llvm::Function *FnBtfTypeId = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id,
+ {FieldVal->getType(), CV->getType()});
+ CallInst *Fn = Builder.CreateCall(FnBtfTypeId, {FieldVal, CV, FlagValue});
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+ return Fn;
+ }
+ }
}
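To make the C1/C2 distinction in the comment above concrete, here is a source-level sketch of the two call shapes (the flag value 0 and the function name are illustrative; per the checks above this needs -g and the BPF target):

    void illustrate_btf_type_id() {
      int v;
      auto id_of_v   = __builtin_btf_type_id(v, 0);   // C1: lvalue argument
      auto id_of_ptr = __builtin_btf_type_id(&v, 0);  // C2: pointer-value argument
      (void)id_of_v;
      (void)id_of_ptr;
    }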
llvm::Value *CodeGenFunction::
@@ -9681,8 +11071,8 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
}
// Otherwise, insertelement the values to build the vector.
- Value *Result =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+ Value *Result = llvm::UndefValue::get(
+ llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
@@ -9694,14 +11084,15 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
unsigned NumElts) {
- llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- cast<IntegerType>(Mask->getType())->getBitWidth());
+ auto *MaskTy = llvm::FixedVectorType::get(
+ CGF.Builder.getInt1Ty(),
+ cast<IntegerType>(Mask->getType())->getBitWidth());
Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
// If we have less than 8 elements, then the starting mask was an i8 and
// we need to extract down to the right number of elements.
if (NumElts < 8) {
- uint32_t Indices[4];
+ int Indices[4];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
@@ -9711,42 +11102,40 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
return MaskVec;
}
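The helper above widens an integer mask into a vector of i1 and, for operations with fewer than eight elements, keeps only the low lanes. A minimal sketch of the same conversion outside of Clang (the helper name is illustrative; an i8 mask and NumElts <= 8 are assumed):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"

    llvm::Value *maskToVec(llvm::IRBuilder<> &B, llvm::Value *Mask8,
                           unsigned NumElts) {
      auto *FullTy = llvm::FixedVectorType::get(B.getInt1Ty(), 8);
      llvm::Value *Vec = B.CreateBitCast(Mask8, FullTy);   // i8 -> <8 x i1>
      if (NumElts < 8) {
        llvm::SmallVector<int, 8> Indices;
        for (unsigned i = 0; i != NumElts; ++i)
          Indices.push_back(i);                            // keep low lanes
        Vec = B.CreateShuffleVector(Vec, Vec, Indices);    // -> <NumElts x i1>
      }
      return Vec;
    }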
-static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
- unsigned Align) {
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(
+ CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}
-static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, unsigned Align) {
+static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(
+ CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
- return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+ return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
ResultTy);
@@ -9756,10 +11145,9 @@ static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
bool IsCompress) {
- llvm::Type *ResultTy = Ops[1]->getType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
@@ -9769,15 +11157,14 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
ResultTy);
@@ -9806,7 +11193,7 @@ static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
// Funnel shifts amounts are treated as modulo and types are all power-of-2 so
// we only care about the lowest log2 bits anyway.
if (Amt->getType() != Ty) {
- unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
}
@@ -9864,7 +11251,8 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
if (C->isAllOnesValue())
return Op0;
- Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
+ Mask = getMaskVecValue(
+ CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
@@ -9876,9 +11264,8 @@ static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
if (C->isAllOnesValue())
return Op0;
- llvm::VectorType *MaskTy =
- llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- Mask->getType()->getIntegerBitWidth());
+ auto *MaskTy = llvm::FixedVectorType::get(
+ CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
@@ -9893,7 +11280,7 @@ static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
}
if (NumElts < 8) {
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
for (unsigned i = NumElts; i != 8; ++i)
@@ -9911,15 +11298,16 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
bool Signed, ArrayRef<Value *> Ops) {
assert((Ops.size() == 2 || Ops.size() == 4) &&
"Unexpected number of arguments");
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (CC == 3) {
Cmp = Constant::getNullValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
+ llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else if (CC == 7) {
Cmp = Constant::getAllOnesValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
+ llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else {
ICmpInst::Predicate Pred;
switch (CC) {
@@ -10035,24 +11423,19 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
  // Only handle the case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
if (IID != Intrinsic::not_intrinsic &&
- cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
+ (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
+ IsAddSub)) {
Function *Intr = CGF.CGM.getIntrinsic(IID);
Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
} else {
llvm::Type *Ty = A->getType();
- Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
- Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
-
- if (IsAddSub) {
- // Negate even elts in C using a mask.
- unsigned NumElts = Ty->getVectorNumElements();
- SmallVector<uint32_t, 16> Indices(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + (i % 2) * NumElts;
-
- Value *NegC = CGF.Builder.CreateFNeg(C);
- Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
- Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
+ Function *FMA;
+ if (CGF.Builder.getIsFPConstrained()) {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
+ } else {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Res = CGF.Builder.CreateCall(FMA, {A, B, C});
}
}
@@ -10110,6 +11493,10 @@ EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
Intrinsic::x86_avx512_vfmadd_f64;
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
+ } else if (CGF.Builder.getIsFPConstrained()) {
+ Function *FMA = CGF.CGM.getIntrinsic(
+ Intrinsic::experimental_constrained_fma, Ops[0]->getType());
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
} else {
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
@@ -10134,8 +11521,8 @@ static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
// Arguments have a vXi32 type so cast to vXi64.
- Ty = llvm::VectorType::get(CGF.Int64Ty,
- Ty->getPrimitiveSizeInBits() / 64);
+ Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
+ Ty->getPrimitiveSizeInBits() / 64);
Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
@@ -10189,7 +11576,7 @@ static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) {
- unsigned NumberOfElements = DstTy->getVectorNumElements();
+ unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
@@ -10211,6 +11598,43 @@ Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
return EmitX86CpuIs(CPUStr);
}
+// Convert F16 halves to floats.
+static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops,
+ llvm::Type *DstTy) {
+ assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
+ "Unknown cvtph2ps intrinsic");
+
+  // If the SAE intrinsic doesn't use default rounding then we can't lower
+  // this to a plain fpext; keep the target-specific intrinsic.
+ if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
+ Function *F =
+ CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
+ return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
+ }
+
+ unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+ Value *Src = Ops[0];
+
+ // Extract the subvector.
+ if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+ assert(NumDstElts == 4 && "Unexpected vector size");
+ Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
+ ArrayRef<int>{0, 1, 2, 3});
+ }
+
+ // Bitcast from vXi16 to vXf16.
+ auto *HalfTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
+ Src = CGF.Builder.CreateBitCast(Src, HalfTy);
+
+ // Perform the fp-extension.
+ Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
+
+ if (Ops.size() >= 3)
+ Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
+ return Res;
+}
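+
A hedged usage sketch of the path above: for the 128-bit form, the F16C conversion is now lowered to a shuffle that keeps lanes 0-3, a bitcast to <4 x half> and an fpext, rather than a target-specific intrinsic (assumes F16C is available, e.g. compile with -mf16c):

    #include <immintrin.h>
    // Widens four packed half-precision values (low 64 bits of the input)
    // to four floats; in clang this maps to __builtin_ia32_vcvtph2ps.
    __m128 widen4(__m128i half_bits) {
      return _mm_cvtph_ps(half_bits);
    }
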
+
// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
const CallExpr *E,
@@ -10247,11 +11671,11 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING) \
.Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
- .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \
+#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
+ .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
+#define X86_CPU_TYPE(ENUM, STR) \
.Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \
+#define X86_CPU_SUBTYPE(ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
.Default({0, 0});
@@ -10281,7 +11705,7 @@ CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
#include "llvm/Support/X86TargetParser.def"
;
FeaturesMask |= (1ULL << Feature);
@@ -10406,8 +11830,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// TODO: The builtins could be removed if the SSE header files used vector
// extension comparisons directly (vector ordered/unordered may need
// additional support via __builtin_isnan()).
- auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
+ auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
+ bool IsSignaling) {
+ Value *Cmp;
+ if (IsSignaling)
+ Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
+ else
+ Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
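
For context on the sign-extension step: the SSE packed-compare builtins return a vector whose lanes are all-ones or all-zero, so the i1 vector produced by fcmp is sign-extended back to the integer lane width. A small example of one builtin routed through this lambda (illustrative):

    #include <xmmintrin.h>
    // Lanes where a < b become 0xFFFFFFFF, all other lanes 0x00000000.
    __m128 less_mask(__m128 a, __m128 b) {
      return _mm_cmplt_ps(a, b); // __builtin_ia32_cmpltps underneath
    }
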
@@ -10486,7 +11915,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v16hi:
case X86::BI__builtin_ia32_vec_ext_v8si:
case X86::BI__builtin_ia32_vec_ext_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -10501,7 +11931,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_set_v16hi:
case X86::BI__builtin_ia32_vec_set_v8si:
case X86::BI__builtin_ia32_vec_set_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -10589,12 +12020,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
- return EmitX86MaskedStore(*this, Ops, 1);
+ return EmitX86MaskedStore(*this, Ops, Align(1));
case X86::BI__builtin_ia32_storess128_mask:
- case X86::BI__builtin_ia32_storesd128_mask: {
- return EmitX86MaskedStore(*this, Ops, 1);
- }
+ case X86::BI__builtin_ia32_storesd128_mask:
+ return EmitX86MaskedStore(*this, Ops, Align(1));
+
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
@@ -10680,10 +12111,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
- case X86::BI__builtin_ia32_vfmaddsubps:
- case X86::BI__builtin_ia32_vfmaddsubpd:
- case X86::BI__builtin_ia32_vfmaddsubps256:
- case X86::BI__builtin_ia32_vfmaddsubpd256:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
@@ -10705,11 +12132,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
- case X86::BI__builtin_ia32_storeapd512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedStore(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ return EmitX86MaskedStore(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
+
case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
@@ -10728,11 +12155,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10745,11 +12172,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
- case X86::BI__builtin_ia32_movdqa64load512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedLoad(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ return EmitX86MaskedLoad(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:
@@ -10932,8 +12358,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
- Ops[2]->getType()->getVectorNumElements());
+ unsigned MinElts =
+ std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
+ cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -11040,8 +12467,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
- Ops[3]->getType()->getVectorNumElements());
+ unsigned MinElts =
+ std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
+ cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -11063,16 +12491,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_extracti64x2_256_mask:
case X86::BI__builtin_ia32_extractf64x2_512_mask:
case X86::BI__builtin_ia32_extracti64x2_512_mask: {
- llvm::Type *DstTy = ConvertType(E->getType());
- unsigned NumElts = DstTy->getVectorNumElements();
- unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
+ auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+ unsigned NumElts = DstTy->getNumElements();
+ unsigned SrcNumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned SubVectors = SrcNumElts / NumElts;
unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= NumElts;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + Index;
@@ -11102,15 +12531,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_inserti64x2_256:
case X86::BI__builtin_ia32_insertf64x2_512:
case X86::BI__builtin_ia32_inserti64x2_512: {
- unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
+ unsigned DstNumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ unsigned SrcNumElts =
+ cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
unsigned SubVectors = DstNumElts / SrcNumElts;
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= SrcNumElts;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != DstNumElts; ++i)
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
@@ -11167,10 +12598,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pblendw256:
case X86::BI__builtin_ia32_pblendd128:
case X86::BI__builtin_ia32_pblendd256: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- uint32_t Indices[16];
+ int Indices[16];
// If there are more than 8 elements, the immediate is used twice so make
// sure we handle that.
for (unsigned i = 0; i != NumElts; ++i)
@@ -11184,13 +12616,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
+ int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i) {
Indices[l + i] = l + (Imm & 3);
@@ -11208,13 +12640,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
+ int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + i;
@@ -11238,15 +12670,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpermilpd512:
case X86::BI__builtin_ia32_vpermilps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[i + l] = (Imm % NumLaneElts) + l;
@@ -11265,15 +12697,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shufps256:
case X86::BI__builtin_ia32_shufps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Index = Imm % NumLaneElts;
@@ -11293,11 +12725,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_permdi512:
case X86::BI__builtin_ia32_permdf512: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
// These intrinsics operate on 256-bit lanes of four 64-bit elements.
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned l = 0; l != NumElts; l += 4)
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
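
A worked example of the immediate decoding above (not from the patch): for imm = 0x1B = 0b00011011, destination element i of each group of four takes (imm >> 2*i) & 3, i.e. 3, 2, 1, 0, a full reversal. The matching user-level intrinsic, assuming AVX2 (-mavx2):

    #include <immintrin.h>
    // Reverses the four doubles; the two-bit fields of the immediate are
    // consumed exactly as in the index loop above.
    __m256d reverse4(__m256d v) {
      return _mm256_permute4x64_pd(v, 0x1B);
    }
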
@@ -11311,7 +12743,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_palignr512: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
assert(NumElts % 16 == 0);
// If palignr is shifting the pair of vectors more than the size of two
@@ -11327,7 +12760,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
- uint32_t Indices[64];
+ int Indices[64];
// 256-bit palignr operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11348,13 +12781,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_alignq128:
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_alignq512: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
// Mask the shift amount to width of two vectors.
ShiftVal &= (2 * NumElts) - 1;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
@@ -11371,12 +12805,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shuf_i32x4:
case X86::BI__builtin_ia32_shuf_i64x2: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
unsigned NumLaneElts = NumElts / NumLanes;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
unsigned Index = (Imm % NumLanes) * NumLaneElts;
Imm /= NumLanes; // Discard the bits we just used.
@@ -11397,7 +12831,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vperm2f128_si256:
case X86::BI__builtin_ia32_permti256: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
// This takes a very simple approach since there are two lanes and a
// shuffle can have 2 inputs. So we reserve the first input for the first
@@ -11405,7 +12840,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// duplicate sources, but this can be dealt with in the backend.
Value *OutOps[2];
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned l = 0; l != 2; ++l) {
// Determine the source for this lane.
if (Imm & (1 << ((l * 4) + 3)))
@@ -11435,15 +12870,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
+ auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
+ unsigned NumElts = ResultType->getNumElements() * 8;
// If pslldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
+ int Indices[64];
// 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11453,7 +12888,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Zero, Cast,
@@ -11465,15 +12900,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
+ auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
+ unsigned NumElts = ResultType->getNumElements() * 8;
// If psrldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
+ int Indices[64];
// 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11483,7 +12918,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Cast, Zero,
@@ -11503,7 +12938,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = NumElts + i - ShiftVal;
@@ -11525,7 +12960,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
@@ -11557,7 +12992,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Unaligned nontemporal store of the scalar value.
StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
- SI->setAlignment(llvm::Align::None());
+ SI->setAlignment(llvm::Align(1));
return SI;
}
// Rotate is a special case of funnel shift - 1st 2 args are the same.
@@ -11805,7 +13240,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
@@ -11834,8 +13269,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, {A});
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, {A});
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, {A});
+ }
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
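
The constrained branch above only fires when the IR builder is in FP-constrained mode; a hedged example of source that requests it (flag spelling varies by clang version, -ffp-model=strict or -ffp-exception-behavior=strict are the usual ways in):

    #include <xmmintrin.h>
    // Under strict FP, the scalar lane is lowered through
    // llvm.experimental.constrained.sqrt instead of llvm.sqrt.
    __m128 strict_sqrt(__m128 v) {
      return _mm_sqrt_ss(v); // __builtin_ia32_sqrtss
    }
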
case X86::BI__builtin_ia32_sqrtsd_round_mask:
@@ -11850,8 +13292,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, A);
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, A);
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, A);
+ }
Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
@@ -11873,8 +13322,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
- return Builder.CreateCall(F, Ops[0]);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ Ops[0]->getType());
+ return Builder.CreateConstrainedFPCall(F, Ops[0]);
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
+ return Builder.CreateCall(F, Ops[0]);
+ }
}
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
@@ -12091,7 +13546,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -12128,7 +13584,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vp2intersect_d_512:
case X86::BI__builtin_ia32_vp2intersect_d_256:
case X86::BI__builtin_ia32_vp2intersect_d_128: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Intrinsic::ID ID;
switch (BuiltinID) {
@@ -12186,7 +13643,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -12211,28 +13669,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
- return getVectorFCmpIR(CmpInst::FCMP_OEQ);
+ return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltpd:
- return getVectorFCmpIR(CmpInst::FCMP_OLT);
+ return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmplepd:
- return getVectorFCmpIR(CmpInst::FCMP_OLE);
+ return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNO);
+ return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNE);
+ return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltpd:
- return getVectorFCmpIR(CmpInst::FCMP_UGE);
+ return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnlepd:
- return getVectorFCmpIR(CmpInst::FCMP_UGT);
+ return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
- return getVectorFCmpIR(CmpInst::FCMP_ORD);
+ return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
@@ -12257,42 +13715,90 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Ignoring requested signaling behaviour,
// e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
FCmpInst::Predicate Pred;
- switch (CC) {
- case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x01: Pred = FCmpInst::FCMP_OLT; break;
- case 0x02: Pred = FCmpInst::FCMP_OLE; break;
- case 0x03: Pred = FCmpInst::FCMP_UNO; break;
- case 0x04: Pred = FCmpInst::FCMP_UNE; break;
- case 0x05: Pred = FCmpInst::FCMP_UGE; break;
- case 0x06: Pred = FCmpInst::FCMP_UGT; break;
- case 0x07: Pred = FCmpInst::FCMP_ORD; break;
- case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x09: Pred = FCmpInst::FCMP_ULT; break;
- case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
- case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x11: Pred = FCmpInst::FCMP_OLT; break;
- case 0x12: Pred = FCmpInst::FCMP_OLE; break;
- case 0x13: Pred = FCmpInst::FCMP_UNO; break;
- case 0x14: Pred = FCmpInst::FCMP_UNE; break;
- case 0x15: Pred = FCmpInst::FCMP_UGE; break;
- case 0x16: Pred = FCmpInst::FCMP_UGT; break;
- case 0x17: Pred = FCmpInst::FCMP_ORD; break;
- case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x19: Pred = FCmpInst::FCMP_ULT; break;
- case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
+ bool IsSignaling;
+    // Predicates for 16-31 repeat the 0-15 predicates. Only the signaling
+ // behavior is inverted. We'll handle that after the switch.
+ switch (CC & 0xf) {
+ case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
+ case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
+ case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
+ case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
+ case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
+ case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
+ case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
+ case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
+ case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
+ case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
+ case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
+ case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
+ case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
+ case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
+ case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
+ case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
default: llvm_unreachable("Unhandled CC");
}
+  // Invert the signaling behavior for 16-31.
+ if (CC & 0x10)
+ IsSignaling = !IsSignaling;
+
+ // If the predicate is true or false and we're using constrained intrinsics,
+ // we don't have a compare intrinsic we can use. Just use the legacy X86
+ // specific intrinsic.
+ if ((Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE) &&
+ Builder.getIsFPConstrained()) {
+
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unexpected builtin");
+ case X86::BI__builtin_ia32_cmpps:
+ IID = Intrinsic::x86_sse_cmp_ps;
+ break;
+ case X86::BI__builtin_ia32_cmpps256:
+ IID = Intrinsic::x86_avx_cmp_ps_256;
+ break;
+ case X86::BI__builtin_ia32_cmppd:
+ IID = Intrinsic::x86_sse2_cmp_pd;
+ break;
+ case X86::BI__builtin_ia32_cmppd256:
+ IID = Intrinsic::x86_avx_cmp_pd_256;
+ break;
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_512;
+ break;
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_512;
+ break;
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_128;
+ break;
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_256;
+ break;
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_128;
+ break;
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_256;
+ break;
+ }
+
+ Function *Intr = CGM.getIntrinsic(IID);
+ if (cast<llvm::VectorType>(Intr->getReturnType())
+ ->getElementType()
+ ->isIntegerTy(1)) {
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ Value *MaskIn = Ops[3];
+ Ops.erase(&Ops[3]);
+
+ Value *Cmp = Builder.CreateCall(Intr, Ops);
+ return EmitX86MaskedCompareResult(*this, Cmp, NumElts, MaskIn);
+ }
+
+ return Builder.CreateCall(Intr, Ops);
+ }
+
// Builtins without the _mask suffix return a vector of integers
// of the same width as the input vectors
switch (BuiltinID) {
@@ -12302,12 +13808,18 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
+ // FIXME: Support SAE.
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ Value *Cmp;
+ if (IsSignaling)
+ Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
+ else
+ Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
}
default:
- return getVectorFCmpIR(Pred);
+ return getVectorFCmpIR(Pred, IsSignaling);
}
}
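
Concretely (an illustrative pairing, not taken from the diff): _CMP_LT_OS is 0x01 and _CMP_LT_OQ is 0x11, so both decode to FCMP_OLT and bit 4 only flips quiet versus signaling, which is exactly the CC & 0x10 inversion handled above. Assuming AVX (-mavx):

    #include <immintrin.h>
    __m128 lt_signaling(__m128 a, __m128 b) {
      return _mm_cmp_ps(a, b, _CMP_LT_OS); // CC = 0x01, signaling compare
    }
    __m128 lt_quiet(__m128 a, __m128 b) {
      return _mm_cmp_ps(a, b, _CMP_LT_OQ); // CC = 0x11, quiet compare
    }
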
@@ -12345,10 +13857,19 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
+ // f16c half2float intrinsics
+ case X86::BI__builtin_ia32_vcvtph2ps:
+ case X86::BI__builtin_ia32_vcvtph2ps256:
+ case X86::BI__builtin_ia32_vcvtph2ps_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
+
// AVX512 bf16 intrinsics
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
- Ops[2] = getMaskVecValue(*this, Ops[2],
- Ops[0]->getType()->getVectorNumElements());
+ Ops[2] = getMaskVecValue(
+ *this, Ops[2],
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
@@ -12508,7 +14029,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__stosb: {
    // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
// instruction, but it will create a memset that won't be optimized away.
- return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align::None(), true);
+ return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
}
case X86::BI__ud2:
// llvm.trap makes a ud2a instruction on x86.
@@ -12733,9 +14254,14 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_xvsqrtdp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
- ID = Intrinsic::sqrt;
- llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ llvm::Function *F = CGM.getIntrinsic(
+ Intrinsic::experimental_constrained_sqrt, ResultType);
+ return Builder.CreateConstrainedFPCall(F, X);
+ } else {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
// Count leading zeros
case PPC::BI__builtin_altivec_vclzb:
@@ -12792,21 +14318,32 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
BuiltinID == PPC::BI__builtin_vsx_xvrspim)
- ID = Intrinsic::floor;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
BuiltinID == PPC::BI__builtin_vsx_xvrspi)
- ID = Intrinsic::round;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
- ID = Intrinsic::nearbyint;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
- ID = Intrinsic::ceil;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
- ID = Intrinsic::trunc;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
+ : Builder.CreateCall(F, X);
}
// Absolute value
@@ -12831,25 +14368,43 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ llvm::Function *F;
+ if (Builder.getIsFPConstrained())
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ else
+ F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
switch (BuiltinID) {
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
- return Builder.CreateCall(F, {X, Y, Z});
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+ else
+ return Builder.CreateCall(F, {X, Y, Z});
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
- return Builder.CreateFSub(Zero,
- Builder.CreateCall(F, {X, Y, Z}), "sub");
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(
+ Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
+ else
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
- return Builder.CreateCall(F,
- {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ else
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
- Value *FsubRes =
- Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
- return Builder.CreateFSub(Zero, FsubRes, "sub");
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(
+ Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
+ "neg");
+ else
+ return Builder.CreateFNeg(
+ Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
+ "neg");
}
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
@@ -12875,25 +14430,22 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
-
// Reverse the double words in the vector we will extract from.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
// Reverse the index.
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
@@ -12902,7 +14454,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
// The second argument is a compile time constant int that needs to
// be clamped to the range [0, 12].
@@ -12920,13 +14473,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Emit the call, then reverse the double words of the results vector.
Value *Call = Builder.CreateCall(F, Ops);
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
-
- Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
+ Value *ShuffleCall =
+ Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
return ShuffleCall;
} else {
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
@@ -12939,21 +14487,20 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle. So we use the
// same indices for both LE and BE in order to produce expected results in
// both cases.
- unsigned ElemIdx0 = (Index & 2) >> 1;
- unsigned ElemIdx1 = 2 + (Index & 1);
-
- Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1)};
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
+ int ElemIdx0 = (Index & 2) >> 1;
+ int ElemIdx1 = 2 + (Index & 1);
+ int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
+ Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
@@ -12963,14 +14510,16 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
// Create a shuffle mask
- unsigned ElemIdx0;
- unsigned ElemIdx1;
- unsigned ElemIdx2;
- unsigned ElemIdx3;
+ int ElemIdx0;
+ int ElemIdx1;
+ int ElemIdx2;
+ int ElemIdx3;
if (getTarget().isLittleEndian()) {
// Little endian element N comes from element 8+N-Index of the
// concatenated wide vector (of course, using modulo arithmetic on
@@ -12987,14 +14536,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
ElemIdx3 = Index + 3;
}
- Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1),
- ConstantInt::get(Int32Ty, ElemIdx2),
- ConstantInt::get(Int32Ty, ElemIdx3)};
-
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
+ int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
+ Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
@@ -13003,7 +14547,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_pack_vector_int128: {
bool isLittleEndian = getTarget().isLittleEndian();
Value *UndefValue =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
+ llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
Value *Res = Builder.CreateInsertElement(
UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
Res = Builder.CreateInsertElement(Res, Ops[1],
@@ -13014,7 +14558,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_unpack_vector_int128: {
ConstantInt *Index = cast<ConstantInt>(Ops[1]);
Value *Unpacked = Builder.CreateBitCast(
- Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
+ Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
if (getTarget().isLittleEndian())
Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
@@ -13024,8 +14568,91 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
}
+namespace {
+// If \p E is not a null pointer, insert an address space cast to match the
+// return type of \p E if necessary.
+Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
+ const CallExpr *E = nullptr) {
+ auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
+ auto *Call = CGF.Builder.CreateCall(F);
+ Call->addAttribute(
+ AttributeList::ReturnIndex,
+ Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
+ Call->addAttribute(AttributeList::ReturnIndex,
+ Attribute::getWithAlignment(Call->getContext(), Align(4)));
+ if (!E)
+ return Call;
+ QualType BuiltinRetType = E->getType();
+ auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
+ if (RetTy == Call->getType())
+ return Call;
+ return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
+}
+
+// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
+Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
+ const unsigned XOffset = 4;
+ auto *DP = EmitAMDGPUDispatchPtr(CGF);
+ // Indexing the HSA kernel_dispatch_packet struct.
+ auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
+ auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *DstTy =
+ CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
+ auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
+ auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
+ llvm::MDBuilder MDHelper(CGF.getLLVMContext());
+ llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
+ APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
+ LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
+ LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGF.getLLVMContext(), None));
+ return LD;
+}
+} // namespace
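+
EmitAMDGPUWorkGroupSize relies on the layout of the HSA kernel dispatch packet; a rough sketch of the prefix it indexes (field names follow the HSA packet layout this code assumes, and XOffset + Index * 2 selects offsets 4, 6 and 8):

    // Only the leading fields matter here; the real dispatch packet continues
    // with grid sizes, segment sizes, the kernel object and so on.
    struct KernelDispatchPacketPrefix {
      unsigned short header;           // offset 0
      unsigned short setup;            // offset 2
      unsigned short workgroup_size_x; // offset 4, Index == 0
      unsigned short workgroup_size_y; // offset 6, Index == 1
      unsigned short workgroup_size_z; // offset 8, Index == 2
    };
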
+
+// For processing memory ordering and memory scope arguments of various
+// amdgcn builtins.
+// \p Order takes a C++11-compatible memory-ordering specifier and converts
+// it into LLVM's memory ordering specifier using the atomic C ABI, and writes
+// it to \p AO. \p Scope takes a const char * and converts it into an
+// AMDGCN-specific SyncScopeID, which is written to \p SSID.
+bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
+ llvm::AtomicOrdering &AO,
+ llvm::SyncScope::ID &SSID) {
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+
+ // Map C11/C++11 memory ordering to LLVM memory ordering
+ switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ case llvm::AtomicOrderingCABI::acquire:
+ AO = llvm::AtomicOrdering::Acquire;
+ break;
+ case llvm::AtomicOrderingCABI::release:
+ AO = llvm::AtomicOrdering::Release;
+ break;
+ case llvm::AtomicOrderingCABI::acq_rel:
+ AO = llvm::AtomicOrdering::AcquireRelease;
+ break;
+ case llvm::AtomicOrderingCABI::seq_cst:
+ AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ break;
+ case llvm::AtomicOrderingCABI::consume:
+ case llvm::AtomicOrderingCABI::relaxed:
+ break;
+ }
+
+ StringRef scp;
+ llvm::getConstantStringInfo(Scope, scp);
+ SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
+ return true;
+ }
+ return false;
+}
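+
A usage sketch of the mapping above (hedged; the exact IR depends on the target and the scope string): a sequentially consistent workgroup fence written with the new builtin becomes an LLVM fence carrying an AMDGCN sync scope:

    // __ATOMIC_SEQ_CST is the C11 ordering macro that ProcessOrderScopeAMDGCN
    // translates; the string becomes the SyncScope ID.
    void wg_fence(void) {
      __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
      // expected lowering, roughly: fence syncscope("workgroup") seq_cst
    }
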
+
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ llvm::SyncScope::ID SSID;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_div_scale:
case AMDGPU::BI__builtin_amdgcn_div_scalef: {
@@ -13093,6 +14720,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
+ case AMDGPU::BI__builtin_amdgcn_sqrt:
+ case AMDGPU::BI__builtin_amdgcn_sqrtf:
+ case AMDGPU::BI__builtin_amdgcn_sqrth:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
@@ -13106,6 +14737,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_cosf:
case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
+ case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
+ return EmitAMDGPUDispatchPtr(*this, E);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
@@ -13148,7 +14781,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
@@ -13159,7 +14792,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
@@ -13180,7 +14813,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
}
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
+ EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
CI->setConvergent();
return CI;
}
@@ -13189,7 +14822,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
"exec_lo" : "exec_hi";
CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
+ EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
CI->setConvergent();
return CI;
}
@@ -13201,6 +14834,14 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
+ // amdgcn workgroup size
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
+ return EmitAMDGPUWorkGroupSize(*this, 0);
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
+ return EmitAMDGPUWorkGroupSize(*this, 1);
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
+ return EmitAMDGPUWorkGroupSize(*this, 2);
+
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
@@ -13211,6 +14852,61 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_z:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
+ case AMDGPU::BI__builtin_amdgcn_alignbit: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
+ return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ }
+
+ case AMDGPU::BI__builtin_amdgcn_fence: {
+ if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)), AO, SSID))
+ return Builder.CreateFence(AO, SSID);
+ LLVM_FALLTHROUGH;
+ }
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
+ unsigned BuiltinAtomicOp;
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+ break;
+ }
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Val = EmitScalarExpr(E->getArg(1));
+
+ llvm::Function *F =
+ CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
+
+ if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)), AO, SSID)) {
+
+      // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect the ordering
+      // and scope as unsigned values.
+ Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
+ Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
+
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+ Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+
+ return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
+ }
+ LLVM_FALLTHROUGH;
+ }
default:
return nullptr;
}
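As a minimal orientation sketch (not part of the patch), the AMDGPU builtins wired up above would be exercised from device code roughly as follows, assuming an amdgcn target, the usual __ATOMIC_* orderings, and memory-scope strings such as "workgroup" and "agent"; the argument forms are inferred from the lowering above and should be checked against BuiltinsAMDGPU.def.

// Hedged device-side usage sketch; only compiles when targeting amdgcn.
extern "C" void device_body(unsigned *counter) {
  // Lowered via EmitAMDGPUWorkGroupSize above (reads the dispatch packet).
  unsigned wg_x = __builtin_amdgcn_workgroup_size_x();
  // Release fence scoped to the workgroup; lowered to an LLVM fence through
  // ProcessOrderScopeAMDGCN.
  __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  // Wrapping increment; lowered to llvm.amdgcn.atomic.inc with explicit
  // ordering, scope and volatile operands, as in the switch above.
  unsigned old = __builtin_amdgcn_atomic_inc32(counter, wg_x,
                                               __ATOMIC_SEQ_CST, "agent");
  (void)old;
}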
@@ -13308,8 +15004,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
+ return Builder.CreateConstrainedFPCall(F, { X });
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
case SystemZ::BI__builtin_s390_vfmasb:
case SystemZ::BI__builtin_s390_vfmadb: {
@@ -13317,8 +15018,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Z});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Z});
+ }
}
case SystemZ::BI__builtin_s390_vfmssb:
case SystemZ::BI__builtin_s390_vfmsdb: {
@@ -13326,8 +15032,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ }
}
case SystemZ::BI__builtin_s390_vfnmasb:
case SystemZ::BI__builtin_s390_vfnmadb: {
@@ -13335,8 +15046,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+ }
}
case SystemZ::BI__builtin_s390_vfnmssb:
case SystemZ::BI__builtin_s390_vfnmsdb: {
@@ -13344,9 +15060,15 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- Value *NegZ = Builder.CreateFNeg(Z, "neg");
- return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ Value *NegZ = Builder.CreateFNeg(Z, "sub");
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ Value *NegZ = Builder.CreateFNeg(Z, "neg");
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+ }
}
case SystemZ::BI__builtin_s390_vflpsb:
case SystemZ::BI__builtin_s390_vflpdb: {
@@ -13375,30 +15097,42 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
default: break;
- case 0: ID = Intrinsic::rint; break;
+ case 0: ID = Intrinsic::rint;
+ CI = Intrinsic::experimental_constrained_rint; break;
}
break;
case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
default: break;
- case 0: ID = Intrinsic::nearbyint; break;
- case 1: ID = Intrinsic::round; break;
- case 5: ID = Intrinsic::trunc; break;
- case 6: ID = Intrinsic::ceil; break;
- case 7: ID = Intrinsic::floor; break;
+ case 0: ID = Intrinsic::nearbyint;
+ CI = Intrinsic::experimental_constrained_nearbyint; break;
+ case 1: ID = Intrinsic::round;
+ CI = Intrinsic::experimental_constrained_round; break;
+ case 5: ID = Intrinsic::trunc;
+ CI = Intrinsic::experimental_constrained_trunc; break;
+ case 6: ID = Intrinsic::ceil;
+ CI = Intrinsic::experimental_constrained_ceil; break;
+ case 7: ID = Intrinsic::floor;
+ CI = Intrinsic::experimental_constrained_floor; break;
}
break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, X);
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
- switch (BuiltinID) {
+ switch (BuiltinID) { // FIXME: constrained version?
case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
default: llvm_unreachable("Unknown BuiltinID");
@@ -13421,13 +15155,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
- case 4: ID = Intrinsic::maxnum; break;
+ case 4: ID = Intrinsic::maxnum;
+ CI = Intrinsic::experimental_constrained_maxnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y});
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, {X, Y});
+ }
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
@@ -13451,13 +15192,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
- case 4: ID = Intrinsic::minnum; break;
+ case 4: ID = Intrinsic::minnum;
+ CI = Intrinsic::experimental_constrained_minnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y});
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, {X, Y});
+ }
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
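The SystemZ hunks above all follow the same shape: pick the regular LLVM intrinsic as before, but switch to its experimental constrained counterpart when the builder is in FP-constrained mode. A reduced sketch of that selection pattern against a plain llvm::IRBuilder (the helper name emitSqrtLike is illustrative, not from the patch):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Emit sqrt(X), honouring strict-FP mode the same way the vfsqdb case does.
static llvm::Value *emitSqrtLike(llvm::IRBuilder<> &B, llvm::Module &M,
                                 llvm::Value *X) {
  llvm::Type *Ty = X->getType();
  if (B.getIsFPConstrained()) {
    // Constrained intrinsics carry rounding-mode/exception metadata, so the
    // call cannot be speculated or folded past the FP environment.
    llvm::Function *F = llvm::Intrinsic::getDeclaration(
        &M, llvm::Intrinsic::experimental_constrained_sqrt, Ty);
    return B.CreateConstrainedFPCall(F, {X});
  }
  llvm::Function *F =
      llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::sqrt, Ty);
  return B.CreateCall(F, {X});
}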
@@ -13817,7 +15565,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
clang::CharUnits Align =
- getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
+ CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
@@ -14346,7 +16094,7 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
Result = Builder.CreatePointerCast(Result, Args.SrcType);
// Emit an alignment assumption to ensure that the new alignment is
// propagated to loads/stores, etc.
- EmitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
+ emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
}
assert(Result->getType() == Args.SrcType);
return RValue::get(Result);
@@ -14370,30 +16118,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
- case WebAssembly::BI__builtin_wasm_memory_init: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- llvm::APSInt MemConst;
- if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- if (!MemConst.isNullValue())
- ErrorUnsupported(E, "non-zero memory index");
- Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
- llvm::ConstantInt::get(getLLVMContext(), MemConst),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
- EmitScalarExpr(E->getArg(4))};
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
- return Builder.CreateCall(Callee, Args);
- }
- case WebAssembly::BI__builtin_wasm_data_drop: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
- return Builder.CreateCall(Callee, {Arg});
- }
case WebAssembly::BI__builtin_wasm_tls_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
@@ -14462,8 +16186,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
@@ -14474,8 +16197,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
@@ -14502,6 +16224,55 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_pmin_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_pmax_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_ceil_f32x4:
+ case WebAssembly::BI__builtin_wasm_floor_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_f32x4:
+ case WebAssembly::BI__builtin_wasm_nearest_f32x4:
+ case WebAssembly::BI__builtin_wasm_ceil_f64x2:
+ case WebAssembly::BI__builtin_wasm_floor_f64x2:
+ case WebAssembly::BI__builtin_wasm_trunc_f64x2:
+ case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ceil_f32x4:
+ case WebAssembly::BI__builtin_wasm_ceil_f64x2:
+ IntNo = Intrinsic::wasm_ceil;
+ break;
+ case WebAssembly::BI__builtin_wasm_floor_f32x4:
+ case WebAssembly::BI__builtin_wasm_floor_f64x2:
+ IntNo = Intrinsic::wasm_floor;
+ break;
+ case WebAssembly::BI__builtin_wasm_trunc_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_f64x2:
+ IntNo = Intrinsic::wasm_trunc;
+ break;
+ case WebAssembly::BI__builtin_wasm_nearest_f32x4:
+ case WebAssembly::BI__builtin_wasm_nearest_f64x2:
+ IntNo = Intrinsic::wasm_nearest;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *Value = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, Value);
+ }
case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
@@ -14553,7 +16324,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
- llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
+ llvm::Type *ElemType =
+ cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
Value *Trunc = Builder.CreateTrunc(Val, ElemType);
return Builder.CreateInsertElement(Vec, Trunc, Lane);
}
@@ -14600,6 +16372,56 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_abs_i8x16:
+ case WebAssembly::BI__builtin_wasm_abs_i16x8:
+ case WebAssembly::BI__builtin_wasm_abs_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Neg = Builder.CreateNeg(Vec, "neg");
+ Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
+ Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
+ return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
+ }
+ case WebAssembly::BI__builtin_wasm_min_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_min_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_max_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *ICmp;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_min_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_s_i32x4:
+ ICmp = Builder.CreateICmpSLT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_min_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_u_i32x4:
+ ICmp = Builder.CreateICmpULT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_max_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_s_i32x4:
+ ICmp = Builder.CreateICmpSGT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_max_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_u_i32x4:
+ ICmp = Builder.CreateICmpUGT(LHS, RHS);
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ return Builder.CreateSelect(ICmp, LHS, RHS);
+ }
case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -14651,6 +16473,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
+ case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
+ case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
+ case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
case WebAssembly::BI__builtin_wasm_abs_f32x4:
case WebAssembly::BI__builtin_wasm_abs_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
@@ -14743,68 +16573,124 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
return Builder.CreateCall(Callee, Vec);
}
+ case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
+ Value *Ops[18];
+ size_t OpIdx = 0;
+ Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
+ Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
+ while (OpIdx < 18) {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(OpIdx)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ }
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
+ return Builder.CreateCall(Callee, Ops);
+ }
default:
return nullptr;
}
}
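Note that the new integer abs/min/max cases above deliberately avoid target-specific intrinsics and lower to generic compare-and-select IR, which the backend can pattern-match back into the SIMD instructions. A hedged, self-contained sketch of that lowering (the function name is illustrative):

#include "llvm/IR/IRBuilder.h"

// Min/max of two integer values (scalar or vector) as icmp + select,
// mirroring the __builtin_wasm_{min,max}_* lowering above.
static llvm::Value *emitIntMinMax(llvm::IRBuilder<> &B, llvm::Value *LHS,
                                  llvm::Value *RHS, bool IsMin, bool IsSigned) {
  llvm::Value *Cmp =
      IsMin ? (IsSigned ? B.CreateICmpSLT(LHS, RHS) : B.CreateICmpULT(LHS, RHS))
            : (IsSigned ? B.CreateICmpSGT(LHS, RHS) : B.CreateICmpUGT(LHS, RHS));
  // CreateSelect works lane-wise when Cmp is a vector of i1, so the same
  // helper covers the i8x16, i16x8 and i32x4 builtins.
  return B.CreateSelect(Cmp, LHS, RHS);
}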
+static std::pair<Intrinsic::ID, unsigned>
+getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
+ struct Info {
+ unsigned BuiltinID;
+ Intrinsic::ID IntrinsicID;
+ unsigned VecLen;
+ };
+ Info Infos[] = {
+#define CUSTOM_BUILTIN_MAPPING(x,s) \
+ { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
+ CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
+#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
+#undef CUSTOM_BUILTIN_MAPPING
+ };
+
+ auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
+ static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
+ (void)SortOnce;
+
+ const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
+ Info{BuiltinID, 0, 0}, CmpInfo);
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
+ return {Intrinsic::not_intrinsic, 0};
+
+ return {F->IntrinsicID, F->VecLen};
+}
+
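The lookup helper above uses a sort-once-then-binary-search idiom: the static table is sorted lazily on first use via a static initializer, then probed with std::lower_bound. A reduced stand-alone sketch of the same idiom (types and values are illustrative only):

#include <algorithm>
#include <iterator>
#include <utility>

struct MappingInfo {
  unsigned Key;      // e.g. a builtin ID
  unsigned Payload;  // e.g. an intrinsic ID or HVX vector length
};

static std::pair<bool, unsigned> lookupMapping(unsigned Key) {
  static MappingInfo Table[] = {{7, 70}, {3, 30}, {5, 50}};
  auto Cmp = [](const MappingInfo &A, const MappingInfo &B) {
    return A.Key < B.Key;
  };
  // Sorted exactly once, on first call, mirroring the SortOnce trick above.
  static const bool Sorted =
      (std::sort(std::begin(Table), std::end(Table), Cmp), true);
  (void)Sorted;
  const MappingInfo *I = std::lower_bound(std::begin(Table), std::end(Table),
                                          MappingInfo{Key, 0}, Cmp);
  if (I == std::end(Table) || I->Key != Key)
    return {false, 0u};
  return {true, I->Payload};
}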
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- SmallVector<llvm::Value *, 4> Ops;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID ID;
+ unsigned VecLen;
+ std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
- auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
+ auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
+ Address A = EmitPointerWithAlignment(E->getArg(0));
+ Address BP = Address(
+ Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)) };
+ // The treatment of both loads and stores is the same: the arguments for
+ // the builtin are the same as the arguments for the intrinsic.
+ // Load:
+ // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
+ // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
+ // Store:
+ // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
+ // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
+ SmallVector<llvm::Value*,5> Ops = { Base };
+ for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
+ // The load intrinsics generate two results (Value, NewBase), stores
+ // generate one (NewBase). The new base address needs to be stored.
+ llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
+ : Result;
+ llvm::Value *LV = Builder.CreateBitCast(
+ EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates two results. The new value for the base pointer
- // needs to be stored.
- Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- };
-
- auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
- // The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
- llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Value, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };
-
- llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates one result, which is the new value for the base
- // pointer. It needs to be stored.
- return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ llvm::Value *RetVal =
+ Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ if (IsLoad)
+ RetVal = Builder.CreateExtractValue(Result, 0);
+ return RetVal;
};
// Handle the conversion of bit-reverse load intrinsics to bit code.
// The intrinsic call after this function only reads from memory and the
// write to memory is dealt by the store instruction.
- auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
+ auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
// The intrinsic generates one result, which is the new value for the base
// pointer. It needs to be returned. The result of the load instruction is
// passed to intrinsic by address, so the value needs to be stored.
@@ -14822,9 +16708,9 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
// { ValueType, i8* } (i8*, i32).
-    Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};
-    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
+    llvm::Value *Result = Builder.CreateCall(
+        CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
// The value needs to be stored as the variable is passed by reference.
llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
@@ -14840,95 +16726,65 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Result, 1);
};
+ auto V2Q = [this, VecLen] (llvm::Value *Vec) {
+ Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
+ : Intrinsic::hexagon_V6_vandvrt;
+ return Builder.CreateCall(CGM.getIntrinsic(ID),
+ {Vec, Builder.getInt32(-1)});
+ };
+ auto Q2V = [this, VecLen] (llvm::Value *Pred) {
+ Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
+ : Intrinsic::hexagon_V6_vandqrt;
+ return Builder.CreateCall(CGM.getIntrinsic(ID),
+ {Pred, Builder.getInt32(-1)});
+ };
+
switch (BuiltinID) {
+ // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
+ // and the corresponding C/C++ builtins use loads/stores to update
+ // the predicate.
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
- case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vaddcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vaddcarry_128B;
- }
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- }
+ case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vsubcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vsubcarry_128B;
- }
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
+ // Get the type from the 0-th argument.
+ llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
+ Address PredAddr = Builder.CreateBitCast(
+ EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
+ llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
+ llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
+
+ llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
+
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
+ return MakeCircOp(ID, /*IsLoad=*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
+ return MakeCircOp(ID, /*IsLoad=*/false);
case Hexagon::BI__builtin_brev_ldub:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_ldb:
@@ -14941,8 +16797,40 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
- default:
- break;
+
+ default: {
+ if (ID == Intrinsic::not_intrinsic)
+ return nullptr;
+
+ auto IsVectorPredTy = [](llvm::Type *T) {
+ return T->isVectorTy() &&
+ cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
+ };
+
+ llvm::Function *IntrFn = CGM.getIntrinsic(ID);
+ llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
+ SmallVector<llvm::Value*,4> Ops;
+ for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
+ llvm::Type *T = IntrTy->getParamType(i);
+ const Expr *A = E->getArg(i);
+ if (IsVectorPredTy(T)) {
+ // There will be an implicit cast to a boolean vector. Strip it.
+ if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
+ if (Cast->getCastKind() == CK_BitCast)
+ A = Cast->getSubExpr();
+ }
+ Ops.push_back(V2Q(EmitScalarExpr(A)));
+ } else {
+ Ops.push_back(EmitScalarExpr(A));
+ }
+ }
+
+ llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
+ if (IsVectorPredTy(IntrTy->getReturnType()))
+ Call = Q2V(Call);
+
+ return Call;
+ } // default
} // switch
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index 5c5cbaff0252..baf2c79cc2b6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -50,7 +50,7 @@ private:
struct VarInfo {
llvm::GlobalVariable *Var;
const VarDecl *D;
- unsigned Flag;
+ DeviceVarFlags Flags;
};
llvm::SmallVector<VarInfo, 16> DeviceVars;
/// Keeps track of variable containing handle of GPU binary. Populated by
@@ -117,23 +117,38 @@ private:
void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
- std::string getDeviceSideName(const Decl *ND);
+ std::string getDeviceSideName(const NamedDecl *ND) override;
public:
CGNVCUDARuntime(CodeGenModule &CGM);
void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- unsigned Flags) override {
- DeviceVars.push_back({&Var, VD, Flags});
+ bool Extern, bool Constant) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Variable, Extern, Constant,
+ /*Normalized*/ false, /*Type*/ 0}});
+ }
+ void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Surface, Extern, /*Constant*/ false,
+ /*Normalized*/ false, Type}});
+ }
+ void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type, bool Normalized) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Texture, Extern, /*Constant*/ false,
+ Normalized, Type}});
}
/// Creates module constructor function
llvm::Function *makeModuleCtorFunction() override;
/// Creates module destructor function
llvm::Function *makeModuleDtorFunction() override;
- /// Construct and return the stub name of a kernel.
- std::string getDeviceStubName(llvm::StringRef Name) const override;
};
}
@@ -204,40 +219,30 @@ llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
return llvm::FunctionType::get(VoidTy, Params, false);
}
-std::string CGNVCUDARuntime::getDeviceSideName(const Decl *D) {
- auto *ND = cast<const NamedDecl>(D);
+std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
+ GlobalDecl GD;
+ // D could be either a kernel or a variable.
+ if (auto *FD = dyn_cast<FunctionDecl>(ND))
+ GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
+ else
+ GD = GlobalDecl(ND);
std::string DeviceSideName;
if (DeviceMC->shouldMangleDeclName(ND)) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- DeviceMC->mangleName(ND, Out);
- DeviceSideName = Out.str();
+ DeviceMC->mangleName(GD, Out);
+ DeviceSideName = std::string(Out.str());
} else
- DeviceSideName = ND->getIdentifier()->getName();
+ DeviceSideName = std::string(ND->getIdentifier()->getName());
return DeviceSideName;
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
- // Ensure either we have different ABIs between host and device compilations,
- // says host compilation following MSVC ABI but device compilation follows
- // Itanium C++ ABI or, if they follow the same ABI, kernel names after
- // mangling should be the same after name stubbing. The later checking is
- // very important as the device kernel name being mangled in host-compilation
- // is used to resolve the device binaries to be executed. Inconsistent naming
- // result in undefined behavior. Even though we cannot check that naming
- // directly between host- and device-compilations, the host- and
- // device-mangling in host compilation could help catching certain ones.
- assert((CGF.CGM.getContext().getAuxTargetInfo() &&
- (CGF.CGM.getContext().getAuxTargetInfo()->getCXXABI() !=
- CGF.CGM.getContext().getTargetInfo().getCXXABI())) ||
- getDeviceStubName(getDeviceSideName(CGF.CurFuncDecl)) ==
- CGF.CurFn->getName());
-
EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
CudaFeature::CUDA_USES_NEW_LAUNCH) ||
- CGF.getLangOpts().HIPUseNewLaunchAPI)
+ (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
emitDeviceStubBodyNew(CGF, Args);
else
emitDeviceStubBodyLegacy(CGF, Args);
@@ -418,7 +423,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// each emitted kernel.
llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
for (auto &&I : EmittedKernels) {
- llvm::Constant *KernelName = makeConstantString(getDeviceSideName(I.D));
+ llvm::Constant *KernelName =
+ makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
@@ -434,30 +440,70 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
Builder.CreateCall(RegisterFunc, Args);
}
+ llvm::Type *VarSizeTy = IntTy;
+  // For HIP or CUDA 9.0+, the device variable size is of type `size_t`.
+ if (CGM.getLangOpts().HIP ||
+ ToCudaVersion(CGM.getTarget().getSDKVersion()) >= CudaVersion::CUDA_90)
+ VarSizeTy = SizeTy;
+
// void __cudaRegisterVar(void **, char *, char *, const char *,
// int, int, int, int)
llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
- CharPtrTy, IntTy, IntTy,
+ CharPtrTy, IntTy, VarSizeTy,
IntTy, IntTy};
llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, RegisterVarParams, false),
+ llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
addUnderscoredPrefixToName("RegisterVar"));
+ // void __cudaRegisterSurface(void **, const struct surfaceReference *,
+ // const void **, const char *, int, int);
+ llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(
+ VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
+ false),
+ addUnderscoredPrefixToName("RegisterSurface"));
+ // void __cudaRegisterTexture(void **, const struct textureReference *,
+ // const void **, const char *, int, int, int)
+ llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(
+ VoidTy,
+ {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
+ false),
+ addUnderscoredPrefixToName("RegisterTexture"));
for (auto &&Info : DeviceVars) {
llvm::GlobalVariable *Var = Info.Var;
- unsigned Flags = Info.Flag;
llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
- uint64_t VarSize =
- CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
- llvm::Value *Args[] = {
- &GpuBinaryHandlePtr,
- Builder.CreateBitCast(Var, VoidPtrTy),
- VarName,
- VarName,
- llvm::ConstantInt::get(IntTy, (Flags & ExternDeviceVar) ? 1 : 0),
- llvm::ConstantInt::get(IntTy, VarSize),
- llvm::ConstantInt::get(IntTy, (Flags & ConstantDeviceVar) ? 1 : 0),
- llvm::ConstantInt::get(IntTy, 0)};
- Builder.CreateCall(RegisterVar, Args);
+ switch (Info.Flags.getKind()) {
+ case DeviceVarFlags::Variable: {
+ uint64_t VarSize =
+ CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
+ llvm::Value *Args[] = {
+ &GpuBinaryHandlePtr,
+ Builder.CreateBitCast(Var, VoidPtrTy),
+ VarName,
+ VarName,
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
+ llvm::ConstantInt::get(VarSizeTy, VarSize),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
+ llvm::ConstantInt::get(IntTy, 0)};
+ Builder.CreateCall(RegisterVar, Args);
+ break;
+ }
+ case DeviceVarFlags::Surface:
+ Builder.CreateCall(
+ RegisterSurf,
+ {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
+ VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
+ break;
+ case DeviceVarFlags::Texture:
+ Builder.CreateCall(
+ RegisterTex,
+ {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
+ VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
+ break;
+ }
}
Builder.CreateRetVoid();
@@ -551,8 +597,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
if (CudaGpuBinary) {
// If fatbin is available from early finalization, create a string
// literal containing the fat binary loaded from the given file.
- FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
- FatbinConstantName, 8);
+ FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()),
+ "", FatbinConstantName, 8);
} else {
// If fatbin is not available, create an external symbol
// __hip_fatbin in section .hip_fatbin. The external symbol is supposed
@@ -586,7 +632,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// For CUDA, create a string literal containing the fat binary loaded from
// the given file.
- FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
+ FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
FatbinConstantName, 8);
FatMagic = CudaFatMagic;
}
@@ -691,8 +737,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
SmallString<64> ModuleID;
llvm::raw_svector_ostream OS(ModuleID);
OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
- llvm::Constant *ModuleIDConstant =
- makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);
+ llvm::Constant *ModuleIDConstant = makeConstantString(
+ std::string(ModuleID.str()), "", ModuleIDSectionName, 32);
// Create an alias for the FatbinWrapper that nvcc will look for.
llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
@@ -797,12 +843,6 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
return ModuleDtorFunc;
}
-std::string CGNVCUDARuntime::getDeviceStubName(llvm::StringRef Name) const {
- if (!CGM.getLangOpts().HIP)
- return Name;
- return (Name + ".stub").str();
-}
-
CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
return new CGNVCUDARuntime(CGM);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
index e548a3a546d4..19e70a2022a5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
@@ -25,6 +25,7 @@ class GlobalVariable;
namespace clang {
class CUDAKernelCallExpr;
+class NamedDecl;
class VarDecl;
namespace CodeGen {
@@ -41,9 +42,30 @@ protected:
public:
// Global variable properties that must be passed to CUDA runtime.
- enum DeviceVarFlags {
- ExternDeviceVar = 0x01, // extern
- ConstantDeviceVar = 0x02, // __constant__
+ class DeviceVarFlags {
+ public:
+ enum DeviceVarKind {
+ Variable, // Variable
+ Surface, // Builtin surface
+ Texture, // Builtin texture
+ };
+
+ private:
+ unsigned Kind : 2;
+ unsigned Extern : 1;
+ unsigned Constant : 1; // Constant variable.
+ unsigned Normalized : 1; // Normalized texture.
+    int SurfTexType; // Type of surface/texture.
+
+ public:
+ DeviceVarFlags(DeviceVarKind K, bool E, bool C, bool N, int T)
+ : Kind(K), Extern(E), Constant(C), Normalized(N), SurfTexType(T) {}
+
+ DeviceVarKind getKind() const { return static_cast<DeviceVarKind>(Kind); }
+ bool isExtern() const { return Extern; }
+ bool isConstant() const { return Constant; }
+ bool isNormalized() const { return Normalized; }
+ int getSurfTexType() const { return SurfTexType; }
};
CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
@@ -56,7 +78,11 @@ public:
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
virtual void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- unsigned Flags) = 0;
+ bool Extern, bool Constant) = 0;
+ virtual void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type) = 0;
+ virtual void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type, bool Normalized) = 0;
/// Constructs and returns a module initialization function or nullptr if it's
/// not needed. Must be called after all kernels have been emitted.
@@ -66,8 +92,9 @@ public:
/// Must be called after ModuleCtorFunction
virtual llvm::Function *makeModuleDtorFunction() = 0;
- /// Construct and return the stub name of a kernel.
- virtual std::string getDeviceStubName(llvm::StringRef Name) const = 0;
+ /// Returns function or variable name on device side even if the current
+ /// compilation is for host.
+ virtual std::string getDeviceSideName(const NamedDecl *ND) = 0;
};
/// Creates an instance of a CUDA runtime class.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
index 1928e0df3809..a4bd2c6d5da0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
@@ -263,8 +263,8 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- llvm::Value *VFunc =
- CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ llvm::Value *VFunc = CGF.Builder.CreateAlignedLoad(
+ VFuncPtr, llvm::Align(CGF.PointerAlignInBytes));
CGCallee Callee(GD, VFunc);
return Callee;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 7ada4032b3ee..65327a2435b5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -156,6 +156,8 @@ void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
+ assert(!CGF.hasAggregateEvaluationKind(ResultType) &&
+ "cannot handle aggregates");
CGF.EmitReturnOfRValue(RV, ResultType);
}
@@ -313,3 +315,20 @@ CatchTypeInfo CGCXXABI::getCatchAllTypeInfo() {
std::vector<CharUnits> CGCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) {
return std::vector<CharUnits>();
}
+
+CGCXXABI::AddedStructorArgCounts CGCXXABI::addImplicitConstructorArgs(
+ CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ AddedStructorArgs AddedArgs =
+ getImplicitConstructorArgs(CGF, D, Type, ForVirtualBase, Delegating);
+ for (size_t i = 0; i < AddedArgs.Prefix.size(); ++i) {
+ Args.insert(Args.begin() + 1 + i,
+ CallArg(RValue::get(AddedArgs.Prefix[i].Value),
+ AddedArgs.Prefix[i].Type));
+ }
+ for (const auto &arg : AddedArgs.Suffix) {
+ Args.add(RValue::get(arg.Value), arg.Type);
+ }
+ return AddedStructorArgCounts(AddedArgs.Prefix.size(),
+ AddedArgs.Suffix.size());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index bff49be7a3c4..f5b3fc13bbbd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -16,6 +16,7 @@
#include "CodeGenFunction.h"
#include "clang/Basic/LLVM.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
namespace llvm {
class Constant;
@@ -107,6 +108,8 @@ public:
virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
+ virtual bool useSinitAndSterm() const { return false; }
+
/// Returns true if the target allows calling a function through a pointer
/// with a different signature than the actual function (or equivalently,
/// bitcasting a function or function pointer to a different function type).
@@ -287,24 +290,44 @@ public:
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
- /// Notes how many arguments were added to the beginning (Prefix) and ending
- /// (Suffix) of an arg list.
+ /// Additional implicit arguments to add to the beginning (Prefix) and end
+ /// (Suffix) of a constructor / destructor arg list.
///
- /// Note that Prefix actually refers to the number of args *after* the first
- /// one: `this` arguments always come first.
+ /// Note that Prefix should actually be inserted *after* the first existing
+ /// arg; `this` arguments always come first.
struct AddedStructorArgs {
+ struct Arg {
+ llvm::Value *Value;
+ QualType Type;
+ };
+ SmallVector<Arg, 1> Prefix;
+ SmallVector<Arg, 1> Suffix;
+ AddedStructorArgs() = default;
+ AddedStructorArgs(SmallVector<Arg, 1> P, SmallVector<Arg, 1> S)
+ : Prefix(std::move(P)), Suffix(std::move(S)) {}
+ static AddedStructorArgs prefix(SmallVector<Arg, 1> Args) {
+ return {std::move(Args), {}};
+ }
+ static AddedStructorArgs suffix(SmallVector<Arg, 1> Args) {
+ return {{}, std::move(Args)};
+ }
+ };
+
+ /// Similar to AddedStructorArgs, but only notes the number of additional
+ /// arguments.
+ struct AddedStructorArgCounts {
unsigned Prefix = 0;
unsigned Suffix = 0;
- AddedStructorArgs() = default;
- AddedStructorArgs(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
- static AddedStructorArgs prefix(unsigned N) { return {N, 0}; }
- static AddedStructorArgs suffix(unsigned N) { return {0, N}; }
+ AddedStructorArgCounts() = default;
+ AddedStructorArgCounts(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
+ static AddedStructorArgCounts prefix(unsigned N) { return {N, 0}; }
+ static AddedStructorArgCounts suffix(unsigned N) { return {0, N}; }
};
/// Build the signature of the given constructor or destructor variant by
/// adding any required parameters. For convenience, ArgTys has been
/// initialized with the type of 'this'.
- virtual AddedStructorArgs
+ virtual AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
@@ -365,14 +388,26 @@ public:
/// Emit the ABI-specific prolog for the function.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+ virtual AddedStructorArgs
+ getImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ bool Delegating) = 0;
+
/// Add any ABI-specific implicit arguments needed to call a constructor.
///
/// \return The number of arguments added at the beginning and end of the
/// call, which is typically zero or one.
- virtual AddedStructorArgs
+ AddedStructorArgCounts
addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) = 0;
+ bool Delegating, CallArgList &Args);
+
+ /// Get the implicit (second) parameter that comes after the "this" pointer,
+  /// or nullptr if there isn't one.
+ virtual llvm::Value *
+ getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) = 0;
/// Emit the destructor call.
virtual void EmitDestructorCall(CodeGenFunction &CGF,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index e4803fde230f..e8235c775d8f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -16,6 +16,7 @@
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
@@ -325,7 +326,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
if (PassParams)
appendParameterTypes(*this, argTypes, paramInfos, FTP);
- CGCXXABI::AddedStructorArgs AddedArgs =
+ CGCXXABI::AddedStructorArgCounts AddedArgs =
TheCXXABI.buildStructorSignature(GD, argTypes);
if (!paramInfos.empty()) {
// Note: prefix implies after the first param.
@@ -815,6 +816,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->ASTCallingConvention = info.getCC();
FI->InstanceMethod = instanceMethod;
FI->ChainCall = chainCall;
+ FI->CmseNSCall = info.getCmseNSCall();
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
@@ -1014,8 +1016,8 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
}
}
-void CodeGenFunction::ExpandTypeFromArgs(
- QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
+void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator &AI) {
assert(LV.isSimple() &&
"Unexpected non-simple lvalue during struct expansion.");
@@ -1044,17 +1046,17 @@ void CodeGenFunction::ExpandTypeFromArgs(
ExpandTypeFromArgs(FD->getType(), SubLV, AI);
}
} else if (isa<ComplexExpansion>(Exp.get())) {
- auto realValue = *AI++;
- auto imagValue = *AI++;
+ auto realValue = &*AI++;
+ auto imagValue = &*AI++;
EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
} else {
// Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
// primitive store.
assert(isa<NoExpansion>(Exp.get()));
if (LV.isBitField())
- EmitStoreThroughLValue(RValue::get(*AI++), LV);
+ EmitStoreThroughLValue(RValue::get(&*AI++), LV);
else
- EmitStoreOfScalar(*AI++, LV);
+ EmitStoreOfScalar(&*AI++, LV);
}
}
@@ -1232,7 +1234,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
- SrcTy = Src.getType()->getElementType();
+ SrcTy = Src.getElementType();
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -1260,11 +1262,9 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// Otherwise do coercion through memory. This is stupid, but simple.
Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
- Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
- Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
- CGF.Builder.CreateMemCpy(Casted, SrcCasted,
- llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
- false);
+ CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ Src.getPointer(), Src.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
return CGF.Builder.CreateLoad(Tmp);
}
@@ -1272,18 +1272,17 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
-static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
- Address Dest, bool DestIsVolatile) {
+void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
+ bool DestIsVolatile) {
// Prefer scalar stores to first-class aggregate stores.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(Val->getType())) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
- llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
- CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
+ Address EltPtr = Builder.CreateStructGEP(Dest, i);
+ llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
+ Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
} else {
- CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
+ Builder.CreateStore(Val, Dest, DestIsVolatile);
}
}
@@ -1298,7 +1297,7 @@ static void CreateCoercedStore(llvm::Value *Src,
bool DstIsVolatile,
CodeGenFunction &CGF) {
llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy = Dst.getType()->getElementType();
+ llvm::Type *DstTy = Dst.getElementType();
if (SrcTy == DstTy) {
CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
@@ -1308,7 +1307,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
- DstTy = Dst.getType()->getElementType();
+ DstTy = Dst.getElementType();
}
llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
@@ -1334,7 +1333,7 @@ static void CreateCoercedStore(llvm::Value *Src,
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
- BuildAggStore(CGF, Src, Dst, DstIsVolatile);
+ CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -1347,11 +1346,9 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
- Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
- Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
- CGF.Builder.CreateMemCpy(DstCasted, Casted,
- llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
- false);
+ CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
}
}
@@ -1702,8 +1699,9 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
-void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
- bool AttrOnCallSite,
+void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
+ bool HasOptnone,
+ bool AttrOnCallSite,
llvm::AttrBuilder &FuncAttrs) {
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
if (!HasOptnone) {
@@ -1746,13 +1744,20 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
if (CodeGenOpts.NullPointerIsValid)
- FuncAttrs.addAttribute("null-pointer-is-valid", "true");
- if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::Invalid)
+ FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
+
+ if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
FuncAttrs.addAttribute("denormal-fp-math",
- llvm::denormalModeName(CodeGenOpts.FPDenormalMode));
+ CodeGenOpts.FPDenormalMode.str());
+ if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
+ FuncAttrs.addAttribute(
+ "denormal-fp-math-f32",
+ CodeGenOpts.FP32DenormalMode.str());
+ }
FuncAttrs.addAttribute("no-trapping-math",
- llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+ llvm::toStringRef(LangOpts.getFPExceptionMode() ==
+ LangOptions::FPE_Ignore));
// Strict (compliant) code is the default, so only add this attribute to
// indicate that we are trying to workaround a problem case.
@@ -1762,25 +1767,21 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
FuncAttrs.addAttribute("no-infs-fp-math",
- llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
+ llvm::toStringRef(LangOpts.NoHonorInfs));
FuncAttrs.addAttribute("no-nans-fp-math",
- llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
+ llvm::toStringRef(LangOpts.NoHonorNaNs));
FuncAttrs.addAttribute("unsafe-fp-math",
- llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
+ llvm::toStringRef(LangOpts.UnsafeFPMath));
FuncAttrs.addAttribute("use-soft-float",
llvm::toStringRef(CodeGenOpts.SoftFloat));
FuncAttrs.addAttribute("stack-protector-buffer-size",
llvm::utostr(CodeGenOpts.SSPBufferSize));
FuncAttrs.addAttribute("no-signed-zeros-fp-math",
- llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+ llvm::toStringRef(LangOpts.NoSignedZero));
FuncAttrs.addAttribute(
"correctly-rounded-divide-sqrt-fp-math",
llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
- if (getLangOpts().OpenCL)
- FuncAttrs.addAttribute("denorms-are-zero",
- llvm::toStringRef(CodeGenOpts.FlushDenorm));
-
// TODO: Reciprocal estimate codegen options should apply to instructions?
const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
if (!Recips.empty())
@@ -1796,6 +1797,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
FuncAttrs.addAttribute("stackrealign");
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
+ if (CodeGenOpts.EnableSegmentedStacks)
+ FuncAttrs.addAttribute("split-stack");
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
@@ -1813,10 +1816,6 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
// Exceptions aren't supported in CUDA device code.
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
-
- // Respect -fcuda-flush-denormals-to-zero.
- if (CodeGenOpts.FlushDenorm)
- FuncAttrs.addAttribute("nvptx-f32ftz", "true");
}
for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
@@ -1826,31 +1825,100 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
}
}
-void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
+void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
llvm::AttrBuilder FuncAttrs;
- ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
- /* AttrOnCallSite = */ false, FuncAttrs);
+ getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
+ /* AttrOnCallSite = */ false, FuncAttrs);
+ // TODO: call GetCPUAndFeaturesAttributes?
F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
+void CodeGenModule::addDefaultFunctionDefinitionAttributes(
+ llvm::AttrBuilder &attrs) {
+ getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
+ /*for call*/ false, attrs);
+ GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
+}
+
+static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
+ const LangOptions &LangOpts,
+ const NoBuiltinAttr *NBA = nullptr) {
+ auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
+ SmallString<32> AttributeName;
+ AttributeName += "no-builtin-";
+ AttributeName += BuiltinName;
+ FuncAttrs.addAttribute(AttributeName);
+ };
+
+ // First, handle the language options passed through -fno-builtin.
+ if (LangOpts.NoBuiltin) {
+ // -fno-builtin disables them all.
+ FuncAttrs.addAttribute("no-builtins");
+ return;
+ }
+
+ // Then, add attributes for builtins specified through -fno-builtin-<name>.
+ llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
+
+ // Now, let's check the __attribute__((no_builtin("...")) attribute added to
+ // the source.
+ if (!NBA)
+ return;
+
+ // If there is a wildcard in the builtin names specified through the
+ // attribute, disable them all.
+ if (llvm::is_contained(NBA->builtinNames(), "*")) {
+ FuncAttrs.addAttribute("no-builtins");
+ return;
+ }
+
+ // And last, add the rest of the builtin names.
+ llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
+}
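As a side note, the precedence implemented by addNoBuiltinAttributes can be summarised with a minimal standalone sketch (plain std::string stand-ins rather than llvm::AttrBuilder; the function name below is invented for illustration): -fno-builtin wins outright, -fno-builtin-<name> flags come next, and a "*" in __attribute__((no_builtin(...))) collapses to the blanket "no-builtins" string.

#include <algorithm>
#include <string>
#include <vector>

// Mirrors the precedence of addNoBuiltinAttributes using plain strings.
static std::vector<std::string>
noBuiltinStrings(bool NoBuiltin, const std::vector<std::string> &FlagNames,
                 const std::vector<std::string> &AttrNames) {
  if (NoBuiltin)
    return {"no-builtins"};                       // -fno-builtin disables them all
  std::vector<std::string> Out;
  for (const auto &N : FlagNames)                 // -fno-builtin-<name>
    Out.push_back("no-builtin-" + N);
  if (std::find(AttrNames.begin(), AttrNames.end(), "*") != AttrNames.end()) {
    Out.push_back("no-builtins");                 // wildcard in the attribute
    return Out;
  }
  for (const auto &N : AttrNames)                 // named builtins from the attribute
    Out.push_back("no-builtin-" + N);
  return Out;
}
// noBuiltinStrings(false, {"memcpy"}, {"memset"}) yields
// {"no-builtin-memcpy", "no-builtin-memset"}.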
+
+/// Construct the IR attribute list of a function or call.
+///
+/// When adding an attribute, please consider where it should be handled:
+///
+/// - getDefaultFunctionAttributes is for attributes that are essentially
+/// part of the global target configuration (but perhaps can be
+/// overridden on a per-function basis). Adding attributes there
+/// will cause them to also be set in frontends that build on Clang's
+/// target-configuration logic, as well as for code defined in library
+/// modules such as CUDA's libdevice.
+///
+/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
+/// and adds declaration-specific, convention-specific, and
+/// frontend-specific logic. The last is of particular importance:
+/// attributes that restrict how the frontend generates code must be
+/// added here rather than getDefaultFunctionAttributes.
+///
void CodeGenModule::ConstructAttributeList(
StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
+ // Collect function IR attributes from the CC lowering.
+  // We'll collect the parameter and result attributes later.
CallingConv = FI.getEffectiveCallingConvention();
if (FI.isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+ if (FI.isCmseNSCall())
+ FuncAttrs.addAttribute("cmse_nonsecure_call");
- // If we have information about the function prototype, we can learn
- // attributes from there.
+ // Collect function IR attributes from the callee prototype if we have one.
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
CalleeInfo.getCalleeFunctionProtoType());
const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
bool HasOptnone = false;
+ // The NoBuiltinAttr attached to the target FunctionDecl.
+ const NoBuiltinAttr *NBA = nullptr;
+
+ // Collect function IR attributes based on declaration-specific
+ // information.
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
@@ -1869,6 +1937,13 @@ void CodeGenModule::ConstructAttributeList(
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
+ if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
+ // A sane operator new returns a non-aliasing pointer.
+ auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
+ if (getCodeGenOpts().AssumeSaneOperatorNew &&
+ (Kind == OO_New || Kind == OO_Array_New))
+ RetAttrs.addAttribute(llvm::Attribute::NoAlias);
+ }
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
const bool IsVirtualCall = MD && MD->isVirtual();
// Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
@@ -1876,22 +1951,7 @@ void CodeGenModule::ConstructAttributeList(
if (!(AttrOnCallSite && IsVirtualCall)) {
if (Fn->isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
-
- const auto *NBA = Fn->getAttr<NoBuiltinAttr>();
- bool HasWildcard = NBA && llvm::is_contained(NBA->builtinNames(), "*");
- if (getLangOpts().NoBuiltin || HasWildcard)
- FuncAttrs.addAttribute("no-builtins");
- else {
- auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
- SmallString<32> AttributeName;
- AttributeName += "no-builtin-";
- AttributeName += BuiltinName;
- FuncAttrs.addAttribute(AttributeName);
- };
- llvm::for_each(getLangOpts().NoBuiltinFuncs, AddNoBuiltinAttr);
- if (NBA)
- llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
- }
+ NBA = Fn->getAttr<NoBuiltinAttr>();
}
}
@@ -1924,70 +1984,93 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
NumElemsParam);
}
+
+ if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
+ if (getLangOpts().OpenCLVersion <= 120) {
+ // OpenCL v1.2 Work groups are always uniform
+ FuncAttrs.addAttribute("uniform-work-group-size", "true");
+ } else {
+        // OpenCL v2.0 work groups may or may not be uniform. The
+        // '-cl-uniform-work-group-size' compile option hints to the
+        // compiler that the global work-size is a multiple of the
+        // work-group size specified to clEnqueueNDRangeKernel
+        // (i.e. work groups are uniform).
+ FuncAttrs.addAttribute("uniform-work-group-size",
+ llvm::toStringRef(CodeGenOpts.UniformWGSize));
+ }
+ }
}
- ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
+ // Attach "no-builtins" attributes to:
+ // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
+ // * definitions: "no-builtins" or "no-builtin-<name>" only.
+ // The attributes can come from:
+ // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
+ // * FunctionDecl attributes: __attribute__((no_builtin(...)))
+ addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
+
+  // Collect function IR attributes based on global settings.
+ getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
- // This must run after constructing the default function attribute list
- // to ensure that the speculative load hardening attribute is removed
- // in the case where the -mspeculative-load-hardening flag was passed.
+ // Override some default IR attributes based on declaration-specific
+ // information.
if (TargetDecl) {
if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
- }
-
- if (CodeGenOpts.EnableSegmentedStacks &&
- !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
- FuncAttrs.addAttribute("split-stack");
-
- // Add NonLazyBind attribute to function declarations when -fno-plt
- // is used.
- if (TargetDecl && CodeGenOpts.NoPLT) {
- if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
- if (!Fn->isDefined() && !AttrOnCallSite) {
- FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
+ if (TargetDecl->hasAttr<NoSplitStackAttr>())
+ FuncAttrs.removeAttribute("split-stack");
+
+ // Add NonLazyBind attribute to function declarations when -fno-plt
+ // is used.
+ // FIXME: what if we just haven't processed the function definition
+ // yet, or if it's an external definition like C99 inline?
+ if (CodeGenOpts.NoPLT) {
+ if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ if (!Fn->isDefined() && !AttrOnCallSite) {
+ FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
+ }
}
}
}
- if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
- if (getLangOpts().OpenCLVersion <= 120) {
- // OpenCL v1.2 Work groups are always uniform
- FuncAttrs.addAttribute("uniform-work-group-size", "true");
- } else {
- // OpenCL v2.0 Work groups may be whether uniform or not.
- // '-cl-uniform-work-group-size' compile option gets a hint
- // to the compiler that the global work-size be a multiple of
- // the work-group size specified to clEnqueueNDRangeKernel
- // (i.e. work groups are uniform).
- FuncAttrs.addAttribute("uniform-work-group-size",
- llvm::toStringRef(CodeGenOpts.UniformWGSize));
- }
- }
-
+ // Collect non-call-site function IR attributes from declaration-specific
+ // information.
if (!AttrOnCallSite) {
- bool DisableTailCalls = false;
+ if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
+ FuncAttrs.addAttribute("cmse_nonsecure_entry");
+
+    // Whether tail calls should be disabled.
+ auto shouldDisableTailCalls = [&] {
+ // Should this be honored in getDefaultFunctionAttributes?
+ if (CodeGenOpts.DisableTailCalls)
+ return true;
+
+ if (!TargetDecl)
+ return false;
- if (CodeGenOpts.DisableTailCalls)
- DisableTailCalls = true;
- else if (TargetDecl) {
if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
TargetDecl->hasAttr<AnyX86InterruptAttr>())
- DisableTailCalls = true;
- else if (CodeGenOpts.NoEscapingBlockTailCalls) {
+ return true;
+
+ if (CodeGenOpts.NoEscapingBlockTailCalls) {
if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
if (!BD->doesNotEscape())
- DisableTailCalls = true;
+ return true;
}
- }
+ return false;
+ };
FuncAttrs.addAttribute("disable-tail-calls",
- llvm::toStringRef(DisableTailCalls));
+ llvm::toStringRef(shouldDisableTailCalls()));
+
+ // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
+ // handles these separately to set them based on the global defaults.
GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
}
+ // Collect attributes from arguments and return values.
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
QualType RetTy = FI.getReturnType();
@@ -2024,11 +2107,16 @@ void CodeGenModule::ConstructAttributeList(
if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
- RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
- .getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0 &&
- !CodeGenOpts.NullPointerIsValid)
+ RetAttrs.addDereferenceableAttr(
+ getMinimumObjectSize(PTy).getQuantity());
+ if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
+ if (PTy->isObjectType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
+ RetAttrs.addAlignmentAttr(Alignment);
+ }
}
bool hasUsedSRet = false;
@@ -2041,6 +2129,7 @@ void CodeGenModule::ConstructAttributeList(
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
}
@@ -2134,11 +2223,16 @@ void CodeGenModule::ConstructAttributeList(
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
- Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
- .getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0 &&
- !CodeGenOpts.NullPointerIsValid)
+ Attrs.addDereferenceableAttr(
+ getMinimumObjectSize(PTy).getQuantity());
+ if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
Attrs.addAttribute(llvm::Attribute::NonNull);
+ if (PTy->isObjectType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
+ Attrs.addAlignmentAttr(Alignment);
+ }
}
switch (FI.getExtParameterInfo(ArgNo).getABI()) {
@@ -2161,8 +2255,7 @@ void CodeGenModule::ConstructAttributeList(
if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
auto info = getContext().getTypeInfoInChars(PTy);
Attrs.addDereferenceableAttr(info.first.getQuantity());
- Attrs.addAttribute(llvm::Attribute::getWithAlignment(
- getLLVMContext(), info.second.getAsAlign()));
+ Attrs.addAlignmentAttr(info.second.getAsAlign());
}
break;
}
@@ -2278,19 +2371,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// simplify.
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
- // Flattened function arguments.
- SmallVector<llvm::Value *, 16> FnArgs;
- FnArgs.reserve(IRFunctionArgs.totalIRArgs());
- for (auto &Arg : Fn->args()) {
- FnArgs.push_back(&Arg);
- }
- assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
+ assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
+ ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
FI.getArgStructAlignment());
assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
@@ -2298,7 +2385,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
- auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
+ auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
AI->setName("agg.result");
AI->addAttr(llvm::Attribute::NoAlias);
}
@@ -2340,13 +2427,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
auto FieldIndex = ArgI.getInAllocaFieldIndex();
Address V =
Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
+ if (ArgI.getInAllocaIndirect())
+ V = Address(Builder.CreateLoad(V),
+ getContext().getTypeAlignInChars(Ty));
ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
+ Address ParamAddr =
+ Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -2361,10 +2452,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// FIXME: We should have a common utility for generating an aggregate
// copy.
CharUnits Size = getContext().getTypeSizeInChars(Ty);
- auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
- Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
- Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
- Builder.CreateMemCpy(Dst, Src, SizeVal, false);
+ Builder.CreateMemCpy(
+ AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
+ ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
V = AlignedTemp;
}
ArgVals.push_back(ParamValue::forIndirect(V));
@@ -2382,16 +2473,18 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
-
- // If we have the trivial case, handle it with no muss and fuss.
- if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
- ArgI.getCoerceToType() == ConvertType(Ty) &&
- ArgI.getDirectOffset() == 0) {
+ auto AI = Fn->getArg(FirstIRArg);
+ llvm::Type *LTy = ConvertType(Arg->getType());
+
+ // Prepare parameter attributes. So far, only attributes for pointer
+ // parameters are prepared. See
+ // http://llvm.org/docs/LangRef.html#paramattrs.
+ if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
+ ArgI.getCoerceToType()->isPointerTy()) {
assert(NumIRArgs == 1);
- llvm::Value *V = FnArgs[FirstIRArg];
- auto AI = cast<llvm::Argument>(V);
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
+ // Set `nonnull` attribute if any.
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
PVD->getFunctionScopeIndex()) &&
!CGM.getCodeGenOpts().NullPointerIsValid)
@@ -2411,9 +2504,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArrSize) {
llvm::AttrBuilder Attrs;
Attrs.addDereferenceableAttr(
- getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
+ getContext().getTypeSizeInChars(ETy).getQuantity() *
+ ArrSize);
AI->addAttrs(Attrs);
- } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
+ } else if (getContext().getTargetInfo().getNullPointerValue(
+ ETy.getAddressSpace()) == 0 &&
!CGM.getCodeGenOpts().NullPointerIsValid) {
AI->addAttr(llvm::Attribute::NonNull);
}
@@ -2429,6 +2524,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->addAttr(llvm::Attribute::NonNull);
}
+ // Set `align` attribute if any.
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
if (!AVAttr)
if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
@@ -2437,21 +2533,33 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If alignment-assumption sanitizer is enabled, we do *not* add
// alignment attribute here, but emit normal alignment assumption,
// so the UBSAN check could function.
- llvm::Value *AlignmentValue =
- EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI =
- cast<llvm::ConstantInt>(AlignmentValue);
- unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
- +llvm::Value::MaximumAlignment);
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
+ unsigned AlignmentInt =
+ AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
+ if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
+ AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
+ AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
+ llvm::Align(AlignmentInt)));
+ }
}
}
+ // Set 'noalias' if an argument type has the `restrict` qualifier.
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
+ }
+
+ // Prepare the argument value. If we have the trivial case, handle it
+ // with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
// LLVM expects swifterror parameters to be used in very restricted
// ways. Copy the value into a less-restricted temporary.
+ llvm::Value *V = AI;
if (FI.getExtParameterInfo(ArgNo).getABI()
== ParameterABI::SwiftErrorResult) {
QualType pointeeTy = Ty->getPointeeType();
@@ -2513,7 +2621,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
+ auto AI = Fn->getArg(FirstIRArg + i);
AI->setName(Arg->getName() + ".coerce" + Twine(i));
Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
Builder.CreateStore(AI, EltPtr);
@@ -2526,7 +2634,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
- auto AI = FnArgs[FirstIRArg];
+ auto AI = Fn->getArg(FirstIRArg);
AI->setName(Arg->getName() + ".coerce");
CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
}
@@ -2559,7 +2667,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
continue;
auto eltAddr = Builder.CreateStructGEP(alloca, i);
- auto elt = FnArgs[argIndex++];
+ auto elt = Fn->getArg(argIndex++);
Builder.CreateStore(elt, eltAddr);
}
assert(argIndex == FirstIRArg + NumIRArgs);
@@ -2574,11 +2682,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
LValue LV = MakeAddrLValue(Alloca, Ty);
ArgVals.push_back(ParamValue::forIndirect(Alloca));
- auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ auto FnArgIter = Fn->arg_begin() + FirstIRArg;
ExpandTypeFromArgs(Ty, LV, FnArgIter);
- assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
+ auto AI = Fn->getArg(FirstIRArg + i);
AI->setName(Arg->getName() + "." + Twine(i));
}
break;
@@ -2655,10 +2763,10 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
bool doRetainAutorelease;
- if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
+ if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
doRetainAutorelease = true;
- } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
- .objc_retainAutoreleasedReturnValue) {
+ } else if (call->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
// If we emitted an assembly marker for this call (and the
@@ -2674,8 +2782,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
assert(prev);
}
assert(isa<llvm::CallInst>(prev));
- assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
- CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
+ assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
InstsToKill.push_back(prev);
}
} else {
@@ -2718,8 +2826,8 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
// Look for a retain call.
llvm::CallInst *retainCall =
dyn_cast<llvm::CallInst>(result->stripPointerCasts());
- if (!retainCall ||
- retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
+ if (!retainCall || retainCall->getCalledOperand() !=
+ CGF.CGM.getObjCEntrypoints().objc_retain)
return nullptr;
// Look for an ordinary load of 'self'.
@@ -2825,6 +2933,199 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return store;
}
+// Helper functions for EmitCMSEClearRecord
+
+// Set the bits corresponding to a field having width `BitWidth` and located at
+// offset `BitOffset` (from the least significant bit) within a storage unit of
+// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
+// Use little-endian layout, i.e.`Bits[0]` is the LSB.
+static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
+ int BitWidth, int CharWidth) {
+ assert(CharWidth <= 64);
+ assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
+
+ int Pos = 0;
+ if (BitOffset >= CharWidth) {
+ Pos += BitOffset / CharWidth;
+ BitOffset = BitOffset % CharWidth;
+ }
+
+ const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
+ if (BitOffset + BitWidth >= CharWidth) {
+ Bits[Pos++] |= (Used << BitOffset) & Used;
+ BitWidth -= CharWidth - BitOffset;
+ BitOffset = 0;
+ }
+
+ while (BitWidth >= CharWidth) {
+ Bits[Pos++] = Used;
+ BitWidth -= CharWidth;
+ }
+
+ if (BitWidth > 0)
+ Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
+}
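A quick worked check of the helper above (hand-evaluated, not taken from the patch): with CharWidth = 8, a field at BitOffset = 3 with BitWidth = 10 covers bits 3-7 of byte 0 and bits 0-4 of byte 1, so the two byte masks come out as 0xF8 and 0x1F.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Hand-evaluated setBitRange(Bits, /*BitOffset=*/3, /*BitWidth=*/10,
  // /*CharWidth=*/8) on a two-byte storage unit; Bits[0] is the LSB.
  std::vector<uint64_t> Bits(2, 0);
  Bits[0] |= (0xFFu << 3) & 0xFFu; // first partial byte: bits 3..7 -> 0xF8
  Bits[1] |= 0xFFu >> (8 - 5);     // remaining five bits: bits 0..4 -> 0x1F
  assert(Bits[0] == 0xF8 && Bits[1] == 0x1F);
  return 0;
}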

+
+// Set the bits corresponding to a field having width `BitWidth` and located at
+// offset `BitOffset` (from the least significant bit) within a storage unit of
+// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
+// `Bits` corresponds to one target byte. Use target endian layout.
+static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
+ int StorageSize, int BitOffset, int BitWidth,
+ int CharWidth, bool BigEndian) {
+
+ SmallVector<uint64_t, 8> TmpBits(StorageSize);
+ setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
+
+ if (BigEndian)
+ std::reverse(TmpBits.begin(), TmpBits.end());
+
+ for (uint64_t V : TmpBits)
+ Bits[StorageOffset++] |= V;
+}
+
+static void setUsedBits(CodeGenModule &, QualType, int,
+ SmallVectorImpl<uint64_t> &);
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the actual members of the record type `RTy`. Note that this function does
+// not handle base classes, virtual tables, etc, since they cannot happen in
+// CMSE function arguments or return. The bit mask corresponds to the target
+// memory layout, i.e. it's endian dependent.
+static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
+ SmallVectorImpl<uint64_t> &Bits) {
+ ASTContext &Context = CGM.getContext();
+ int CharWidth = Context.getCharWidth();
+ const RecordDecl *RD = RTy->getDecl()->getDefinition();
+ const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
+ const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
+
+ int Idx = 0;
+ for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
+ const FieldDecl *F = *I;
+
+ if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
+ F->getType()->isIncompleteArrayType())
+ continue;
+
+ if (F->isBitField()) {
+ const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
+ setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
+ BFI.StorageSize / CharWidth, BFI.Offset,
+ BFI.Size, CharWidth,
+ CGM.getDataLayout().isBigEndian());
+ continue;
+ }
+
+ setUsedBits(CGM, F->getType(),
+ Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
+ }
+}
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the elements of an array type `ATy`.
+static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
+ int Offset, SmallVectorImpl<uint64_t> &Bits) {
+ const ASTContext &Context = CGM.getContext();
+
+ QualType ETy = Context.getBaseElementType(ATy);
+ int Size = Context.getTypeSizeInChars(ETy).getQuantity();
+ SmallVector<uint64_t, 4> TmpBits(Size);
+ setUsedBits(CGM, ETy, 0, TmpBits);
+
+ for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
+ auto Src = TmpBits.begin();
+ auto Dst = Bits.begin() + Offset + I * Size;
+ for (int J = 0; J < Size; ++J)
+ *Dst++ |= *Src++;
+ }
+}
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the type `QTy`.
+static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
+ SmallVectorImpl<uint64_t> &Bits) {
+ if (const auto *RTy = QTy->getAs<RecordType>())
+ return setUsedBits(CGM, RTy, Offset, Bits);
+
+ ASTContext &Context = CGM.getContext();
+ if (const auto *ATy = Context.getAsConstantArrayType(QTy))
+ return setUsedBits(CGM, ATy, Offset, Bits);
+
+ int Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ if (Size <= 0)
+ return;
+
+ std::fill_n(Bits.begin() + Offset, Size,
+ (uint64_t(1) << Context.getCharWidth()) - 1);
+}
+
+static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
+ int Pos, int Size, int CharWidth,
+ bool BigEndian) {
+ assert(Size > 0);
+ uint64_t Mask = 0;
+ if (BigEndian) {
+ for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
+ ++P)
+ Mask = (Mask << CharWidth) | *P;
+ } else {
+ auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
+ do
+ Mask = (Mask << CharWidth) | *--P;
+ while (P != End);
+ }
+ return Mask;
+}
+
+// Emit code to clear the bits in a record that aren't part of any
+// user-declared member, when the record is a function return value.
+llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
+ llvm::IntegerType *ITy,
+ QualType QTy) {
+ assert(Src->getType() == ITy);
+ assert(ITy->getScalarSizeInBits() <= 64);
+
+ const llvm::DataLayout &DataLayout = CGM.getDataLayout();
+ int Size = DataLayout.getTypeStoreSize(ITy);
+ SmallVector<uint64_t, 4> Bits(Size);
+ setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+
+ int CharWidth = CGM.getContext().getCharWidth();
+ uint64_t Mask =
+ buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
+
+ return Builder.CreateAnd(Src, Mask, "cmse.clear");
+}
+
+// Emit code to clear the bits in a record that aren't part of any
+// user-declared member, when the record is a function argument.
+llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
+ llvm::ArrayType *ATy,
+ QualType QTy) {
+ const llvm::DataLayout &DataLayout = CGM.getDataLayout();
+ int Size = DataLayout.getTypeStoreSize(ATy);
+ SmallVector<uint64_t, 16> Bits(Size);
+ setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+
+ // Clear each element of the LLVM array.
+ int CharWidth = CGM.getContext().getCharWidth();
+ int CharsPerElt =
+ ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
+ int MaskIndex = 0;
+ llvm::Value *R = llvm::UndefValue::get(ATy);
+ for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
+ uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
+ DataLayout.isBigEndian());
+ MaskIndex += CharsPerElt;
+ llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
+ llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
+ R = Builder.CreateInsertValue(R, T1, I);
+ }
+
+ return R;
+}
+
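To make the two EmitCMSEClearRecord overloads concrete, here is a hypothetical Armv8-M example (the function name is invented; building it assumes an Armv8-M target compiled with -mcmse, and the exact IR depends on the ABI lowering): a 4-byte struct with one padding byte is returned coerced to i32, and the epilog then masks it with roughly `and i32 %ret, 0xFFFF00FF` on a little-endian target, zeroing the padding byte.

// Hypothetical translation unit; assumes an Armv8-M target built with -mcmse.
struct S {
  char c;   // byte 0
            // byte 1 is padding
  short s;  // bytes 2-3
};          // bytes covered by named members: 0, 2, 3 -> mask 0xFFFF00FF

__attribute__((cmse_nonsecure_entry)) struct S get_secure_value(void) {
  struct S r = {1, 2};
  return r; // EmitCMSEClearRecord clears the padding byte before the return
}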
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
@@ -2991,6 +3292,14 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm::Instruction *Ret;
if (RV) {
+ if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
+ // For certain return types, clear padding bits, as they may reveal
+ // sensitive information.
+ // Small struct/union types are passed as integers.
+ auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
+ if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
+ RV = EmitCMSEClearRecord(RV, ITy, RetTy);
+ }
EmitReturnValueCheck(RV);
Ret = Builder.CreateRet(RV);
} else {
@@ -3006,6 +3315,11 @@ void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
if (!CurCodeDecl)
return;
+ // If the return block isn't reachable, neither is this check, so don't emit
+ // it.
+ if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
+ return;
+
ReturnsNonNullAttr *RetNNAttr = nullptr;
if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
@@ -3026,7 +3340,7 @@ void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
} else {
if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
if (auto *TSI = DD->getTypeSourceInfo())
- if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
+ if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
CheckKind = SanitizerKind::NullabilityReturn;
Handler = SanitizerHandler::NullabilityReturn;
@@ -3811,6 +4125,110 @@ void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
DeferredReplacements.push_back(std::make_pair(Old, New));
}
+namespace {
+
+/// Specify given \p NewAlign as the alignment of return value attribute. If
+/// such attribute already exists, re-set it to the maximal one of two options.
+LLVM_NODISCARD llvm::AttributeList
+maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
+ const llvm::AttributeList &Attrs,
+ llvm::Align NewAlign) {
+ llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
+ if (CurAlign >= NewAlign)
+ return Attrs;
+ llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
+ return Attrs
+ .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
+ llvm::Attribute::AttrKind::Alignment)
+ .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
+}
+
+template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
+protected:
+ CodeGenFunction &CGF;
+
+ /// We do nothing if this is, or becomes, nullptr.
+ const AlignedAttrTy *AA = nullptr;
+
+ llvm::Value *Alignment = nullptr; // May or may not be a constant.
+ llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
+
+ AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
+ : CGF(CGF_) {
+ if (!FuncDecl)
+ return;
+ AA = FuncDecl->getAttr<AlignedAttrTy>();
+ }
+
+public:
+ /// If we can, materialize the alignment as an attribute on return value.
+ LLVM_NODISCARD llvm::AttributeList
+ TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
+ if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
+ return Attrs;
+ const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
+ if (!AlignmentCI)
+ return Attrs;
+ // We may legitimately have non-power-of-2 alignment here.
+ // If so, this is UB land, emit it via `@llvm.assume` instead.
+ if (!AlignmentCI->getValue().isPowerOf2())
+ return Attrs;
+ llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
+ CGF.getLLVMContext(), Attrs,
+ llvm::Align(
+ AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
+ AA = nullptr; // We're done. Disallow doing anything else.
+ return NewAttrs;
+ }
+
+ /// Emit alignment assumption.
+ /// This is a general fallback that we take if either there is an offset,
+ /// or the alignment is variable or we are sanitizing for alignment.
+ void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
+ if (!AA)
+ return;
+ CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
+ AA->getLocation(), Alignment, OffsetCI);
+ AA = nullptr; // We're done. Disallow doing anything else.
+ }
+};
+
+/// Helper data structure to emit `AssumeAlignedAttr`.
+class AssumeAlignedAttrEmitter final
+ : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
+public:
+ AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
+ : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
+ if (!AA)
+ return;
+ // It is guaranteed that the alignment/offset are constants.
+ Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
+ if (Expr *Offset = AA->getOffset()) {
+ OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
+ if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
+ OffsetCI = nullptr;
+ }
+ }
+};
+
+/// Helper data structure to emit `AllocAlignAttr`.
+class AllocAlignAttrEmitter final
+ : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
+public:
+ AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
+ const CallArgList &CallArgs)
+ : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
+ if (!AA)
+ return;
+ // Alignment may or may not be a constant, and that is okay.
+ Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
+ .getRValue(CGF)
+ .getScalarVal();
+ }
+};
+
+} // namespace
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
@@ -3829,7 +4247,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
// We can only guarantee that a function is called from the correct
// context/function based on the appropriate target attributes,
// so only check in the case where we have both always_inline and target
@@ -3840,6 +4258,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
TargetDecl->hasAttr<TargetAttr>())
checkTargetFeatures(Loc, FD);
+ // Some architectures (such as x86-64) have the ABI changed based on
+ // attribute-target/features. Give them a chance to diagnose.
+ CGM.getTargetCodeGenInfo().checkFunctionCallABI(
+ CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
+ }
+
#ifndef NDEBUG
if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
// For an inalloca varargs function, we don't expect CallInfo to match the
@@ -3940,18 +4364,39 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
- // Replace the placeholder with the appropriate argument slot GEP.
Address Addr = I->hasLValue()
? I->getKnownLValue().getAddress(*this)
: I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
- CGBuilderTy::InsertPoint IP = Builder.saveIP();
- Builder.SetInsertPoint(Placeholder);
- Addr =
- Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
- Builder.restoreIP(IP);
+
+ if (!ArgInfo.getInAllocaIndirect()) {
+ // Replace the placeholder with the appropriate argument slot GEP.
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(Placeholder);
+ Addr = Builder.CreateStructGEP(ArgMemory,
+ ArgInfo.getInAllocaFieldIndex());
+ Builder.restoreIP(IP);
+ } else {
+ // For indirect things such as overaligned structs, replace the
+ // placeholder with a regular aggregate temporary alloca. Store the
+ // address of this alloca into the struct.
+ Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
+ Address ArgSlot = Builder.CreateStructGEP(
+ ArgMemory, ArgInfo.getInAllocaFieldIndex());
+ Builder.CreateStore(Addr.getPointer(), ArgSlot);
+ }
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
+ } else if (ArgInfo.getInAllocaIndirect()) {
+ // Make a temporary alloca and store the address of it into the argument
+ // struct.
+ Address Addr = CreateMemTempWithoutCast(
+ I->Ty, getContext().getTypeAlignInChars(I->Ty),
+ "indirect-arg-temp");
+ I->copyInto(*this, Addr);
+ Address ArgSlot =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
+ Builder.CreateStore(Addr.getPointer(), ArgSlot);
} else {
// Store the RValue into the argument struct.
Address Addr =
@@ -4001,8 +4446,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
bool NeedCopy = false;
if (Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
- Align.getQuantity()) {
+ llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
+ Align.getAsAlign()) {
NeedCopy = true;
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
@@ -4128,7 +4573,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy = Src.getType()->getElementType();
+ llvm::Type *SrcTy = Src.getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
@@ -4156,8 +4601,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
// In the simple case, just pass the coerced loaded value.
assert(NumIRArgs == 1);
- IRCallArgs[FirstIRArg] =
- CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+ llvm::Value *Load =
+ CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+
+ if (CallInfo.isCmseNSCall()) {
+ // For certain parameter types, clear padding bits, as they may reveal
+ // sensitive information.
+ // Small struct/union types are passed as integer arrays.
+ auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
+ if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
+ Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
+ }
+ IRCallArgs[FirstIRArg] = Load;
}
break;
@@ -4328,8 +4783,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Update the largest vector width if any arguments have vector types.
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
}
// Compute the calling convention and attributes.
@@ -4346,6 +4802,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::StrictFP);
+  // Add call-site nomerge attribute if it exists.
+ if (InNoMergeAttributedStmt)
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoMerge);
+
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
@@ -4378,8 +4840,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CannotThrow = true;
} else {
// Otherwise, nounwind call sites will never throw.
- CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoUnwind);
+ CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
}
// If we made a temporary, be sure to clean up after ourselves. Note that we
@@ -4402,6 +4863,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::StrictFP);
+ AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
+ Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
+
+ AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
+ Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
+
// Emit the actual call/invoke instruction.
llvm::CallBase *CI;
if (!InvokeDest) {
@@ -4437,8 +4904,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Update largest vector width from the return type.
if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
@@ -4461,7 +4929,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Add metadata for calls to MSAllocator functions
if (getDebugInfo() && TargetDecl &&
TargetDecl->hasAttr<MSAllocatorAttr>())
- getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
+ getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
// 4. Finish the call.
@@ -4581,7 +5049,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
+ EmitAggregateStore(CI, DestPtr, DestIsVolatile);
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar: {
@@ -4620,22 +5088,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Emit the assume_aligned check on the return value.
if (Ret.isScalar() && TargetDecl) {
- if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
- llvm::Value *OffsetValue = nullptr;
- if (const auto *Offset = AA->getOffset())
- OffsetValue = EmitScalarExpr(Offset);
-
- llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
- llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
- EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
- AlignmentCI, OffsetValue);
- } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
- llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
- .getRValue(*this)
- .getScalarVal();
- EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
- AlignmentVal);
- }
+ AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
+ AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
}
// Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
@@ -4643,6 +5097,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
LifetimeEnd.Emit(*this, /*Flags=*/{});
+ if (!ReturnValue.isExternallyDestructed() &&
+ RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
+ pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
+ RetTy);
+
return Ret;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index 34558be5adb1..509ca43a9784 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -16,6 +16,7 @@
#include "CGValue.h"
#include "EHScopeStack.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
@@ -357,27 +358,26 @@ class FunctionArgList : public SmallVector<const VarDecl *, 16> {};
/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
- llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
- CharUnits Alignment;
+ Address Addr = Address::invalid();
// Return value slot flags
- enum Flags {
- IS_VOLATILE = 0x1,
- IS_UNUSED = 0x2,
- };
+ unsigned IsVolatile : 1;
+ unsigned IsUnused : 1;
+ unsigned IsExternallyDestructed : 1;
public:
- ReturnValueSlot() {}
- ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
- : Value(Addr.isValid() ? Addr.getPointer() : nullptr,
- (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
- Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
-
- bool isNull() const { return !getValue().isValid(); }
-
- bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
- Address getValue() const { return Address(Value.getPointer(), Alignment); }
- bool isUnused() const { return Value.getInt() & IS_UNUSED; }
+ ReturnValueSlot()
+ : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
+ ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
+ bool IsExternallyDestructed = false)
+ : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
+ IsExternallyDestructed(IsExternallyDestructed) {}
+
+ bool isNull() const { return !Addr.isValid(); }
+ bool isVolatile() const { return IsVolatile; }
+ Address getValue() const { return Addr; }
+ bool isUnused() const { return IsUnused; }
+ bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
} // end namespace CodeGen
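An aside on the ReturnValueSlot change above: the old representation packed its two flags into the spare low bits of a PointerIntPair, which presumably left no room for a third flag; storing an Address plus one-bit bit-fields makes the new IsExternallyDestructed bit cheap to add. A self-contained stand-in (invented types, not clang's) showing the shape of the new layout:

#include <cassert>

// Stand-in for the refactored ReturnValueSlot: flags live in bit-fields
// rather than in pointer low bits, so adding a flag is trivial.
struct SlotSketch {
  void *Addr = nullptr; // stand-in for clang::CodeGen::Address
  unsigned IsVolatile : 1;
  unsigned IsUnused : 1;
  unsigned IsExternallyDestructed : 1;

  SlotSketch() : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
  SlotSketch(void *A, bool V, bool U = false, bool E = false)
      : Addr(A), IsVolatile(V), IsUnused(U), IsExternallyDestructed(E) {}
};

int main() {
  int Ret = 0;
  SlotSketch S(&Ret, /*IsVolatile=*/false, /*IsUnused=*/false,
               /*IsExternallyDestructed=*/true);
  assert(S.IsExternallyDestructed && !S.IsVolatile && !S.IsUnused);
  return 0;
}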
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 9963926b9557..4d143e3e1bdf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -35,20 +35,37 @@ using namespace CodeGen;
/// Return the best known alignment for an unknown pointer to a
/// particular class.
CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
- if (!RD->isCompleteDefinition())
+ if (!RD->hasDefinition())
return CharUnits::One(); // Hopefully won't be used anywhere.
auto &layout = getContext().getASTRecordLayout(RD);
// If the class is final, then we know that the pointer points to an
// object of that type and can use the full alignment.
- if (RD->hasAttr<FinalAttr>()) {
+ if (RD->isEffectivelyFinal())
return layout.getAlignment();
// Otherwise, we have to assume it could be a subclass.
- } else {
- return layout.getNonVirtualAlignment();
- }
+ return layout.getNonVirtualAlignment();
+}
+
+/// Return the smallest possible amount of storage that might be allocated
+/// starting from the beginning of an object of a particular class.
+///
+/// This may be smaller than sizeof(RD) if RD has virtual base classes.
+CharUnits CodeGenModule::getMinimumClassObjectSize(const CXXRecordDecl *RD) {
+ if (!RD->hasDefinition())
+ return CharUnits::One();
+
+ auto &layout = getContext().getASTRecordLayout(RD);
+
+ // If the class is final, then we know that the pointer points to an
+ // object of that type and can use the full alignment.
+ if (RD->isEffectivelyFinal())
+ return layout.getSize();
+
+ // Otherwise, we have to assume it could be a subclass.
+ return std::max(layout.getNonVirtualSize(), CharUnits::One());
}
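A short illustration of why the minimum object size can be smaller than sizeof (standalone example, not from the patch; exact sizes depend on the ABI): when a class has a virtual base, a pointer to a non-final class may really point at a base subobject of a further-derived class, and the bytes where the virtual base would live in a standalone object belong to the derived class instead.

#include <cstdio>

struct V { long v; };
struct B : virtual V { char b; }; // sizeof(B) includes V and the vtable pointer
struct D : B { char d; };         // in a D, the bytes after B's non-virtual part
                                  // hold D::d and V, not a V owned by B

int main() {
  // sizeof(B) is larger than B's non-virtual size; for a non-final class,
  // getMinimumClassObjectSize conservatively returns the latter (clamped to
  // at least one byte).
  std::printf("sizeof(B)=%zu sizeof(D)=%zu\n", sizeof(B), sizeof(D));
  return 0;
}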
/// Return the best known alignment for a pointer to a virtual base,
@@ -138,8 +155,8 @@ CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
memberPtr, memberPtrType);
QualType memberType = memberPtrType->getPointeeType();
- CharUnits memberAlign = getNaturalTypeAlignment(memberType, BaseInfo,
- TBAAInfo);
+ CharUnits memberAlign =
+ CGM.getNaturalTypeAlignment(memberType, BaseInfo, TBAAInfo);
memberAlign =
CGM.getDynamicOffsetAlignment(base.getAlignment(),
memberPtrType->getClass()->getAsCXXRecordDecl(),
@@ -236,8 +253,13 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
// Compute the offset from the static and dynamic components.
llvm::Value *baseOffset;
if (!nonVirtualOffset.isZero()) {
- baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
- nonVirtualOffset.getQuantity());
+ llvm::Type *OffsetType =
+ (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() &&
+ CGF.CGM.getItaniumVTableContext().isRelativeLayout())
+ ? CGF.Int32Ty
+ : CGF.PtrDiffTy;
+ baseOffset =
+ llvm::ConstantInt::get(OffsetType, nonVirtualOffset.getQuantity());
if (virtualOffset) {
baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
}
@@ -730,7 +752,7 @@ bool CodeGenFunction::IsConstructorDelegationValid(
// parameters
// - etc.
// If we ever add any of the above cases, remember that:
- // - function-try-blocks will always blacklist this optimization
+ // - function-try-blocks will always exclude this optimization
// - we need to perform the constructor prologue and cleanup in
// EmitConstructorBody.
@@ -2128,7 +2150,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
Address Src(Args[1].getRValue(*this).getScalarVal(),
- getNaturalTypeAlignment(SrcTy));
+ CGM.getNaturalTypeAlignment(SrcTy));
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2148,7 +2170,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
// Insert any ABI-specific implicit constructor arguments.
- CGCXXABI::AddedStructorArgs ExtraArgs =
+ CGCXXABI::AddedStructorArgCounts ExtraArgs =
CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase,
Delegating, Args);
@@ -2641,7 +2663,9 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
if (SanOpts.has(SanitizerKind::CFIVCall))
EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc);
else if (CGM.getCodeGenOpts().WholeProgramVTables &&
- CGM.HasHiddenLTOVisibility(RD)) {
+ // Don't insert type test assumes if we are forcing public std
+ // visibility.
+ !CGM.HasLTOVisibilityPublicStd(RD)) {
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Value *TypeId =
@@ -2850,7 +2874,9 @@ void CodeGenFunction::EmitForwardingCallToLambda(
if (!resultType->isVoidType() &&
calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
- returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
+ returnSlot =
+ ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(),
+ /*IsUnused=*/false, /*IsExternallyDestructed=*/true);
// We don't need to separately arrange the call arguments because
// the call can't be variadic anyway --- it's impossible to forward
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
index c117dd5c25c1..ad543ef86c1a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
@@ -179,12 +179,10 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
- bool IsActive = !(Kind & InactiveCleanup);
bool IsLifetimeMarker = Kind & LifetimeMarker;
EHCleanupScope *Scope =
new (Buffer) EHCleanupScope(IsNormalCleanup,
IsEHCleanup,
- IsActive,
Size,
BranchFixups.size(),
InnermostNormalCleanup,
@@ -309,9 +307,9 @@ static void createStoreInstBefore(llvm::Value *value, Address addr,
static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
llvm::Instruction *beforeInst) {
- auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
- load->setAlignment(addr.getAlignment().getAsAlign());
- return load;
+ return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
+ false, addr.getAlignment().getAsAlign(),
+ beforeInst);
}
/// All the branch fixups on the EH stack have propagated out past the
@@ -859,6 +857,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// TODO: base this on the number of branch-afters and fixups
const unsigned SwitchCapacity = 10;
+ // pass the abnormal exit flag to Fn (SEH cleanup)
+ cleanupFlags.setHasExitSwitch();
+
llvm::LoadInst *Load =
createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
nullptr);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index ffe0f9d9dd20..ef4f6b9ec133 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -102,7 +102,7 @@ protected:
};
public:
- enum Kind { Cleanup, Catch, Terminate, Filter, PadEnd };
+ enum Kind { Cleanup, Catch, Terminate, Filter };
EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
: CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
@@ -284,8 +284,8 @@ public:
return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
}
- EHCleanupScope(bool isNormal, bool isEH, bool isActive,
- unsigned cleanupSize, unsigned fixupDepth,
+ EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize,
+ unsigned fixupDepth,
EHScopeStack::stable_iterator enclosingNormal,
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
@@ -293,7 +293,7 @@ public:
ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
- CleanupBits.IsActive = isActive;
+ CleanupBits.IsActive = true;
CleanupBits.IsLifetimeMarker = false;
CleanupBits.TestFlagInNormalCleanup = false;
CleanupBits.TestFlagInEHCleanup = false;
@@ -487,17 +487,6 @@ public:
}
};
-class EHPadEndScope : public EHScope {
-public:
- EHPadEndScope(EHScopeStack::stable_iterator enclosingEHScope)
- : EHScope(PadEnd, enclosingEHScope) {}
- static size_t getSize() { return sizeof(EHPadEndScope); }
-
- static bool classof(const EHScope *scope) {
- return scope->getKind() == PadEnd;
- }
-};
-
/// A non-stable pointer into the scope stack.
class EHScopeStack::iterator {
char *Ptr;
@@ -535,10 +524,6 @@ public:
case EHScope::Terminate:
Size = EHTerminateScope::getSize();
break;
-
- case EHScope::PadEnd:
- Size = EHPadEndScope::getSize();
- break;
}
Ptr += llvm::alignTo(Size, ScopeStackAlignment);
return *this;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index aee5a927a055..5c57ad0685d5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -275,9 +275,9 @@ RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
++CurCoro.Data->CoreturnCount;
const Expr *RV = S.getOperand();
- if (RV && RV->getType()->isVoidType()) {
- // Make sure to evaluate the expression of a co_return with a void
- // expression for side effects.
+ if (RV && RV->getType()->isVoidType() && !isa<InitListExpr>(RV)) {
+ // Make sure to evaluate a non-init-list void expression operand of a
+ // co_return for its side effects.
RunCleanupsScope cleanupScope(*this);
EmitIgnoredExpr(RV);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index cbd524eda9d0..6965c4a1209c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -231,9 +231,16 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
// If we're emitting codeview, it's important to try to match MSVC's naming so
// that visualizers written for MSVC will trigger for our class names. In
// particular, we can't have spaces between arguments of standard templates
- // like basic_string and vector.
- if (CGM.getCodeGenOpts().EmitCodeView)
+ // like basic_string and vector, but we must have spaces between consecutive
+ // angle brackets that close nested template argument lists.
+ if (CGM.getCodeGenOpts().EmitCodeView) {
PP.MSVCFormatting = true;
+ PP.SplitTemplateClosers = true;
+ } else {
+ // For DWARF, printing rules are underspecified.
+ // SplitTemplateClosers yields better interop with GCC and GDB (PR46052).
+ PP.SplitTemplateClosers = true;
+ }
// Apply -fdebug-prefix-map.
PP.Callbacks = &PrintCB;
@@ -470,10 +477,14 @@ CGDebugInfo::createFile(StringRef FileName,
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
+ if (DebugPrefixMap.empty())
+ return Path.str();
+
+ SmallString<256> P = Path;
for (const auto &Entry : DebugPrefixMap)
- if (Path.startswith(Entry.first))
- return (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
- return Path.str();
+ if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second))
+ break;
+ return P.str().str();
}
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
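A minimal sketch of the -fdebug-prefix-map semantics that remapDIPath above implements, in plain C++ rather than the llvm::sys::path API: the first matching prefix wins and only the prefix is replaced. This sketch compares raw characters; the real replace_path_prefix is path-separator aware.

  #include <string>
  #include <utility>
  #include <vector>

  std::string remapPath(std::string path,
                        const std::vector<std::pair<std::string, std::string>> &map) {
    for (const auto &entry : map) {
      if (path.compare(0, entry.first.size(), entry.first) == 0) {
        path.replace(0, entry.first.size(), entry.second);
        break;  // only the first matching entry applies
      }
    }
    return path;  // unchanged when no prefix matches or the map is empty
  }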
@@ -532,11 +543,12 @@ void CGDebugInfo::CreateCompileUnit() {
// file to determine the real absolute path for the file.
std::string MainFileDir;
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- MainFileDir = MainFile->getDir()->getName();
+ MainFileDir = std::string(MainFile->getDir()->getName());
if (!llvm::sys::path::is_absolute(MainFileName)) {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
llvm::sys::path::append(MainFileDirSS, MainFileName);
- MainFileName = llvm::sys::path::remove_leading_dotslash(MainFileDirSS);
+ MainFileName =
+ std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS));
}
// If the main file name provided is identical to the input file name, and
// if the input file is a preprocessed source, use the module name for
@@ -610,6 +622,16 @@ void CGDebugInfo::CreateCompileUnit() {
remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo,
getSource(SM, SM.getMainFileID()));
+ StringRef Sysroot, SDK;
+ if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB) {
+ Sysroot = CGM.getHeaderSearchOpts().Sysroot;
+ auto B = llvm::sys::path::rbegin(Sysroot);
+ auto E = llvm::sys::path::rend(Sysroot);
+ auto It = std::find_if(B, E, [](auto SDK) { return SDK.endswith(".sdk"); });
+ if (It != E)
+ SDK = *It;
+ }
+
// Create new compile unit.
TheCU = DBuilder.createCompileUnit(
LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
@@ -620,7 +642,7 @@ void CGDebugInfo::CreateCompileUnit() {
? llvm::DICompileUnit::DebugNameTableKind::None
: static_cast<llvm::DICompileUnit::DebugNameTableKind>(
CGOpts.DebugNameTable),
- CGOpts.DebugRangesBaseAddress);
+ CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
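A self-contained sketch (C++17, invented names, not the llvm::sys::path API) of the SDK-name heuristic added to CreateCompileUnit above: take the last sysroot component that ends in ".sdk".

  #include <filesystem>
  #include <string>

  std::string sdkFromSysroot(const std::filesystem::path &sysroot) {
    std::string sdk;
    for (const auto &component : sysroot) {
      const std::string name = component.string();
      if (name.size() >= 4 && name.compare(name.size() - 4, 4, ".sdk") == 0)
        sdk = name;  // keep the last match, e.g. "MacOSX11.1.sdk"
    }
    return sdk;  // empty when the sysroot contains no ".sdk" component
  }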
@@ -750,6 +772,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::Float:
case BuiltinType::LongDouble:
case BuiltinType::Float16:
+ case BuiltinType::BFloat16:
case BuiltinType::Float128:
case BuiltinType::Double:
// FIXME: For targets where long double and __float128 have the same size,
@@ -811,6 +834,21 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return DBuilder.createBasicType(BTName, Size, Encoding);
}
+llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
+ return DBuilder.createUnspecifiedType("auto");
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
+
+ StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt";
+ llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
+ ? llvm::dwarf::DW_ATE_unsigned
+ : llvm::dwarf::DW_ATE_signed;
+
+ return DBuilder.createBasicType(Name, CGM.getContext().getTypeSize(Ty),
+ Encoding);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
// Bit size and offset of the type.
llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float;
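Illustrative source (standard C++14, invented names) for the new AutoType handling above and its use in getOrCreateInstanceMethodType below: a member function declared with an undeduced auto return type gets a DW_TAG_unspecified_type named "auto" in its declaration, while the out-of-line definition carries the deduced type.

  struct S {
    auto f();  // declaration: return type is still the undeduced "auto"
  };

  auto S::f() { return 42; }  // definition: return type deduced to int

  int main() { return S{}.f() == 42 ? 0 : 1; }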
@@ -976,11 +1014,21 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
uint64_t Size = 0;
uint32_t Align = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagFwdDecl;
+
+ // Add flag to nontrivial forward declarations. To be consistent with MSVC,
+ // add the flag if a record has no definition because we don't know whether
+ // it will be trivial or not.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (!CXXRD->hasDefinition() ||
+ (CXXRD->hasDefinition() && !CXXRD->isTrivial()))
+ Flags |= llvm::DINode::FlagNonTrivial;
+
// Create the type.
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
- getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align,
- llvm::DINode::FlagFwdDecl, Identifier);
+ getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, Flags,
+ Identifier);
if (CGM.getCodeGenOpts().DebugFwdTemplateParams)
if (auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
DBuilder.replaceArrays(RetTy, llvm::DINodeArray(),
@@ -1458,16 +1506,18 @@ void CGDebugInfo::CollectRecordFields(
llvm::DISubroutineType *
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *Unit) {
+ llvm::DIFile *Unit, bool decl) {
const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
if (Method->isStatic())
return cast_or_null<llvm::DISubroutineType>(
getOrCreateType(QualType(Func, 0), Unit));
- return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit);
+ return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit, decl);
}
-llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
- QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) {
+llvm::DISubroutineType *
+CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
+ const FunctionProtoType *Func,
+ llvm::DIFile *Unit, bool decl) {
// Add "this" pointer.
llvm::DITypeRefArray Args(
cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit))
@@ -1475,9 +1525,12 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
assert(Args.size() && "Invalid number of arguments!");
SmallVector<llvm::Metadata *, 16> Elts;
-
// First element is always return type. For 'void' functions it is NULL.
- Elts.push_back(Args[0]);
+ QualType temp = Func->getReturnType();
+ if (temp->getTypeClass() == Type::Auto && decl)
+ Elts.push_back(CreateType(cast<AutoType>(temp)));
+ else
+ Elts.push_back(Args[0]);
// "this" pointer is always first argument.
const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl();
@@ -1536,7 +1589,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
StringRef MethodName = getFunctionName(Method);
- llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit);
+ llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit, true);
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
@@ -1773,18 +1826,38 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
StringRef Name;
+ bool defaultParameter = false;
if (TPList)
Name = TPList->getParam(i)->getName();
switch (TA.getKind()) {
case TemplateArgument::Type: {
llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
- TemplateParams.push_back(
- DBuilder.createTemplateTypeParameter(TheCU, Name, TTy));
+
+ if (TPList)
+ if (auto *templateType =
+ dyn_cast_or_null<TemplateTypeParmDecl>(TPList->getParam(i)))
+ if (templateType->hasDefaultArgument())
+ defaultParameter =
+ templateType->getDefaultArgument() == TA.getAsType();
+
+ TemplateParams.push_back(DBuilder.createTemplateTypeParameter(
+ TheCU, Name, TTy, defaultParameter));
+
} break;
case TemplateArgument::Integral: {
llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ if (TPList && CGM.getCodeGenOpts().DwarfVersion >= 5)
+ if (auto *templateType =
+ dyn_cast_or_null<NonTypeTemplateParmDecl>(TPList->getParam(i)))
+ if (templateType->hasDefaultArgument() &&
+ !templateType->getDefaultArgument()->isValueDependent())
+ defaultParameter = llvm::APSInt::isSameValue(
+ templateType->getDefaultArgument()->EvaluateKnownConstInt(
+ CGM.getContext()),
+ TA.getAsIntegral());
+
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy,
+ TheCU, Name, TTy, defaultParameter,
llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())));
} break;
case TemplateArgument::Declaration: {
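An illustrative template (standard C++, invented names) for the defaulted-parameter detection added above: for v below, both arguments equal their defaults, so debug info can flag T and N as defaulted (the non-type case additionally requires DWARF v5 per the check above).

  template <typename T = int, int N = 3>
  struct Vec { T data[N]; };

  Vec<>        v;  // T = int (default), N = 3 (default)
  Vec<long, 3> w;  // T = long (not the default), N = 3 (default)

  int main() { return sizeof(v) && sizeof(w) ? 0 : 1; }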
@@ -1818,12 +1891,14 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
CharUnits chars =
CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
+ } else if (const auto *GD = dyn_cast<MSGuidDecl>(D)) {
+ V = CGM.GetAddrOfMSGuidDecl(GD).getPointer();
}
assert(V && "Failed to find template parameter pointer");
V = V->stripPointerCasts();
}
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy, cast_or_null<llvm::Constant>(V)));
+ TheCU, Name, TTy, defaultParameter, cast_or_null<llvm::Constant>(V)));
} break;
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
@@ -1841,8 +1916,8 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
if (!V)
V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
- TemplateParams.push_back(
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V));
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, defaultParameter, V));
} break;
case TemplateArgument::Template:
TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
@@ -1863,7 +1938,7 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
assert(V && "Expression in template argument isn't constant");
llvm::DIType *TTy = getOrCreateType(T, Unit);
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy, V->stripPointerCasts()));
+ TheCU, Name, TTy, defaultParameter, V->stripPointerCasts()));
} break;
// And the following should never occur:
case TemplateArgument::TemplateExpansion:
@@ -2071,16 +2146,17 @@ llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
return T;
}
-void CGDebugInfo::addHeapAllocSiteMetadata(llvm::Instruction *CI,
- QualType D,
+void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
+ QualType AllocatedTy,
SourceLocation Loc) {
+ if (CGM.getCodeGenOpts().getDebugInfo() <=
+ codegenoptions::DebugLineTablesOnly)
+ return;
llvm::MDNode *node;
- if (D.getTypePtr()->isVoidPointerType()) {
+ if (AllocatedTy->isVoidType())
node = llvm::MDNode::get(CGM.getLLVMContext(), None);
- } else {
- QualType PointeeTy = D.getTypePtr()->getPointeeType();
- node = getOrCreateType(PointeeTy, getOrCreateFile(Loc));
- }
+ else
+ node = getOrCreateType(AllocatedTy, getOrCreateFile(Loc));
CI->setMetadata("heapallocsite", node);
}
@@ -2221,12 +2297,11 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// constructor is emitted. Skip this optimization if the class or any of
// its methods are marked dllimport.
if (DebugKind == codegenoptions::DebugInfoConstructor &&
- !CXXDecl->isLambda() && !isClassOrMethodDLLImport(CXXDecl)) {
- for (const auto *Ctor : CXXDecl->ctors()) {
+ !CXXDecl->isLambda() && !CXXDecl->hasConstexprNonCopyMoveConstructor() &&
+ !isClassOrMethodDLLImport(CXXDecl))
+ for (const auto *Ctor : CXXDecl->ctors())
if (Ctor->isUserProvided())
return true;
- }
- }
TemplateSpecializationKind Spec = TSK_Undeclared;
if (const auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
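Illustrative classes (invented, standard C++) for the constexpr-constructor exception added to shouldOmitDefinition above: A's debug-info definition can be deferred until its user-provided constructor is emitted, while B must be emitted eagerly because its constexpr constructor may never be code-generated.

  struct A {
    A();       // user-provided, non-constexpr: definition can be homed here
    int x;
  };

  struct B {
    constexpr B() : x(0) {}  // may be folded away, so nothing to home on
    int x;
  };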
@@ -2399,9 +2474,8 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return CreateTypeDefinition(Ty, Unit);
}
-llvm::DIModule *
-CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
- bool CreateSkeletonCU) {
+llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU) {
// Use the Module pointer as the key into the cache. This is a
// nullptr if the "Module" is a PCH, which is safe because we don't
// support chained PCH debug info, so there can only be a single PCH.
@@ -2446,32 +2520,51 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) &&
"clang module without ASTFile must be specified by -fmodule-name");
+ // Return the remapped Path, made relative to the compilation directory.
+ auto RemapPath = [this](StringRef Path) -> std::string {
+ std::string Remapped = remapDIPath(Path);
+ StringRef Relative(Remapped);
+ StringRef CompDir = TheCU->getDirectory();
+ if (Relative.consume_front(CompDir))
+ Relative.consume_front(llvm::sys::path::get_separator());
+
+ return Relative.str();
+ };
+
if (CreateSkeletonCU && IsRootModule && !Mod.getASTFile().empty()) {
// PCH files don't have a signature field in the control block,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
// We use the lower 64 bits for debug info.
- uint64_t Signature =
- Mod.getSignature()
- ? (uint64_t)Mod.getSignature()[1] << 32 | Mod.getSignature()[0]
- : ~1ULL;
+
+ uint64_t Signature = 0;
+ if (const auto &ModSig = Mod.getSignature()) {
+ for (unsigned I = 0; I != sizeof(Signature); ++I)
+ Signature |= (uint64_t)ModSig[I] << (I * 8);
+ } else {
+ Signature = ~1ULL;
+ }
llvm::DIBuilder DIB(CGM.getModule());
- DIB.createCompileUnit(TheCU->getSourceLanguage(),
- // TODO: Support "Source" from external AST providers?
- DIB.createFile(Mod.getModuleName(), Mod.getPath()),
- TheCU->getProducer(), true, StringRef(), 0,
- Mod.getASTFile(), llvm::DICompileUnit::FullDebug,
- Signature);
+ SmallString<0> PCM;
+ if (!llvm::sys::path::is_absolute(Mod.getASTFile()))
+ PCM = Mod.getPath();
+ llvm::sys::path::append(PCM, Mod.getASTFile());
+ DIB.createCompileUnit(
+ TheCU->getSourceLanguage(),
+ // TODO: Support "Source" from external AST providers?
+ DIB.createFile(Mod.getModuleName(), TheCU->getDirectory()),
+ TheCU->getProducer(), false, StringRef(), 0, RemapPath(PCM),
+ llvm::DICompileUnit::FullDebug, Signature);
DIB.finalize();
}
llvm::DIModule *Parent =
IsRootModule ? nullptr
- : getOrCreateModuleRef(
- ExternalASTSource::ASTSourceDescriptor(*M->Parent),
- CreateSkeletonCU);
+ : getOrCreateModuleRef(ASTSourceDescriptor(*M->Parent),
+ CreateSkeletonCU);
+ std::string IncludePath = Mod.getPath().str();
llvm::DIModule *DIMod =
DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros,
- Mod.getPath(), CGM.getHeaderSearchOpts().Sysroot);
+ RemapPath(IncludePath));
ModuleCache[M].reset(DIMod);
return DIMod;
}
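A small sketch of the DWO-id folding used above, in plain C++; the 20-byte array is an assumption standing in for the module signature type. The first eight bytes are combined little-endian into a 64-bit id, with ~1ULL reserved for the "no signature" case.

  #include <array>
  #include <cstdint>

  uint64_t foldSignature(const std::array<uint8_t, 20> *sig) {
    if (!sig)
      return ~1ULL;  // PCHs without a signature still need a non-zero DWO id
    uint64_t id = 0;
    for (unsigned i = 0; i != sizeof(id); ++i)
      id |= uint64_t((*sig)[i]) << (i * 8);  // little-endian fold of 8 bytes
    return id;
  }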
@@ -2649,9 +2742,17 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
QualType QTy(Ty, 0);
auto SizeExpr = SizeExprCache.find(QTy);
if (SizeExpr != SizeExprCache.end())
- Subscript = DBuilder.getOrCreateSubrange(0, SizeExpr->getSecond());
- else
- Subscript = DBuilder.getOrCreateSubrange(0, Count ? Count : -1);
+ Subscript = DBuilder.getOrCreateSubrange(
+ SizeExpr->getSecond() /*count*/, nullptr /*lowerBound*/,
+ nullptr /*upperBound*/, nullptr /*stride*/);
+ else {
+ auto *CountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count ? Count : -1));
+ Subscript = DBuilder.getOrCreateSubrange(
+ CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/);
+ }
llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -2660,6 +2761,33 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
+llvm::DIType *CGDebugInfo::CreateType(const ConstantMatrixType *Ty,
+ llvm::DIFile *Unit) {
+ // FIXME: Create another debug type for matrices
+ // For the time being, it treats it like a nested ArrayType.
+
+ llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint32_t Align = getTypeAlignIfRequired(Ty, CGM.getContext());
+
+ // Create ranges for both dimensions.
+ llvm::SmallVector<llvm::Metadata *, 2> Subscripts;
+ auto *ColumnCountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumColumns()));
+ auto *RowCountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumRows()));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ ColumnCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ RowCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
+ return DBuilder.createArrayType(Size, Align, ElementTy, SubscriptArray);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
uint64_t Size;
uint32_t Align;
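Illustrative source for the ConstantMatrixType handling above (requires Clang's matrix extension, -fenable-matrix; the typedef name is invented): debug info currently describes the matrix as a nested array of its element type, with one subrange per dimension.

  // Compile with: clang++ -fenable-matrix -g -c matrix_demo.cc
  typedef float m4x3_t __attribute__((matrix_type(4, 3)));

  m4x3_t identity_like;  // described in DWARF like a 2-D array of float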
@@ -2710,10 +2838,17 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
auto SizeNode = SizeExprCache.find(EltTy);
if (SizeNode != SizeExprCache.end())
- Subscripts.push_back(
- DBuilder.getOrCreateSubrange(0, SizeNode->getSecond()));
- else
- Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ SizeNode->getSecond() /*count*/, nullptr /*lowerBound*/,
+ nullptr /*upperBound*/, nullptr /*stride*/));
+ else {
+ auto *CountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ }
EltTy = Ty->getElementType();
}
@@ -2772,7 +2907,7 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
return DBuilder.createMemberPointerType(
getOrCreateInstanceMethodType(
CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
- FPT, U),
+ FPT, U, false),
ClassType, Size, /*Align=*/0, Flags);
}
@@ -3025,7 +3160,7 @@ llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
// option.
if (Module *M = D->getOwningModule()) {
// This is a (sub-)module.
- auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ auto Info = ASTSourceDescriptor(*M);
return getOrCreateModuleRef(Info, /*SkeletonCU=*/false);
} else {
// This the precompiled header being built.
@@ -3053,6 +3188,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::ExtVector:
case Type::Vector:
return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ConstantMatrix:
+ return CreateType(cast<ConstantMatrixType>(Ty), Unit);
case Type::ObjCObjectPointer:
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
case Type::ObjCObject:
@@ -3094,6 +3231,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Atomic:
return CreateType(cast<AtomicType>(Ty), Unit);
+ case Type::ExtInt:
+ return CreateType(cast<ExtIntType>(Ty));
case Type::Pipe:
return CreateType(cast<PipeType>(Ty), Unit);
@@ -3547,7 +3686,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
if (const auto *Method = dyn_cast<CXXMethodDecl>(D))
- return getOrCreateMethodType(Method, F);
+ return getOrCreateMethodType(Method, F, false);
const auto *FTy = FnType->getAs<FunctionType>();
CallingConv CC = FTy ? FTy->getCallConv() : CallingConv::CC_C;
@@ -3651,8 +3790,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
Name = getDynamicInitializerName(cast<VarDecl>(D), GD.getDynamicInitKind(),
Fn);
} else {
- // Use llvm function name.
Name = Fn->getName();
+
+ if (isa<BlockDecl>(D))
+ LinkageName = Name;
+
Flags |= llvm::DINode::FlagPrototyped;
}
if (Name.startswith("\01"))
@@ -3764,7 +3906,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (IsDeclForCallSite)
Fn->setSubprogram(SP);
- DBuilder.retainType(SP);
+ DBuilder.finalizeSubprogram(SP);
}
void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
@@ -3778,12 +3920,12 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
if (Func->getSubprogram())
return;
- // Do not emit a declaration subprogram for a builtin or if call site info
- // isn't required. Also, elide declarations for functions with reserved names,
- // as call site-related features aren't interesting in this case (& also, the
- // compiler may emit calls to these functions without debug locations, which
- // makes the verifier complain).
- if (CalleeDecl->getBuiltinID() != 0 ||
+ // Do not emit a declaration subprogram for a builtin, a function with nodebug
+ // attribute, or if call site info isn't required. Also, elide declarations
+ // for functions with reserved names, as call site-related features aren't
+ // interesting in this case (& also, the compiler may emit calls to these
+ // functions without debug locations, which makes the verifier complain).
+ if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr<NoDebugAttr>() ||
getCallSiteRelatedAttrs() == llvm::DINode::FlagZero)
return;
if (const auto *Id = CalleeDecl->getIdentifier())
@@ -4680,7 +4822,7 @@ void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
if (CGM.getCodeGenOpts().getDebuggerTuning() != llvm::DebuggerKind::LLDB)
return;
if (Module *M = ID.getImportedModule()) {
- auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ auto Info = ASTSourceDescriptor(*M);
auto Loc = ID.getLocation();
DBuilder.createImportedDeclaration(
getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
@@ -4844,8 +4986,7 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
(CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB ||
CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::GDB);
- if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5 &&
- !CGM.getCodeGenOpts().EnableDebugEntryValues)
+ if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
return llvm::DINode::FlagZero;
return llvm::DINode::FlagAllCallsDescribed;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index 90e9a61ebe96..96ef6c7c1d27 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -17,9 +17,11 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -60,7 +62,7 @@ class CGDebugInfo {
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
ModuleMap *ClangModuleMap = nullptr;
- ExternalASTSource::ASTSourceDescriptor PCHDescriptor;
+ ASTSourceDescriptor PCHDescriptor;
SourceLocation CurLoc;
llvm::MDNode *CurInlinedAt = nullptr;
llvm::DIType *VTablePtrType = nullptr;
@@ -165,6 +167,8 @@ class CGDebugInfo {
/// ivars and property accessors.
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
+ llvm::DIType *CreateType(const AutoType *Ty);
+ llvm::DIType *CreateType(const ExtIntType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
@@ -188,6 +192,7 @@ class CGDebugInfo {
llvm::DIType *CreateType(const ObjCTypeParamType *Ty, llvm::DIFile *Unit);
llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const ConstantMatrixType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit);
@@ -214,10 +219,10 @@ class CGDebugInfo {
/// not updated to include implicit \c this pointer. Use this routine
/// to get a method type which includes \c this pointer.
llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *F);
+ llvm::DIFile *F, bool decl);
llvm::DISubroutineType *
getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
- llvm::DIFile *Unit);
+ llvm::DIFile *Unit, bool decl);
llvm::DISubroutineType *
getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
/// \return debug info descriptor for vtable.
@@ -378,9 +383,7 @@ public:
/// When generating debug information for a clang module or
/// precompiled header, this module map will be used to determine
/// the module of origin of each Decl.
- void setPCHDescriptor(ExternalASTSource::ASTSourceDescriptor PCH) {
- PCHDescriptor = PCH;
- }
+ void setPCHDescriptor(ASTSourceDescriptor PCH) { PCHDescriptor = PCH; }
/// @}
/// Update the current source location. If \arg loc is invalid it is
@@ -506,7 +509,7 @@ public:
llvm::DIType *getOrCreateStandaloneType(QualType Ty, SourceLocation Loc);
/// Add heapallocsite metadata for MSAllocator calls.
- void addHeapAllocSiteMetadata(llvm::Instruction *CallSite, QualType Ty,
+ void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy,
SourceLocation Loc);
void completeType(const EnumDecl *ED);
@@ -589,9 +592,8 @@ private:
/// Get a reference to a clang module. If \p CreateSkeletonCU is true,
/// this also creates a split dwarf skeleton compile unit.
- llvm::DIModule *
- getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
- bool CreateSkeletonCU);
+ llvm::DIModule *getOrCreateModuleRef(ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU);
/// DebugTypeExtRefs: If \p D originated in a clang module, return it.
llvm::DIModule *getParentModuleOrNull(const Decl *D);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index 60f1dba7c768..1729c7ed3c31 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -31,6 +31,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
@@ -40,6 +41,9 @@
using namespace clang;
using namespace CodeGen;
+static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
+ "Clang max alignment greater than what LLVM supports?");
+
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
case Decl::BuiltinTemplate:
@@ -104,6 +108,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
+ case Decl::MSGuid: // __declspec(uuid("..."))
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
case Decl::OMPCapturedExpr:
@@ -206,9 +211,9 @@ static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
if (auto *CD = dyn_cast<CapturedDecl>(DC))
DC = cast<DeclContext>(CD->getNonClosureContext());
if (const auto *FD = dyn_cast<FunctionDecl>(DC))
- ContextName = CGM.getMangledName(FD);
+ ContextName = std::string(CGM.getMangledName(FD));
else if (const auto *BD = dyn_cast<BlockDecl>(DC))
- ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
+ ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
ContextName = OMD->getSelector().getAsString();
else
@@ -233,7 +238,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
// Use the label if the variable is renamed with the asm-label extension.
std::string Name;
if (D.hasAttr<AsmLabelAttr>())
- Name = getMangledName(&D);
+ Name = std::string(getMangledName(&D));
else
Name = getStaticDeclName(*this, D);
@@ -245,7 +250,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
// variables cannot have an initializer.
llvm::Constant *Init = nullptr;
if (Ty.getAddressSpace() == LangAS::opencl_local ||
- D.hasAttr<CUDASharedAttr>())
+ D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
Init = llvm::UndefValue::get(LTy);
else
Init = EmitNullConstant(Ty);
@@ -337,7 +342,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
// in the LLVM type system.)
- if (GV->getType()->getElementType() != Init->getType()) {
+ if (GV->getValueType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
@@ -757,10 +762,8 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
- enterFullExpression(fe);
- init = fe->getSubExpr();
- }
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
+ init = EWC->getSubExpr();
CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is
@@ -1046,13 +1049,13 @@ static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
llvm::Type *OrigTy = constant->getType();
if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
return constStructWithPadding(CGM, isPattern, STy, constant);
- if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
llvm::SmallVector<llvm::Constant *, 8> Values;
- unsigned Size = STy->getNumElements();
+ uint64_t Size = ArrayTy->getNumElements();
if (!Size)
return constant;
- llvm::Type *ElemTy = STy->getElementType();
- bool ZeroInitializer = constant->isZeroValue();
+ llvm::Type *ElemTy = ArrayTy->getElementType();
+ bool ZeroInitializer = constant->isNullValue();
llvm::Constant *OpValue, *PaddedOp;
if (ZeroInitializer) {
OpValue = llvm::Constant::getNullValue(ElemTy);
@@ -1068,13 +1071,12 @@ static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
auto *NewElemTy = Values[0]->getType();
if (NewElemTy == ElemTy)
return constant;
- if (OrigTy->isArrayTy()) {
- auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
- return llvm::ConstantArray::get(ArrayTy, Values);
- } else {
- return llvm::ConstantVector::get(Values);
- }
+ auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
+ return llvm::ConstantArray::get(NewArrayTy, Values);
}
+ // FIXME: Add handling for tail padding in vectors. Vectors don't
+ // have padding between or inside elements, but the total amount of
+ // data can be less than the allocated size.
return constant;
}
@@ -1087,7 +1089,7 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
return CC->getNameAsString();
if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
return CD->getNameAsString();
- return getMangledName(FD);
+ return std::string(getMangledName(FD));
} else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
return OM->getNameAsString();
} else if (isa<BlockDecl>(DC)) {
@@ -1398,10 +1400,15 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
Address address = Address::invalid();
Address AllocaAddr = Address::invalid();
- Address OpenMPLocalAddr =
- getLangOpts().OpenMP
- ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
- : Address::invalid();
+ Address OpenMPLocalAddr = Address::invalid();
+ if (CGM.getLangOpts().OpenMPIRBuilder)
+ OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
+ else
+ OpenMPLocalAddr =
+ getLangOpts().OpenMP
+ ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
+ : Address::invalid();
+
bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
@@ -1513,9 +1520,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ llvm::TypeSize size =
+ CGM.getDataLayout().getTypeAllocSize(allocaTy);
emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(size, AllocaAddr.getPointer());
+ size.isScalable() ? EmitLifetimeStart(-1, AllocaAddr.getPointer())
+ : EmitLifetimeStart(size.getFixedSize(),
+ AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1672,9 +1682,13 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
case LangOptions::TrivialAutoVarInitKind::Uninitialized:
llvm_unreachable("Uninitialized handled by caller");
case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (CGM.stopAutoInit())
+ return;
emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
break;
case LangOptions::TrivialAutoVarInitKind::Pattern:
+ if (CGM.stopAutoInit())
+ return;
emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
break;
}
@@ -1697,6 +1711,8 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm_unreachable("Uninitialized handled by caller");
case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (CGM.stopAutoInit())
+ return;
if (!EltSize.isOne())
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
@@ -1704,6 +1720,8 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
break;
case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ if (CGM.stopAutoInit())
+ return;
llvm::Type *ElTy = Loc.getElementType();
llvm::Constant *Constant = constWithPadding(
CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
@@ -1862,9 +1880,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
-/// \param loc the address to initialize; its type is a pointer
-/// to the LLVM mapping of the object's type
-/// \param alignment the alignment of the address
+/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
@@ -2533,5 +2549,5 @@ void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
}
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
- getOpenMPRuntime().checkArchForUnifiedAddressing(D);
+ getOpenMPRuntime().processRequiresDirective(D);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index 3baa0a080f5d..5a8500364295 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -16,11 +16,12 @@
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
-#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace clang;
using namespace CodeGen;
@@ -239,7 +240,7 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
}
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
ty, FnName.str(), FI, VD.getLocation());
CodeGenFunction CGF(CGM);
@@ -249,7 +250,7 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
- // Make sure the call and the callee agree on calling convention.
+ // Make sure the call and the callee agree on calling convention.
if (auto *dtorFn = dyn_cast<llvm::Function>(
dtor.getCallee()->stripPointerCastsAndAliases()))
call->setCallingConv(dtorFn->getCallingConv());
@@ -270,8 +271,12 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
// extern "C" int atexit(void (*f)(void));
+ assert(cast<llvm::Function>(dtorStub)->getFunctionType() ==
+ llvm::FunctionType::get(CGM.VoidTy, false) &&
+ "Argument to atexit has a wrong type.");
+
llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
+ llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::FunctionCallee atexit =
CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
@@ -282,6 +287,30 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
EmitNounwindRuntimeCall(atexit, dtorStub);
}
+llvm::Value *
+CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub) {
+ // The unatexit subroutine unregisters __dtor functions that were previously
+ // registered by the atexit subroutine. If the referenced function is found,
+ // it is removed from the list of functions that are called at normal program
+ // termination, and unatexit returns 0; otherwise a non-zero value is
+ // returned.
+ //
+ // extern "C" int unatexit(void (*f)(void));
+ assert(dtorStub->getFunctionType() ==
+ llvm::FunctionType::get(CGM.VoidTy, false) &&
+ "Argument to unatexit has a wrong type.");
+
+ llvm::FunctionType *unatexitTy =
+ llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);
+
+ llvm::FunctionCallee unatexit =
+ CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());
+
+ cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();
+
+ return EmitNounwindRuntimeCall(unatexit, dtorStub);
+}
+
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) {
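A minimal model (plain C++, not AIX libc) of the atexit/unatexit contract that unregisterGlobalDtorWithUnAtExit above relies on: unregistering removes a previously registered handler and returns 0 on success, nonzero when the handler was never registered.

  #include <algorithm>
  #include <vector>

  using Handler = void (*)();
  static std::vector<Handler> registry;

  int model_atexit(Handler f) {
    registry.push_back(f);
    return 0;
  }

  int model_unatexit(Handler f) {
    // Remove the most recently registered copy of f, mirroring LIFO atexit.
    auto it = std::find(registry.rbegin(), registry.rend(), f);
    if (it == registry.rend())
      return 1;  // not found: the handler would still run at exit
    registry.erase(std::next(it).base());
    return 0;  // found and removed: the handler no longer runs at exit
  }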
@@ -333,19 +362,23 @@ void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}
-llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
+llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
- SourceLocation Loc, bool TLS) {
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name, &getModule());
+ SourceLocation Loc, bool TLS, bool IsExternalLinkage) {
+ llvm::Function *Fn = llvm::Function::Create(
+ FTy,
+ IsExternalLinkage ? llvm::GlobalValue::ExternalLinkage
+ : llvm::GlobalValue::InternalLinkage,
+ Name, &getModule());
+
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
if (const char *Section = getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ if (Fn->hasInternalLinkage())
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -392,20 +425,20 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
- auto RASignKind = getCodeGenOpts().getSignReturnAddress();
- if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
+ auto RASignKind = getLangOpts().getSignReturnAddressScope();
+ if (RASignKind != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address",
- RASignKind == CodeGenOptions::SignReturnAddressScope::All
+ RASignKind == LangOptions::SignReturnAddressScopeKind::All
? "all"
: "non-leaf");
- auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
+ auto RASignKey = getLangOpts().getSignReturnAddressKey();
Fn->addFnAttr("sign-return-address-key",
- RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ RASignKey == LangOptions::SignReturnAddressKeyKind::AKey
? "a_key"
: "b_key");
}
- if (getCodeGenOpts().BranchTargetEnforcement)
+ if (getLangOpts().BranchTargetEnforcement)
Fn->addFnAttr("branch-target-enforcement");
return Fn;
@@ -461,10 +494,8 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
}
// Create a variable initialization function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
- getTypes().arrangeNullaryFunction(),
- D->getLocation());
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
@@ -533,6 +564,22 @@ void CodeGenModule::EmitCXXThreadLocalInitFunc() {
CXXThreadLocals.clear();
}
+static SmallString<128> getTransformedFileName(llvm::Module &M) {
+ SmallString<128> FileName = llvm::sys::path::filename(M.getName());
+
+ if (FileName.empty())
+ FileName = "<null>";
+
+ for (size_t i = 0; i < FileName.size(); ++i) {
+ // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
+ // to be the set of C preprocessing numbers.
+ if (!isPreprocessingNumberBody(FileName[i]))
+ FileName[i] = '_';
+ }
+
+ return FileName;
+}
+
void
CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
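A short sketch of the character class getTransformedFileName above keys on (a stand-in for clang's isPreprocessingNumberBody, assumed here to accept [a-zA-Z0-9._]): everything else in the module's file name becomes an underscore before it is appended to _GLOBAL__sub_I_.

  #include <cctype>
  #include <string>

  static bool isIdentifierishChar(unsigned char c) {
    return std::isalnum(c) || c == '.' || c == '_';
  }

  std::string sanitizeFileName(std::string name) {
    if (name.empty())
      name = "<null>";
    for (char &c : name)
      if (!isIdentifierishChar(static_cast<unsigned char>(c)))
        c = '_';
    return name;  // e.g. "my file-1.cpp" -> "my_file_1.cpp"
  }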
@@ -541,11 +588,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
+ const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
+ if (UseSinitAndSterm) {
+ GlobalUniqueModuleId = getUniqueModuleId(&getModule());
+
+ // FIXME: We need to figure out what to hash on or encode into the unique ID
+ // we need.
+ if (GlobalUniqueModuleId.compare("") == 0)
+ llvm::report_fatal_error(
+ "cannot produce a unique identifier for this module"
+ " based on strong external symbols");
+ GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
+ }
+
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
- // Create our global initialization function.
+ // Create our global prioritized initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
+ assert(!UseSinitAndSterm && "Prioritized sinit and sterm functions are not"
+ " supported yet.");
+
SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
@@ -565,7 +628,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
std::string PrioritySuffix = llvm::utostr(Priority);
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
- llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
FTy, "_GLOBAL__I_" + PrioritySuffix, FI);
for (; I < PrioE; ++I)
@@ -577,22 +640,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
PrioritizedCXXGlobalInits.clear();
}
- // Include the filename in the symbol name. Including "sub_" matches gcc and
- // makes sure these symbols appear lexicographically behind the symbols with
- // priority emitted above.
- SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
- if (FileName.empty())
- FileName = "<null>";
+ if (UseSinitAndSterm && CXXGlobalInits.empty())
+ return;
- for (size_t i = 0; i < FileName.size(); ++i) {
- // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
- // to be the set of C preprocessing numbers.
- if (!isPreprocessingNumberBody(FileName[i]))
- FileName[i] = '_';
+ // Create our global initialization function.
+ SmallString<128> FuncName;
+ bool IsExternalLinkage = false;
+ if (UseSinitAndSterm) {
+ llvm::Twine("__sinit80000000_clang_", GlobalUniqueModuleId)
+ .toVector(FuncName);
+ IsExternalLinkage = true;
+ } else {
+ // Include the filename in the symbol name. Including "sub_" matches gcc
+ // and makes sure these symbols appear lexicographically behind the symbols
+ // with priority emitted above.
+ llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule()))
+ .toVector(FuncName);
}
- llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, FuncName, FI, SourceLocation(), false /* TLS */,
+ IsExternalLinkage);
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -618,19 +686,38 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
CXXGlobalInits.clear();
}
-void CodeGenModule::EmitCXXGlobalDtorFunc() {
- if (CXXGlobalDtors.empty())
+void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
+ if (CXXGlobalDtorsOrStermFinalizers.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
-
- // Create our global destructor function.
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);
- CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
+ // Create our global cleanup function.
+ llvm::Function *Fn = nullptr;
+ if (getCXXABI().useSinitAndSterm()) {
+ if (GlobalUniqueModuleId.empty()) {
+ GlobalUniqueModuleId = getUniqueModuleId(&getModule());
+ // FIXME: We need to figure out what to hash on or encode into the unique
+ // ID we need.
+ if (GlobalUniqueModuleId.compare("") == 0)
+ llvm::report_fatal_error(
+ "cannot produce a unique identifier for this module"
+ " based on strong external symbols");
+ GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
+ }
+
+ Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, llvm::Twine("__sterm80000000_clang_", GlobalUniqueModuleId), FI,
+ SourceLocation(), false /* TLS */, true /* IsExternalLinkage */);
+ } else {
+ Fn = CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
+ }
+
+ CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
+ Fn, CXXGlobalDtorsOrStermFinalizers);
AddGlobalDtor(Fn);
+ CXXGlobalDtorsOrStermFinalizers.clear();
}
/// Emit the code necessary to initialize the given global variable.
@@ -726,10 +813,10 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
FinishFunction();
}
-void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
+void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsAndObjects) {
+ llvm::Constant *>> &DtorsOrStermFinalizers) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -737,13 +824,22 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
// Emit an artificial location for this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- // Emit the dtors, in reverse order from construction.
- for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ // Emit the cleanups, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
llvm::FunctionType *CalleeTy;
llvm::Value *Callee;
llvm::Constant *Arg;
- std::tie(CalleeTy, Callee, Arg) = DtorsAndObjects[e - i - 1];
- llvm::CallInst *CI = Builder.CreateCall(CalleeTy, Callee, Arg);
+ std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];
+
+ llvm::CallInst *CI = nullptr;
+ if (Arg == nullptr) {
+ assert(
+ CGM.getCXXABI().useSinitAndSterm() &&
+ "Arg could not be nullptr unless using sinit and sterm functions.");
+ CI = Builder.CreateCall(CalleeTy, Callee);
+ } else
+ CI = Builder.CreateCall(CalleeTy, Callee, Arg);
+
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
CI->setCallingConv(F->getCallingConv());
@@ -767,7 +863,7 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
CurEHLocation = VD->getBeginLoc();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index 53fafab3e0e6..bdf70252b5ad 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
@@ -468,6 +469,18 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
+ // In wasm we currently treat 'throw()' in the same way as 'noexcept'. In
+ // the case of a throw with types, we ignore it and print a warning for now.
+ // TODO Correctly handle exception specification in wasm
+ if (CGM.getLangOpts().WasmExceptions) {
+ if (EST == EST_DynamicNone)
+ EHStack.pushTerminate();
+ else
+ CGM.getDiags().Report(D->getLocation(),
+ diag::warn_wasm_dynamic_exception_spec_ignored)
+ << FD->getExceptionSpecSourceRange();
+ return;
+ }
unsigned NumExceptions = Proto->getNumExceptions();
EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
@@ -544,6 +557,14 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
+ // In wasm we currently treat 'throw()' in the same way as 'noexcept'. In
+ // the case of a throw with types, we ignore it and print a warning for now.
+ // TODO Correctly handle exception specification in wasm
+ if (CGM.getLangOpts().WasmExceptions) {
+ if (EST == EST_DynamicNone)
+ EHStack.popTerminate();
+ return;
+ }
EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
emitFilterDispatchBlock(*this, filterScope);
EHStack.popFilter();
@@ -630,9 +651,6 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
case EHScope::Terminate:
dispatchBlock = getTerminateHandler();
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
}
scope.setCachedEHDispatchBlock(dispatchBlock);
}
@@ -674,9 +692,6 @@ CodeGenFunction::getFuncletEHDispatchBlock(EHScopeStack::stable_iterator SI) {
case EHScope::Terminate:
DispatchBlock->setName("terminate");
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd dispatch block missing!");
}
EHS.setCachedEHDispatchBlock(DispatchBlock);
return DispatchBlock;
@@ -692,7 +707,6 @@ static bool isNonEHScope(const EHScope &S) {
case EHScope::Filter:
case EHScope::Catch:
case EHScope::Terminate:
- case EHScope::PadEnd:
return false;
}
@@ -703,12 +717,12 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- // If exceptions are disabled and SEH is not in use, then there is no invoke
- // destination. SEH "works" even if exceptions are off. In practice, this
- // means that C++ destructors and other EH cleanups don't run, which is
+ // If exceptions are disabled/ignored and SEH is not in use, then there is no
+ // invoke destination. SEH "works" even if exceptions are off. In practice,
+ // this means that C++ destructors and other EH cleanups don't run, which is
// consistent with MSVC's behavior.
const LangOptions &LO = CGM.getLangOpts();
- if (!LO.Exceptions) {
+ if (!LO.Exceptions || LO.IgnoreExceptions) {
if (!LO.Borland && !LO.MicrosoftExt)
return nullptr;
if (!currentFunctionUsesSEHTry())
@@ -751,15 +765,14 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(EHStack.requiresLandingPad());
-
+ assert(!CGM.getLangOpts().IgnoreExceptions &&
+ "LandingPad should not be emitted when -fignore-exceptions are in "
+ "effect.");
EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
switch (innermostEHScope.getKind()) {
case EHScope::Terminate:
return getTerminateLandingPad();
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
-
case EHScope::Catch:
case EHScope::Cleanup:
case EHScope::Filter:
@@ -825,9 +838,6 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Catch:
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
}
EHCatchScope &catchScope = cast<EHCatchScope>(*I);
@@ -1637,6 +1647,19 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
llvm::Value *IsForEH =
llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
+
+ // Except for _leave and fall-through at the end, all other exits from a
+ // _try (return/goto/continue/break) are considered abnormal terminations.
+ // Since _leave/fall-through is always indexed 0, we can use
+ // NormalCleanupDestSlot (>= 1 for goto/return/..) as the first argument
+ // to indicate an abnormal termination.
+ if (!F.isForEHCleanup() && F.hasExitSwitch()) {
+ Address Addr = CGF.getNormalCleanupDestSlot();
+ llvm::Value *Load = CGF.Builder.CreateLoad(Addr, "cleanup.dest");
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int32Ty);
+ IsForEH = CGF.Builder.CreateICmpNE(Load, Zero);
+ }
+
Args.add(RValue::get(IsForEH), ArgTys[0]);
Args.add(RValue::get(FP), ArgTys[1]);
@@ -1792,6 +1815,48 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
llvm::Constant *ParentI8Fn =
llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
+
+ // If the parent is a _finally, the passed-in ParentFP is the FP of the
+ // parent _finally, not the establisher's FP (the FP of the outermost
+ // function). The establisher FP is the 2nd parameter passed into the
+ // parent _finally. Fortunately, it's always saved in the parent's frame.
+ // The following code retrieves it, and escapes it so that the spill
+ // instruction won't be optimized away.
+ if (ParentCGF.ParentCGF != nullptr) {
+ // Locate and escape the parent's frame_pointer.addr alloca.
+ // Depending on the target, it should be the 1st/2nd entry in LocalDeclMap.
+ // Let's just scan for the ImplicitParamDecl with VoidPtrTy.
+ llvm::AllocaInst *FramePtrAddrAlloca = nullptr;
+ for (auto &I : ParentCGF.LocalDeclMap) {
+ const VarDecl *D = cast<VarDecl>(I.first);
+ if (isa<ImplicitParamDecl>(D) &&
+ D->getType() == getContext().VoidPtrTy) {
+ assert(D->getName().startswith("frame_pointer"));
+ FramePtrAddrAlloca = cast<llvm::AllocaInst>(I.second.getPointer());
+ break;
+ }
+ }
+ assert(FramePtrAddrAlloca);
+ auto InsertPair = ParentCGF.EscapedLocals.insert(
+ std::make_pair(FramePtrAddrAlloca, ParentCGF.EscapedLocals.size()));
+ int FrameEscapeIdx = InsertPair.first->second;
+
+ // An example of a filter's prologue:
+ // %0 = call i8* @llvm.eh.recoverfp(bitcast(@"?fin$0@0@main@@"),..)
+ // %1 = call i8* @llvm.localrecover(bitcast(@"?fin$0@0@main@@"),..)
+ // %2 = bitcast i8* %1 to i8**
+ // %3 = load i8*, i8* *%2, align 8
+ // ==> %3 is the frame pointer of the outermost host function
+ llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::localrecover);
+ llvm::Constant *ParentI8Fn =
+ llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
+ ParentFP = Builder.CreateCall(
+ FrameRecoverFn, {ParentI8Fn, ParentFP,
+ llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
+ ParentFP = Builder.CreateBitCast(ParentFP, CGM.VoidPtrPtrTy);
+ ParentFP = Builder.CreateLoad(Address(ParentFP, getPointerAlign()));
+ }
}
// Create llvm.localrecover calls for all captures.
@@ -1885,7 +1950,7 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
OutlinedStmt->getBeginLoc(), OutlinedStmt->getBeginLoc());
CurSEHParent = ParentCGF.CurSEHParent;
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FnInfo, CurFn);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), CurFn, FnInfo);
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
}
@@ -1990,6 +2055,7 @@ void CodeGenFunction::pushSEHCleanup(CleanupKind Kind,
void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
+ HelperCGF.ParentCGF = this;
if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
// Outline the finally block.
llvm::Function *FinallyFunc =
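
For orientation, a rough source-level sketch (not taken from this patch) of the cases the LangOpts.WasmExceptions handling above distinguishes; the -fwasm-exceptions flag and a pre-C++17 -std (so dynamic exception specifications still parse) are assumed, and the declarations are illustrative only:

struct E {};

void legacy_nothrow() throw(); // treated like 'noexcept': an escaping exception
                               // reaches the terminate scope pushed above
void legacy_listed() throw(E); // the type list is ignored; clang reports
                               // warn_wasm_dynamic_exception_spec_ignored
void modern() noexcept;        // unaffected
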
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index 8e0604181fb1..9e8770573d70 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
@@ -125,8 +126,8 @@ Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
assert(isa<llvm::AllocaInst>(Var.getPointer()));
- auto *Store = new llvm::StoreInst(Init, Var.getPointer());
- Store->setAlignment(Var.getAlignment().getAsAlign());
+ auto *Store = new llvm::StoreInst(Init, Var.getPointer(), /*volatile*/ false,
+ Var.getAlignment().getAsAlign());
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}
@@ -144,8 +145,19 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
const Twine &Name, Address *Alloca) {
- return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
- /*ArraySize=*/nullptr, Alloca);
+ Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
+ /*ArraySize=*/nullptr, Alloca);
+
+ if (Ty->isConstantMatrixType()) {
+ auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
+ auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
+ ArrayTy->getNumElements());
+
+ Result = Address(
+ Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
+ Result.getAlignment());
+ }
+ return Result;
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
@@ -415,6 +427,11 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
llvm_unreachable("unknown storage duration");
}
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+ return TargetInfo.getABI().startswith("aapcs");
+}
+
LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->getSubExpr();
@@ -711,7 +728,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
if (SanOpts.has(SanitizerKind::ObjectSize) &&
!SkippedChecks.has(SanitizerKind::ObjectSize) &&
!Ty->isIncompleteType()) {
- uint64_t TySize = getContext().getTypeSizeInChars(Ty).getQuantity();
+ uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
if (ArraySize)
Size = Builder.CreateMul(Size, ArraySize);
@@ -742,7 +759,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
!SkippedChecks.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
- AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
+ AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
+ /*ForPointeeType=*/true)
+ .getQuantity();
// The glvalue must be suitably aligned.
if (AlignVal > 1 &&
@@ -858,8 +877,12 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
static bool isFlexibleArrayMemberExpr(const Expr *E) {
// For compatibility with existing code, we treat arrays of length 0 or
// 1 as flexible array members.
+ // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
+ // the two mechanisms.
const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
+ // was produced by macro expansion.
if (CAT->getSize().ugt(1))
return false;
} else if (!isa<IncompleteArrayType>(AT))
@@ -872,6 +895,10 @@ static bool isFlexibleArrayMemberExpr(const Expr *E) {
// FIXME: If the base type of the member expr is not FD->getParent(),
// this should not be treated as a flexible array member access.
if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ // FIXME: Sema doesn't treat a T[1] union member as a flexible array
+ // member, only a T[0] or T[] member gets that treatment.
+ if (FD->getParent()->isUnion())
+ return true;
RecordDecl::field_iterator FI(
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
@@ -1069,9 +1096,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
if (isa<ExplicitCastExpr>(CE)) {
LValueBaseInfo TargetTypeBaseInfo;
TBAAAccessInfo TargetTypeTBAAInfo;
- CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(),
- &TargetTypeBaseInfo,
- &TargetTypeTBAAInfo);
+ CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
+ E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
if (TBAAInfo)
*TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
TargetTypeTBAAInfo);
@@ -1139,8 +1165,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
- CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo,
- TBAAInfo);
+ CharUnits Align =
+ CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
return Address(EmitScalarExpr(E), Align);
}
@@ -1276,8 +1302,15 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
- case Expr::ConstantExprClass:
+ case Expr::ConstantExprClass: {
+ const ConstantExpr *CE = cast<ConstantExpr>(E);
+ if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
+ QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
+ ->getCallReturnType(getContext());
+ return MakeNaturalAlignAddrLValue(Result, RetType);
+ }
return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
+ }
case Expr::ParenExprClass:
return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
@@ -1304,7 +1337,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
- enterFullExpression(cleanups);
RunCleanupsScope Scope(*this);
LValue LV = EmitLValue(cleanups->getSubExpr());
if (LV.isSimple()) {
@@ -1343,6 +1375,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitUnaryOpLValue(cast<UnaryOperator>(E));
case Expr::ArraySubscriptExprClass:
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::MatrixSubscriptExprClass:
+ return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
case Expr::OMPArraySectionExprClass:
return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
@@ -1368,6 +1402,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
@@ -1651,15 +1686,14 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (VTy->getNumElements() == 3) {
// Bitcast to vec4 type.
- llvm::VectorType *vec4Ty =
- llvm::VectorType::get(VTy->getElementType(), 4);
+ auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
// Now load value.
llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
- {0, 1, 2}, "extractVec");
+ ArrayRef<int>{0, 1, 2}, "extractVec");
return EmitFromMemory(V, Ty);
}
}
@@ -1716,6 +1750,42 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
return Value;
}
+// Convert the pointer of \p Addr to a pointer to a vector (the value type of
+// MatrixType), if it points to an array (the memory type of MatrixType).
+static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
+ bool IsVector = true) {
+ auto *ArrayTy = dyn_cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ if (ArrayTy && IsVector) {
+ auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
+ ArrayTy->getNumElements());
+
+ return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
+ }
+ auto *VectorTy = dyn_cast<llvm::VectorType>(
+ cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ if (VectorTy && !IsVector) {
+ auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
+ VectorTy->getNumElements());
+
+ return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
+ }
+
+ return Addr;
+}
+
+// Emit a store of a matrix LValue. This may require casting the pointer
+// from the in-memory representation (ArrayType) to a pointer to the value
+// type (VectorType).
+static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
+ bool isInit, CodeGenFunction &CGF) {
+ Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
+ value->getType()->isVectorTy());
+ CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
+ lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
+ lvalue.isNontemporal());
+}
+
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
bool Volatile, QualType Ty,
LValueBaseInfo BaseInfo,
@@ -1729,13 +1799,10 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
// Handle vec3 special.
if (VecTy && VecTy->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
- llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2),
- llvm::UndefValue::get(Builder.getInt32Ty())};
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
- MaskV, "extractVec");
- SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
+ ArrayRef<int>{0, 1, 2, -1},
+ "extractVec");
+ SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
}
if (Addr.getElementType() != SrcTy) {
Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
@@ -1766,11 +1833,26 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
+ if (lvalue.getType()->isConstantMatrixType()) {
+ EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
+ return;
+ }
+
EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
+// Emit a load of an LValue of matrix type. This may require casting the
+// pointer from the in-memory representation (ArrayType) to a pointer to the
+// value type (VectorType).
+static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
+ CodeGenFunction &CGF) {
+ assert(LV.getType()->isConstantMatrixType());
+ Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
+ LV.setAddress(Addr);
+ return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
+}
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
@@ -1796,6 +1878,9 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isSimple()) {
assert(!LV.getType()->isFunctionType());
+ if (LV.getType()->isConstantMatrixType())
+ return EmitLoadOfMatrixLValue(LV, Loc, *this);
+
// Everything needs a load.
return RValue::get(EmitLoadOfScalar(LV, Loc));
}
@@ -1809,13 +1894,21 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
// If this is a reference to a subset of the elements of a vector, either
// shuffle the input or extract/insert them as appropriate.
- if (LV.isExtVectorElt())
+ if (LV.isExtVectorElt()) {
return EmitLoadOfExtVectorElementLValue(LV);
+ }
// Global Register variables always invoke intrinsics
if (LV.isGlobalReg())
return EmitLoadOfGlobalRegLValue(LV);
+ if (LV.isMatrixElt()) {
+ llvm::LoadInst *Load =
+ Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
+ return RValue::get(
+ Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
+ }
+
assert(LV.isBitField() && "Unknown LValue type!");
return EmitLoadOfBitfieldLValue(LV, Loc);
}
@@ -1870,13 +1963,12 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
// Always use shuffle vector to try to retain the original program structure
unsigned NumResultElts = ExprVT->getNumElements();
- SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<int, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i)
- Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
+ Mask.push_back(getAccessedFieldNo(i, Elts));
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
- MaskV);
+ Mask);
return RValue::get(Vec);
}
@@ -1922,7 +2014,6 @@ RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
return RValue::get(Call);
}
-
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
@@ -1948,6 +2039,15 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isGlobalReg())
return EmitStoreThroughGlobalRegLValue(Src, Dst);
+ if (Dst.isMatrixElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress());
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getMatrixIdx(), "matins");
+ Builder.CreateStore(Vec, Dst.getMatrixAddress(),
+ Dst.isVolatileQualified());
+ return;
+ }
+
assert(Dst.isBitField() && "Unknown LValue type");
return EmitStoreThroughBitfieldLValue(Src, Dst);
}
@@ -2066,6 +2166,14 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
assert(Info.Offset == 0);
+ // According to the AAPCS:
+ // When a volatile bit-field is written, and its container does not overlap
+ // with any non-bit-field member, its container must be read exactly once and
+ // written exactly once using the access width appropriate to the type of the
+ // container. The two accesses are not atomic.
+ if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
+ CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
+ Builder.CreateLoad(Ptr, true, "bf.load");
}
// Write the new value back out.
@@ -2103,37 +2211,33 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
- unsigned NumDstElts = Vec->getType()->getVectorNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
if (NumDstElts == NumSrcElts) {
// Use shuffle vector if the src and destination have the same number of
// elements, and restore the vector mask since it is on the side where it
// will be stored.
- SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ SmallVector<int, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i)
- Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
+ Mask[getAccessedFieldNo(i, Elts)] = i;
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Vec = Builder.CreateShuffleVector(SrcVal,
- llvm::UndefValue::get(Vec->getType()),
- MaskV);
+ Vec = Builder.CreateShuffleVector(
+ SrcVal, llvm::UndefValue::get(Vec->getType()), Mask);
} else if (NumDstElts > NumSrcElts) {
// Extended the source vector to the same length and then shuffle it
// into the destination.
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
- SmallVector<llvm::Constant*, 4> ExtMask;
+ SmallVector<int, 4> ExtMask;
for (unsigned i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(Builder.getInt32(i));
- ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
- llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
- llvm::Value *ExtSrcVal =
- Builder.CreateShuffleVector(SrcVal,
- llvm::UndefValue::get(SrcVal->getType()),
- ExtMaskV);
+ ExtMask.push_back(i);
+ ExtMask.resize(NumDstElts, -1);
+ llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(
+ SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMask);
// build identity
- SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<int, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
- Mask.push_back(Builder.getInt32(i));
+ Mask.push_back(i);
// When the vector size is odd and .odd or .hi is used, the last element
// of the Elts constant array will be one past the size of the vector.
@@ -2143,9 +2247,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// modify the positions that get shuffled in from the source
for (unsigned i = 0; i != NumSrcElts; ++i)
- Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
+ Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
} else {
// We should never shorten the vector
llvm_unreachable("unexpected shorten vector length");
@@ -2295,7 +2398,13 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
static LValue EmitThreadPrivateVarDeclLValue(
CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
llvm::Type *RealVarTy, SourceLocation Loc) {
- Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
+ Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
+ CGF, VD, Addr, Loc);
+ else
+ Addr =
+ CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+
Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2327,9 +2436,9 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
- CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(),
- PointeeBaseInfo, PointeeTBAAInfo,
- /* forPointeeType= */ true);
+ CharUnits Align = CGM.getNaturalTypeAlignment(
+ RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
+ /* forPointeeType= */ true);
return Address(Load, Align);
}
@@ -2347,9 +2456,9 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(),
- BaseInfo, TBAAInfo,
- /*forPointeeType=*/true));
+ return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
+ BaseInfo, TBAAInfo,
+ /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2397,13 +2506,14 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
- const FunctionDecl *FD) {
+ GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (FD->hasAttr<WeakRefAttr>()) {
ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
return aliasee.getPointer();
}
- llvm::Constant *V = CGM.GetAddrOfFunction(FD);
+ llvm::Constant *V = CGM.GetAddrOfFunction(GD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -2420,9 +2530,10 @@ static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
return V;
}
-static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
- llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
+ GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment,
AlignmentSource::Decl);
@@ -2552,10 +2663,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
} else {
// Should we be using the alignment of the constant pointer we emitted?
CharUnits Alignment =
- getNaturalTypeAlignment(E->getType(),
- /* BaseInfo= */ nullptr,
- /* TBAAInfo= */ nullptr,
- /* forPointeeType= */ true);
+ CGM.getNaturalTypeAlignment(E->getType(),
+ /* BaseInfo= */ nullptr,
+ /* TBAAInfo= */ nullptr,
+ /* forPointeeType= */ true);
Addr = Address(Val, Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
@@ -2689,6 +2800,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const auto *BD = dyn_cast<BindingDecl>(ND))
return EmitLValue(BD->getBinding());
+ // We can form DeclRefExprs naming GUID declarations when reconstituting
+ // non-type template parameters into expressions.
+ if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
+ return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
+ AlignmentSource::Decl);
+
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -2779,7 +2896,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
- std::string Name = SL->getString();
+ std::string Name = std::string(SL->getString());
if (!Name.empty()) {
unsigned Discriminator =
CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
@@ -2788,7 +2905,8 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
} else {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ auto C =
+ CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
}
@@ -2918,7 +3036,8 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
FilenameString = llvm::sys::path::filename(FilenameString);
}
- auto FilenameGV = CGM.GetAddrOfConstantCString(FilenameString, ".src");
+ auto FilenameGV =
+ CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
Filename = FilenameGV.getPointer();
@@ -3665,6 +3784,23 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
return LV;
}
+LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
+ assert(
+ !E->isIncomplete() &&
+ "incomplete matrix subscript expressions should be rejected during Sema");
+ LValue Base = EmitLValue(E->getBase());
+ llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
+ llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
+ llvm::Value *NumRows = Builder.getIntN(
+ RowIdx->getType()->getScalarSizeInBits(),
+ E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+ llvm::Value *FinalIdx =
+ Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
+ return LValue::MakeMatrixElt(
+ MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
+ E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
+}
+
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
LValueBaseInfo &BaseInfo,
TBAAAccessInfo &TBAAInfo,
@@ -3695,8 +3831,8 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
}
LValueBaseInfo TypeBaseInfo;
TBAAAccessInfo TypeTBAAInfo;
- CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &TypeBaseInfo,
- &TypeTBAAInfo);
+ CharUnits Align =
+ CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
@@ -3713,7 +3849,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
else
ResultExprTy = BaseTy->getPointeeType();
llvm::Value *Idx = nullptr;
- if (IsLowerBound || E->getColonLoc().isInvalid()) {
+ if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
// Requesting lower bound or upper bound, but without provided length and
// without ':' symbol for the default length -> length = 1.
// Idx = LowerBound ?: 0;
@@ -4020,17 +4156,17 @@ static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
-static Address emitPreserveStructAccess(CodeGenFunction &CGF, Address base,
- const FieldDecl *field) {
+static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
+ Address addr, const FieldDecl *field) {
const RecordDecl *rec = field->getParent();
- llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateRecordType(
- CGF.getContext().getRecordType(rec), rec->getLocation());
+ llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
+ base.getType(), rec->getLocation());
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
return CGF.Builder.CreatePreserveStructAccessIndex(
- base, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
+ addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
@@ -4154,8 +4290,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
if (IsInPreservedAIRegion ||
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
// Remember the original union field index
- llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
+ rec->getLocation());
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
@@ -4172,7 +4308,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
addr = emitAddrOfFieldStorage(*this, addr, field);
else
// Remember the original struct field index
- addr = emitPreserveStructAccess(*this, addr, field);
+ addr = emitPreserveStructAccess(*this, base, addr, field);
}
// If this is a reference field, load the reference right now.
@@ -4248,6 +4384,14 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ if (!getLangOpts().CPlusPlus)
+ if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+ pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
+ E->getType(), getDestroyer(DtorKind),
+ DtorKind & EHCleanup);
+
return Result;
}
@@ -4295,6 +4439,16 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
// If the true case is live, we need to track its region.
if (CondExprBool)
incrementProfileCounter(expr);
+ // If a throw expression we emit it and return an undefined lvalue
+ // because it can't be used.
+ if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
+ EmitCXXThrowExpr(ThrowExpr);
+ llvm::Type *Ty =
+ llvm::PointerType::getUnqual(ConvertType(dead->getType()));
+ return MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ dead->getType());
+ }
return EmitLValue(live);
}
}
@@ -4620,7 +4774,8 @@ RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}
-static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
+static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (auto builtinID = FD->getBuiltinID()) {
// Replaceable builtin provide their own implementation of a builtin. Unless
@@ -4632,8 +4787,8 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
return CGCallee::forBuiltin(builtinID, FD);
}
- llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
- return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
+ llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ return CGCallee::forDirect(calleePtr, GD);
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
@@ -4774,7 +4929,7 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
ConvertType(E->getType()));
}
@@ -5019,7 +5174,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
// to the function type.
if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
- CalleeTy = CalleeTy->getPointerTo();
+ int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
+ CalleeTy = CalleeTy->getPointerTo(AS);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
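
A rough sketch (not taken from this patch) of the source form the new EmitMatrixSubscriptExpr lowers; the -fenable-matrix extension and the names m4x3_t/get are illustrative assumptions:

typedef float m4x3_t __attribute__((matrix_type(4, 3))); // 4 rows, 3 columns

float get(m4x3_t m, unsigned r, unsigned c) {
  // Lowered as a load of the flattened <12 x float> value followed by an
  // extractelement at index c * 4 + r, matching the FinalIdx computation
  // (ColIdx * NumRows + RowIdx) above.
  return m[r][c];
}
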
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 8de609a2ccd9..fb96d70732e8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -126,6 +127,11 @@ public:
}
void VisitConstantExpr(ConstantExpr *E) {
+ if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+ CGF.EmitAggregateStore(Result, Dest.getAddress(),
+ E->getType().isVolatileQualified());
+ return;
+ }
return Visit(E->getSubExpr());
}
@@ -249,7 +255,7 @@ void AggExprEmitter::withReturnValueSlot(
const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
QualType RetTy = E->getType();
bool RequiresDestruction =
- Dest.isIgnored() &&
+ !Dest.isExternallyDestructed() &&
RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
// If it makes no observable difference, save a memcpy + temporary.
@@ -287,10 +293,8 @@ void AggExprEmitter::withReturnValueSlot(
}
RValue Src =
- EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));
-
- if (RequiresDestruction)
- CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);
+ EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
+ Dest.isExternallyDestructed()));
if (!UseTemp)
return;
@@ -659,22 +663,32 @@ AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
}
AggValueSlot Slot = EnsureSlot(E->getType());
+
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ bool Destruct =
+ !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
+ if (Destruct)
+ Slot.setExternallyDestructed();
+
CGF.EmitAggExpr(E->getInitializer(), Slot);
+
+ if (Destruct)
+ if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+ CGF.pushLifetimeExtendedDestroy(
+ CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
+ CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
-static Expr *findPeephole(Expr *op, CastKind kind) {
- while (true) {
- op = op->IgnoreParens();
- if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
- if (castE->getCastKind() == kind)
- return castE->getSubExpr();
- if (castE->getCastKind() == CK_NoOp)
- continue;
- }
- return nullptr;
+static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
+ op = op->IgnoreParenNoopCasts(ctx);
+ if (auto castE = dyn_cast<CastExpr>(op)) {
+ if (castE->getCastKind() == kind)
+ return castE->getSubExpr();
}
+ return nullptr;
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
@@ -763,7 +777,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
(isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
// These two cases are reverses of each other; try to peephole them.
- if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
+ if (Expr *op =
+ findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
E->getType()) &&
"peephole significantly changed types?");
@@ -813,8 +828,19 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// If we're loading from a volatile type, force the destination
// into existence.
if (E->getSubExpr()->getType().isVolatileQualified()) {
+ bool Destruct =
+ !Dest.isExternallyDestructed() &&
+ E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
+ if (Destruct)
+ Dest.setExternallyDestructed();
EnsureDest(E->getType());
- return Visit(E->getSubExpr());
+ Visit(E->getSubExpr());
+
+ if (Destruct)
+ CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
+ E->getType());
+
+ return;
}
LLVM_FALLTHROUGH;
@@ -1328,7 +1354,6 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope cleanups(CGF);
Visit(E->getSubExpr());
}
@@ -1923,6 +1948,18 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
}
}
+ if (getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType()) {
+ if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
+ Src))
+ return;
+ } else if (Ty->isCUDADeviceBuiltinTextureType()) {
+ if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
+ Src))
+ return;
+ }
+ }
+
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in any way the storage of the first
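
A rough sketch (not taken from this patch) of the kind of expression the new VisitConstantExpr path short-circuits: a ConstantExpr that carries an APValue result, for example a C++20 immediate invocation, is stored directly as a constant aggregate instead of re-visiting its sub-expression. C++20 and the names Pair/make_pair are illustrative assumptions:

struct Pair { int a, b; };

consteval Pair make_pair() { return {1, 2}; }

void use() {
  Pair p = make_pair(); // initialized from the evaluated APValue; no call emitted
}
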
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index a68d5109baf8..d59aa6ce0fb9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1638,6 +1638,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+ // Set !heapallocsite metadata on the call to operator new.
+ if (getDebugInfo())
+ if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
+ getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
+ E->getExprLoc());
+
// If this was a call to a global replaceable allocation function that does
// not take an alignment argument, the allocator is known to produce
// storage that's suitably aligned for any object that fits, up to a known
@@ -1867,10 +1873,13 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
}
/// Emit the code for deleting a single object.
-static void EmitObjectDelete(CodeGenFunction &CGF,
+/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
+/// if not.
+static bool EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
- QualType ElementType) {
+ QualType ElementType,
+ llvm::BasicBlock *UnconditionalDeleteBlock) {
// C++11 [expr.delete]p3:
// If the static type of the object to be deleted is different from its
// dynamic type, the static type shall be a base class of the dynamic type
@@ -1917,7 +1926,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (UseVirtualCall) {
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
Dtor);
- return;
+ return false;
}
}
}
@@ -1952,7 +1961,15 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
}
+ // When optimizing for size, call 'operator delete' unconditionally.
+ if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
+ CGF.EmitBlock(UnconditionalDeleteBlock);
+ CGF.PopCleanupBlock();
+ return true;
+ }
+
CGF.PopCleanupBlock();
+ return false;
}
namespace {
@@ -2029,6 +2046,12 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Address Ptr = EmitPointerWithAlignment(Arg);
// Null check the pointer.
+ //
+ // We could avoid this null check if we can determine that the object
+ // destruction is trivial and doesn't require an array cookie; we can
+ // unconditionally perform the operator delete call in that case. For now, we
+ // assume that deleted pointers are null rarely enough that it's better to
+ // keep the branch. This might be worth revisiting for a -O0 code size win.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
@@ -2074,11 +2097,11 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
+ EmitBlock(DeleteEnd);
} else {
- EmitObjectDelete(*this, E, Ptr, DeleteTy);
+ if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
+ EmitBlock(DeleteEnd);
}
-
- EmitBlock(DeleteEnd);
}
static bool isGLValueFromPointerDeref(const Expr *E) {
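
A rough sketch (not taken from this patch) of the code shape affected by the OptimizeSize > 1 change in EmitObjectDelete (roughly the -Oz level); Widget is an illustrative name:

struct Widget { ~Widget(); };

void drop(Widget *w) {
  delete w; // the destructor call stays behind the null check, but the
            // 'operator delete' call is now emitted unconditionally, which
            // is safe because deleting a null pointer is a no-op
}
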
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
index f7a4e9e94712..a49817898ae3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
@@ -13,6 +13,7 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
@@ -97,11 +98,14 @@ public:
}
ComplexPairTy VisitStmt(Stmt *S) {
- S->dump(CGF.getContext().getSourceManager());
+ S->dump(llvm::errs(), CGF.getContext());
llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
+ if (llvm::Constant *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E))
+ return ComplexPairTy(Result->getAggregateElement(0U),
+ Result->getAggregateElement(1U));
return Visit(E->getSubExpr());
}
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
@@ -222,7 +226,6 @@ public:
return Visit(DIE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
ComplexPairTy Vals = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression
@@ -431,8 +434,10 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
// C99 6.3.1.6: When a value of complex type is converted to another
// complex type, both the real and imaginary parts follow the conversion
// rules for the corresponding real types.
- Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
- Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
+ if (Val.first)
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
+ if (Val.second)
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
return Val;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index 46ed90a20264..c6b2930faece 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -318,12 +318,17 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
CharUnits Offset = Offsets[Index];
if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
+ // Expand the sequence into its contained elements.
+ // FIXME: This assumes vector elements are byte-sized.
replace(Elems, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
[&](unsigned Op) { return CA->getOperand(Op); }));
- if (auto *Seq = dyn_cast<llvm::SequentialType>(CA->getType())) {
+ if (isa<llvm::ArrayType>(CA->getType()) ||
+ isa<llvm::VectorType>(CA->getType())) {
// Array or vector.
- CharUnits ElemSize = getSize(Seq->getElementType());
+ llvm::Type *ElemTy =
+ llvm::GetElementPtrInst::getTypeAtIndex(CA->getType(), (uint64_t)0);
+ CharUnits ElemSize = getSize(ElemTy);
replace(
Offsets, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
@@ -344,6 +349,8 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
+ // Expand the sequence into its contained elements.
+ // FIXME: This assumes vector elements are byte-sized.
// FIXME: If possible, split into two ConstantDataSequentials at Hint.
CharUnits ElemSize = getSize(CDS->getElementType());
replace(Elems, Index, Index + 1,
@@ -359,6 +366,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::ConstantAggregateZero>(C)) {
+ // Split into two zeros at the hinted offset.
CharUnits ElemSize = getSize(C);
assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
replace(Elems, Index, Index + 1,
@@ -368,6 +376,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::UndefValue>(C)) {
+ // Drop undef; it doesn't contribute to the final layout.
replace(Elems, Index, Index + 1, {});
replace(Offsets, Index, Index + 1, {});
return true;
@@ -589,19 +598,21 @@ bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
bool ConstStructBuilder::AppendBitField(
const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI,
bool AllowOverwrite) {
- uint64_t FieldSize = Field->getBitWidthValue(CGM.getContext());
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
llvm::APInt FieldValue = CI->getValue();
// Promote the size of FieldValue if necessary
// FIXME: This should never occur, but currently it can because initializer
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
- if (FieldSize > FieldValue.getBitWidth())
- FieldValue = FieldValue.zext(FieldSize);
+ if (Info.Size > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(Info.Size);
// Truncate the size of FieldValue to the bit field size.
- if (FieldSize < FieldValue.getBitWidth())
- FieldValue = FieldValue.trunc(FieldSize);
+ if (Info.Size < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(Info.Size);
return Builder.addBits(FieldValue,
CGM.getContext().toBits(StartOffset) + FieldOffset,
@@ -766,7 +777,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
// Add a vtable pointer, if we need one and it hasn't already been added.
- if (CD->isDynamicClass() && !IsPrimaryBase) {
+ if (Layout.hasOwnVFPtr()) {
llvm::Constant *VTableAddressPoint =
CGM.getCXXABI().getVTableAddressPointForConstExpr(
BaseSubobject(CD, Offset), VTableClass);
@@ -1000,6 +1011,8 @@ public:
}
llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+ if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
+ return Result;
return Visit(CE->getSubExpr(), T);
}
@@ -1167,9 +1180,7 @@ public:
}
llvm::Constant *VisitExprWithCleanups(ExprWithCleanups *E, QualType T) {
- if (!E->cleanupsHaveSideEffects())
- return Visit(E->getSubExpr(), T);
- return nullptr;
+ return Visit(E->getSubExpr(), T);
}
llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E,
@@ -1269,19 +1280,7 @@ public:
if (!E->getConstructor()->isTrivial())
return nullptr;
- // FIXME: We should not have to call getBaseElementType here.
- const auto *RT =
- CGM.getContext().getBaseElementType(Ty)->castAs<RecordType>();
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- // If the class doesn't have a trivial destructor, we can't emit it as a
- // constant expr.
- if (!RD->hasTrivialDestructor())
- return nullptr;
-
- // Only copy and default constructors can be trivial.
-
-
+ // Only default and copy/move constructors can be trivial.
if (E->getNumArgs()) {
assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
assert(E->getConstructor()->isCopyOrMoveConstructor() &&
@@ -1361,6 +1360,20 @@ ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
return validateAndPopAbstract(C, state);
}
+llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
+ if (!CE->hasAPValueResult())
+ return nullptr;
+ const Expr *Inner = CE->getSubExpr()->IgnoreImplicit();
+ QualType RetType;
+ if (auto *Call = dyn_cast<CallExpr>(Inner))
+ RetType = Call->getCallReturnType(CGF->getContext());
+ else if (auto *Ctor = dyn_cast<CXXConstructExpr>(Inner))
+ RetType = Ctor->getType();
+ llvm::Constant *Res =
+ emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
+ return Res;
+}
+
llvm::Constant *
ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
auto state = pushAbstract();
@@ -1769,7 +1782,6 @@ private:
ConstantLValue VisitCallExpr(const CallExpr *E);
ConstantLValue VisitBlockExpr(const BlockExpr *E);
ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
- ConstantLValue VisitCXXUuidofExpr(const CXXUuidofExpr *E);
ConstantLValue VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E);
@@ -1884,6 +1896,9 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(D))
+ return CGM.GetAddrOfMSGuidDecl(GD);
+
return nullptr;
}
@@ -1904,6 +1919,8 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
ConstantLValue
ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
+ if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(E))
+ return Result;
return Visit(E->getSubExpr());
}
@@ -1994,11 +2011,6 @@ ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
}
ConstantLValue
-ConstantLValueEmitter::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return CGM.GetAddrOfUuidDescriptor(E);
-}
-
-ConstantLValue
ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
assert(E->getStorageDuration() == SD_Static);
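
A rough sketch (not taken from this patch) of what AppendBitField packs: each initializer is zero-extended or truncated to the bit-field width (now taken from the CGBitFieldInfo) and placed into the emitted constant at its bit offset. Flags is an illustrative name, and the packed value shown assumes a typical little-endian layout:

struct Flags {
  unsigned mode : 3;
  unsigned level : 5;
};

Flags f = {5, 17}; // emitted as a single i8 constant: 5 | (17 << 3) == 141
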
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index de5c3a03fb68..6131f97995dc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
+#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>
@@ -129,11 +130,10 @@ struct BinOpInfo {
return true;
}
- /// Check if either operand is a fixed point type or integer type, with at
- /// least one being a fixed point type. In any case, this
- /// operation did not follow usual arithmetic conversion and both operands may
- /// not be the same.
- bool isFixedPointBinOp() const {
+ /// Check if at least one operand is a fixed point type. In such cases, this
+ /// operation did not follow usual arithmetic conversion and both operands
+ /// might not be of the same type.
+ bool isFixedPointOp() const {
// We cannot simply check the result type since comparison operations return
// an int.
if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
@@ -141,6 +141,8 @@ struct BinOpInfo {
QualType RHSType = BinOp->getRHS()->getType();
return LHSType->isFixedPointType() || RHSType->isFixedPointType();
}
+ if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
+ return UnOp->getSubExpr()->getType()->isFixedPointType();
return false;
}
};
@@ -213,22 +215,6 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
(2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
-/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
-static void updateFastMathFlags(llvm::FastMathFlags &FMF,
- FPOptions FPFeatures) {
- FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
-}
-
-/// Propagate fast-math flags from \p Op to the instruction in \p V.
-static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
- if (auto *I = dyn_cast<llvm::Instruction>(V)) {
- llvm::FastMathFlags FMF = I->getFastMathFlags();
- updateFastMathFlags(FMF, Op.FPFeatures);
- I->setFastMathFlags(FMF);
- }
- return V;
-}
-
class ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
@@ -297,7 +283,7 @@ public:
Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
- CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
+ CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
@@ -427,12 +413,18 @@ public:
}
Value *VisitStmt(Stmt *S) {
- S->dump(CGF.getContext().getSourceManager());
+ S->dump(llvm::errs(), CGF.getContext());
llvm_unreachable("Stmt can't have complex result type!");
}
Value *VisitExpr(Expr *S);
Value *VisitConstantExpr(ConstantExpr *E) {
+ if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+ if (E->isGLValue())
+ return CGF.Builder.CreateLoad(Address(
+ Result, CGF.getContext().getTypeAlignInChars(E->getType())));
+ return Result;
+ }
return Visit(E->getSubExpr());
}
Value *VisitParenExpr(ParenExpr *PE) {
@@ -551,11 +543,17 @@ public:
}
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
Value *VisitMemberExpr(MemberExpr *E);
Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
+ // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
+ // literals aren't l-values in C++. We do so simply because that's the
+ // cleanest way to handle compound literals in C++.
+ // See the discussion here: https://reviews.llvm.org/D64464
return EmitLoadOfLValue(E);
}
@@ -732,15 +730,34 @@ public:
}
}
+ if (Ops.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ // We need to check the types of the operands of the operator to get the
+ // correct matrix dimensions.
+ auto *BO = cast<BinaryOperator>(Ops.E);
+ auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
+ BO->getLHS()->getType().getCanonicalType());
+ auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
+ BO->getRHS()->getType().getCanonicalType());
+ if (LHSMatTy && RHSMatTy)
+ return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
+ LHSMatTy->getNumColumns(),
+ RHSMatTy->getNumColumns());
+ return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
+ }
+
if (Ops.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), Ops))
return EmitOverflowCheckedBinOp(Ops);
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
- Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
- return propagateFMFlags(V, Ops);
+ // Preserve the old values
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
}
+ if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
/// Create a binary op that checks for overflow.
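For the matrix extension, the dimensions come from the operand types rather than the result type: an MxK by KxN multiply yields MxN, so the row and inner-dimension counts are read from the LHS type and the column count from the RHS type. A rough sketch of that bookkeeping over plain column-major buffers (std::vector here, not the MatrixBuilder API):

    #include <cassert>
    #include <iostream>
    #include <vector>

    // Column-major flattening, matching the layout Clang's matrix extension
    // uses: element (row r, column c) of an R x C matrix lives at c * R + r.
    using Matrix = std::vector<double>;

    // MxK * KxN -> MxN; as in the hunk above, the dimensions come from the
    // operand types (rows/columns of the LHS, columns of the RHS).
    Matrix multiply(const Matrix &A, const Matrix &B,
                    unsigned M, unsigned K, unsigned N) {
      assert(A.size() == M * K && B.size() == K * N);
      Matrix C(M * N, 0.0);
      for (unsigned n = 0; n < N; ++n)
        for (unsigned k = 0; k < K; ++k)
          for (unsigned m = 0; m < M; ++m)
            C[n * M + m] += A[k * M + m] * B[n * K + k];
      return C;
    }

    int main() {
      Matrix A{1, 2, 3, 4};  // 2x2, columns (1,2) and (3,4)
      Matrix B{5, 6, 7, 8};  // 2x2, columns (5,6) and (7,8)
      Matrix C = multiply(A, B, 2, 2, 2);
      std::cout << C[0] << ' ' << C[2] << '\n'; // first row: 23 31
    }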
@@ -752,6 +769,11 @@ public:
llvm::Value *Zero,bool isDiv);
// Common helper for getting how wide LHS of shift is.
static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
+
+ // Used to constrain shift amounts for OpenCL: mask when the bit width is a
+ // power of two, URem otherwise.
+ Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
+
Value *EmitDiv(const BinOpInfo &Ops);
Value *EmitRem(const BinOpInfo &Ops);
Value *EmitAdd(const BinOpInfo &Ops);
@@ -1301,7 +1323,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
"Splatted expr doesn't match with vector element type?");
// Splat the element across to all elements
- unsigned NumElements = DstTy->getVectorNumElements();
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Src, "splat");
}
@@ -1319,8 +1341,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// short or half vector.
// Source and destination are both expected to be vectors.
- llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
- llvm::Type *DstElementTy = DstTy->getVectorElementType();
+ llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
+ llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
(void)DstElementTy;
assert(((SrcElementTy->isIntegerTy() &&
@@ -1626,8 +1648,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// n = extract mask i
// x = extract val n
// newv = insert newv, x, i
- llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
- MTy->getNumElements());
+ auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
@@ -1642,18 +1664,17 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
- SmallVector<llvm::Constant*, 32> indices;
+ SmallVector<int, 32> Indices;
for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
// Check for -1 and output it as undef in the IR.
if (Idx.isSigned() && Idx.isAllOnesValue())
- indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Indices.push_back(-1);
else
- indices.push_back(Builder.getInt32(Idx.getZExtValue()));
+ Indices.push_back(Idx.getZExtValue());
}
- Value *SV = llvm::ConstantVector::get(indices);
- return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+ return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
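Shuffle masks move from vectors of i32 constants to plain arrays of int, with -1 standing in for an undefined lane where an UndefValue was used before. A small sketch of how such a mask selects lanes from two sources; treating -1 lanes as zero is a simplification, since real IR leaves them unspecified:

    #include <iostream>
    #include <vector>

    // Apply a shufflevector-style mask to two equally sized sources. Indices
    // 0..N-1 select from A, N..2N-1 select from B, and -1 marks an undefined
    // lane (modelled here as a default value).
    template <typename T>
    std::vector<T> shuffle(const std::vector<T> &A, const std::vector<T> &B,
                           const std::vector<int> &Mask) {
      const int N = static_cast<int>(A.size());
      std::vector<T> Out;
      Out.reserve(Mask.size());
      for (int Idx : Mask) {
        if (Idx < 0)
          Out.push_back(T{});        // undef lane
        else if (Idx < N)
          Out.push_back(A[Idx]);     // lane from the first operand
        else
          Out.push_back(B[Idx - N]); // lane from the second operand
      }
      return Out;
    }

    int main() {
      std::vector<int> A{10, 11, 12, 13}, B{20, 21, 22, 23};
      for (int V : shuffle(A, B, {0, 5, 2, -1}))
        std::cout << V << ' ';       // 10 21 12 0
      std::cout << '\n';
    }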
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
@@ -1686,8 +1707,8 @@ Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
assert(DstTy->isVectorTy() &&
"ConvertVector destination IR type must be a vector");
- llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
- *DstEltTy = DstTy->getVectorElementType();
+ llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
+ *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
if (DstEltType->isBooleanType()) {
assert((SrcEltTy->isFloatingPointTy() ||
@@ -1768,22 +1789,34 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
-static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
- unsigned Off, llvm::Type *I32Ty) {
+Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // The base must be a matrix, and the row and column indices must be
+ // integer values.
+ Value *RowIdx = Visit(E->getRowIdx());
+ Value *ColumnIdx = Visit(E->getColumnIdx());
+ Value *Matrix = Visit(E->getBase());
+
+ // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateExtractElement(
+ Matrix, RowIdx, ColumnIdx,
+ E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+}
+
+static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off) {
int MV = SVI->getMaskValue(Idx);
if (MV == -1)
- return llvm::UndefValue::get(I32Ty);
- return llvm::ConstantInt::get(I32Ty, Off+MV);
+ return -1;
+ return Off + MV;
}
-static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
- if (C->getBitWidth() != 32) {
- assert(llvm::ConstantInt::isValueValidForType(I32Ty,
- C->getZExtValue()) &&
- "Index operand too large for shufflevector mask!");
- return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
- }
- return C;
+static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
+ assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
+ "Index operand too large for shufflevector mask!");
+ return C->getZExtValue();
}
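VisitMatrixSubscriptExpr lowers m[row][col] to an element extraction; MatrixBuilder derives the flat position from the row index, the column index and the number of rows of the base type, matching the column-major layout. A minimal sketch of that index computation (flatIndex is an illustrative helper, not the MatrixBuilder API):

    #include <iostream>
    #include <vector>

    // Flat index of element (Row, Col) in a column-major NumRows x NumCols
    // matrix, mirroring what MatrixBuilder::CreateExtractElement computes.
    unsigned flatIndex(unsigned Row, unsigned Col, unsigned NumRows) {
      return Col * NumRows + Row;
    }

    int main() {
      // A 2x3 matrix stored column by column: columns (1,2), (3,4), (5,6).
      std::vector<int> M{1, 2, 3, 4, 5, 6};
      const unsigned NumRows = 2;
      std::cout << M[flatIndex(1, 2, NumRows)] << '\n'; // row 1, column 2 -> 6
    }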
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
@@ -1820,7 +1853,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *IE = E->getInit(i);
Value *Init = Visit(IE);
- SmallVector<llvm::Constant*, 16> Args;
+ SmallVector<int, 16> Args;
llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
@@ -1838,7 +1871,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undef -> shuffle (src, undef)
// shufflemask must use an i32
Args.push_back(getAsInt32(C, CGF.Int32Ty));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, -1);
LHS = EI->getVectorOperand();
RHS = V;
@@ -1847,17 +1880,16 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undefshuffle && size match -> shuffle (v, src)
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
- Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(getMaskElt(SVV, j, 0));
+ Args.push_back(ResElts + C->getZExtValue());
+ Args.resize(ResElts, -1);
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
VIsUndefShuffle = false;
}
if (!Args.empty()) {
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ V = Builder.CreateShuffleVector(LHS, RHS, Args);
++CurIdx;
continue;
}
@@ -1886,15 +1918,14 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// If the current vector initializer is a shuffle with undef, merge
// this shuffle directly into it.
if (VIsUndefShuffle) {
- Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
- CGF.Int32Ty));
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
} else {
- Args.push_back(Builder.getInt32(j));
+ Args.push_back(j);
}
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
- Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(getMaskElt(SVI, j, Offset));
+ Args.resize(ResElts, -1);
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -1907,26 +1938,24 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// to the vector initializer into V.
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(Builder.getInt32(j));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
- Mask, "vext");
+ Args.push_back(j);
+ Args.resize(ResElts, -1);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), Args,
+ "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(Builder.getInt32(j));
+ Args.push_back(j);
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(Builder.getInt32(j+Offset));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(j + Offset);
+ Args.resize(ResElts, -1);
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
// merging subsequent shuffles into this one.
if (CurIdx == 0)
std::swap(V, Init);
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
VIsUndefShuffle = isa<llvm::UndefValue>(Init);
CurIdx += InitElts;
}
@@ -2040,11 +2069,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
}
- // Update heapallocsite metadata when there is an explicit cast.
- if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
- if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
- CGF.getDebugInfo()->
- addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
+ // Update heapallocsite metadata when there is an explicit pointer cast.
+ if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
+ QualType PointeeType = DestTy->getPointeeType();
+ if (!PointeeType.isNull())
+ CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
+ CE->getExprLoc());
+ }
+ }
return Builder.CreateBitCast(Src, DstTy);
}
@@ -2214,7 +2247,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
// Splat the element across to all elements
- unsigned NumElements = DstTy->getVectorNumElements();
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
@@ -2315,7 +2348,6 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
Value *V = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression
@@ -2329,13 +2361,14 @@ Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
//===----------------------------------------------------------------------===//
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
- llvm::Value *InVal, bool IsInc) {
+ llvm::Value *InVal, bool IsInc,
+ FPOptions FPFeatures) {
BinOpInfo BinOp;
BinOp.LHS = InVal;
BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
BinOp.Ty = E->getType();
BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
- // FIXME: once UnaryOperator carries FPFeatures, copy it here.
+ BinOp.FPFeatures = FPFeatures;
BinOp.E = E;
return BinOp;
}
@@ -2355,7 +2388,8 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
- return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
+ return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
+ E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
}
llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
@@ -2501,8 +2535,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
} else if (E->canOverflow() && type->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
- value =
- EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
+ value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
+ E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
} else {
llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
@@ -2613,6 +2647,36 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
}
+ // Fixed-point types.
+ } else if (type->isFixedPointType()) {
+ // Fixed-point types are tricky. In some cases, it isn't possible to
+ // represent a 1 or a -1 in the type at all. Piggyback off of
+ // EmitFixedPointBinOp to avoid having to reimplement saturation.
+ BinOpInfo Info;
+ Info.E = E;
+ Info.Ty = E->getType();
+ Info.Opcode = isInc ? BO_Add : BO_Sub;
+ Info.LHS = value;
+ Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
+ // If the type is signed, it's better to represent this as +(-1) or -(-1),
+ // since -1 is guaranteed to be representable.
+ if (type->isSignedFixedPointType()) {
+ Info.Opcode = isInc ? BO_Sub : BO_Add;
+ Info.RHS = Builder.CreateNeg(Info.RHS);
+ }
+ // Now, convert from our invented integer literal to the type of the unary
+ // op. This will upscale and saturate if necessary. This value can become
+ // undef in some cases.
+ FixedPointSemantics SrcSema =
+ FixedPointSemantics::GetIntegerSemantics(value->getType()
+ ->getScalarSizeInBits(),
+ /*IsSigned=*/true);
+ FixedPointSemantics DstSema =
+ CGF.getContext().getFixedPointSemantics(Info.Ty);
+ Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema,
+ E->getExprLoc());
+ value = EmitFixedPointBinOp(Info);
+
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
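For saturated fixed-point types a literal 1 may not be representable (a signed _Fract, for example, covers [-1, 1)), so the increment is rewritten for signed types as x - (-1), and the constant is converted into the operand's fixed-point semantics before the saturating operation. A toy sketch of the idea on a Q0.7-style signed fraction; the SatFract type and its scaling are illustrative only:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Toy signed fraction in Q0.7: value = Raw / 128, range [-1, 1 - 1/128].
    // A literal +1 is not representable, but -1 (Raw = -128) is, which is why
    // the increment is emitted as x - (-1) for signed fixed-point types.
    struct SatFract {
      int8_t Raw;
    };

    SatFract satSub(SatFract A, SatFract B) {
      int Wide = int(A.Raw) - int(B.Raw); // cannot overflow in int
      Wide = std::clamp(Wide, -128, 127); // saturate to the type's range
      return SatFract{static_cast<int8_t>(Wide)};
    }

    SatFract increment(SatFract X) {
      SatFract MinusOne{-128};            // exactly representable
      return satSub(X, MinusOne);         // x - (-1) == x + 1, saturated
    }

    int main() {
      SatFract X{100};                            // 100/128 = 0.78125
      std::cout << int(increment(X).Raw) << '\n'; // saturates to 127
    }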
@@ -2672,7 +2736,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
- // FIXME: once UnaryOperator carries FPFeatures, copy it here.
+ BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -2685,13 +2749,17 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Perform vector logical not on comparison with zero vector.
- if (E->getType()->isExtVectorType()) {
+ if (E->getType()->isVectorType() &&
+ E->getType()->castAs<VectorType>()->getVectorKind() ==
+ VectorType::GenericVector) {
Value *Oper = Visit(E->getSubExpr());
Value *Zero = llvm::Constant::getNullValue(Oper->getType());
Value *Result;
- if (Oper->getType()->isFPOrFPVectorTy())
+ if (Oper->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
- else
+ } else
Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
}
@@ -2892,7 +2960,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
- Result.FPFeatures = E->getFPFeatures();
+ Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
Result.E = E;
return Result;
}
@@ -2912,7 +2980,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
- OpInfo.FPFeatures = E->getFPFeatures();
+ OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
@@ -3100,7 +3168,9 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
- llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ llvm::Value *Val;
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
+ Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
if (CGF.getLangOpts().OpenCL &&
!CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
// OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
@@ -3116,6 +3186,8 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
return Val;
}
+ else if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
@@ -3365,7 +3437,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
-static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
@@ -3377,12 +3449,23 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
if (negAdd)
Addend = Builder.CreateFNeg(Addend, "neg");
- Value *FMulAdd = Builder.CreateCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
- {MulOp0, MulOp1, Addend});
- MulOp->eraseFromParent();
+ Value *FMulAdd = nullptr;
+ if (Builder.getIsFPConstrained()) {
+ assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
+ "Only constrained operation should be created when Builder is in FP "
+ "constrained mode");
+ FMulAdd = Builder.CreateConstrainedFPCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
+ Addend->getType()),
+ {MulOp0, MulOp1, Addend});
+ } else {
+ FMulAdd = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ {MulOp0, MulOp1, Addend});
+ }
+ MulOp->eraseFromParent();
- return FMulAdd;
+ return FMulAdd;
}
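When contraction is allowed, a*b+c is fused into a single fmuladd call, and under constrained floating point into the experimental_constrained_fmuladd intrinsic. At the source level, fusion means the product and the sum are evaluated with a single rounding, which std::fma makes explicit:

    #include <cmath>
    #include <cstdio>

    int main() {
      // a*b is exactly 1 - 2^-54, which rounds up to 1.0 in double; the fused
      // form keeps the exact product and feeds it into the add unrounded.
      double a = 1.0 + 0x1.0p-27, b = 1.0 - 0x1.0p-27, c = -1.0;
      double prod = a * b;              // rounded product: 1.0
      double separate = prod + c;       // 0.0
      double fused = std::fma(a, b, c); // -2^-54
      std::printf("%a %a\n", separate, fused); // typically 0x0p+0 and -0x1p-54
    }

Computing the product in a separate statement keeps the compiler from contracting it on its own, so the one-rounding versus two-rounding difference stays visible.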
// Check whether it would be legal to emit an fmuladd intrinsic call to
@@ -3417,6 +3500,19 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
}
+ if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
+ if (LHSBinOp->getIntrinsicID() ==
+ llvm::Intrinsic::experimental_constrained_fmul &&
+ LHSBinOp->use_empty())
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
+ if (RHSBinOp->getIntrinsicID() ==
+ llvm::Intrinsic::experimental_constrained_fmul &&
+ RHSBinOp->use_empty())
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+
return nullptr;
}
@@ -3440,21 +3536,26 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
+ if (op.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateAdd(op.LHS, op.RHS);
+ }
+
if (op.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
// Try to form an fmuladd.
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
return FMulAdd;
- Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
- return propagateFMFlags(V, op);
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateAdd(op.LHS, op.RHS, "add");
@@ -3466,14 +3567,27 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
using llvm::APSInt;
using llvm::ConstantInt;
- const auto *BinOp = cast<BinaryOperator>(op.E);
-
- // The result is a fixed point type and at least one of the operands is fixed
- // point while the other is either fixed point or an int. This resulting type
- // should be determined by Sema::handleFixedPointConversions().
+ // This is either a binary operation where at least one of the operands is
+ // a fixed-point type, or a unary operation where the operand is a fixed-point
+ // type. The result type of a binary operation is determined by
+ // Sema::handleFixedPointConversions().
QualType ResultTy = op.Ty;
- QualType LHSTy = BinOp->getLHS()->getType();
- QualType RHSTy = BinOp->getRHS()->getType();
+ QualType LHSTy, RHSTy;
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
+ RHSTy = BinOp->getRHS()->getType();
+ if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
+ // For compound assignment, the effective type of the LHS at this point
+ // is the computation LHS type, not the actual LHS type, and the final
+ // result type is not the type of the expression but rather the
+ // computation result type.
+ LHSTy = CAO->getComputationLHSType();
+ ResultTy = CAO->getComputationResultType();
+ } else
+ LHSTy = BinOp->getLHS()->getType();
+ } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
+ LHSTy = UnOp->getSubExpr()->getType();
+ RHSTy = UnOp->getSubExpr()->getType();
+ }
ASTContext &Ctx = CGF.getContext();
Value *LHS = op.LHS;
Value *RHS = op.RHS;
@@ -3485,16 +3599,17 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert the operands to the full precision type.
Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
- // Perform the actual addition.
+ // Perform the actual operation.
Value *Result;
- switch (BinOp->getOpcode()) {
+ switch (op.Opcode) {
+ case BO_AddAssign:
case BO_Add: {
- if (ResultFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ if (CommonFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
? llvm::Intrinsic::sadd_sat
: llvm::Intrinsic::uadd_sat;
Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
@@ -3503,9 +3618,10 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
}
break;
}
+ case BO_SubAssign:
case BO_Sub: {
- if (ResultFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ if (CommonFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
? llvm::Intrinsic::ssub_sat
: llvm::Intrinsic::usub_sat;
Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
@@ -3514,6 +3630,32 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
}
break;
}
+ case BO_MulAssign:
+ case BO_Mul: {
+ llvm::Intrinsic::ID IID;
+ if (CommonFixedSema.isSaturated())
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix_sat
+ : llvm::Intrinsic::umul_fix_sat;
+ else
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix
+ : llvm::Intrinsic::umul_fix;
+ Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
+ {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+ break;
+ }
+ case BO_DivAssign:
+ case BO_Div: {
+ llvm::Intrinsic::ID IID;
+ if (CommonFixedSema.isSaturated())
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix_sat
+ : llvm::Intrinsic::udiv_fix_sat;
+ else
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix
+ : llvm::Intrinsic::udiv_fix;
+ Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
+ {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+ break;
+ }
case BO_LT:
return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
: Builder.CreateICmpULT(FullLHS, FullRHS);
@@ -3533,17 +3675,11 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
return Builder.CreateICmpEQ(FullLHS, FullRHS);
case BO_NE:
return Builder.CreateICmpNE(FullLHS, FullRHS);
- case BO_Mul:
- case BO_Div:
case BO_Shl:
case BO_Shr:
case BO_Cmp:
case BO_LAnd:
case BO_LOr:
- case BO_MulAssign:
- case BO_DivAssign:
- case BO_AddAssign:
- case BO_SubAssign:
case BO_ShlAssign:
case BO_ShrAssign:
llvm_unreachable("Found unimplemented fixed point binary operation");
@@ -3564,7 +3700,7 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert to the result type.
return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
}
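The new BO_Mul/BO_Div cases lower to the smul_fix/umul_fix and sdiv_fix/udiv_fix intrinsic families (or their _sat variants), passing the common scale as the third operand: conceptually the multiply computes (a*b) >> scale and the divide computes (a << scale) / b on widened values. A toy sketch of the unsaturated versions with a fixed 16-bit scale; it ignores saturation and the common-semantics selection and only shows where the scale enters:

    #include <cstdint>
    #include <iostream>

    // Toy fixed-point numbers: value = Raw / 2^Scale, no saturation handling.
    constexpr unsigned Scale = 16;

    int32_t fixMul(int32_t A, int32_t B) {
      // The raw product has scale 2*Scale; shift back down, as smul.fix does.
      return static_cast<int32_t>((int64_t(A) * int64_t(B)) >> Scale);
    }

    int32_t fixDiv(int32_t A, int32_t B) {
      // Pre-scale the dividend so the quotient keeps the same scale,
      // as sdiv.fix does.
      return static_cast<int32_t>((int64_t(A) << Scale) / int64_t(B));
    }

    int main() {
      int32_t Half = 1 << (Scale - 1); // 0.5
      int32_t Three = 3 << Scale;      // 3.0
      std::cout << fixMul(Half, Three) / double(1 << Scale) << '\n'; // 1.5
      std::cout << fixDiv(Three, Half) / double(1 << Scale) << '\n'; // 6
    }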
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
@@ -3585,20 +3721,25 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
+ if (op.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateSub(op.LHS, op.RHS);
+ }
+
if (op.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
// Try to form an fmuladd.
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
return FMulAdd;
- Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
- return propagateFMFlags(V, op);
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateSub(op.LHS, op.RHS, "sub");
@@ -3670,6 +3811,21 @@ Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}
+Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
+ const Twine &Name) {
+ llvm::IntegerType *Ty;
+ if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
+ Ty = cast<llvm::IntegerType>(VT->getElementType());
+ else
+ Ty = cast<llvm::IntegerType>(LHS->getType());
+
+ if (llvm::isPowerOf2_64(Ty->getBitWidth()))
+ return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
+
+ return Builder.CreateURem(
+ RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
+}
+
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
// LLVM requires the LHS and RHS to be the same type: promote or truncate the
// RHS to the same size as the LHS.
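OpenCL requires the shift amount to be taken modulo the bit width of the promoted left operand. For power-of-two widths that reduction is a cheap AND with width-1; for other widths ConstrainShiftValue falls back to URem. The same decision in a standalone form:

    #include <cstdint>
    #include <iostream>

    // Reduce a shift amount into [0, Width): a mask when Width is a power of
    // two, a remainder otherwise.
    uint64_t constrainShiftAmount(uint64_t Amount, unsigned Width) {
      if ((Width & (Width - 1)) == 0) // power of two
        return Amount & (Width - 1);
      return Amount % Width;
    }

    int main() {
      std::cout << constrainShiftAmount(35, 32) << '\n'; // 3
      std::cout << constrainShiftAmount(35, 24) << '\n'; // 11
    }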
@@ -3680,12 +3836,11 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
Ops.Ty->hasSignedIntegerRepresentation() &&
!CGF.getLangOpts().isSignedOverflowDefined() &&
- !CGF.getLangOpts().CPlusPlus2a;
+ !CGF.getLangOpts().CPlusPlus20;
bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
- RHS =
- Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
+ RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
else if ((SanitizeBase || SanitizeExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
@@ -3747,8 +3902,7 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
- RHS =
- Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
+ RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
@@ -3901,9 +4055,10 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
E->getExprLoc());
}
- if (BOInfo.isFixedPointBinOp()) {
+ if (BOInfo.isFixedPointOp()) {
Result = EmitFixedPointBinOp(BOInfo);
} else if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
if (!IsSignaling)
Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
else
@@ -4056,6 +4211,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
Value *RHS = Visit(E->getRHS());
Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
} else {
@@ -4140,6 +4297,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
Value *RHS = Visit(E->getRHS());
Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
} else {
@@ -4273,8 +4432,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getLangOpts().OpenCL
- && condExpr->getType()->isVectorType()) {
+ if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
+ condExpr->getType()->isExtVectorType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
@@ -4289,10 +4448,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
- llvm::Value *tmp = Builder.CreateSExt(TestMSB,
- llvm::VectorType::get(elemType,
- numElem),
- "sext");
+ llvm::Value *tmp = Builder.CreateSExt(
+ TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
llvm::Value *tmp2 = Builder.CreateNot(tmp);
// Cast float to int to perform ANDs if necessary.
@@ -4431,14 +4588,9 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
Value *Src, unsigned NumElementsDst) {
llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
- SmallVector<llvm::Constant*, 4> Args;
- Args.push_back(Builder.getInt32(0));
- Args.push_back(Builder.getInt32(1));
- Args.push_back(Builder.getInt32(2));
- if (NumElementsDst == 4)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- return Builder.CreateShuffleVector(Src, UnV, Mask);
+ static constexpr int Mask[] = {0, 1, 2, -1};
+ return Builder.CreateShuffleVector(Src, UnV,
+ llvm::makeArrayRef(Mask, NumElementsDst));
}
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
@@ -4516,7 +4668,8 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
- auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
+ auto *Vec4Ty = llvm::FixedVectorType::get(
+ cast<llvm::VectorType>(DstTy)->getElementType(), 4);
Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
Vec4Ty);
}
@@ -4659,7 +4812,7 @@ struct GEPOffsetAndOverflow {
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
llvm::LLVMContext &VMContext,
CodeGenModule &CGM,
- CGBuilderTy Builder) {
+ CGBuilderTy &Builder) {
const auto &DL = CGM.getDataLayout();
// The total (signed) byte offset for the GEP.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
index d7e267630762..f860623e2bc3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Transforms/Utils/AMDGPUEmitPrintf.h"
using namespace clang;
using namespace CodeGen;
@@ -110,7 +111,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
- Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
+ Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlign(Arg->getType()));
}
BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
}
@@ -120,3 +121,36 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
return RValue::get(Builder.CreateCall(
VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
}
+
+RValue
+CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ assert(getTarget().getTriple().getArch() == llvm::Triple::amdgcn);
+ assert(E->getBuiltinCallee() == Builtin::BIprintf ||
+ E->getBuiltinCallee() == Builtin::BI__builtin_printf);
+ assert(E->getNumArgs() >= 1); // printf always has at least one arg.
+
+ CallArgList CallArgs;
+ EmitCallArgs(CallArgs,
+ E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
+ E->arguments(), E->getDirectCallee(),
+ /* ParamsToSkip = */ 0);
+
+ SmallVector<llvm::Value *, 8> Args;
+ for (auto A : CallArgs) {
+ // We don't know how to emit non-scalar varargs.
+ if (!A.getRValue(*this).isScalar()) {
+ CGM.ErrorUnsupported(E, "non-scalar arg to printf");
+ return RValue::get(llvm::ConstantInt::get(IntTy, -1));
+ }
+
+ llvm::Value *Arg = A.getRValue(*this).getScalarVal();
+ Args.push_back(Arg);
+ }
+
+ llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
+ IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
+ auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args);
+ Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
+ return RValue::get(Printf);
+}
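Both device printf paths evaluate the call arguments, reject non-scalar varargs, and hand the scalars to a runtime helper; the NVPTX path above packs them into a stack buffer laid out by each argument's preferred alignment, while the AMDGPU path delegates that to llvm::emitAMDGPUPrintfCall. A rough host-side sketch of the packing idea; real codegen works on IR values and DataLayout alignments, not raw bytes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Append one scalar argument to a byte buffer, padding first so the value
    // lands at its natural alignment -- the same shape of layout the vprintf
    // argument buffer uses.
    template <typename T>
    void packArg(std::vector<unsigned char> &Buf, T Value) {
      size_t Align = alignof(T);
      size_t Padded = (Buf.size() + Align - 1) & ~(Align - 1);
      Buf.resize(Padded);
      const unsigned char *Bytes = reinterpret_cast<const unsigned char *>(&Value);
      Buf.insert(Buf.end(), Bytes, Bytes + sizeof(T));
    }

    int main() {
      std::vector<unsigned char> Buf;
      packArg(Buf, 'x'); // 1 byte
      packArg(Buf, 3.5); // padded to an 8-byte boundary
      packArg(Buf, 42);  // 4-byte int
      std::cout << Buf.size() << '\n'; // 20 on typical 64-bit targets
    }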
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
index e4b184eb8798..78da72eda0cf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -9,6 +9,8 @@
#include "CGLoopInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -572,6 +574,7 @@ void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
}
void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
+ const clang::CodeGenOptions &CGOpts,
ArrayRef<const clang::Attr *> Attrs,
const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc) {
@@ -752,6 +755,14 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
}
+ if (CGOpts.OptimizationLevel > 0)
+ // Disable unrolling for this loop if unrolling was disabled (via
+ // -fno-unroll-loops) and no pragma overrides the decision.
+ if (!CGOpts.UnrollLoops &&
+ (StagedAttrs.UnrollEnable == LoopAttributes::Unspecified &&
+ StagedAttrs.UnrollCount == 0))
+ setUnrollState(LoopAttributes::Disable);
+
/// Stage the attributes.
push(Header, StartLoc, EndLoc);
}
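The new check stages an explicit unroll-disable attribute only when optimizing, -fno-unroll-loops is in effect, and no unroll pragma or unroll count has been applied to the loop. A tiny sketch of that predicate with hypothetical names mirroring the fields used above:

    #include <iostream>

    enum class UnrollState { Unspecified, Enable, Disable, Full };

    struct LoopAttrs {
      UnrollState UnrollEnable = UnrollState::Unspecified;
      unsigned UnrollCount = 0;
    };

    // Mirrors the condition added to LoopInfoStack::push(): pragmas always
    // win, and at -O0 the metadata is not emitted at all.
    bool shouldStageUnrollDisable(unsigned OptLevel, bool UnrollLoopsFlag,
                                  const LoopAttrs &Staged) {
      return OptLevel > 0 && !UnrollLoopsFlag &&
             Staged.UnrollEnable == UnrollState::Unspecified &&
             Staged.UnrollCount == 0;
    }

    int main() {
      LoopAttrs Plain, Pragma;
      Pragma.UnrollCount = 4; // e.g. #pragma clang loop unroll_count(4)
      std::cout << shouldStageUnrollDisable(2, false, Plain) << '\n';  // 1
      std::cout << shouldStageUnrollDisable(2, false, Pragma) << '\n'; // 0
    }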
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
index 5abcf37c5433..e379c64c99a8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
@@ -29,6 +29,7 @@ class MDNode;
namespace clang {
class Attr;
class ASTContext;
+class CodeGenOptions;
namespace CodeGen {
/// Attributes that may be specified on loops.
@@ -202,6 +203,7 @@ public:
/// Begin a new structured loop. Stage attributes from the Attrs list.
/// The staged attributes are applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
+ const clang::CodeGenOptions &CGOpts,
llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index d5f378c52232..d134be83a9dc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -254,6 +254,10 @@ struct GenBinaryFuncName : CopyStructVisitor<GenBinaryFuncName<IsMove>, IsMove>,
void visitVolatileTrivial(QualType FT, const FieldDecl *FD,
CharUnits CurStructOffset) {
+ // Zero-length bit-fields don't need to be copied/assigned.
+ if (FD && FD->isZeroLengthBitField(this->Ctx))
+ return;
+
// Because volatile fields can be bit-fields and are individually copied,
// their offset and width are in bits.
uint64_t OffsetInBits =
@@ -317,6 +321,16 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
}
+template <size_t N, size_t... Ints>
+static std::array<Address, N> getParamAddrs(std::index_sequence<Ints...> IntSeq,
+ std::array<CharUnits, N> Alignments,
+ FunctionArgList Args,
+ CodeGenFunction *CGF) {
+ return std::array<Address, N>{{
+ Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
+ Alignments[Ints])...}};
+}
+
// Template classes that are used as bases for classes that emit special
// functions.
template <class Derived> struct GenFuncBase {
@@ -424,9 +438,9 @@ template <class Derived> struct GenFuncBase {
}
template <size_t N>
- llvm::Function *
- getFunction(StringRef FuncName, QualType QT, std::array<Address, N> Addrs,
- std::array<CharUnits, N> Alignments, CodeGenModule &CGM) {
+ llvm::Function *getFunction(StringRef FuncName, QualType QT,
+ std::array<CharUnits, N> Alignments,
+ CodeGenModule &CGM) {
// If the special function already exists in the module, return it.
if (llvm::Function *F = CGM.getModule().getFunction(FuncName)) {
bool WrongType = false;
@@ -439,7 +453,7 @@ template <class Derived> struct GenFuncBase {
}
if (WrongType) {
- std::string FuncName = F->getName();
+ std::string FuncName = std::string(F->getName());
SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
CGM.Error(Loc, "special function " + FuncName +
" for non-trivial C struct has incorrect type");
@@ -466,12 +480,8 @@ template <class Derived> struct GenFuncBase {
CodeGenFunction NewCGF(CGM);
setCGF(&NewCGF);
CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
-
- for (unsigned I = 0; I < N; ++I) {
- llvm::Value *V = CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[I]));
- Addrs[I] = Address(V, Alignments[I]);
- }
-
+ std::array<Address, N> Addrs =
+ getParamAddrs<N>(std::make_index_sequence<N>{}, Alignments, Args, CGF);
asDerived().visitStructFields(QT, CharUnits::Zero(), Addrs);
CGF->FinishFunction();
return F;
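getParamAddrs replaces the explicit load loop: a pack expansion over std::index_sequence builds the std::array of parameter addresses in a single initializer. A self-contained sketch of the same pattern with plain values in place of Address and CharUnits:

    #include <array>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Build a std::array by applying MakeFn to the first N elements of a
    // runtime container, expanding the indices at compile time -- the same
    // pattern getParamAddrs uses to load each parameter and pair it with its
    // alignment.
    template <typename T, typename Make, std::size_t... Ints>
    std::array<T, sizeof...(Ints)>
    gather(std::index_sequence<Ints...>, const std::vector<T> &Src, Make MakeFn) {
      return {{MakeFn(Src[Ints])...}};
    }

    int main() {
      std::vector<int> Params{1, 2, 3, 4};
      auto Doubled = gather(std::make_index_sequence<3>{}, Params,
                            [](int V) { return V * 2; });
      for (int V : Doubled)
        std::cout << V << ' '; // 2 4 6
      std::cout << '\n';
    }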
@@ -491,7 +501,7 @@ template <class Derived> struct GenFuncBase {
}
if (llvm::Function *F =
- getFunction(FuncName, QT, Addrs, Alignments, CallerCGF.CGM))
+ getFunction(FuncName, QT, Alignments, CallerCGF.CGM))
CallerCGF.EmitNounwindRuntimeCall(F, Ptrs);
}
@@ -543,6 +553,10 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
std::array<Address, 2> Addrs) {
LValue DstLV, SrcLV;
if (FD) {
+ // No need to copy zero-length bit-fields.
+ if (FD->isZeroLengthBitField(this->CGF->getContext()))
+ return;
+
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
@@ -825,17 +839,6 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
-template <size_t N> static std::array<Address, N> createNullAddressArray();
-
-template <> std::array<Address, 1> createNullAddressArray() {
- return std::array<Address, 1>({{Address(nullptr, CharUnits::Zero())}});
-}
-
-template <> std::array<Address, 2> createNullAddressArray() {
- return std::array<Address, 2>({{Address(nullptr, CharUnits::Zero()),
- Address(nullptr, CharUnits::Zero())}});
-}
-
template <class G, size_t N>
static llvm::Function *
getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
@@ -844,8 +847,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
// The following call requires an array of addresses as arguments, but doesn't
// actually use them (it overwrites them with the addresses of the arguments
// of the created function).
- return Gen.getFunction(FuncName, QT, createNullAddressArray<N>(), Alignments,
- CGM);
+ return Gen.getFunction(FuncName, QT, Alignments, CGM);
}
// Functions to emit calls to the special functions of a non-trivial C struct.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 90fca2836d99..cd2b84f5dd20 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -1491,11 +1491,10 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
argLoad.getType()))
finalArg = &argCast;
-
- BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
- ivarRef.getType(), VK_RValue, OK_Ordinary,
- SourceLocation(), FPOptions());
- EmitStmt(&assign);
+ BinaryOperator *assign = BinaryOperator::Create(
+ getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), VK_RValue,
+ OK_Ordinary, SourceLocation(), FPOptionsOverride());
+ EmitStmt(assign);
}
/// Generate an Objective-C property setter function.
@@ -1837,6 +1836,40 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *CurrentItem =
Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
+ if (SanOpts.has(SanitizerKind::ObjCCast)) {
+ // Before using an item from the collection, check that the implicit cast
+ // from id to the element type is valid. This is done with instrumentation
+ // roughly corresponding to:
+ //
+ // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ }
+ const ObjCObjectPointerType *ObjPtrTy =
+ elementType->getAsObjCInterfacePointerType();
+ const ObjCInterfaceType *InterfaceTy =
+ ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr;
+ if (InterfaceTy) {
+ SanitizerScope SanScope(this);
+ auto &C = CGM.getContext();
+ assert(InterfaceTy->getDecl() && "No decl for ObjC interface type");
+ Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C);
+ CallArgList IsKindOfClassArgs;
+ llvm::Value *Cls =
+ CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl());
+ IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType());
+ llvm::Value *IsClass =
+ CGM.getObjCRuntime()
+ .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy,
+ IsKindOfClassSel, CurrentItem,
+ IsKindOfClassArgs)
+ .getScalarVal();
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(S.getBeginLoc()),
+ EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))};
+ EmitCheck({{IsClass, SanitizerKind::ObjCCast}},
+ SanitizerHandler::InvalidObjCCast,
+ ArrayRef<llvm::Constant *>(StaticData), CurrentItem);
+ }
+ }
+
// Cast that value to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
"currentitem");
@@ -2160,7 +2193,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
if (!mandatory && isa<llvm::Instruction>(result)) {
llvm::CallInst *call
= cast<llvm::CallInst>(result->stripPointerCasts());
- assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
+ assert(call->getCalledOperand() ==
+ CGM.getObjCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
llvm::MDNode::get(Builder.getContext(), None));
@@ -3255,7 +3289,6 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainScalarExpr(cleanups->getSubExpr());
}
@@ -3271,7 +3304,6 @@ llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
}
@@ -3382,7 +3414,6 @@ static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
// Look through full-expressions.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
}
@@ -3505,7 +3536,7 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
if (!Ty->isRecordType())
return nullptr;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
- if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialSetExpr(PID))
@@ -3555,21 +3586,21 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
- SourceLocation());
- UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ DeclRefExpr DstExpr(C, &DstDecl, false, DestTy, VK_RValue, SourceLocation());
+ UnaryOperator *DST = UnaryOperator::Create(
+ C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
- DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
- SourceLocation());
- UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ DeclRefExpr SrcExpr(C, &SrcDecl, false, SrcTy, VK_RValue, SourceLocation());
+ UnaryOperator *SRC = UnaryOperator::Create(
+ C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
- Expr *Args[2] = { &DST, &SRC };
+ Expr *Args[2] = {DST, SRC};
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
- VK_LValue, SourceLocation(), FPOptions());
+ VK_LValue, SourceLocation(), FPOptionsOverride());
EmitStmt(TheCall);
@@ -3589,7 +3620,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
QualType Ty = PD->getType();
if (!Ty->isRecordType())
return nullptr;
- if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialGetExpr(PID))
@@ -3641,14 +3672,15 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
SourceLocation());
- UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ UnaryOperator *SRC = UnaryOperator::Create(
+ C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
CXXConstructExpr *CXXConstExpr =
cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
SmallVector<Expr*, 4> ConstructorArgs;
- ConstructorArgs.push_back(&SRC);
+ ConstructorArgs.push_back(SRC);
ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
CXXConstExpr->arg_end());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index a27b6d4ed637..bb9c494ae68e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -203,7 +203,8 @@ protected:
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
llvm::Constant *MakeConstantString(StringRef Str, const char *Name = "") {
- ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name);
+ ConstantAddress Array =
+ CGM.GetAddrOfConstantCString(std::string(Str), Name);
return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
Array.getPointer(), Zeros);
}
@@ -254,11 +255,11 @@ protected:
isDynamic=true) {
int attrs = property->getPropertyAttributes();
// For read-only properties, clear the copy and retain flags
- if (attrs & ObjCPropertyDecl::OBJC_PR_readonly) {
- attrs &= ~ObjCPropertyDecl::OBJC_PR_copy;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_retain;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_weak;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_strong;
+ if (attrs & ObjCPropertyAttribute::kind_readonly) {
+ attrs &= ~ObjCPropertyAttribute::kind_copy;
+ attrs &= ~ObjCPropertyAttribute::kind_retain;
+ attrs &= ~ObjCPropertyAttribute::kind_weak;
+ attrs &= ~ObjCPropertyAttribute::kind_strong;
}
// The first flags field has the same attribute values as clang uses internally
Fields.addInt(Int8Ty, attrs & 0xff);
@@ -616,6 +617,13 @@ public:
llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) override;
void GenerateProtocol(const ObjCProtocolDecl *PD) override;
+
+ virtual llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD);
+
+ llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override {
+ return GenerateProtocolRef(PD);
+ }
+
llvm::Function *ModuleInitFunction() override;
llvm::FunctionCallee GetPropertyGetFunction() override;
llvm::FunctionCallee GetPropertySetFunction() override;
@@ -820,7 +828,7 @@ class CGObjCGNUstep : public CGObjCGNU {
// Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
- // If we're in ObjC++ mode, then we want to make
+ // If we're in ObjC++ mode, then we want to make
if (usesSEHExceptions) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_rethrow(void)
@@ -1347,7 +1355,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
void GenerateProtocol(const ObjCProtocolDecl *PD) override {
// Do nothing - we only emit referenced protocols.
}
- llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD) override {
std::string ProtocolName = PD->getNameAsString();
auto *&Protocol = ExistingProtocols[ProtocolName];
if (Protocol)
@@ -1433,7 +1441,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
if (TypeEncoding.empty())
return NULLPtr;
- std::string MangledTypes = TypeEncoding;
+ std::string MangledTypes = std::string(TypeEncoding);
std::replace(MangledTypes.begin(), MangledTypes.end(),
'@', '\1');
std::string TypesVarName = ".objc_sel_types_" + MangledTypes;
@@ -1556,7 +1564,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// We have to do this by hand, rather than with @llvm.ctors, so that the
// linker can remove the duplicate invocations.
auto *InitVar = new llvm::GlobalVariable(TheModule, LoadFunction->getType(),
- /*isConstant*/true, llvm::GlobalValue::LinkOnceAnyLinkage,
+ /*isConstant*/false, llvm::GlobalValue::LinkOnceAnyLinkage,
LoadFunction, ".objc_ctor");
// Check that this hasn't been renamed. This shouldn't happen, because
// this function should be called precisely once.
@@ -1647,14 +1655,16 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
for (const auto &lateInit : EarlyInitList) {
auto *global = TheModule.getGlobalVariable(lateInit.first);
if (global) {
- b.CreateAlignedStore(global,
- b.CreateStructGEP(lateInit.second.first, lateInit.second.second), CGM.getPointerAlign().getQuantity());
+ b.CreateAlignedStore(
+ global,
+ b.CreateStructGEP(lateInit.second.first, lateInit.second.second),
+ CGM.getPointerAlign().getAsAlign());
}
}
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
// need to specify that this runs early in library initialisation.
- auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
/*isConstant*/true, llvm::GlobalValue::InternalLinkage,
Init, ".objc_early_init_ptr");
InitVar->setSection(".CRT$XCLb");
@@ -1943,7 +1953,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (SuperClass) {
std::pair<llvm::Constant*, int> v{classStruct, 1};
- EarlyInitList.emplace_back(SuperClass->getName(), std::move(v));
+ EarlyInitList.emplace_back(std::string(SuperClass->getName()),
+ std::move(v));
}
}
@@ -2410,7 +2421,8 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
assert(PT && "Invalid @catch type.");
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- std::string className = IT->getDecl()->getIdentifier()->getName();
+ std::string className =
+ std::string(IT->getDecl()->getIdentifier()->getName());
std::string typeinfoName = "__objc_eh_typeinfo_" + className;
@@ -3034,13 +3046,18 @@ CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) {
+ auto protocol = GenerateProtocolRef(PD);
+ llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolRef(const ObjCProtocolDecl *PD) {
llvm::Constant *&protocol = ExistingProtocols[PD->getNameAsString()];
if (!protocol)
GenerateProtocol(PD);
assert(protocol && "Unknown protocol");
- llvm::Type *T =
- CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
- return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+ return protocol;
}
llvm::Constant *
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index f36c28a85a68..1d0379afb4b5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1107,11 +1107,6 @@ public:
void GenerateProtocol(const ObjCProtocolDecl *PD) override;
- /// GetOrEmitProtocol - Get the protocol object for the given
- /// declaration, emitting it if necessary. The return value has type
- /// ProtocolPtrTy.
- virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
-
/// GetOrEmitProtocolRef - Get a forward reference to the protocol
/// object for the given declaration, emitting it if needed. These
/// forward references will be filled in with empty bodies if no
@@ -2035,7 +2030,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
- GV->setAlignment(llvm::Align::None());
+ GV->setAlignment(llvm::Align(1));
Fields.addBitCast(GV, CGM.Int8PtrTy);
// String length.
@@ -2558,9 +2553,8 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
}
if (FQT->isRecordType() && ElCount) {
int OldIndex = RunSkipBlockVars.size() - 1;
- const RecordType *RT = FQT->getAs<RecordType>();
- BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset,
- HasUnion);
+ auto *RT = FQT->castAs<RecordType>();
+ BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset, HasUnion);
// Replicate layout information for each array element. Note that
// one element is already done.
@@ -3047,9 +3041,10 @@ llvm::Value *CGObjCCommonMac::EmitClassRefViaRuntime(
ObjCCommonTypesHelper &ObjCTypes) {
llvm::FunctionCallee lookUpClassFn = ObjCTypes.getLookUpClassFn();
- llvm::Value *className =
- CGF.CGM.GetAddrOfConstantCString(ID->getObjCRuntimeNameAsString())
- .getPointer();
+ llvm::Value *className = CGF.CGM
+ .GetAddrOfConstantCString(std::string(
+ ID->getObjCRuntimeNameAsString()))
+ .getPointer();
ASTContext &ctx = CGF.CGM.getContext();
className =
CGF.Builder.CreateBitCast(className,
@@ -3291,6 +3286,8 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
for (auto *PD : ClassExt->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
+ if (PD->isDirectProperty())
+ continue;
PropertySet.insert(PD->getIdentifier());
Properties.push_back(PD);
}
@@ -3302,6 +3299,8 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
// class extension.
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
+ if (PD->isDirectProperty())
+ continue;
Properties.push_back(PD);
}
@@ -3327,8 +3326,6 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
values.addInt(ObjCTypes.IntTy, Properties.size());
auto propertiesArray = values.beginArray(ObjCTypes.PropertyTy);
for (auto PD : Properties) {
- if (PD->isDirectProperty())
- continue;
auto property = propertiesArray.beginStruct(ObjCTypes.PropertyTy);
property.add(GetPropertyName(PD->getIdentifier()));
property.add(GetPropertyTypeString(PD, Container));
@@ -3637,7 +3634,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (GV) {
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
values.finishAndSetAsInitializer(GV);
GV->setSection(Section);
@@ -3700,7 +3697,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (GV) {
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
values.finishAndSetAsInitializer(GV);
} else {
@@ -3731,7 +3728,7 @@ llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
llvm::GlobalValue::PrivateLinkage, nullptr,
Name);
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
return GV;
}
@@ -3745,7 +3742,7 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::GlobalValue::PrivateLinkage, nullptr,
Name);
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward class metadata reference has incorrect type.");
return GV;
}
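
The assertion changes above swap GV->getType()->getElementType() for GV->getValueType(). A small behavior-preserving sketch of the equivalence being relied on; the helper and its names are placeholders, not from the commit:

// Hedged sketch: for a global variable, getValueType() is the type of the
// value the global holds, i.e. what the old expression obtained by peeling
// the pointer type off getType().
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Type.h"

static bool holdsType(const llvm::GlobalVariable *GV, llvm::Type *Expected) {
  return GV->getValueType() == Expected; // no detour through the pointer type
}
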
@@ -4029,22 +4026,49 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::Function *
CGObjCCommonMac::GenerateDirectMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- auto I = DirectMethodDefinitions.find(OMD->getCanonicalDecl());
- if (I != DirectMethodDefinitions.end())
- return I->second;
+ auto *COMD = OMD->getCanonicalDecl();
+ auto I = DirectMethodDefinitions.find(COMD);
+ llvm::Function *OldFn = nullptr, *Fn = nullptr;
- SmallString<256> Name;
- GetNameForMethod(OMD, CD, Name, /*ignoreCategoryNamespace*/true);
+ if (I != DirectMethodDefinitions.end()) {
+ // Objective-C allows for the declaration and implementation types
+ // to differ slightly.
+ //
+ // If we're being asked for the Function associated with a method
+ // implementation, a previous value might have been cached
+ // based on the type of the canonical declaration.
+ //
+ // If these do not match, then we'll replace this function with
+ // a new one that has the proper type below.
+ if (!OMD->getBody() || COMD->getReturnType() == OMD->getReturnType())
+ return I->second;
+ OldFn = I->second;
+ }
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
- llvm::Function *Method =
- llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
- Name.str(), &CGM.getModule());
- DirectMethodDefinitions.insert(std::make_pair(OMD->getCanonicalDecl(), Method));
- return Method;
+ if (OldFn) {
+ Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
+ "", &CGM.getModule());
+ Fn->takeName(OldFn);
+ OldFn->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(Fn, OldFn->getType()));
+ OldFn->eraseFromParent();
+
+ // Replace the cached function in the map.
+ I->second = Fn;
+ } else {
+ SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name, /*ignoreCategoryNamespace*/ true);
+
+ Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
+ Name.str(), &CGM.getModule());
+ DirectMethodDefinitions.insert(std::make_pair(COMD, Fn));
+ }
+
+ return Fn;
}
void CGObjCCommonMac::GenerateDirectMethodPrologue(
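
The OldFn branch added in the hunk above follows a common LLVM replacement idiom: build a fresh function with the corrected type, steal the old symbol's name, rewrite existing uses through a bitcast, then delete the stale body. A standalone sketch of that idiom under assumed names (M, OldFn, NewTy, and the helper name are placeholders, not from the commit):

// Hedged sketch of the replace-a-misdeclared-function idiom used above.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

static llvm::Function *replaceWithRetypedFunction(llvm::Module &M,
                                                  llvm::Function *OldFn,
                                                  llvm::FunctionType *NewTy) {
  llvm::Function *NewFn = llvm::Function::Create(
      NewTy, llvm::GlobalValue::ExternalLinkage, "", &M);
  NewFn->takeName(OldFn);                     // keep the original symbol name
  OldFn->replaceAllUsesWith(                  // patch existing uses/call sites
      llvm::ConstantExpr::getBitCast(NewFn, OldFn->getType()));
  OldFn->eraseFromParent();                   // drop the stale function
  return NewFn;
}
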
@@ -4195,7 +4219,8 @@ CGObjCCommonMac::CreateCStringLiteral(StringRef Name, ObjCLabelType Type,
: "__TEXT,__cstring,cstring_literals";
break;
case ObjCLabelType::PropertyName:
- Section = "__TEXT,__cstring,cstring_literals";
+ Section = NonFragile ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
break;
}
@@ -5128,15 +5153,18 @@ void CGObjCCommonMac::EmitImageInfo() {
Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
llvm::MDString::get(VMContext, Section));
+ auto Int8Ty = llvm::Type::getInt8Ty(VMContext);
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// Non-GC overrides those files which specify GC.
- Mod.addModuleFlag(llvm::Module::Override,
- "Objective-C Garbage Collection", (uint32_t)0);
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ llvm::ConstantInt::get(Int8Ty,0));
} else {
// Add the ObjC garbage collection value.
Mod.addModuleFlag(llvm::Module::Error,
"Objective-C Garbage Collection",
- eImageInfo_GarbageCollected);
+ llvm::ConstantInt::get(Int8Ty,
+ (uint8_t)eImageInfo_GarbageCollected));
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
// Add the ObjC GC Only value.
@@ -5147,7 +5175,7 @@ void CGObjCCommonMac::EmitImageInfo() {
llvm::Metadata *Ops[2] = {
llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))};
+ Int8Ty, eImageInfo_GarbageCollected))};
Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
llvm::MDNode::get(VMContext, Ops));
}
@@ -5423,7 +5451,7 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// This isn't a stable sort, but our algorithm should handle it fine.
llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
} else {
- assert(std::is_sorted(IvarsInfo.begin(), IvarsInfo.end()));
+ assert(llvm::is_sorted(IvarsInfo));
}
assert(IvarsInfo.back().Offset < InstanceEnd);
@@ -6217,11 +6245,9 @@ void CGObjCNonFragileABIMac::AddModuleClassList(
assert((!CGM.getTriple().isOSBinFormatMachO() ||
SectionName.startswith("__DATA")) &&
"SectionName expected to start with __DATA on MachO");
- llvm::GlobalValue::LinkageTypes LT =
- getLinkageTypeForObjCMetadata(CGM, SectionName);
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, LT, Init,
- SymbolName);
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, Init, SymbolName);
GV->setAlignment(
llvm::Align(CGM.getDataLayout().getABITypeAlignment(Init->getType())));
GV->setSection(SectionName);
@@ -6350,7 +6376,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceStart,
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
- std::string ClassName = ID->getObjCRuntimeNameAsString();
+ std::string ClassName = std::string(ID->getObjCRuntimeNameAsString());
CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
@@ -7509,10 +7535,9 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::Constant *ClassGV = GetClassGlobalForClassRef(ID);
std::string SectionName =
GetSectionName("__objc_superrefs", "regular,no_dead_strip");
- Entry = new llvm::GlobalVariable(
- CGM.getModule(), ClassGV->getType(), false,
- getLinkageTypeForObjCMetadata(CGM, SectionName), ClassGV,
- "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ClassGV->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, ClassGV,
+ "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(CGF.getPointerAlign().getAsAlign());
Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
@@ -7533,10 +7558,9 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
auto MetaClassGV = GetClassGlobal(ID, /*metaclass*/ true, NotForDefinition);
std::string SectionName =
GetSectionName("__objc_superrefs", "regular,no_dead_strip");
- Entry = new llvm::GlobalVariable(
- CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
- getLinkageTypeForObjCMetadata(CGM, SectionName), MetaClassGV,
- "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(Align.getAsAlign());
Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
index f8b831d0e9be..39efe040302d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -13,14 +13,15 @@
//===----------------------------------------------------------------------===//
#include "CGObjCRuntime.h"
-#include "CGCleanup.h"
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -211,7 +212,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
}
-
+
// Emit the try body.
CGF.EmitStmt(S.getTryBody());
@@ -271,7 +272,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
- }
+ }
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
@@ -383,3 +384,9 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
return MessageSendInfo(argsInfo, signatureType);
}
+
+llvm::Constant *
+clang::CodeGen::emitObjCProtocolObject(CodeGenModule &CGM,
+ const ObjCProtocolDecl *protocol) {
+ return CGM.getObjCRuntime().GetOrEmitProtocol(protocol);
+}
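
The free function added above gives CodeGen clients a stable way to reach the runtime's GetOrEmitProtocol, which the header hunk below turns into a pure virtual on CGObjCRuntime; the include added earlier in this file suggests the declaration lives in clang/CodeGen/CodeGenABITypes.h. A minimal caller sketch, not part of the commit, with CGM and PD assumed to be obtained elsewhere:

// Hedged usage sketch; CGM and PD are placeholders for a CodeGenModule and a
// protocol declaration obtained by the caller.
#include "clang/AST/DeclObjC.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/IR/Constant.h"

llvm::Constant *getProtocolObject(clang::CodeGen::CodeGenModule &CGM,
                                  const clang::ObjCProtocolDecl *PD) {
  // Emits the protocol metadata lazily if the runtime has not done so yet.
  return clang::CodeGen::emitObjCProtocolObject(CGM, PD);
}
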
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
index f0b3525cfde2..a2c189585f7b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
@@ -211,6 +211,11 @@ public:
/// implementations.
virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) = 0;
+
/// Generate a function preamble for a method with the specified
/// types.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 97b17799a03e..43cbe9c720ea 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -21,17 +21,24 @@
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/BitmaskEnum.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <numeric>
using namespace clang;
using namespace CodeGen;
@@ -562,205 +569,6 @@ enum OpenMPSchedType {
OMP_sch_modifier_nonmonotonic = (1 << 30),
};
-enum OpenMPRTLFunction {
- /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
- /// kmpc_micro microtask, ...);
- OMPRTL__kmpc_fork_call,
- /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
- /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
- OMPRTL__kmpc_threadprivate_cached,
- /// Call to void __kmpc_threadprivate_register( ident_t *,
- /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
- OMPRTL__kmpc_threadprivate_register,
- // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
- OMPRTL__kmpc_global_thread_num,
- // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- OMPRTL__kmpc_critical,
- // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
- // global_tid, kmp_critical_name *crit, uintptr_t hint);
- OMPRTL__kmpc_critical_with_hint,
- // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- OMPRTL__kmpc_end_critical,
- // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_cancel_barrier,
- // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_barrier,
- // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_for_static_fini,
- // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_serialized_parallel,
- // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_end_serialized_parallel,
- // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_threads);
- OMPRTL__kmpc_push_num_threads,
- // Call to void __kmpc_flush(ident_t *loc);
- OMPRTL__kmpc_flush,
- // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_master,
- // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_end_master,
- // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
- // int end_part);
- OMPRTL__kmpc_omp_taskyield,
- // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_single,
- // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_end_single,
- // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry);
- OMPRTL__kmpc_omp_task_alloc,
- // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
- // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
- // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
- // kmp_int64 device_id);
- OMPRTL__kmpc_omp_target_task_alloc,
- // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
- // new_task);
- OMPRTL__kmpc_omp_task,
- // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
- // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
- // kmp_int32 didit);
- OMPRTL__kmpc_copyprivate,
- // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
- // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
- OMPRTL__kmpc_reduce,
- // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
- // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
- // *lck);
- OMPRTL__kmpc_reduce_nowait,
- // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- OMPRTL__kmpc_end_reduce,
- // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- OMPRTL__kmpc_end_reduce_nowait,
- // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
- // kmp_task_t * new_task);
- OMPRTL__kmpc_omp_task_begin_if0,
- // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
- // kmp_task_t * new_task);
- OMPRTL__kmpc_omp_task_complete_if0,
- // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_ordered,
- // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_end_ordered,
- // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_omp_taskwait,
- // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_taskgroup,
- // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_end_taskgroup,
- // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
- // int proc_bind);
- OMPRTL__kmpc_push_proc_bind,
- // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
- // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
- // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- OMPRTL__kmpc_omp_task_with_deps,
- // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
- // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
- // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- OMPRTL__kmpc_omp_wait_deps,
- // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 cncl_kind);
- OMPRTL__kmpc_cancellationpoint,
- // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 cncl_kind);
- OMPRTL__kmpc_cancel,
- // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_teams, kmp_int32 thread_limit);
- OMPRTL__kmpc_push_num_teams,
- // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- OMPRTL__kmpc_fork_teams,
- // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
- // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
- // sched, kmp_uint64 grainsize, void *task_dup);
- OMPRTL__kmpc_taskloop,
- // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
- // num_dims, struct kmp_dim *dims);
- OMPRTL__kmpc_doacross_init,
- // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
- OMPRTL__kmpc_doacross_fini,
- // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- OMPRTL__kmpc_doacross_post,
- // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- OMPRTL__kmpc_doacross_wait,
- // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
- OMPRTL__kmpc_task_reduction_init,
- // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
- // *d);
- OMPRTL__kmpc_task_reduction_get_th_data,
- // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
- OMPRTL__kmpc_alloc,
- // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
- OMPRTL__kmpc_free,
-
- //
- // Offloading related calls
- //
- // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
- // size);
- OMPRTL__kmpc_push_target_tripcount,
- // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target,
- // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_nowait,
- // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types, int32_t num_teams, int32_t thread_limit);
- OMPRTL__tgt_target_teams,
- // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
- // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- OMPRTL__tgt_target_teams_nowait,
- // Call to void __tgt_register_requires(int64_t flags);
- OMPRTL__tgt_register_requires,
- // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_begin,
- // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_begin_nowait,
- // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_end,
- // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_end_nowait,
- // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_update,
- // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_update_nowait,
- // Call to int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
- OMPRTL__tgt_mapper_num_components,
- // Call to void __tgt_push_mapper_component(void *rt_mapper_handle, void
- // *base, void *begin, int64_t size, int64_t type);
- OMPRTL__tgt_push_mapper_component,
-};
-
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
@@ -971,27 +779,37 @@ void ReductionCodeGen::emitAggregateInitialization(
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
+ ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps) {
ClausesData.reserve(Shareds.size());
SharedAddresses.reserve(Shareds.size());
Sizes.reserve(Shareds.size());
BaseDecls.reserve(Shareds.size());
- auto IPriv = Privates.begin();
- auto IRed = ReductionOps.begin();
+ const auto *IOrig = Origs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *IRed = ReductionOps.begin();
for (const Expr *Ref : Shareds) {
- ClausesData.emplace_back(Ref, *IPriv, *IRed);
+ ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
+ std::advance(IOrig, 1);
std::advance(IPriv, 1);
std::advance(IRed, 1);
}
}
-void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
- assert(SharedAddresses.size() == N &&
+void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
+ assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
"Number of generated lvalues must be exactly N.");
- LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
- LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
+ LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
+ LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
SharedAddresses.emplace_back(First, Second);
+ if (ClausesData[N].Shared == ClausesData[N].Ref) {
+ OrigAddresses.emplace_back(First, Second);
+ } else {
+ LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
+ LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
+ OrigAddresses.emplace_back(First, Second);
+ }
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
@@ -1001,26 +819,25 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
- CGF.getTypeSize(
- SharedAddresses[N].first.getType().getNonReferenceType()),
+ CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
nullptr);
return;
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType = cast<llvm::PointerType>(
- SharedAddresses[N].first.getPointer(CGF)->getType())
- ->getElementType();
+ auto *ElemType =
+ cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
+ ->getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
- Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(CGF),
- SharedAddresses[N].first.getPointer(CGF));
+ Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
+ OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
} else {
- SizeInChars = CGF.getTypeSize(
- SharedAddresses[N].first.getType().getNonReferenceType());
+ SizeInChars =
+ CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
}
Sizes.emplace_back(SizeInChars, Size);
@@ -1243,7 +1060,7 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator)
: CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
- OffloadEntriesInfoManager(CGM) {
+ OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
ASTContext &C = CGM.getContext();
RecordDecl *RD = C.buildImplicitRecord("ident_t");
QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
@@ -1263,55 +1080,11 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+ // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
+ OMPBuilder.initialize();
loadOffloadInfoMetadata();
}
-bool CGOpenMPRuntime::tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) {
- // Emit at least a definition for the aliasee if the the address of the
- // original function is requested.
- if (IsForDefinition || OrigAddr)
- (void)CGM.GetAddrOfGlobal(NewGD);
- StringRef NewMangledName = CGM.getMangledName(NewGD);
- llvm::GlobalValue *Addr = CGM.GetGlobalValue(NewMangledName);
- if (Addr && !Addr->isDeclaration()) {
- const auto *D = cast<FunctionDecl>(OldGD.getDecl());
- const CGFunctionInfo &FI = CGM.getTypes().arrangeGlobalDeclaration(NewGD);
- llvm::Type *DeclTy = CGM.getTypes().GetFunctionType(FI);
-
- // Create a reference to the named value. This ensures that it is emitted
- // if a deferred decl.
- llvm::GlobalValue::LinkageTypes LT = CGM.getFunctionLinkage(OldGD);
-
- // Create the new alias itself, but don't set a name yet.
- auto *GA =
- llvm::GlobalAlias::create(DeclTy, 0, LT, "", Addr, &CGM.getModule());
-
- if (OrigAddr) {
- assert(OrigAddr->isDeclaration() && "Expected declaration");
-
- GA->takeName(OrigAddr);
- OrigAddr->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(GA, OrigAddr->getType()));
- OrigAddr->eraseFromParent();
- } else {
- GA->setName(CGM.getMangledName(OldGD));
- }
-
- // Set attributes which are particular to an alias; this is a
- // specialization of the attributes which may be set on a global function.
- if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
- D->isWeakImported())
- GA->setLinkage(llvm::Function::WeakAnyLinkage);
-
- CGM.SetCommonAttributes(OldGD, GA);
- return true;
- }
- return false;
-}
-
void CGOpenMPRuntime::clear() {
InternalVars.clear();
// Clean non-target variable declarations possibly used only in debug info.
@@ -1325,14 +1098,6 @@ void CGOpenMPRuntime::clear() {
continue;
GV->eraseFromParent();
}
- // Emit aliases for the deferred aliasees.
- for (const auto &Pair : DeferredVariantFunction) {
- StringRef MangledName = CGM.getMangledName(Pair.second.second);
- llvm::GlobalValue *Addr = CGM.GetGlobalValue(MangledName);
- // If not able to emit alias, just emit original declaration.
- (void)tryEmitDeclareVariant(Pair.second.first, Pair.second.second, Addr,
- /*IsForDefinition=*/false);
- }
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
@@ -1343,7 +1108,7 @@ std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
OS << Sep << Part;
Sep = Separator;
}
- return OS.str();
+ return std::string(OS.str());
}
static llvm::Function *
@@ -1494,6 +1259,8 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
bool HasCancel = false;
if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
HasCancel = OPD->hasCancel();
+ else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
+ HasCancel = OPD->hasCancel();
else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
HasCancel = OPSD->hasCancel();
else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
@@ -1511,12 +1278,12 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
// TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
// parallel region to make cancellation barriers work properly.
- llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder();
- PushAndPopStackRAII PSR(OMPBuilder, CGF, HasCancel);
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel);
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
HasCancel, OutlinedHelperName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
+ return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
@@ -1549,7 +1316,9 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
TaskTVar->getType()->castAs<PointerType>())
.getPointer(CGF)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task),
+ TaskArgs);
};
CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
UntiedCodeGen);
@@ -1560,11 +1329,19 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
: OMPD_task;
const CapturedStmt *CS = D.getCapturedStmt(Region);
- const auto *TD = dyn_cast<OMPTaskDirective>(&D);
+ bool HasCancel = false;
+ if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+
CodeGenFunction CGF(CGM, true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
- InnermostKind,
- TD ? TD->hasCancel() : false, Action);
+ InnermostKind, HasCancel, Action);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
if (!Tied)
@@ -1786,7 +1563,8 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
llvm::CallInst *Call = CGF.Builder.CreateCall(
- createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
Elem.second.ThreadID = Call;
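
This hunk, like the __kmpc_omp_task one above, shows the lookup pattern that replaces the large createRuntimeFunction() switch deleted further down: runtime declarations now come from OpenMPIRBuilder, keyed by enumerators generated from llvm/Frontend/OpenMP/OMPKinds.def. A hedged sketch of the pattern with another entry point, assuming __kmpc_barrier is among the generated entries at this revision; the helper name is a placeholder:

// Hedged sketch, not part of the commit.
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Module.h"

static llvm::FunctionCallee getBarrierCallee(llvm::OpenMPIRBuilder &OMPBuilder,
                                             llvm::Module &M) {
  // Declares __kmpc_barrier in M on first use, with the prototype generated
  // from OMPKinds.def, and returns the existing declaration afterwards.
  return OMPBuilder.getOrCreateRuntimeFunction(
      M, llvm::omp::OMPRTL___kmpc_barrier);
}
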
@@ -1800,16 +1578,17 @@ void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
- for(auto *D : FunctionUDRMap[CGF.CurFn])
+ for(const auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
FunctionUDRMap.erase(CGF.CurFn);
}
auto I = FunctionUDMMap.find(CGF.CurFn);
if (I != FunctionUDMMap.end()) {
- for(auto *D : I->second)
+ for(const auto *D : I->second)
UDMMap.erase(D);
FunctionUDMMap.erase(I);
}
+ LastprivateConditionalToTypes.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
@@ -1826,766 +1605,6 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
-llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
- llvm::FunctionCallee RTLFn = nullptr;
- switch (static_cast<OpenMPRTLFunction>(Function)) {
- case OMPRTL__kmpc_fork_call: {
- // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- getKmpc_MicroPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
- if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
- if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
- llvm::LLVMContext &Ctx = F->getContext();
- llvm::MDBuilder MDB(Ctx);
- // Annotate the callback behavior of the __kmpc_fork_call:
- // - The callback callee is argument number 2 (microtask).
- // - The first two arguments of the callback callee are unknown (-1).
- // - All variadic arguments to the __kmpc_fork_call are passed to the
- // callback callee.
- F->addMetadata(
- llvm::LLVMContext::MD_callback,
- *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
- 2, {-1, -1},
- /* VarArgsArePassed */ true)}));
- }
- }
- break;
- }
- case OMPRTL__kmpc_global_thread_num: {
- // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
- break;
- }
- case OMPRTL__kmpc_threadprivate_cached: {
- // Build void *__kmpc_threadprivate_cached(ident_t *loc,
- // kmp_int32 global_tid, void *data, size_t size, void ***cache);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy, CGM.SizeTy,
- CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
- break;
- }
- case OMPRTL__kmpc_critical: {
- // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
- break;
- }
- case OMPRTL__kmpc_critical_with_hint: {
- // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit, uintptr_t hint);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy),
- CGM.IntPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
- break;
- }
- case OMPRTL__kmpc_threadprivate_register: {
- // Build void __kmpc_threadprivate_register(ident_t *, void *data,
- // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
- // typedef void *(*kmpc_ctor)(void *);
- auto *KmpcCtorTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
- /*isVarArg*/ false)->getPointerTo();
- // typedef void *(*kmpc_cctor)(void *, void *);
- llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *KmpcCopyCtorTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
- /*isVarArg*/ false)
- ->getPointerTo();
- // typedef void (*kmpc_dtor)(void *);
- auto *KmpcDtorTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
- ->getPointerTo();
- llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
- KmpcCopyCtorTy, KmpcDtorTy};
- auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
- /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
- break;
- }
- case OMPRTL__kmpc_end_critical: {
- // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
- break;
- }
- case OMPRTL__kmpc_cancel_barrier: {
- // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
- break;
- }
- case OMPRTL__kmpc_barrier: {
- // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
- break;
- }
- case OMPRTL__kmpc_for_static_fini: {
- // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
- break;
- }
- case OMPRTL__kmpc_push_num_threads: {
- // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_threads)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
- break;
- }
- case OMPRTL__kmpc_serialized_parallel: {
- // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
- break;
- }
- case OMPRTL__kmpc_end_serialized_parallel: {
- // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
- break;
- }
- case OMPRTL__kmpc_flush: {
- // Build void __kmpc_flush(ident_t *loc);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
- break;
- }
- case OMPRTL__kmpc_master: {
- // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
- break;
- }
- case OMPRTL__kmpc_end_master: {
- // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
- break;
- }
- case OMPRTL__kmpc_omp_taskyield: {
- // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
- // int end_part);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
- break;
- }
- case OMPRTL__kmpc_single: {
- // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
- break;
- }
- case OMPRTL__kmpc_end_single: {
- // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
- break;
- }
- case OMPRTL__kmpc_omp_task_alloc: {
- // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry);
- assert(KmpRoutineEntryPtrTy != nullptr &&
- "Type kmp_routine_entry_t must be created.");
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
- // Return void * and then cast to particular kmp_task_t type.
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
- break;
- }
- case OMPRTL__kmpc_omp_target_task_alloc: {
- // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
- assert(KmpRoutineEntryPtrTy != nullptr &&
- "Type kmp_routine_entry_t must be created.");
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
- CGM.Int64Ty};
- // Return void * and then cast to particular kmp_task_t type.
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
- break;
- }
- case OMPRTL__kmpc_omp_task: {
- // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
- break;
- }
- case OMPRTL__kmpc_copyprivate: {
- // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
- // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
- // kmp_int32 didit);
- llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *CpyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
- break;
- }
- case OMPRTL__kmpc_reduce: {
- // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
- // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
- llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
- break;
- }
- case OMPRTL__kmpc_reduce_nowait: {
- // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
- // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
- // *lck);
- llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
- break;
- }
- case OMPRTL__kmpc_end_reduce: {
- // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
- break;
- }
- case OMPRTL__kmpc_end_reduce_nowait: {
- // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
- break;
- }
- case OMPRTL__kmpc_omp_task_begin_if0: {
- // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
- break;
- }
- case OMPRTL__kmpc_omp_task_complete_if0: {
- // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy,
- /*Name=*/"__kmpc_omp_task_complete_if0");
- break;
- }
- case OMPRTL__kmpc_ordered: {
- // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
- break;
- }
- case OMPRTL__kmpc_end_ordered: {
- // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
- break;
- }
- case OMPRTL__kmpc_omp_taskwait: {
- // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
- break;
- }
- case OMPRTL__kmpc_taskgroup: {
- // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
- break;
- }
- case OMPRTL__kmpc_end_taskgroup: {
- // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
- break;
- }
- case OMPRTL__kmpc_push_proc_bind: {
- // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
- // int proc_bind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
- break;
- }
- case OMPRTL__kmpc_omp_task_with_deps: {
- // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
- // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
- // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
- CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
- break;
- }
- case OMPRTL__kmpc_omp_wait_deps: {
- // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
- // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
- // kmp_depend_info_t *noalias_dep_list);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int32Ty, CGM.VoidPtrTy,
- CGM.Int32Ty, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
- break;
- }
- case OMPRTL__kmpc_cancellationpoint: {
- // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 cncl_kind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
- break;
- }
- case OMPRTL__kmpc_cancel: {
- // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 cncl_kind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
- break;
- }
- case OMPRTL__kmpc_push_num_teams: {
- // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
- // kmp_int32 num_teams, kmp_int32 num_threads)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
- break;
- }
- case OMPRTL__kmpc_fork_teams: {
- // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- getKmpc_MicroPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
- if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
- if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
- llvm::LLVMContext &Ctx = F->getContext();
- llvm::MDBuilder MDB(Ctx);
- // Annotate the callback behavior of the __kmpc_fork_teams:
- // - The callback callee is argument number 2 (microtask).
- // - The first two arguments of the callback callee are unknown (-1).
- // - All variadic arguments to the __kmpc_fork_teams are passed to the
- // callback callee.
- F->addMetadata(
- llvm::LLVMContext::MD_callback,
- *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
- 2, {-1, -1},
- /* VarArgsArePassed */ true)}));
- }
- }
- break;
- }
- case OMPRTL__kmpc_taskloop: {
- // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
- // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
- // sched, kmp_uint64 grainsize, void *task_dup);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.IntTy,
- CGM.VoidPtrTy,
- CGM.IntTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty,
- CGM.IntTy,
- CGM.IntTy,
- CGM.Int64Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
- break;
- }
- case OMPRTL__kmpc_doacross_init: {
- // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
- // num_dims, struct kmp_dim *dims);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
- break;
- }
- case OMPRTL__kmpc_doacross_fini: {
- // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
- break;
- }
- case OMPRTL__kmpc_doacross_post: {
- // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
- break;
- }
- case OMPRTL__kmpc_doacross_wait: {
- // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
- break;
- }
- case OMPRTL__kmpc_task_reduction_init: {
- // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
- break;
- }
- case OMPRTL__kmpc_task_reduction_get_th_data: {
- // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
- // *d);
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
- break;
- }
- case OMPRTL__kmpc_alloc: {
- // Build to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t
- // al); omp_allocator_handle_t type is void *.
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
- break;
- }
- case OMPRTL__kmpc_free: {
- // Build to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t
- // al); omp_allocator_handle_t type is void *.
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
- break;
- }
- case OMPRTL__kmpc_push_target_tripcount: {
- // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
- // size);
- llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
- llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
- break;
- }
- case OMPRTL__tgt_target: {
- // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
- break;
- }
- case OMPRTL__tgt_target_nowait: {
- // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
- // int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
- break;
- }
- case OMPRTL__tgt_target_teams: {
- // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
- // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
- break;
- }
- case OMPRTL__tgt_target_teams_nowait: {
- // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
- // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
- break;
- }
- case OMPRTL__tgt_register_requires: {
- // Build void __tgt_register_requires(int64_t flags);
- llvm::Type *TypeParams[] = {CGM.Int64Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
- break;
- }
- case OMPRTL__tgt_target_data_begin: {
- // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
- break;
- }
- case OMPRTL__tgt_target_data_begin_nowait: {
- // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
- break;
- }
- case OMPRTL__tgt_target_data_end: {
- // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
- break;
- }
- case OMPRTL__tgt_target_data_end_nowait: {
- // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
- break;
- }
- case OMPRTL__tgt_target_data_update: {
- // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
- break;
- }
- case OMPRTL__tgt_target_data_update_nowait: {
- // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
- break;
- }
- case OMPRTL__tgt_mapper_num_components: {
- // Build int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
- llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_mapper_num_components");
- break;
- }
- case OMPRTL__tgt_push_mapper_component: {
- // Build void __tgt_push_mapper_component(void *rt_mapper_handle, void
- // *base, void *begin, int64_t size, int64_t type);
- llvm::Type *TypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.VoidPtrTy,
- CGM.Int64Ty, CGM.Int64Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_push_mapper_component");
- break;
- }
- }
- assert(RTLFn && "Unable to find OpenMP runtime function");
- return RTLFn;
-}
-
llvm::FunctionCallee
CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
@@ -2764,7 +1783,9 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return Address(CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
VDAddr.getAlignment());
}
@@ -2774,7 +1795,8 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_global_thread_num),
OMPLoc);
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
@@ -2782,7 +1804,9 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
+ Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
@@ -2813,7 +1837,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_ctor_", ""});
llvm::Function *Fn =
- CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
+ CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, Loc, Loc);
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
@@ -2846,7 +1870,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_dtor_", ""});
llvm::Function *Fn =
- CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
+ CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
Loc, Loc);
@@ -2889,7 +1913,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
auto *InitFunctionTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
std::string Name = getName({"__omp_threadprivate_init_", ""});
- llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
@@ -2918,12 +1942,14 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
HasRequiresUnifiedSharedMemory))
return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
- if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
+ assert(VD && "Unknown VarDecl");
+
+ if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
return CGM.getLangOpts().OpenMPIsDevice;
QualType ASTTy = VD->getType();
-
SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
+
// Produce the unique prefix to identify the new target regions. We use
// the source location of the variable declaration which we know to not
// conflict with any target region.
@@ -2949,7 +1975,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_ctor"), FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
@@ -2987,7 +2013,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_dtor"), FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
@@ -3042,7 +2068,9 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
return Address(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
VarLVType->getPointerTo(/*AddrSpace=*/0)),
CGM.getContext().getTypeAlignInChars(VarType));
}
@@ -3093,8 +2121,9 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &M = CGM.getModule();
+ auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
+ this](CodeGenFunction &CGF, PrePostActionTy &) {
// Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
@@ -3106,18 +2135,19 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
llvm::FunctionCallee RTLFn =
- RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
};
- auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
+ this](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
// Build calls:
// __kmpc_serialized_parallel(&Loc, GTid);
llvm::Value *Args[] = {RTLoc, ThreadID};
- CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_serialized_parallel),
+ Args);
// OutlinedFn(&GTid, &zero_bound, CapturedStruct);
Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
@@ -3134,9 +2164,9 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// __kmpc_end_serialized_parallel(&Loc, GTid);
llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
- CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
- EndArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_end_serialized_parallel),
+ EndArgs);
};
if (IfCond) {
emitIfClause(CGF, IfCond, ThenGen, ElseGen);
@@ -3250,12 +2280,16 @@ void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
std::end(Args));
if (Hint) {
EnterArgs.push_back(CGF.Builder.CreateIntCast(
- CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
+ CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
}
CommonActionTy Action(
- createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
- : OMPRTL__kmpc_critical),
- EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
+ EnterArgs,
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_end_critical),
+ Args);
CriticalOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
@@ -3271,8 +2305,12 @@ void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
// }
// Prepare arguments and build a call to __kmpc_master
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_master),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_master),
+ Args,
/*Conditional=*/true);
MasterOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
@@ -3283,11 +2321,18 @@ void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
- // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
- llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateTaskyield(CGF.Builder);
+ } else {
+ // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
+ Args);
+ }
+
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
@@ -3302,8 +2347,11 @@ void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
// __kmpc_end_taskgroup(ident_t *, gtid);
// Prepare arguments and build a call to __kmpc_taskgroup
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskgroup),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
Args);
TaskgroupOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
@@ -3409,8 +2457,12 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
}
// Prepare arguments and build a call to __kmpc_single
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_single),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_single),
+ Args,
/*Conditional=*/true);
SingleOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
@@ -3455,7 +2507,9 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_copyprivate),
+ Args);
}
}
@@ -3470,8 +2524,11 @@ void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
// Prepare arguments and build a call to __kmpc_ordered
if (IsThreads) {
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_ordered),
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_ordered),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_ordered),
Args);
OrderedOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
@@ -3519,9 +2576,8 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
// Check if we should use the OMPBuilder
auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
- llvm::OpenMPIRBuilder *OMPBuilder = CGF.CGM.getOpenMPIRBuilder();
- if (OMPBuilder) {
- CGF.Builder.restoreIP(OMPBuilder->CreateBarrier(
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ CGF.Builder.restoreIP(OMPBuilder.CreateBarrier(
CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
return;
}
@@ -3538,7 +2594,9 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
if (OMPRegionInfo) {
if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
llvm::Value *Result = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_cancel_barrier),
+ Args);
if (EmitChecks) {
// if (__kmpc_cancel_barrier()) {
// exit from construct;
@@ -3557,7 +2615,9 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
return;
}
}
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_barrier),
+ Args);
}
/// Map the OpenMP loop schedule to the runtime enumeration.
@@ -3771,6 +2831,7 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
@@ -3805,7 +2866,9 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_for_static_fini),
Args);
}
@@ -3853,7 +2916,8 @@ void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_num_threads),
Args);
}
@@ -3867,16 +2931,23 @@ void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
+ Args);
}
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
- SourceLocation Loc) {
- if (!CGF.HaveInsertPoint())
- return;
- // Build call void __kmpc_flush(ident_t *loc)
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
- emitUpdateLocation(CGF, Loc));
+ SourceLocation Loc, llvm::AtomicOrdering AO) {
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateFlush(CGF.Builder);
+ } else {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call void __kmpc_flush(ident_t *loc)
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_flush),
+ emitUpdateLocation(CGF, Loc));
+ }
}
namespace {
@@ -4358,13 +3429,14 @@ QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
namespace {
struct PrivateHelpersTy {
- PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
- const VarDecl *PrivateElemInit)
- : Original(Original), PrivateCopy(PrivateCopy),
+ PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
+ const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
+ : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
PrivateElemInit(PrivateElemInit) {}
- const VarDecl *Original;
- const VarDecl *PrivateCopy;
- const VarDecl *PrivateElemInit;
+ const Expr *OriginalRef = nullptr;
+ const VarDecl *Original = nullptr;
+ const VarDecl *PrivateCopy = nullptr;
+ const VarDecl *PrivateElemInit = nullptr;
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // anonymous namespace
@@ -4744,7 +3816,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// For target-based directives skip 3 firstprivate arrays BasePointersArray,
// PointersArray and SizesArray. The original variables for these arrays are
// not captured and we get their addresses explicitly.
- if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
+ if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -4776,13 +3848,23 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
"Expected artificial target data variable.");
SharedRefLValue =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
- } else {
+ } else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
Address(SharedRefLValue.getPointer(CGF),
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
+ } else if (CGF.LambdaCaptureFields.count(
+ Pair.second.Original->getCanonicalDecl()) > 0 ||
+ dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
+ SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
+ } else {
+ // Processing for implicitly captured variables.
+ InlinedOpenMPRegionRAII Region(
+ CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
+ /*HasCancel=*/false);
+ SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
}
if (Type->isArrayType()) {
// Initialize firstprivate array.
@@ -4915,7 +3997,7 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
Base, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGF.getNaturalTypeAlignment(SharedsTy));
+ CGM.getNaturalTypeAlignment(SharedsTy));
}
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
@@ -4938,6 +4020,135 @@ checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
return NeedsCleanup;
}
+namespace {
+/// Loop generator for OpenMP iterator expression.
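+/// For every iterator in the expression the constructor emits the skeleton
+///   counter = 0;
+/// cont:
+///   if (counter < upper) goto body; else goto exit;
+/// body:
+///   iter = begin + counter * step;
+/// and the destructor closes the nest in reverse order with
+///   counter = counter + 1; goto cont;
+/// exit: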
+class OMPIteratorGeneratorScope final
+ : public CodeGenFunction::OMPPrivateScope {
+ CodeGenFunction &CGF;
+ const OMPIteratorExpr *E = nullptr;
+ SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
+ SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
+ OMPIteratorGeneratorScope() = delete;
+ OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
+
+public:
+ OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
+ : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
+ if (!E)
+ return;
+ SmallVector<llvm::Value *, 4> Uppers;
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
+ const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
+ addPrivate(VD, [&CGF, VD]() {
+ return CGF.CreateMemTemp(VD->getType(), VD->getName());
+ });
+ const OMPIteratorHelperData &HelperData = E->getHelper(I);
+ addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
+ return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
+ "counter.addr");
+ });
+ }
+ Privatize();
+
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ const OMPIteratorHelperData &HelperData = E->getHelper(I);
+ LValue CLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
+ HelperData.CounterVD->getType());
+ // Counter = 0;
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
+ CLVal);
+ CodeGenFunction::JumpDest &ContDest =
+ ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
+ CodeGenFunction::JumpDest &ExitDest =
+ ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
+      // N = <number-of-iterations>;
+ llvm::Value *N = Uppers[I];
+ // cont:
+ // if (Counter < N) goto body; else goto exit;
+ CGF.EmitBlock(ContDest.getBlock());
+ auto *CVal =
+ CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
+ llvm::Value *Cmp =
+ HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
+ ? CGF.Builder.CreateICmpSLT(CVal, N)
+ : CGF.Builder.CreateICmpULT(CVal, N);
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
+ CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
+ // body:
+ CGF.EmitBlock(BodyBB);
+ // Iteri = Begini + Counter * Stepi;
+ CGF.EmitIgnoredExpr(HelperData.Update);
+ }
+ }
+ ~OMPIteratorGeneratorScope() {
+ if (!E)
+ return;
+ for (unsigned I = E->numOfIterators(); I > 0; --I) {
+ // Counter = Counter + 1;
+ const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
+ CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
+ // goto cont;
+ CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
+ // exit:
+ CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
+ }
+ }
+};
+} // namespace
+
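+/// Returns the base pointer and the size in bytes of the entity referenced by
+/// \p E: for array-shaping expressions the size is the element size times the
+/// product of the shaped dimensions, for array sections it is the distance
+/// from the lower bound to one past the upper bound, and otherwise it is the
+/// size of the expression's type.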
+static std::pair<llvm::Value *, llvm::Value *>
+getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
+ const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
+ llvm::Value *Addr;
+ if (OASE) {
+ const Expr *Base = OASE->getBase();
+ Addr = CGF.EmitScalarExpr(Base);
+ } else {
+ Addr = CGF.EmitLValue(E).getPointer(CGF);
+ }
+ llvm::Value *SizeVal;
+ QualType Ty = E->getType();
+ if (OASE) {
+ SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
+ for (const Expr *SE : OASE->getDimensions()) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(SE);
+ Sz = CGF.EmitScalarConversion(
+ Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
+ SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
+ }
+ } else if (const auto *ASE =
+ dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ LValue UpAddrLVal =
+ CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
+ llvm::Value *UpAddr =
+ CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
+ llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
+ llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
+ SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
+ } else {
+ SizeVal = CGF.getTypeSize(Ty);
+ }
+ return std::make_pair(Addr, SizeVal);
+}
+
+/// Builds the kmp_task_affinity_info_t record type, if it is not built yet.
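+/// The layout built below is
+///   struct kmp_task_affinity_info_t {
+///     intptr_t base_addr;
+///     size_t len;
+///     uint32_t flags;
+///   };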
+static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
+ QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
+ if (KmpTaskAffinityInfoTy.isNull()) {
+ RecordDecl *KmpAffinityInfoRD =
+ C.buildImplicitRecord("kmp_task_affinity_info_t");
+ KmpAffinityInfoRD->startDefinition();
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
+ KmpAffinityInfoRD->completeDefinition();
+ KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
+ }
+}
+
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
@@ -4946,23 +4157,23 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 4> Privates;
// Aggregate privates and sort them by the alignment.
- auto I = Data.PrivateCopies.begin();
+ const auto *I = Data.PrivateCopies.begin();
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
- PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr));
++I;
}
I = Data.FirstprivateCopies.begin();
- auto IElemInitRef = Data.FirstprivateInits.begin();
+ const auto *IElemInitRef = Data.FirstprivateInits.begin();
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(
- VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
++I;
++IElemInitRef;
@@ -4972,7 +4183,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
- PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr));
++I;
}
@@ -5046,7 +4257,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
TiedFlag = 0x1,
FinalFlag = 0x2,
DestructorsFlag = 0x8,
- PriorityFlag = 0x20
+ PriorityFlag = 0x20,
+ DetachableFlag = 0x40,
};
unsigned Flags = Data.Tied ? TiedFlag : 0;
bool NeedsCleanup = false;
@@ -5057,6 +4269,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
if (Data.Priority.getInt())
Flags = Flags | PriorityFlag;
+ if (D.hasClausesOfKind<OMPDetachClause>())
+ Flags = Flags | DetachableFlag;
llvm::Value *TaskFlags =
Data.Final.getPointer()
? CGF.Builder.CreateSelect(Data.Final.getPointer(),
@@ -5084,10 +4298,170 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
AllocArgs.push_back(DeviceID);
NewTask = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_target_task_alloc), AllocArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
+ AllocArgs);
} else {
- NewTask = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
+ NewTask =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
+ AllocArgs);
+ }
+ // Emit detach clause initialization.
+ // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
+ // task_descriptor);
+ if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
+ const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
+ LValue EvtLVal = CGF.EmitLValue(Evt);
+
+ // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
+ // int gtid, kmp_task_t *task);
+ llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
+ llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
+ Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
+ llvm::Value *EvtVal = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
+ {Loc, Tid, NewTask});
+ EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
+ Evt->getExprLoc());
+ CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
+ }
+ // Process affinity clauses.
+ if (D.hasClausesOfKind<OMPAffinityClause>()) {
+ // Process list of affinity data.
+ ASTContext &C = CGM.getContext();
+ Address AffinitiesArray = Address::invalid();
+ // Calculate number of elements to form the array of affinity data.
+ llvm::Value *NumOfElements = nullptr;
+ unsigned NumAffinities = 0;
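+    // NumOfElements stays null if no affinity clause uses an iterator
+    // modifier; in that case a constant-sized array is emitted below.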
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ if (const Expr *Modifier = C->getModifier()) {
+ const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
+ NumOfElements =
+ NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
+ }
+ } else {
+ NumAffinities += C->varlist_size();
+ }
+ }
+ getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
+ // Fields ids in kmp_task_affinity_info record.
+ enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
+
+ QualType KmpTaskAffinityInfoArrayTy;
+ if (NumOfElements) {
+ NumOfElements = CGF.Builder.CreateNUWAdd(
+ llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
+ OpaqueValueExpr OVE(
+ Loc,
+ C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
+ VK_RValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ RValue::get(NumOfElements));
+ KmpTaskAffinityInfoArrayTy =
+ C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
+ // Properly emit variable-sized array.
+ auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
+ ImplicitParamDecl::Other);
+ CGF.EmitVarDecl(*PD);
+ AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
+ NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
+ /*isSigned=*/false);
+ } else {
+ KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
+ KmpTaskAffinityInfoTy,
+ llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+ AffinitiesArray =
+ CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
+ AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
+ NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
+ /*isSigned=*/false);
+ }
+
+ const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
+ // Fill array by elements without iterators.
+ unsigned Pos = 0;
+ bool HasIterator = false;
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ if (C->getModifier()) {
+ HasIterator = true;
+ continue;
+ }
+ for (const Expr *E : C->varlists()) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ LValue Base =
+ CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
+ KmpTaskAffinityInfoTy);
+ // affs[i].base_addr = &<Affinities[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // affs[i].len = sizeof(<Affinities[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ ++Pos;
+ }
+ }
+ LValue PosLVal;
+ if (HasIterator) {
+ PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
+ C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
+ }
+ // Process elements with iterators.
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ const Expr *Modifier = C->getModifier();
+ if (!Modifier)
+ continue;
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
+ for (const Expr *E : C->varlists()) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ LValue Base = CGF.MakeAddrLValue(
+ Address(CGF.Builder.CreateGEP(AffinitiesArray.getPointer(), Idx),
+ AffinitiesArray.getAlignment()),
+ KmpTaskAffinityInfoTy);
+ // affs[i].base_addr = &<Affinities[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // affs[i].len = sizeof(<Affinities[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ Idx = CGF.Builder.CreateNUWAdd(
+ Idx, llvm::ConstantInt::get(Idx->getType(), 1));
+ CGF.EmitStoreOfScalar(Idx, PosLVal);
+ }
+ }
+ // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
+ // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
+ // naffins, kmp_task_affinity_info_t *affin_list);
+ llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = getThreadID(CGF, Loc);
+ llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ AffinitiesArray.getPointer(), CGM.VoidPtrTy);
+    // FIXME: Emit the function and ignore its result for now, until the
+    // runtime function is properly implemented.
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
+ {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
}
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -5106,7 +4480,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGF.getNaturalTypeAlignment(SharedsTy));
+ CGM.getNaturalTypeAlignment(SharedsTy));
LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
@@ -5158,6 +4532,540 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
return Result;
}
+namespace {
+/// Dependence kind for RTL.
+enum RTLDependenceKindTy {
+ DepIn = 0x01,
+ DepInOut = 0x3,
+ DepMutexInOutSet = 0x4
+};
+/// Fields ids in kmp_depend_info record.
+enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
+} // namespace
+
+/// Translates internal dependency kind into the runtime kind.
+static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
+ RTLDependenceKindTy DepKind;
+ switch (K) {
+ case OMPC_DEPEND_in:
+ DepKind = DepIn;
+ break;
+ // Out and InOut dependencies must use the same code.
+ case OMPC_DEPEND_out:
+ case OMPC_DEPEND_inout:
+ DepKind = DepInOut;
+ break;
+ case OMPC_DEPEND_mutexinoutset:
+ DepKind = DepMutexInOutSet;
+ break;
+ case OMPC_DEPEND_source:
+ case OMPC_DEPEND_sink:
+ case OMPC_DEPEND_depobj:
+ case OMPC_DEPEND_unknown:
+ llvm_unreachable("Unknown task dependence type");
+ }
+ return DepKind;
+}
+
+/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
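+/// The layout built below is
+///   struct kmp_depend_info {
+///     intptr_t base_addr;
+///     size_t len;
+///     <unsigned of bool width> flags;
+///   };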
+static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
+ QualType &FlagsTy) {
+ FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
+ if (KmpDependInfoTy.isNull()) {
+ RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
+ KmpDependInfoRD->startDefinition();
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
+ KmpDependInfoRD->completeDefinition();
+ KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ }
+}
+
+std::pair<llvm::Value *, LValue>
+CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
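+  // The element at index -1 stores the number of dependencies kept in this
+  // depobj; emitDepobjDependClause writes it into that element's base_addr
+  // field and returns a pointer to the first real entry.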
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
+ return std::make_pair(NumDeps, Base);
+}
+
+static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ llvm::PointerUnion<unsigned *, LValue *> Pos,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
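+  // Pos is either a compile-time index (unsigned *) when the number of
+  // dependencies is statically known, or an lvalue holding a runtime counter
+  // when iterator modifiers make the count dynamic.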
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
+
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (const Expr *E : Data.DepExprs) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ LValue Base;
+ if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
+ Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
+ } else {
+ LValue &PosLVal = *Pos.get<LValue *>();
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Base = CGF.MakeAddrLValue(
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Idx),
+ DependenciesArray.getAlignment()),
+ KmpDependInfoTy);
+ }
+ // deps[i].base_addr = &<Dependencies[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // deps[i].len = sizeof(<Dependencies[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ // deps[i].flags = <Dependencies[i].first>;
+ RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
+ LValue FlagsLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
+ FlagsLVal);
+ if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
+ ++(*P);
+ } else {
+ LValue &PosLVal = *Pos.get<LValue *>();
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Idx = CGF.Builder.CreateNUWAdd(Idx,
+ llvm::ConstantInt::get(Idx->getType(), 1));
+ CGF.EmitStoreOfScalar(Idx, PosLVal);
+ }
+ }
+}
+
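+/// Reads the number of dependencies stored in each depobj listed in \p Data
+/// and returns these counts as runtime values, one per depobj expression.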
+static SmallVector<llvm::Value *, 4>
+emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data) {
+  assert(Data.DepKind == OMPC_DEPEND_depobj &&
+         "Expected depobj dependency kind.");
+ SmallVector<llvm::Value *, 4> Sizes;
+ SmallVector<LValue, 4> SizeLVals;
+ ASTContext &C = CGF.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
+ {
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (const Expr *E : Data.DepExprs) {
+ LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), KmpDependInfoPtrT);
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps =
+ CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ LValue NumLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
+ C.getUIntPtrType());
+ CGF.InitTempAlloca(NumLVal.getAddress(CGF),
+ llvm::ConstantInt::get(CGF.IntPtrTy, 0));
+ llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
+ llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
+ CGF.EmitStoreOfScalar(Add, NumLVal);
+ SizeLVals.push_back(NumLVal);
+ }
+ }
+ for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
+ llvm::Value *Size =
+ CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
+ Sizes.push_back(Size);
+ }
+ return Sizes;
+}
+
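+/// Copies the dependency records stored in each depobj of \p Data into
+/// \p DependenciesArray, starting at the runtime position held in \p PosLVal.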
+static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ LValue PosLVal,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
+  assert(Data.DepKind == OMPC_DEPEND_depobj &&
+         "Expected depobj dependency kind.");
+ ASTContext &C = CGF.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
+ llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
+ {
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
+ const Expr *E = Data.DepExprs[I];
+ LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), KmpDependInfoPtrT);
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+
+ // Get number of elements in a single depobj.
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps =
+ CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+
+      // memcpy the dependency data.
+ llvm::Value *Size = CGF.Builder.CreateNUWMul(
+ ElSize,
+ CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
+ llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Address DepAddr =
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Pos),
+ DependenciesArray.getAlignment());
+ CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
+
+ // Increase pos.
+ // pos += size;
+ llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
+ CGF.EmitStoreOfScalar(Add, PosLVal);
+ }
+ }
+}
+
+std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
+ CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
+ SourceLocation Loc) {
+ if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
+ return D.DepExprs.empty();
+ }))
+ return std::make_pair(nullptr, Address::invalid());
+ // Process list of dependencies.
+ ASTContext &C = CGM.getContext();
+ Address DependenciesArray = Address::invalid();
+ llvm::Value *NumOfElements = nullptr;
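+  // Count only the plain dependencies whose number is known statically;
+  // depobj and iterator-driven dependencies are sized at run time below.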
+ unsigned NumDependencies = std::accumulate(
+ Dependencies.begin(), Dependencies.end(), 0,
+ [](unsigned V, const OMPTaskDataTy::DependData &D) {
+ return D.DepKind == OMPC_DEPEND_depobj
+ ? V
+ : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
+ });
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ bool HasDepobjDeps = false;
+ bool HasRegularWithIterators = false;
+ llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
+ llvm::Value *NumOfRegularWithIterators =
+ llvm::ConstantInt::get(CGF.IntPtrTy, 1);
+  // Calculate the number of depobj dependencies and regular dependencies
+  // with iterator modifiers.
+ for (const OMPTaskDataTy::DependData &D : Dependencies) {
+ if (D.DepKind == OMPC_DEPEND_depobj) {
+ SmallVector<llvm::Value *, 4> Sizes =
+ emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
+ for (llvm::Value *Size : Sizes) {
+ NumOfDepobjElements =
+ CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
+ }
+ HasDepobjDeps = true;
+ continue;
+ }
+ // Include number of iterations, if any.
+ if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
+ NumOfRegularWithIterators =
+ CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
+ }
+ HasRegularWithIterators = true;
+ continue;
+ }
+ }
+
+ QualType KmpDependInfoArrayTy;
+ if (HasDepobjDeps || HasRegularWithIterators) {
+ NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
+ /*isSigned=*/false);
+ if (HasDepobjDeps) {
+ NumOfElements =
+ CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
+ }
+ if (HasRegularWithIterators) {
+ NumOfElements =
+ CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
+ }
+ OpaqueValueExpr OVE(Loc,
+ C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
+ VK_RValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ RValue::get(NumOfElements));
+ KmpDependInfoArrayTy =
+ C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
+ // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
+ // Properly emit variable-sized array.
+ auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
+ ImplicitParamDecl::Other);
+ CGF.EmitVarDecl(*PD);
+ DependenciesArray = CGF.GetAddrOfLocalVar(PD);
+ NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
+ /*isSigned=*/false);
+ } else {
+ KmpDependInfoArrayTy = C.getConstantArrayType(
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+ DependenciesArray =
+ CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
+ DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
+ NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
+ /*isSigned=*/false);
+ }
+ unsigned Pos = 0;
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
+ Dependencies[I].IteratorExpr)
+ continue;
+ emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
+ DependenciesArray);
+ }
+  // Copy regular dependencies with iterators.
+ LValue PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
+ !Dependencies[I].IteratorExpr)
+ continue;
+ emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
+ DependenciesArray);
+ }
+ // Copy final depobj arrays without iterators.
+ if (HasDepobjDeps) {
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
+ continue;
+ emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
+ DependenciesArray);
+ }
+ }
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ DependenciesArray, CGF.VoidPtrTy);
+ return std::make_pair(NumOfElements, DependenciesArray);
+}
+
+Address CGOpenMPRuntime::emitDepobjDependClause(
+ CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
+ SourceLocation Loc) {
+ if (Dependencies.DepExprs.empty())
+ return Address::invalid();
+ // Process list of dependencies.
+ ASTContext &C = CGM.getContext();
+ Address DependenciesArray = Address::invalid();
+ unsigned NumDependencies = Dependencies.DepExprs.size();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+
+ llvm::Value *Size;
+ // Define type kmp_depend_info[<Dependencies.size()>];
+ // For depobj reserve one extra element to store the number of elements.
+  // It is required to handle the depobj(x) update(in) construct.
+ // kmp_depend_info[<Dependencies.size()>] deps;
+ llvm::Value *NumDepsVal;
+ CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
+ if (const auto *IE =
+ cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
+ NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
+ NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
+ }
+ Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
+ NumDepsVal);
+ CharUnits SizeInBytes =
+ C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
+ llvm::Value *RecSize = CGM.getSize(SizeInBytes);
+ Size = CGF.Builder.CreateNUWMul(Size, RecSize);
+ NumDepsVal =
+ CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
+ } else {
+ QualType KmpDependInfoArrayTy = C.getConstantArrayType(
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
+ nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
+ Size = CGM.getSize(Sz.alignTo(Align));
+ NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
+ }
+ // The depobj array must be allocated dynamically.
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ // Use default allocator.
+ llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *Args[] = {ThreadID, Size, Allocator};
+
+ llvm::Value *Addr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc),
+ Args, ".dep.arr.addr");
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
+ DependenciesArray = Address(Addr, Align);
+ // Write the number of elements into the first element of the array for depobj.
+ LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
+ // deps[i].base_addr = NumDependencies;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
+ llvm::PointerUnion<unsigned *, LValue *> Pos;
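+ // Element 0 of the array stores the number of dependencies, so the actual
+ // dependency data starts at index 1.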
+ unsigned Idx = 1;
+ LValue PosLVal;
+ if (Dependencies.IteratorExpr) {
+ PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
+ C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
+ /*IsInit=*/true);
+ Pos = &PosLVal;
+ } else {
+ Pos = &Idx;
+ }
+ emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
+ return DependenciesArray;
+}
+
+void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
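+ // The depobj pointer references the first dependency record; step back one
+ // element (past the leading size element) to reach the start of the
+ // allocation that has to be freed.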
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
+ CGF.VoidPtrTy);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ // Use default allocator.
+ llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
+
+ // __kmpc_free(gtid, addr, nullptr);
+ (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free),
+ Args);
+}
+
+void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ OpenMPDependClauseKind NewDepKind,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
+ llvm::Value *NumDeps;
+ LValue Base;
+ std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
+
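+ // Update the flags field of every kmp_depend_info record in the depobj array
+ // with the new dependency kind.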
+ Address Begin = Base.getAddress(CGF);
+ // Compute the end of the dependency array: Begin + NumDeps elements.
+ llvm::Value *End = CGF.Builder.CreateGEP(Begin.getPointer(), NumDeps);
+ // The basic structure here is a while-do loop.
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
+ llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+ llvm::PHINode *ElementPHI =
+ CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
+ ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
+ Begin = Address(ElementPHI, Begin.getAlignment());
+ Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+ // deps[i].flags = NewDepKind;
+ RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
+ LValue FlagsLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
+ FlagsLVal);
+
+ // Shift the address forward by one element.
+ Address ElementNext =
+ CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
+ ElementPHI->addIncoming(ElementNext.getPointer(),
+ CGF.Builder.GetInsertBlock());
+ llvm::Value *IsEmpty =
+ CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
+
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction,
@@ -5174,94 +5082,11 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
LValue TDBase = Result.TDBase;
const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
- ASTContext &C = CGM.getContext();
// Process list of dependences.
Address DependenciesArray = Address::invalid();
- unsigned NumDependencies = Data.Dependences.size();
- if (NumDependencies) {
- // Dependence kind for RTL.
- enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3, DepMutexInOutSet = 0x4 };
- enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
- RecordDecl *KmpDependInfoRD;
- QualType FlagsTy =
- C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
- llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
- if (KmpDependInfoTy.isNull()) {
- KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
- KmpDependInfoRD->startDefinition();
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
- addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
- KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
- } else {
- KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- }
- // Define type kmp_depend_info[<Dependences.size()>];
- QualType KmpDependInfoArrayTy = C.getConstantArrayType(
- KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
- nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- // kmp_depend_info[<Dependences.size()>] deps;
- DependenciesArray =
- CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
- for (unsigned I = 0; I < NumDependencies; ++I) {
- const Expr *E = Data.Dependences[I].second;
- LValue Addr = CGF.EmitLValue(E);
- llvm::Value *Size;
- QualType Ty = E->getType();
- if (const auto *ASE =
- dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
- LValue UpAddrLVal =
- CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
- llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
- UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
- llvm::Value *LowIntPtr =
- CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGM.SizeTy);
- llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
- Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
- } else {
- Size = CGF.getTypeSize(Ty);
- }
- LValue Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, I),
- KmpDependInfoTy);
- // deps[i].base_addr = &<Dependences[i].second>;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- CGF.EmitStoreOfScalar(
- CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGF.IntPtrTy),
- BaseAddrLVal);
- // deps[i].len = sizeof(<Dependences[i].second>);
- LValue LenLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Len));
- CGF.EmitStoreOfScalar(Size, LenLVal);
- // deps[i].flags = <Dependences[i].first>;
- RTLDependenceKindTy DepKind;
- switch (Data.Dependences[I].first) {
- case OMPC_DEPEND_in:
- DepKind = DepIn;
- break;
- // Out and InOut dependencies must use the same code.
- case OMPC_DEPEND_out:
- case OMPC_DEPEND_inout:
- DepKind = DepInOut;
- break;
- case OMPC_DEPEND_mutexinoutset:
- DepKind = DepMutexInOutSet;
- break;
- case OMPC_DEPEND_source:
- case OMPC_DEPEND_sink:
- case OMPC_DEPEND_unknown:
- llvm_unreachable("Unknown task dependence type");
- }
- LValue FlagsLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
- FlagsLVal);
- }
- DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0), CGF.VoidPtrTy);
- }
+ llvm::Value *NumOfElements;
+ std::tie(NumOfElements, DependenciesArray) =
+ emitDependClause(CGF, Data.Dependences, Loc);
// NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
// libcall.
@@ -5273,28 +5098,30 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
llvm::Value *DepTaskArgs[7];
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
DepTaskArgs[0] = UpLoc;
DepTaskArgs[1] = ThreadID;
DepTaskArgs[2] = NewTask;
- DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
+ DepTaskArgs[3] = NumOfElements;
DepTaskArgs[4] = DependenciesArray.getPointer();
DepTaskArgs[5] = CGF.Builder.getInt32(0);
DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
- &TaskArgs,
+ auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
&DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
if (!Data.Tied) {
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
}
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
+ DepTaskArgs);
} else {
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task),
TaskArgs);
}
// Check if parent region is untied and build return for untied task;
@@ -5304,26 +5131,27 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
};
llvm::Value *DepWaitTaskArgs[6];
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
- DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
+ DepWaitTaskArgs[2] = NumOfElements;
DepWaitTaskArgs[3] = DependenciesArray.getPointer();
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
- NumDependencies, &DepWaitTaskArgs,
+ auto &M = CGM.getModule();
+ auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
+ TaskEntry, &Data, &DepWaitTaskArgs,
Loc](CodeGenFunction &CGF, PrePostActionTy &) {
- CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
- if (NumDependencies)
- CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
- DepWaitTaskArgs);
+ if (!Data.Dependences.empty())
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
+ DepWaitTaskArgs);
// Call proxy_task_entry(gtid, new_task);
auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -5338,9 +5166,12 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
// Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task);
RegionCodeGenTy RCG(CodeGen);
- CommonActionTy Action(
- RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
- RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_task_begin_if0),
+ TaskArgs,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_task_complete_if0),
+ TaskArgs);
RCG.setAction(Action);
RCG(CGF);
};
@@ -5434,7 +5265,9 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Result.TaskDupFn, CGF.VoidPtrTy)
: llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskloop),
+ TaskArgs);
}
/// Emit reduction operation for each element of array (required for
@@ -5776,8 +5609,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
Lock // kmp_critical_name *&<lock>
};
llvm::Value *Res = CGF.EmitRuntimeCall(
- createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
- : OMPRTL__kmpc_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
Args);
// 5. Build switch(res)
@@ -5818,8 +5652,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
RegionCodeGenTy RCG(CodeGen);
CommonActionTy Action(
nullptr, llvm::None,
- createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
- : OMPRTL__kmpc_end_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
+ : OMPRTL___kmpc_end_reduce),
EndArgs);
RCG.setAction(Action);
RCG(CGF);
@@ -5942,7 +5777,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
Lock // kmp_critical_name *&<lock>
};
CommonActionTy Action(nullptr, llvm::None,
- createRuntimeFunction(OMPRTL__kmpc_end_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_reduce),
EndArgs);
AtomicRCG.setAction(Action);
AtomicRCG(CGF);
@@ -5969,12 +5805,12 @@ static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
{D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
Out << Prefix << Name << "_"
<< D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
- return Out.str();
+ return std::string(Out.str());
}
/// Emits reduction initializer function:
/// \code
-/// void @.red_init(void* %arg) {
+/// void @.red_init(void* %arg, void* %orig) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
@@ -5984,10 +5820,15 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) {
ASTContext &C = CGM.getContext();
+ QualType VoidPtrTy = C.VoidPtrTy;
+ VoidPtrTy.addRestrict();
FunctionArgList Args;
- ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
ImplicitParamDecl::Other);
+ ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.emplace_back(&Param);
+ Args.emplace_back(&ParamOrig);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
@@ -6012,28 +5853,25 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
- LValue SharedLVal;
+ LValue OrigLVal;
// If the initializer uses the initializer from a declare reduction construct,
// emit a pointer to the address of the original reduction item (required by
// the reduction initializer).
if (RCG.usesReductionInitializer(N)) {
- Address SharedAddr =
- CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
- CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
+ Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
SharedAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
- SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
+ OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
} else {
- SharedLVal = CGF.MakeNaturalAlignAddrLValue(
+ OrigLVal = CGF.MakeNaturalAlignAddrLValue(
llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
CGM.getContext().VoidPtrTy);
}
// Emit the initializer:
// %0 = bitcast void* %arg to <type>*
// store <type> <init>, <type>* %0
- RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
+ RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
@@ -6173,18 +6011,20 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
return nullptr;
// Build typedef struct:
- // kmp_task_red_input {
+ // kmp_taskred_input {
// void *reduce_shar; // shared reduction item
+ // void *reduce_orig; // original reduction item used for initialization
// size_t reduce_size; // size of data item
// void *reduce_init; // data initialization routine
// void *reduce_fini; // data finalization routine
// void *reduce_comb; // data combiner routine
// kmp_task_red_flags_t flags; // flags for additional info from compiler
- // } kmp_task_red_input_t;
+ // } kmp_taskred_input_t;
ASTContext &C = CGM.getContext();
- RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
+ RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
RD->startDefinition();
const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -6199,8 +6039,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
// kmp_task_red_input_t .rd_input.[Size];
Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
- ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
- Data.ReductionOps);
+ ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
+ Data.ReductionCopies, Data.ReductionOps);
for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
// kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
@@ -6212,20 +6052,24 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
- RCG.emitSharedLValue(CGF, Cnt);
+ RCG.emitSharedOrigLValue(CGF, Cnt);
llvm::Value *CastedShared =
CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
+ // ElemLVal.reduce_orig = &Origs[Cnt];
+ LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
+ llvm::Value *CastedOrig =
+ CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
+ CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
RCG.emitAggregateType(CGF, Cnt);
llvm::Value *SizeValInChars;
llvm::Value *SizeVal;
std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
- // We use delayed creation/initialization for VLAs, array sections and
- // custom reduction initializations. It is required because runtime does not
- // provide the way to pass the sizes of VLAs/array sections to
- // initializer/combiner/finalizer functions and does not pass the pointer to
- // original reduction item to the initializer. Instead threadprivate global
- // variables are used to store these values and use them in the functions.
+ // We use delayed creation/initialization for VLAs and array sections. It is
+ // required because the runtime does not provide a way to pass the sizes of
+ // VLAs/array sections to the initializer/combiner/finalizer functions.
+ // Instead, threadprivate global variables are used to store these values and
+ // make them available to those functions.
bool DelayedCreation = !!SizeVal;
SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
/*isSigned=*/false);
@@ -6236,7 +6080,6 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::Value *InitAddr =
CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
CGF.EmitStoreOfScalar(InitAddr, InitLVal);
- DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
// ElemLVal.reduce_fini = fini;
LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
@@ -6260,16 +6103,52 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
FlagsLVal.getType());
}
- // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
+ if (Data.IsReductionWithTaskMod) {
+ // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
+ // is_ws, int num, void *data);
+ llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
+ CGM.IntTy, /*isSigned=*/true);
+ llvm::Value *Args[] = {
+ IdentTLoc, GTid,
+ llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
+ /*isSigned=*/true),
+ llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskRedInput.getPointer(), CGM.VoidPtrTy)};
+ return CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
+ Args);
+ }
+ // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
llvm::Value *Args[] = {
CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
/*isSigned=*/true),
llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
CGM.VoidPtrTy)};
- return CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
+ return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskred_init),
+ Args);
+}
+
+void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ bool IsWorksharingReduction) {
+ // Build call void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid,
+ // int is_ws);
+ llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
+ CGM.IntTy, /*isSigned=*/true);
+ llvm::Value *Args[] = {IdentTLoc, GTid,
+ llvm::ConstantInt::get(CGM.IntTy,
+ IsWorksharingReduction ? 1 : 0,
+ /*isSigned=*/true)};
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
+ Args);
}
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
@@ -6287,16 +6166,6 @@ void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
}
- // Store address of the original reduction item if custom initializer is used.
- if (RCG.usesReductionInitializer(N)) {
- Address SharedAddr = getAddrOfArtificialThreadPrivate(
- CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- RCG.getSharedLValue(N).getPointer(CGF), CGM.VoidPtrTy),
- SharedAddr, /*IsVolatile=*/false);
- }
}
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
@@ -6313,7 +6182,9 @@ Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
return Address(
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
+ Args),
SharedLVal.getAlignment());
}
@@ -6321,11 +6192,19 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
- // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- // Ignore return result until untied tasks are supported.
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
+
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateTaskwait(CGF.Builder);
+ } else {
+ // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ // Ignore return result until untied tasks are supported.
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_taskwait),
+ Args);
+ }
+
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
@@ -6382,7 +6261,9 @@ void CGOpenMPRuntime::emitCancellationPointCall(
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
llvm::Value *Result = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
+ Args);
// if (__kmpc_cancellationpoint()) {
// exit from construct;
// }
@@ -6407,17 +6288,18 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
return;
// Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
+ auto &M = CGM.getModule();
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&ThenGen = [this, &M, Loc, CancelRegion,
+ OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
llvm::Value *Result = CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
// if (__kmpc_cancel()) {
// exit from construct;
// }
@@ -6442,16 +6324,106 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
}
}
+namespace {
+/// Cleanup action for uses_allocators support.
+class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
+ ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
+
+public:
+ OMPUsesAllocatorsActionTy(
+ ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
+ : Allocators(Allocators) {}
+ void Enter(CodeGenFunction &CGF) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto &AllocatorData : Allocators) {
+ CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
+ CGF, AllocatorData.first, AllocatorData.second);
+ }
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto &AllocatorData : Allocators) {
+ CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
+ AllocatorData.first);
+ }
+ }
+};
+} // namespace
+
void CGOpenMPRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
assert(!ParentName.empty() && "Invalid target region parent name!");
HasEmittedTargetRegion = true;
+ SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
+ for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ if (!D.AllocatorTraits)
+ continue;
+ Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
+ }
+ }
+ OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
+ CodeGen.setAction(UsesAllocatorAction);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
}
+void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
+ const Expr *Allocator,
+ const Expr *AllocatorTraits) {
+ llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
+ ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
+ // Use default memspace handle.
+ llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *NumTraits = llvm::ConstantInt::get(
+ CGF.IntTy, cast<ConstantArrayType>(
+ AllocatorTraits->getType()->getAsArrayTypeUnsafe())
+ ->getSize()
+ .getLimitedValue());
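+ // The number of traits passed to the runtime is the element count of the
+ // constant traits array.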
+ LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
+ AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
+ AllocatorTraitsLVal.getBaseInfo(),
+ AllocatorTraitsLVal.getTBAAInfo());
+ llvm::Value *Traits =
+ CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
+
+ llvm::Value *AllocatorVal =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_init_allocator),
+ {ThreadId, MemSpaceHandle, NumTraits, Traits});
+ // Store to allocator.
+ CGF.EmitVarDecl(*cast<VarDecl>(
+ cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
+ LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
+ AllocatorVal =
+ CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
+ Allocator->getType(), Allocator->getExprLoc());
+ CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
+}
+
+void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
+ const Expr *Allocator) {
+ llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
+ ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
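+ // Load the allocator handle that was created by emitUsesAllocatorsInit and
+ // hand it to the runtime for destruction.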
+ LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
+ llvm::Value *AllocatorVal =
+ CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
+ AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_destroy_allocator),
+ {ThreadId, AllocatorVal});
+}
+
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
@@ -6483,7 +6455,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
+ OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
// If this target outline function is not an offload entry, we don't need to
// register it.
@@ -6669,6 +6641,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -6684,6 +6658,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -6697,6 +6673,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_requires:
case OMPD_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("Unexpected directive kind.");
}
@@ -6980,6 +6958,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -6995,6 +6975,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -7008,6 +6990,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_requires:
case OMPD_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("Unsupported directive kind.");
}
@@ -7044,7 +7028,7 @@ public:
OMP_MAP_TARGET_PARAM = 0x20,
/// Signal that the runtime library has to return the device pointer
/// in the current position for the data being mapped. Used when we have the
- /// use_device_ptr clause.
+ /// use_device_ptr or use_device_addr clause.
OMP_MAP_RETURN_PARAM = 0x40,
/// This flag signals that the reference being passed is a pointer to
/// private data.
@@ -7112,26 +7096,30 @@ private:
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool ReturnDevicePointer = false;
bool IsImplicit = false;
+ bool ForDeviceAddr = false;
MapInfo() = default;
MapInfo(
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit)
+ ArrayRef<OpenMPMapModifierKind> MapModifiers, bool ReturnDevicePointer,
+ bool IsImplicit, bool ForDeviceAddr = false)
: Components(Components), MapType(MapType), MapModifiers(MapModifiers),
- ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
+ ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
+ ForDeviceAddr(ForDeviceAddr) {}
};
- /// If use_device_ptr is used on a pointer which is a struct member and there
- /// is no map information about it, then emission of that entry is deferred
- /// until the whole struct has been processed.
+ /// If use_device_ptr or use_device_addr is used on a decl which is a struct
+ /// member and there is no map information about it, then emission of that
+ /// entry is deferred until the whole struct has been processed.
struct DeferredDevicePtrEntryTy {
const Expr *IE = nullptr;
const ValueDecl *VD = nullptr;
+ bool ForDeviceAddr = false;
- DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
- : IE(IE), VD(VD) {}
+ DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
+ bool ForDeviceAddr)
+ : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
};
/// The target directive from where the mappable clauses were extracted. It
@@ -7158,6 +7146,20 @@ private:
llvm::Value *getExprTypeSize(const Expr *E) const {
QualType ExprTy = E->getType().getCanonicalType();
+ // Calculate the size for array shaping expression.
+ if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
+ llvm::Value *Size =
+ CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
+ for (const Expr *SE : OAE->getDimensions()) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(SE);
+ Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
+ CGF.getContext().getSizeType(),
+ SE->getExprLoc());
+ Size = CGF.Builder.CreateNUWMul(Size, Sz);
+ }
+ return Size;
+ }
+
// Reference types are ignored for mapping purposes.
if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
ExprTy = RefTy->getPointeeType().getCanonicalType();
@@ -7173,7 +7175,7 @@ private:
// If there is no length associated with the expression and lower bound is
// not specified too, that means we are using the whole length of the
// base.
- if (!OAE->getLength() && OAE->getColonLoc().isValid() &&
+ if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
!OAE->getLowerBound())
return CGF.getTypeSize(BaseTy);
@@ -7188,7 +7190,7 @@ private:
// If we don't have a length at this point, that is because we have an
// array section with a single element.
- if (!OAE->getLength() && OAE->getColonLoc().isInvalid())
+ if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
return ElemSize;
if (const Expr *LenExpr = OAE->getLength()) {
@@ -7198,7 +7200,7 @@ private:
LenExpr->getExprLoc());
return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
}
- assert(!OAE->getLength() && OAE->getColonLoc().isValid() &&
+ assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
OAE->getLowerBound() && "expected array_section[lb:].");
// Size = sizetype - lb * elemtype;
llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
@@ -7271,7 +7273,7 @@ private:
return false;
// An array section with no colon always refers to a single element.
- if (OASE->getColonLoc().isInvalid())
+ if (OASE->getColonLocFirst().isInvalid())
return false;
const Expr *Length = OASE->getLength();
@@ -7305,13 +7307,12 @@ private:
/// \a IsFirstComponent should be set to true if the provided set of
/// components is the first associated with a capture.
void generateInfoForComponentList(
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
- bool IsImplicit,
+ bool IsImplicit, bool ForDeviceAddr = false,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedElements = llvm::None) const {
// The following summarizes what has to be generated for each map and the
@@ -7489,6 +7490,7 @@ private:
const Expr *AssocExpr = I->getAssociatedExpression();
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+ const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
if (isa<MemberExpr>(AssocExpr)) {
// The base is the 'this' pointer. The content of the pointer is going
@@ -7498,6 +7500,11 @@ private:
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ } else if (OAShE &&
+ isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
+ BP = Address(
+ CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
} else {
// The base is the reference to the variable.
// BP = &Var.
@@ -7580,29 +7587,44 @@ private:
// types.
const auto *OASE =
dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
+ const auto *OAShE =
+ dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
+ const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
+ const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
bool IsPointer =
+ OAShE ||
(OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
+ bool IsNonDerefPointer = IsPointer && !UO && !BO;
- if (Next == CE || IsPointer || IsFinalArraySection) {
+ if (Next == CE || IsNonDerefPointer || IsFinalArraySection) {
// If this is not the last component, we expect the pointer to be
// associated with an array expression or member expression.
assert((Next == CE ||
isa<MemberExpr>(Next->getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
+ isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
+ isa<UnaryOperator>(Next->getAssociatedExpression()) ||
+ isa<BinaryOperator>(Next->getAssociatedExpression())) &&
"Unexpected expression");
- Address LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
+ Address LB = Address::invalid();
+ if (OAShE) {
+ LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.getContext().getTypeAlignInChars(
+ OAShE->getBase()->getType()));
+ } else {
+ LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
+ .getAddress(CGF);
+ }
// If this component is a pointer inside the base struct then we don't
// need to create any entry for it - it will be combined with the object
// it is pointing to into a single PTR_AND_OBJ entry.
- bool IsMemberPointer =
- IsPointer && EncounteredME &&
+ bool IsMemberPointerOrAddr =
+ (IsPointer || ForDeviceAddr) && EncounteredME &&
(dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
EncounteredME);
if (!OverlappedElements.empty()) {
@@ -7669,7 +7691,7 @@ private:
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
- if (!IsMemberPointer) {
+ if (!IsMemberPointerOrAddr) {
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
Sizes.push_back(
@@ -7708,13 +7730,20 @@ private:
// mapped member. If the parent is "*this", then the value declaration
// is nullptr.
if (EncounteredME) {
- const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
+ const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
unsigned FieldIndex = FD->getFieldIndex();
// Update info about the lowest and highest elements for this struct
if (!PartialStruct.Base.isValid()) {
PartialStruct.LowestElem = {FieldIndex, LB};
- PartialStruct.HighestElem = {FieldIndex, LB};
+ if (IsFinalArraySection) {
+ Address HB =
+ CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
+ .getAddress(CGF);
+ PartialStruct.HighestElem = {FieldIndex, HB};
+ } else {
+ PartialStruct.HighestElem = {FieldIndex, LB};
+ }
PartialStruct.Base = BP;
} else if (FieldIndex < PartialStruct.LowestElem.first) {
PartialStruct.LowestElem = {FieldIndex, LB};
@@ -7851,6 +7880,19 @@ public:
for (const auto *D : C->varlists())
FirstPrivateDecls.try_emplace(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
+ // Extract implicit firstprivates from uses_allocators clauses.
+ for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
+ FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
+ /*Implicit=*/true);
+ else if (const auto *VD = dyn_cast<VarDecl>(
+ cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
+ ->getDecl()))
+ FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
+ }
+ }
// Extract device pointer clause information.
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
@@ -7910,17 +7952,18 @@ public:
// Helper function to fill the information map for the different supported
// clauses.
- auto &&InfoGen = [&Info](
- const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
- IsImplicit);
- };
+ auto &&InfoGen =
+ [&Info](const ValueDecl *D,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef L,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ bool ReturnDevicePointer, bool IsImplicit,
+ bool ForDeviceAddr = false) {
+ const ValueDecl *VD =
+ D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
+ Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
+ IsImplicit, ForDeviceAddr);
+ };
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
@@ -7990,7 +8033,7 @@ public:
// partial struct.
InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit());
- DeferredInfo[nullptr].emplace_back(IE, VD);
+ DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
} else {
llvm::Value *Ptr =
CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
@@ -8002,6 +8045,70 @@ public:
}
}
+ // Look at the use_device_addr clause information and mark the existing map
+ // entries as such. If there is no map information for an entry in the
+ // use_device_addr list, we create one with map type 'alloc' and a zero-size
+ // section. It is the user's fault if that entry was not mapped before. If
+ // there is no map information and the pointer is a struct member, then we
+ // defer the emission of that entry until the whole struct has been processed.
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const auto *C :
+ CurExecDir->getClausesOfKind<OMPUseDeviceAddrClause>()) {
+ for (const auto L : C->component_lists()) {
+ assert(!L.second.empty() && "Not expecting empty list of components!");
+ const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ if (!Processed.insert(VD).second)
+ continue;
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ const Expr *IE = L.second.back().getAssociatedExpression();
+ // If the first component is a member expression, we have to look into
+ // 'this', which maps to null in the map of map information. Otherwise
+ // look directly for the information.
+ auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
+
+ // We potentially have map information for this declaration already.
+ // Look for the first set of components that refer to it.
+ if (It != Info.end()) {
+ auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be returned
+ // and move on to the next declaration.
+ if (CI != It->second.end()) {
+ CI->ReturnDevicePointer = true;
+ continue;
+ }
+ }
+
+ // We didn't find any match in our map information, so generate a zero-size
+ // array section. If the pointer is a struct member, we defer this action
+ // until the whole struct has been processed.
+ if (isa<MemberExpr>(IE)) {
+ // Insert the pointer into Info to be processed by
+ // generateInfoForComponentList. Because it is a member pointer
+ // without a pointee, no entry will be generated for it, therefore
+ // we need to generate one after the whole struct has been processed.
+ // Nonetheless, generateInfoForComponentList must be called to take
+ // the pointer into account for the calculation of the range of the
+ // partial struct.
+ InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
+ /*ReturnDevicePointer=*/false, C->isImplicit(),
+ /*ForDeviceAddr=*/true);
+ DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
+ } else {
+ llvm::Value *Ptr;
+ if (IE->isGLValue())
+ Ptr = CGF.EmitLValue(IE).getPointer(CGF);
+ else
+ Ptr = CGF.EmitScalarExpr(IE);
+ BasePointers.emplace_back(Ptr, VD);
+ Pointers.push_back(Ptr);
+ Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
+ Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ }
+ }
+ }
+
for (const auto &M : Info) {
// We need to know when we generate information for the first component
// associated with a capture, because the mapping flags depend on it.
@@ -8020,10 +8127,10 @@ public:
// Remember the current base pointer index.
unsigned CurrentBasePointersIdx = CurBasePointers.size();
- generateInfoForComponentList(L.MapType, L.MapModifiers, L.Components,
- CurBasePointers, CurPointers, CurSizes,
- CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit);
+ generateInfoForComponentList(
+ L.MapType, L.MapModifiers, L.Components, CurBasePointers,
+ CurPointers, CurSizes, CurTypes, PartialStruct,
+ IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
// If this entry relates with a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
@@ -8043,21 +8150,35 @@ public:
}
// Append any pending zero-length pointers which are struct members and
- // used with use_device_ptr.
+ // used with use_device_ptr or use_device_addr.
auto CI = DeferredInfo.find(M.first);
if (CI != DeferredInfo.end()) {
for (const DeferredDevicePtrEntryTy &L : CI->second) {
- llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
- llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
- this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
+ llvm::Value *BasePtr;
+ llvm::Value *Ptr;
+ if (L.ForDeviceAddr) {
+ if (L.IE->isGLValue())
+ Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
+ else
+ Ptr = this->CGF.EmitScalarExpr(L.IE);
+ BasePtr = Ptr;
+ // Entry is RETURN_PARAM. Also, set the placeholder value
+ // MEMBER_OF=FFFF so that the entry is later updated with the
+ // correct value of MEMBER_OF.
+ CurTypes.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
+ } else {
+ BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
+ Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
+ L.IE->getExprLoc());
+ // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
+ // value MEMBER_OF=FFFF so that the entry is later updated with the
+ // correct value of MEMBER_OF.
+ CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
+ OMP_MAP_MEMBER_OF);
+ }
CurBasePointers.emplace_back(BasePtr, L.VD);
CurPointers.push_back(Ptr);
CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.Int64Ty));
- // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
- // value MEMBER_OF=FFFF so that the entry is later updated with the
- // correct value of MEMBER_OF.
- CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
- OMP_MAP_MEMBER_OF);
}
}
@@ -8126,10 +8247,10 @@ public:
for (const MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(L.MapType, L.MapModifiers, L.Components,
- CurBasePointers, CurPointers, CurSizes,
- CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit);
+ generateInfoForComponentList(
+ L.MapType, L.MapModifiers, L.Components, CurBasePointers,
+ CurPointers, CurSizes, CurTypes, PartialStruct,
+ IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
IsFirstComponentList = false;
}
@@ -8395,10 +8516,10 @@ public:
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
bool IsFirstComponentList = true;
- generateInfoForComponentList(MapType, MapModifiers, Components,
- BasePointers, Pointers, Sizes, Types,
- PartialStruct, IsFirstComponentList,
- IsImplicit, OverlappedComponents);
+ generateInfoForComponentList(
+ MapType, MapModifiers, Components, BasePointers, Pointers, Sizes,
+ Types, PartialStruct, IsFirstComponentList, IsImplicit,
+ /*ForDeviceAddr=*/false, OverlappedComponents);
}
// Go through other elements without overlapped elements.
bool IsFirstComponentList = OverlappedData.empty();
@@ -8759,6 +8880,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -8774,6 +8897,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -8786,6 +8911,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -8935,7 +9061,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// pre-existing components.
llvm::Value *OffloadingArgs[] = {Handle};
llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_mapper_num_components), OffloadingArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___tgt_mapper_num_components),
+ OffloadingArgs);
llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
PreviousSize,
MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
@@ -9041,7 +9169,8 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
CurSizeArg, CurMapType};
MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_push_mapper_component),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_push_mapper_component),
OffloadingArgs);
}
@@ -9085,8 +9214,9 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
// Evaluate if this is an array section.
llvm::BasicBlock *IsDeleteBB =
- MapperCGF.createBasicBlock("omp.array" + Prefix + ".evaldelete");
- llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.array" + Prefix);
+ MapperCGF.createBasicBlock(getName({"omp.array", Prefix, ".evaldelete"}));
+ llvm::BasicBlock *BodyBB =
+ MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGE(
Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
MapperCGF.Builder.CreateCondBr(IsArray, IsDeleteBB, ExitBB);
@@ -9099,10 +9229,10 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
llvm::Value *DeleteCond;
if (IsInit) {
DeleteCond = MapperCGF.Builder.CreateIsNull(
- DeleteBit, "omp.array" + Prefix + ".delete");
+ DeleteBit, getName({"omp.array", Prefix, ".delete"}));
} else {
DeleteCond = MapperCGF.Builder.CreateIsNotNull(
- DeleteBit, "omp.array" + Prefix + ".delete");
+ DeleteBit, getName({"omp.array", Prefix, ".delete"}));
}
MapperCGF.Builder.CreateCondBr(DeleteCond, BodyBB, ExitBB);
@@ -9121,7 +9251,9 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
// data structure.
llvm::Value *OffloadingArgs[] = {Handle, Base, Begin, ArraySize, MapTypeArg};
MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_push_mapper_component), OffloadingArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___tgt_push_mapper_component),
+ OffloadingArgs);
}
void CGOpenMPRuntime::emitTargetNumIterationsCall(
@@ -9143,7 +9275,9 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
llvm::Value *Args[] = {DeviceID, NumIterations};
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_push_target_tripcount), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_target_tripcount),
+ Args);
}
};
emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
@@ -9152,7 +9286,7 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
- const Expr *Device,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
@@ -9176,6 +9310,16 @@ void CGOpenMPRuntime::emitTargetCall(
auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
&MapTypesArray, &CS, RequiresOuterTask, &CapturedVars,
SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
+ if (Device.getInt() == OMPC_DEVICE_ancestor) {
+ // Reverse offloading is not supported, so just execute on the host.
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
+ return;
+ }
+
// On top of the arrays that were filled up, the target offloading call
// takes as arguments the device id as well as the host pointer. The host
// pointer is used by the runtime library to identify the current target
@@ -9190,9 +9334,13 @@ void CGOpenMPRuntime::emitTargetCall(
// Emit device ID if any.
llvm::Value *DeviceID;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
+ if (Device.getPointer()) {
+ assert((Device.getInt() == OMPC_DEVICE_unknown ||
+ Device.getInt() == OMPC_DEVICE_device_num) &&
+ "Expected device_num modifier.");
+ llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
+ DeviceID =
+ CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
@@ -9256,8 +9404,9 @@ void CGOpenMPRuntime::emitTargetCall(
NumTeams,
NumThreads};
Return = CGF.EmitRuntimeCall(
- createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
- : OMPRTL__tgt_target_teams),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), HasNowait ? OMPRTL___tgt_target_teams_nowait
+ : OMPRTL___tgt_target_teams),
OffloadingArgs);
} else {
llvm::Value *OffloadingArgs[] = {DeviceID,
@@ -9268,8 +9417,9 @@ void CGOpenMPRuntime::emitTargetCall(
InputInfo.SizesArray.getPointer(),
MapTypesArray};
Return = CGF.EmitRuntimeCall(
- createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
- : OMPRTL__tgt_target),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ HasNowait ? OMPRTL___tgt_target_nowait : OMPRTL___tgt_target),
OffloadingArgs);
}
@@ -9521,6 +9671,8 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -9536,6 +9688,8 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -9548,6 +9702,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
}
return;
@@ -9774,22 +9929,40 @@ void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
" Expected target-based directive.");
}
-void CGOpenMPRuntime::checkArchForUnifiedAddressing(
- const OMPRequiresDecl *D) {
+void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
HasRequiresUnifiedSharedMemory = true;
- break;
+ } else if (const auto *AC =
+ dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
+ switch (AC->getAtomicDefaultMemOrderKind()) {
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
+ break;
+ }
}
}
}
+llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
+ return RequiresAtomicOrdering;
+}
+
bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
LangAS &AS) {
if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
switch(A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    // Not supported, fall back to the default mem space.
case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
@@ -9865,7 +10038,7 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
const auto &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string ReqName = getName({"omp_offloading", "requires_reg"});
- RequiresRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, ReqName, FI);
+ RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
// TODO: check for other requires clauses.
@@ -9880,8 +10053,9 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
"Target or declare target region expected.");
if (HasRequiresUnifiedSharedMemory)
Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_requires),
- llvm::ConstantInt::get(CGM.Int64Ty, Flags));
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_register_requires),
+ llvm::ConstantInt::get(CGM.Int64Ty, Flags));
CGF.FinishFunction();
}
return RequiresRegFn;
@@ -9907,7 +10081,8 @@ void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- llvm::FunctionCallee RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
+ llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_fork_teams);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
@@ -9935,7 +10110,8 @@ void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
  // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
ThreadLimitVal};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_num_teams),
PushNumTeamsArgs);
}
@@ -9989,7 +10165,8 @@ void CGOpenMPRuntime::emitTargetDataCalls(
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_begin),
OffloadingArgs);
// If device pointer privatization is required, emit the body of the region
@@ -10025,7 +10202,8 @@ void CGOpenMPRuntime::emitTargetDataCalls(
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_end),
OffloadingArgs);
};
@@ -10105,19 +10283,19 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Select the right runtime function call for each expected standalone
// directive.
const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
- OpenMPRTLFunction RTLFn;
+ RuntimeFunction RTLFn;
switch (D.getDirectiveKind()) {
case OMPD_target_enter_data:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
- : OMPRTL__tgt_target_data_begin;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait
+ : OMPRTL___tgt_target_data_begin;
break;
case OMPD_target_exit_data:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
- : OMPRTL__tgt_target_data_end;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait
+ : OMPRTL___tgt_target_data_end;
break;
case OMPD_target_update:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
- : OMPRTL__tgt_target_data_update;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait
+ : OMPRTL___tgt_target_data_update;
break;
case OMPD_parallel:
case OMPD_for:
@@ -10144,6 +10322,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_distribute:
@@ -10156,6 +10336,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -10178,10 +10360,13 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_target_parallel_for_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected standalone target data directive.");
break;
}
- CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
+ OffloadingArgs);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
@@ -10343,7 +10528,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
break;
case Linear:
Out << 'l';
- if (!!ParamAttr.StrideOrArg)
+ if (ParamAttr.StrideOrArg != 1)
Out << ParamAttr.StrideOrArg;
break;
case Uniform:
@@ -10420,7 +10605,7 @@ static bool getAArch64PBV(QualType QT, ASTContext &C) {
/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
/// TODO: Add support for references, section 3.2.1, item 1.
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
- if (getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
+ if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
QualType PTy = QT.getCanonicalType()->getPointeeType();
if (getAArch64PBV(PTy, C))
return C.getTypeSize(PTy);
@@ -10483,7 +10668,7 @@ static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
Out << 'l';
// Don't print the step value if it is not present or if it is
// equal to 1.
- if (!!ParamAttr.StrideOrArg && ParamAttr.StrideOrArg != 1)
+ if (ParamAttr.StrideOrArg != 1)
Out << ParamAttr.StrideOrArg;
break;
case Uniform:
@@ -10498,7 +10683,7 @@ static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
Out << 'a' << ParamAttr.Alignment;
}
- return Out.str();
+ return std::string(Out.str());
}
// Function used to add the attribute. The parameter `VLEN` is
@@ -10721,15 +10906,24 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
for (const Expr *E : Attr->linears()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
+ // Rescaling factor needed to compute the linear parameter
+ // value in the mangled name.
+ unsigned PtrRescalingFactor = 1;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
Pos = ParamPositions[PVD];
+ if (auto *P = dyn_cast<PointerType>(PVD->getType()))
+ PtrRescalingFactor = CGM.getContext()
+ .getTypeSizeInChars(P->getPointeeType())
+ .getQuantity();
}
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
ParamAttr.Kind = Linear;
+ // Assuming a stride of 1, for `linear` without modifiers.
+ ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
if (*SI) {
Expr::EvalResult Result;
if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
@@ -10745,6 +10939,11 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamAttr.StrideOrArg = Result.Val.getInt();
}
}
+ // If we are using a linear clause on a pointer, we need to
+ // rescale the value of linear_step with the byte size of the
+ // pointee type.
+ if (Linear == ParamAttr.Kind)
+ ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
++SI;
++MI;
}
@@ -10837,10 +11036,9 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
// dims.upper = num_iterations;
LValue UpperLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), UpperFD));
- llvm::Value *NumIterVal =
- CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
- D.getNumIterations()->getType(), Int64Ty,
- D.getNumIterations()->getExprLoc());
+ llvm::Value *NumIterVal = CGF.EmitScalarConversion(
+ CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
+ Int64Ty, NumIterations[I]->getExprLoc());
CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
// dims.stride = 1;
LValue StrideLVal = CGF.EmitLValueForField(
@@ -10859,13 +11057,13 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
CGM.VoidPtrTy)};
- llvm::FunctionCallee RTLFn =
- createRuntimeFunction(OMPRTL__kmpc_doacross_init);
+ llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
- llvm::FunctionCallee FiniRTLFn =
- createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
+ llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
}
@@ -10893,10 +11091,12 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
llvm::FunctionCallee RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
- RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
+ RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_doacross_post);
} else {
assert(C->getDependencyKind() == OMPC_DEPEND_sink);
- RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
+ RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
@@ -10969,7 +11169,8 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
return Address::invalid();
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
// Use the default allocation.
- if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
+ if ((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
+ AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
!AA->getAllocator())
return Address::invalid();
llvm::Value *Size;
@@ -10999,296 +11200,23 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::Value *Args[] = {ThreadID, Size, Allocator};
llvm::Value *Addr =
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_alloc), Args,
- CVD->getName() + ".void.addr");
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc),
+ Args, getName({CVD->getName(), ".void.addr"}));
llvm::Value *FiniArgs[OMPAllocateCleanupTy::CleanupArgs] = {ThreadID, Addr,
Allocator};
- llvm::FunctionCallee FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_free);
+ llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr,
CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
- CVD->getName() + ".addr");
+ getName({CVD->getName(), ".addr"}));
return Address(Addr, Align);
}
-namespace {
-using OMPContextSelectorData =
- OpenMPCtxSelectorData<ArrayRef<StringRef>, llvm::APSInt>;
-using CompleteOMPContextSelectorData = SmallVector<OMPContextSelectorData, 4>;
-} // anonymous namespace
-
-/// Checks current context and returns true if it matches the context selector.
-template <OpenMPContextSelectorSetKind CtxSet, OpenMPContextSelectorKind Ctx,
- typename... Arguments>
-static bool checkContext(const OMPContextSelectorData &Data,
- Arguments... Params) {
- assert(Data.CtxSet != OMP_CTX_SET_unknown && Data.Ctx != OMP_CTX_unknown &&
- "Unknown context selector or context selector set.");
- return false;
-}
-
-/// Checks for implementation={vendor(<vendor>)} context selector.
-/// \returns true iff <vendor>="llvm", false otherwise.
-template <>
-bool checkContext<OMP_CTX_SET_implementation, OMP_CTX_vendor>(
- const OMPContextSelectorData &Data) {
- return llvm::all_of(Data.Names,
- [](StringRef S) { return !S.compare_lower("llvm"); });
-}
-
-/// Checks for device={kind(<kind>)} context selector.
-/// \returns true if <kind>="host" and compilation is for host.
-/// true if <kind>="nohost" and compilation is for device.
-/// true if <kind>="cpu" and compilation is for Arm, X86 or PPC CPU.
-/// true if <kind>="gpu" and compilation is for NVPTX or AMDGCN.
-/// false otherwise.
-template <>
-bool checkContext<OMP_CTX_SET_device, OMP_CTX_kind, CodeGenModule &>(
- const OMPContextSelectorData &Data, CodeGenModule &CGM) {
- for (StringRef Name : Data.Names) {
- if (!Name.compare_lower("host")) {
- if (CGM.getLangOpts().OpenMPIsDevice)
- return false;
- continue;
- }
- if (!Name.compare_lower("nohost")) {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- return false;
- continue;
- }
- switch (CGM.getTriple().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- if (Name.compare_lower("cpu"))
- return false;
- break;
- case llvm::Triple::amdgcn:
- case llvm::Triple::nvptx:
- case llvm::Triple::nvptx64:
- if (Name.compare_lower("gpu"))
- return false;
- break;
- case llvm::Triple::UnknownArch:
- case llvm::Triple::arc:
- case llvm::Triple::avr:
- case llvm::Triple::bpfel:
- case llvm::Triple::bpfeb:
- case llvm::Triple::hexagon:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::msp430:
- case llvm::Triple::r600:
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::sparcel:
- case llvm::Triple::systemz:
- case llvm::Triple::tce:
- case llvm::Triple::tcele:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::xcore:
- case llvm::Triple::le32:
- case llvm::Triple::le64:
- case llvm::Triple::amdil:
- case llvm::Triple::amdil64:
- case llvm::Triple::hsail:
- case llvm::Triple::hsail64:
- case llvm::Triple::spir:
- case llvm::Triple::spir64:
- case llvm::Triple::kalimba:
- case llvm::Triple::shave:
- case llvm::Triple::lanai:
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- case llvm::Triple::renderscript32:
- case llvm::Triple::renderscript64:
- case llvm::Triple::ve:
- return false;
- }
- }
- return true;
-}
-
-static bool matchesContext(CodeGenModule &CGM,
- const CompleteOMPContextSelectorData &ContextData) {
- for (const OMPContextSelectorData &Data : ContextData) {
- switch (Data.Ctx) {
- case OMP_CTX_vendor:
- assert(Data.CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- if (!checkContext<OMP_CTX_SET_implementation, OMP_CTX_vendor>(Data))
- return false;
- break;
- case OMP_CTX_kind:
- assert(Data.CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- if (!checkContext<OMP_CTX_SET_device, OMP_CTX_kind, CodeGenModule &>(Data,
- CGM))
- return false;
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- }
- return true;
-}
-
-static CompleteOMPContextSelectorData
-translateAttrToContextSelectorData(ASTContext &C,
- const OMPDeclareVariantAttr *A) {
- CompleteOMPContextSelectorData Data;
- for (unsigned I = 0, E = A->scores_size(); I < E; ++I) {
- Data.emplace_back();
- auto CtxSet = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(A->ctxSelectorSets_begin(), I));
- auto Ctx = static_cast<OpenMPContextSelectorKind>(
- *std::next(A->ctxSelectors_begin(), I));
- Data.back().CtxSet = CtxSet;
- Data.back().Ctx = Ctx;
- const Expr *Score = *std::next(A->scores_begin(), I);
- Data.back().Score = Score->EvaluateKnownConstInt(C);
- switch (Ctx) {
- case OMP_CTX_vendor:
- assert(CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- Data.back().Names =
- llvm::makeArrayRef(A->implVendors_begin(), A->implVendors_end());
- break;
- case OMP_CTX_kind:
- assert(CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- Data.back().Names =
- llvm::makeArrayRef(A->deviceKinds_begin(), A->deviceKinds_end());
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- }
- return Data;
-}
-
-static bool isStrictSubset(const CompleteOMPContextSelectorData &LHS,
- const CompleteOMPContextSelectorData &RHS) {
- llvm::SmallDenseMap<std::pair<int, int>, llvm::StringSet<>, 4> RHSData;
- for (const OMPContextSelectorData &D : RHS) {
- auto &Pair = RHSData.FindAndConstruct(std::make_pair(D.CtxSet, D.Ctx));
- Pair.getSecond().insert(D.Names.begin(), D.Names.end());
- }
- bool AllSetsAreEqual = true;
- for (const OMPContextSelectorData &D : LHS) {
- auto It = RHSData.find(std::make_pair(D.CtxSet, D.Ctx));
- if (It == RHSData.end())
- return false;
- if (D.Names.size() > It->getSecond().size())
- return false;
- if (llvm::set_union(It->getSecond(), D.Names))
- return false;
- AllSetsAreEqual =
- AllSetsAreEqual && (D.Names.size() == It->getSecond().size());
- }
-
- return LHS.size() != RHS.size() || !AllSetsAreEqual;
-}
-
-static bool greaterCtxScore(const CompleteOMPContextSelectorData &LHS,
- const CompleteOMPContextSelectorData &RHS) {
- // Score is calculated as sum of all scores + 1.
- llvm::APSInt LHSScore(llvm::APInt(64, 1), /*isUnsigned=*/false);
- bool RHSIsSubsetOfLHS = isStrictSubset(RHS, LHS);
- if (RHSIsSubsetOfLHS) {
- LHSScore = llvm::APSInt::get(0);
- } else {
- for (const OMPContextSelectorData &Data : LHS) {
- if (Data.Score.getBitWidth() > LHSScore.getBitWidth()) {
- LHSScore = LHSScore.extend(Data.Score.getBitWidth()) + Data.Score;
- } else if (Data.Score.getBitWidth() < LHSScore.getBitWidth()) {
- LHSScore += Data.Score.extend(LHSScore.getBitWidth());
- } else {
- LHSScore += Data.Score;
- }
- }
- }
- llvm::APSInt RHSScore(llvm::APInt(64, 1), /*isUnsigned=*/false);
- if (!RHSIsSubsetOfLHS && isStrictSubset(LHS, RHS)) {
- RHSScore = llvm::APSInt::get(0);
- } else {
- for (const OMPContextSelectorData &Data : RHS) {
- if (Data.Score.getBitWidth() > RHSScore.getBitWidth()) {
- RHSScore = RHSScore.extend(Data.Score.getBitWidth()) + Data.Score;
- } else if (Data.Score.getBitWidth() < RHSScore.getBitWidth()) {
- RHSScore += Data.Score.extend(RHSScore.getBitWidth());
- } else {
- RHSScore += Data.Score;
- }
- }
- }
- return llvm::APSInt::compareValues(LHSScore, RHSScore) >= 0;
-}
-
-/// Finds the variant function that matches current context with its context
-/// selector.
-static const FunctionDecl *getDeclareVariantFunction(CodeGenModule &CGM,
- const FunctionDecl *FD) {
- if (!FD->hasAttrs() || !FD->hasAttr<OMPDeclareVariantAttr>())
- return FD;
- // Iterate through all DeclareVariant attributes and check context selectors.
- const OMPDeclareVariantAttr *TopMostAttr = nullptr;
- CompleteOMPContextSelectorData TopMostData;
- for (const auto *A : FD->specific_attrs<OMPDeclareVariantAttr>()) {
- CompleteOMPContextSelectorData Data =
- translateAttrToContextSelectorData(CGM.getContext(), A);
- if (!matchesContext(CGM, Data))
- continue;
- // If the attribute matches the context, find the attribute with the highest
- // score.
- if (!TopMostAttr || !greaterCtxScore(TopMostData, Data)) {
- TopMostAttr = A;
- TopMostData.swap(Data);
- }
- }
- if (!TopMostAttr)
- return FD;
- return cast<FunctionDecl>(
- cast<DeclRefExpr>(TopMostAttr->getVariantFuncRef()->IgnoreParenImpCasts())
- ->getDecl());
-}
-
-bool CGOpenMPRuntime::emitDeclareVariant(GlobalDecl GD, bool IsForDefinition) {
- const auto *D = cast<FunctionDecl>(GD.getDecl());
- // If the original function is defined already, use its definition.
- StringRef MangledName = CGM.getMangledName(GD);
- llvm::GlobalValue *Orig = CGM.GetGlobalValue(MangledName);
- if (Orig && !Orig->isDeclaration())
- return false;
- const FunctionDecl *NewFD = getDeclareVariantFunction(CGM, D);
- // Emit original function if it does not have declare variant attribute or the
- // context does not match.
- if (NewFD == D)
- return false;
- GlobalDecl NewGD = GD.getWithDecl(NewFD);
- if (tryEmitDeclareVariant(NewGD, GD, Orig, IsForDefinition)) {
- DeferredVariantFunction.erase(D);
- return true;
- }
- DeferredVariantFunction.insert(std::make_pair(D, std::make_pair(NewGD, GD)));
- return true;
-}
-
CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
CodeGenModule &CGM, const OMPLoopDirective &S)
: CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
@@ -11329,17 +11257,101 @@ bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
[VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; });
}
+void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
+ const OMPExecutableDirective &S,
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
+ const {
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
+ // Vars in target/task regions must be excluded completely.
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
+ isOpenMPTaskingDirective(S.getDirectiveKind())) {
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
+ const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
+ for (const CapturedStmt::Capture &Cap : CS->captures()) {
+ if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
+ NeedToCheckForLPCs.insert(Cap.getCapturedVar());
+ }
+ }
+ // Exclude vars in private clauses.
+ for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const Decl *VD : NeedToCheckForLPCs) {
+ for (const LastprivateConditionalData &Data :
+ llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
+ if (Data.DeclToUniqueName.count(VD) > 0) {
+ if (!Data.Disabled)
+ NeedToAddForLPCsAsDisabled.insert(VD);
+ break;
+ }
+ }
+ }
+}
+
CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
: CGM(CGF.CGM),
- NeedToPush(llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
- [](const OMPLastprivateClause *C) {
- return C->getKind() ==
- OMPC_LASTPRIVATE_conditional;
- })) {
+ Action((CGM.getLangOpts().OpenMP >= 50 &&
+ llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
+ [](const OMPLastprivateClause *C) {
+ return C->getKind() ==
+ OMPC_LASTPRIVATE_conditional;
+ }))
+ ? ActionToDo::PushAsLastprivateConditional
+ : ActionToDo::DoNotPush) {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
- if (!NeedToPush)
+ if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
return;
+ assert(Action == ActionToDo::PushAsLastprivateConditional &&
+ "Expected a push action.");
LastprivateConditionalData &Data =
CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
@@ -11347,107 +11359,136 @@ CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
continue;
for (const Expr *Ref : C->varlists()) {
- Data.DeclToUniqeName.try_emplace(
+ Data.DeclToUniqueName.insert(std::make_pair(
cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
- generateUniqueName(CGM, "pl_cond", Ref));
+ SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
}
}
Data.IVLVal = IVLVal;
- // In simd only mode or for simd directives no need to generate threadprivate
- // references for the loop iteration counter, we can use the original one
- // since outlining cannot happen in simd regions.
- if (CGF.getLangOpts().OpenMPSimd ||
- isOpenMPSimdDirective(S.getDirectiveKind())) {
- Data.UseOriginalIV = true;
+ Data.Fn = CGF.CurFn;
+}
+
+CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
+ CodeGenFunction &CGF, const OMPExecutableDirective &S)
+ : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
+ assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
+ if (CGM.getLangOpts().OpenMP < 50)
return;
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
+ tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
+ if (!NeedToAddForLPCsAsDisabled.empty()) {
+ Action = ActionToDo::DisableLastprivateConditional;
+ LastprivateConditionalData &Data =
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
+ for (const Decl *VD : NeedToAddForLPCsAsDisabled)
+ Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
+ Data.Fn = CGF.CurFn;
+ Data.Disabled = true;
}
- llvm::SmallString<16> Buffer;
- llvm::raw_svector_ostream OS(Buffer);
- PresumedLoc PLoc =
- CGM.getContext().getSourceManager().getPresumedLoc(S.getBeginLoc());
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
+}
- llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- CGM.getDiags().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
- OS << "$pl_cond_" << ID.getDevice() << "_" << ID.getFile() << "_"
- << PLoc.getLine() << "_" << PLoc.getColumn() << "$iv";
- Data.IVName = OS.str();
+CGOpenMPRuntime::LastprivateConditionalRAII
+CGOpenMPRuntime::LastprivateConditionalRAII::disable(
+ CodeGenFunction &CGF, const OMPExecutableDirective &S) {
+ return LastprivateConditionalRAII(CGF, S);
}
CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
- if (!NeedToPush)
+ if (CGM.getLangOpts().OpenMP < 50)
return;
- CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ if (Action == ActionToDo::DisableLastprivateConditional) {
+ assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
+ "Expected list of disabled private vars.");
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ }
+ if (Action == ActionToDo::PushAsLastprivateConditional) {
+ assert(
+ !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
+ "Expected list of lastprivate conditional vars.");
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ }
}
-void CGOpenMPRuntime::initLastprivateConditionalCounter(
- CodeGenFunction &CGF, const OMPExecutableDirective &S) {
- if (CGM.getLangOpts().OpenMPSimd ||
- !llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
- [](const OMPLastprivateClause *C) {
- return C->getKind() == OMPC_LASTPRIVATE_conditional;
- }))
- return;
- const CGOpenMPRuntime::LastprivateConditionalData &Data =
- LastprivateConditionalStack.back();
- if (Data.UseOriginalIV)
- return;
- // Global loop counter. Required to handle inner parallel-for regions.
- // global_iv = iv;
- Address GlobIVAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
- CGF, Data.IVLVal.getType(), Data.IVName);
- LValue GlobIVLVal = CGF.MakeAddrLValue(GlobIVAddr, Data.IVLVal.getType());
- llvm::Value *IVVal = CGF.EmitLoadOfScalar(Data.IVLVal, S.getBeginLoc());
- CGF.EmitStoreOfScalar(IVVal, GlobIVLVal);
+Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ ASTContext &C = CGM.getContext();
+ auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
+ if (I == LastprivateConditionalToTypes.end())
+ I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
+ QualType NewType;
+ const FieldDecl *VDField;
+ const FieldDecl *FiredField;
+ LValue BaseLVal;
+ auto VI = I->getSecond().find(VD);
+ if (VI == I->getSecond().end()) {
+ RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional");
+ RD->startDefinition();
+ VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
+ FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
+ RD->completeDefinition();
+ NewType = C.getRecordType(RD);
+ Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
+ BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
+ I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
+ } else {
+ NewType = std::get<0>(VI->getSecond());
+ VDField = std::get<1>(VI->getSecond());
+ FiredField = std::get<2>(VI->getSecond());
+ BaseLVal = std::get<3>(VI->getSecond());
+ }
+ LValue FiredLVal =
+ CGF.EmitLValueForField(BaseLVal, FiredField);
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
+ FiredLVal);
+ return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
}
namespace {
/// Checks if the lastprivate conditional variable is referenced in LHS.
class LastprivateConditionalRefChecker final
: public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
- CodeGenFunction &CGF;
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
const Expr *FoundE = nullptr;
const Decl *FoundD = nullptr;
StringRef UniqueDeclName;
LValue IVLVal;
- StringRef IVName;
+ llvm::Function *FoundFn = nullptr;
SourceLocation Loc;
- bool UseOriginalIV = false;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
- auto It = D.DeclToUniqeName.find(E->getDecl());
- if (It == D.DeclToUniqeName.end())
+ auto It = D.DeclToUniqueName.find(E->getDecl());
+ if (It == D.DeclToUniqueName.end())
continue;
+ if (D.Disabled)
+ return false;
FoundE = E;
FoundD = E->getDecl()->getCanonicalDecl();
- UniqueDeclName = It->getSecond();
+ UniqueDeclName = It->second;
IVLVal = D.IVLVal;
- IVName = D.IVName;
- UseOriginalIV = D.UseOriginalIV;
+ FoundFn = D.Fn;
break;
}
return FoundE == E;
}
bool VisitMemberExpr(const MemberExpr *E) {
- if (!CGF.IsWrappedCXXThis(E->getBase()))
+ if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
return false;
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
- auto It = D.DeclToUniqeName.find(E->getMemberDecl());
- if (It == D.DeclToUniqeName.end())
+ auto It = D.DeclToUniqueName.find(E->getMemberDecl());
+ if (It == D.DeclToUniqueName.end())
continue;
+ if (D.Disabled)
+ return false;
FoundE = E;
FoundD = E->getMemberDecl()->getCanonicalDecl();
- UniqueDeclName = It->getSecond();
+ UniqueDeclName = It->second;
IVLVal = D.IVLVal;
- IVName = D.IVName;
- UseOriginalIV = D.UseOriginalIV;
+ FoundFn = D.Fn;
break;
}
return FoundE == E;
@@ -11465,62 +11506,41 @@ public:
return false;
}
explicit LastprivateConditionalRefChecker(
- CodeGenFunction &CGF,
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
- : CGF(CGF), LPM(LPM) {}
- std::tuple<const Expr *, const Decl *, StringRef, LValue, StringRef, bool>
+ : LPM(LPM) {}
+ std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
getFoundData() const {
- return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, IVName,
- UseOriginalIV);
+ return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
}
};
} // namespace
-void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
- const Expr *LHS) {
- if (CGF.getLangOpts().OpenMP < 50)
- return;
- LastprivateConditionalRefChecker Checker(CGF, LastprivateConditionalStack);
- if (!Checker.Visit(LHS))
- return;
- const Expr *FoundE;
- const Decl *FoundD;
- StringRef UniqueDeclName;
- LValue IVLVal;
- StringRef IVName;
- bool UseOriginalIV;
- std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, IVName, UseOriginalIV) =
- Checker.getFoundData();
-
+void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
+ LValue IVLVal,
+ StringRef UniqueDeclName,
+ LValue LVal,
+ SourceLocation Loc) {
// Last updated loop counter for the lastprivate conditional var.
// int<xx> last_iv = 0;
llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
llvm::Constant *LastIV =
- getOrCreateInternalVariable(LLIVTy, UniqueDeclName + "$iv");
+ getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
- // Private address of the lastprivate conditional in the current context.
- // priv_a
- LValue LVal = CGF.EmitLValue(FoundE);
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
llvm::Constant *Last = getOrCreateInternalVariable(
- LVal.getAddress(CGF).getElementType(), UniqueDeclName);
+ CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
cast<llvm::GlobalVariable>(Last)->setAlignment(
LVal.getAlignment().getAsAlign());
LValue LastLVal =
CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
// Global loop counter. Required to handle inner parallel-for regions.
- // global_iv
- if (!UseOriginalIV) {
- Address IVAddr =
- getAddrOfArtificialThreadPrivate(CGF, IVLVal.getType(), IVName);
- IVLVal = CGF.MakeAddrLValue(IVAddr, IVLVal.getType());
- }
- llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, FoundE->getExprLoc());
+ // iv
+ llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
// #pragma omp critical(a)
// if (last_iv <= iv) {
@@ -11528,11 +11548,10 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
// last_a = priv_a;
// }
auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
- FoundE](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- llvm::Value *LastIVVal =
- CGF.EmitLoadOfScalar(LastIVLVal, FoundE->getExprLoc());
- // (last_iv <= global_iv) ? Check if the variable is updated and store new
+ llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
+ // (last_iv <= iv) ? Check if the variable is updated and store new
// value in global var.
llvm::Value *CmpRes;
if (IVLVal.getType()->isSignedIntegerType()) {
@@ -11548,19 +11567,18 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
// {
CGF.EmitBlock(ThenBB);
- // last_iv = global_iv;
+ // last_iv = iv;
CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
// last_a = priv_a;
switch (CGF.getEvaluationKind(LVal.getType())) {
case TEK_Scalar: {
- llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, FoundE->getExprLoc());
+ llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
CGF.EmitStoreOfScalar(PrivVal, LastLVal);
break;
}
case TEK_Complex: {
- CodeGenFunction::ComplexPairTy PrivVal =
- CGF.EmitLoadOfComplex(LVal, FoundE->getExprLoc());
+ CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
break;
}
@@ -11580,7 +11598,100 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
RegionCodeGenTy ThenRCG(CodeGen);
ThenRCG(CGF);
} else {
- emitCriticalRegion(CGF, UniqueDeclName, CodeGen, FoundE->getExprLoc());
+ emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
+ }
+}
+
+void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
+ const Expr *LHS) {
+ if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
+ return;
+ LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
+ if (!Checker.Visit(LHS))
+ return;
+ const Expr *FoundE;
+ const Decl *FoundD;
+ StringRef UniqueDeclName;
+ LValue IVLVal;
+ llvm::Function *FoundFn;
+ std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
+ Checker.getFoundData();
+ if (FoundFn != CGF.CurFn) {
+ // Special codegen for inner parallel regions.
+ // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
+ auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
+ assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
+ "Lastprivate conditional is not found in outer region.");
+ QualType StructTy = std::get<0>(It->getSecond());
+ const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
+ LValue PrivLVal = CGF.EmitLValue(FoundE);
+ Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ PrivLVal.getAddress(CGF),
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
+ LValue BaseLVal =
+ CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
+ LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
+ CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
+ CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
+ FiredLVal, llvm::AtomicOrdering::Unordered,
+ /*IsVolatile=*/true, /*isInit=*/false);
+ return;
+ }
+
+ // Private address of the lastprivate conditional in the current context.
+ // priv_a
+ LValue LVal = CGF.EmitLValue(FoundE);
+ emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
+ FoundE->getExprLoc());
+}
+
+void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
+ if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
+ return;
+ auto Range = llvm::reverse(LastprivateConditionalStack);
+ auto It = llvm::find_if(
+ Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
+ if (It == Range.end() || It->Fn != CGF.CurFn)
+ return;
+ auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
+ assert(LPCI != LastprivateConditionalToTypes.end() &&
+ "Lastprivates must be registered already.");
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
+ const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
+ for (const auto &Pair : It->DeclToUniqueName) {
+ const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
+ if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
+ continue;
+ auto I = LPCI->getSecond().find(Pair.first);
+ assert(I != LPCI->getSecond().end() &&
+            "Lastprivate must be registered already.");
+ // bool Cmp = priv_a.Fired != 0;
+ LValue BaseLVal = std::get<3>(I->getSecond());
+ LValue FiredLVal =
+ CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
+ llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
+ llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
+ // if (Cmp) {
+ CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
+ CGF.EmitBlock(ThenBB);
+ Address Addr = CGF.GetAddrOfLocalVar(VD);
+ LValue LVal;
+ if (VD->getType()->isReferenceType())
+ LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
+ AlignmentSource::Decl);
+ else
+ LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
+ AlignmentSource::Decl);
+ emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
+ D.getBeginLoc());
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
+ CGF.EmitBlock(DoneBB, /*IsFinal=*/true);
+ // }
}
}
@@ -11589,10 +11700,10 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
SourceLocation Loc) {
if (CGF.getLangOpts().OpenMP < 50)
return;
- auto It = LastprivateConditionalStack.back().DeclToUniqeName.find(VD);
- assert(It != LastprivateConditionalStack.back().DeclToUniqeName.end() &&
+ auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
+ assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
"Unknown lastprivate conditional variable.");
- StringRef UniqueName = It->getSecond();
+ StringRef UniqueName = It->second;
llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
// The variable was not updated in the region - exit.
if (!GV)
@@ -11750,7 +11861,8 @@ Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
ArrayRef<const Expr *> Vars,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ llvm::AtomicOrdering AO) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -11785,6 +11897,12 @@ llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ bool IsWorksharingReduction) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
SourceLocation Loc,
ReductionCodeGen &RCG,
@@ -11826,7 +11944,7 @@ void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
void CGOpenMPSIMDRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
- const Expr *Device,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index 8159f5e8b790..eb22f155f5ef 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -20,12 +20,15 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
@@ -35,6 +38,7 @@ class GlobalVariable;
class StructType;
class Type;
class Value;
+class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
@@ -80,11 +84,10 @@ public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
- typename std::enable_if<
- !std::is_same<typename std::remove_reference<Callable>::type,
- RegionCodeGenTy>::value>::type * = nullptr)
+ std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
+ RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
- Callback(CallbackFn<typename std::remove_reference<Callable>::type>),
+ Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
@@ -99,9 +102,18 @@ struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
+ SmallVector<const Expr *, 4> ReductionOrigs;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
- SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
+ struct DependData {
+ OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
+ const Expr *IteratorExpr = nullptr;
+ SmallVector<const Expr *, 4> DepExprs;
+ explicit DependData() = default;
+ DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
+ : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
+ };
+ SmallVector<DependData, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
@@ -109,6 +121,8 @@ struct OMPTaskDataTy final {
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
+ bool IsReductionWithTaskMod = false;
+ bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
@@ -116,20 +130,26 @@ class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
- /// Reference to the original shared item.
+ /// Reference to the item shared between tasks to reduce into.
+ const Expr *Shared = nullptr;
+ /// Reference to the original item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
const Expr *ReductionOp = nullptr;
- ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
- : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
+ ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
+ const Expr *ReductionOp)
+ : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
+ }
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
- /// List of addresses of original shared variables/expressions.
+ /// List of addresses of shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
+ /// List of addresses of original variables/expressions.
+ SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
@@ -149,12 +169,12 @@ private:
const OMPDeclareReductionDecl *DRD);
public:
- ReductionCodeGen(ArrayRef<const Expr *> Shareds,
+ ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
- /// Emits lvalue for a reduction item.
+ /// Emits lvalue for the shared and original reduction item.
/// \param N Number of the reduction item.
- void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
+ void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variably-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
@@ -186,6 +206,8 @@ public:
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
+ /// Returns LValue for the original reduction item.
+ LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
@@ -230,26 +252,42 @@ public:
  /// Also, stores the expression for the private loop counter and its
  /// threadprivate name.
struct LastprivateConditionalData {
- llvm::SmallDenseMap<CanonicalDeclPtr<const Decl>, SmallString<16>>
- DeclToUniqeName;
+ llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
+ DeclToUniqueName;
LValue IVLVal;
- SmallString<16> IVName;
- /// True if original lvalue for loop counter can be used in codegen (simd
- /// region or simd only mode) and no need to create threadprivate
- /// references.
- bool UseOriginalIV = false;
+ llvm::Function *Fn = nullptr;
+ bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
+ enum class ActionToDo {
+ DoNotPush,
+ PushAsLastprivateConditional,
+ DisableLastprivateConditional,
+ };
CodeGenModule &CGM;
- const bool NeedToPush;
+ ActionToDo Action = ActionToDo::DoNotPush;
+
+ /// Check and try to disable analysis of inner regions for changes in
+ /// lastprivate conditional.
+ void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>>
+ &NeedToAddForLPCsAsDisabled) const;
- public:
LastprivateConditionalRAII(CodeGenFunction &CGF,
- const OMPExecutableDirective &S, LValue IVLVal);
+ const OMPExecutableDirective &S);
+
+ public:
+ explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S,
+ LValue IVLVal);
+ static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
+ llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
+
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
@@ -319,17 +357,6 @@ protected:
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
- /// Tries to emit declare variant function for \p OldGD from \p NewGD.
- /// \param OrigAddr LLVM IR value for \p OldGD.
- /// \param IsForDefinition true, if requested emission for the definition of
- /// \p OldGD.
- /// \returns true, was able to emit a definition function for \p OldGD, which
- /// points to \p NewGD.
- virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition);
-
/// Returns default flags for the barriers depending on the directive, for
  /// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
@@ -345,6 +372,8 @@ protected:
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
+ /// An OpenMP-IR-Builder instance.
+ llvm::OpenMPIRBuilder OMPBuilder;
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
@@ -392,6 +421,13 @@ private:
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
+ /// Maps local variables marked as lastprivate conditional to their internal
+ /// types.
+ llvm::DenseMap<llvm::Function *,
+ llvm::DenseMap<CanonicalDeclPtr<const Decl>,
+ std::tuple<QualType, const FieldDecl *,
+ const FieldDecl *, LValue>>>
+ LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
@@ -428,6 +464,16 @@ private:
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
+ /// Type typedef struct kmp_task_affinity_info {
+ /// kmp_intptr_t base_addr;
+ /// size_t len;
+ /// struct {
+ /// bool flag1 : 1;
+ /// bool flag2 : 1;
+ /// kmp_int32 reserved : 30;
+ /// } flags;
+ /// } kmp_task_affinity_info_t;
+ QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
@@ -664,12 +710,6 @@ private:
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
- /// Mapping of the original functions to their variants and original global
- /// decl.
- llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
- std::pair<GlobalDecl, GlobalDecl>>
- DeferredVariantFunction;
-
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
@@ -684,6 +724,9 @@ private:
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
+ /// Atomic ordering from the omp requires directive.
+ llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
+
  /// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
@@ -710,11 +753,6 @@ private:
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
- /// Returns specified OpenMP runtime function.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::FunctionCallee createRuntimeFunction(unsigned Function);
-
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
@@ -826,6 +864,19 @@ private:
const OMPLoopDirective &D)>
SizeEmitter);
+ /// Emit update for lastprivate conditional data.
+ void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
+ StringRef UniqueDeclName, LValue LVal,
+ SourceLocation Loc);
+
+ /// Returns the number of the elements and the address of the depobj
+ /// dependency array.
+ /// \return Number of elements in depobj array and the pointer to the array of
+ /// dependencies.
+ std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
+ LValue DepobjLVal,
+ SourceLocation Loc);
+
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
@@ -1220,7 +1271,7 @@ public:
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
- SourceLocation Loc);
+ SourceLocation Loc, llvm::AtomicOrdering AO);
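A short source-level illustration of why emitFlush now takes an ordering: OpenMP 5.0 allows flush to carry a memory-order clause. This is a sketch only; 'use' is a placeholder.

void use(int);

int ready = 0, payload = 0;

void producer() {
  payload = 42;
  #pragma omp flush release   // lowered with a release ordering
  #pragma omp atomic write
  ready = 1;
}

void consumer() {
  int r;
  #pragma omp atomic read
  r = ready;
  if (r) {
    #pragma omp flush acquire // lowered with an acquire ordering
    use(payload);
  }
}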
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
@@ -1381,18 +1432,34 @@ public:
/// should be emitted for reduction:
/// \code
///
- /// _task_red_item_t red_data[n];
+ /// _taskred_item_t red_data[n];
/// ...
- /// red_data[i].shar = &origs[i];
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
- /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
+ /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
+ /// For a reduction clause with the task modifier it emits the following call:
+ /// \code
///
+ /// _taskred_item_t red_data[n];
+ /// ...
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
+ /// red_data[i].size = sizeof(origs[i]);
+ /// red_data[i].f_init = (void*)RedInit<i>;
+ /// red_data[i].f_fini = (void*)RedDest<i>;
+ /// red_data[i].f_comb = (void*)RedOp<i>;
+ /// red_data[i].flags = <Flag_i>;
+ /// ...
+ /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
+ /// red_data);
+ /// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
@@ -1403,11 +1470,16 @@ public:
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
+ /// Emits the following code for reduction clause with task modifier:
+ /// \code
+ /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
+ /// \endcode
+ virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
+ bool IsWorksharingReduction);
+
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
- /// initializer/combiner/finalizer functions + emits threadprivate variable to
- /// store the pointer to the original reduction item for the custom
- /// initializer defined by declare reduction construct.
+ /// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
@@ -1467,16 +1539,16 @@ public:
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
- /// target directive, or null if no device clause is used.
+ /// target directive, or null if no device clause is used, together with the
+ /// device clause modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
- virtual void
- emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter);
+ virtual void emitTargetCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
@@ -1675,7 +1747,10 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
+ virtual void processRequiresDirective(const OMPRequiresDecl *D);
+
+ /// Gets default memory ordering as specified in requires directive.
+ llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
@@ -1685,17 +1760,13 @@ public:
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
- /// Emits the definition of the declare variant function.
- virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
-
/// Checks if the \p VD variable is marked as a nontemporal declaration in the
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
- /// Initializes global counter for lastprivate conditional.
- virtual void
- initLastprivateConditionalCounter(CodeGenFunction &CGF,
- const OMPExecutableDirective &S);
+ /// Create specialized alloca to handle lastprivate conditionals.
+ Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
+ const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
@@ -1713,6 +1784,30 @@ public:
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
+ /// Checks if the lastprivate conditional was updated in an inner region and
+ /// writes the value.
+ /// \code
+ /// lastprivate(conditional: a)
+ /// ...
+ /// <type> a; bool Fired = false;
+ /// #pragma omp ... shared(a)
+ /// {
+ /// lp_a = ...;
+ /// Fired = true;
+ /// }
+ /// if (Fired) {
+ /// #pragma omp critical(a)
+ /// if (last_iv_a <= iv) {
+ /// last_iv_a = iv;
+ /// global_a = lp_a;
+ /// }
+ /// Fired = false;
+ /// }
+ /// \endcode
+ virtual void checkAndEmitSharedLastprivateConditional(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
+
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
@@ -1721,6 +1816,41 @@ public:
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
+
+ /// Emits a list of dependencies based on the provided data (array of
+ /// dependence/expression pairs).
+ /// \returns Pointer to the first element of the array cast to VoidPtr type.
+ std::pair<llvm::Value *, Address>
+ emitDependClause(CodeGenFunction &CGF,
+ ArrayRef<OMPTaskDataTy::DependData> Dependencies,
+ SourceLocation Loc);
+
+ /// Emits a list of dependencies based on the provided data (array of
+ /// dependence/expression pairs) for the depobj construct. In this case, the
+ /// variable is allocated dynamically. \returns Pointer to the first
+ /// element of the array cast to VoidPtr type.
+ Address emitDepobjDependClause(CodeGenFunction &CGF,
+ const OMPTaskDataTy::DependData &Dependencies,
+ SourceLocation Loc);
+
+ /// Emits the code to destroy the dependency object provided in depobj
+ /// directive.
+ void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc);
+
+ /// Updates the dependency kind in the specified depobj object.
+ /// \param DepobjLVal LValue for the main depobj object.
+ /// \param NewDepKind New dependency kind.
+ void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
+
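A sketch of the source-level depobj usage that the depobj-related emitters above handle; 'work' and 'x' are placeholders.

#include <omp.h>

extern int x;
void work(int);

void depobj_example() {
  omp_depend_t d;
  #pragma omp depobj(d) depend(inout: x) // emitDepobjDependClause
  #pragma omp task depend(depobj: d)     // consumed like an ordinary depend list
  work(x);
  #pragma omp depobj(d) update(in)       // emitUpdateClause
  #pragma omp depobj(d) destroy          // emitDestroyClause
}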
+ /// Initializes user-defined allocators specified in the uses_allocators
+ /// clause.
+ void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
+ const Expr *AllocatorTraits);
+
+ /// Destroys user-defined allocators specified in the uses_allocators clause.
+ void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
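For reference, a hedged source-level sketch of the uses_allocators clause that emitUsesAllocatorsInit/Fini support; the trait contents and the map clause are illustrative assumptions, not taken from this patch.

#include <omp.h>

void target_with_allocator() {
  omp_alloctrait_t traits[1] = {{omp_atk_alignment, 64}};
  omp_allocator_handle_t my_alloc = omp_null_allocator;
  #pragma omp target uses_allocators(my_alloc(traits)) map(to: traits)
  {
    int *p = (int *)omp_alloc(16 * sizeof(int), my_alloc);
    omp_free(p, my_alloc);
  } // my_alloc is initialized on region entry and destroyed on exit
}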
/// Class supports emission of SIMD-only code.
@@ -1985,7 +2115,7 @@ public:
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
- SourceLocation Loc) override;
+ SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
@@ -2107,18 +2237,34 @@ public:
/// should be emitted for reduction:
/// \code
///
- /// _task_red_item_t red_data[n];
+ /// _taskred_item_t red_data[n];
/// ...
- /// red_data[i].shar = &origs[i];
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
- /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
+ /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
+ /// For a reduction clause with the task modifier it emits the following call:
+ /// \code
///
+ /// _taskred_item_t red_data[n];
+ /// ...
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
+ /// red_data[i].size = sizeof(origs[i]);
+ /// red_data[i].f_init = (void*)RedInit<i>;
+ /// red_data[i].f_fini = (void*)RedDest<i>;
+ /// red_data[i].f_comb = (void*)RedOp<i>;
+ /// red_data[i].flags = <Flag_i>;
+ /// ...
+ /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
+ /// red_data);
+ /// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
@@ -2128,6 +2274,13 @@ public:
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
+ /// Emits the following code for reduction clause with task modifier:
+ /// \code
+ /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
+ /// \endcode
+ void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
+ bool IsWorksharingReduction) override;
+
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
@@ -2191,14 +2344,14 @@ public:
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
- /// target directive, or null if no device clause is used.
- void
- emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter) override;
+ /// target directive, or null if no device clause is used, together with the
+ /// device clause modifier.
+ void emitTargetCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index d00d84b79cfe..cbd443134e7a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -38,11 +38,9 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
/// Call to void __kmpc_kernel_prepare_parallel(void
- /// *outlined_function, int16_t
- /// IsOMPRuntimeInitialized);
+ /// *outlined_function);
OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
- /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
- /// int16_t IsOMPRuntimeInitialized);
+ /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
OMPRTL_NVPTX__kmpc_kernel_parallel,
/// Call to void __kmpc_kernel_end_parallel();
OMPRTL_NVPTX__kmpc_kernel_end_parallel,
@@ -85,6 +83,9 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
/// int16_t UseSharedMemory);
OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
+ /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
+ /// UseSharedMemory);
+ OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
/// Call to void __kmpc_data_sharing_pop_stack(void *a);
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
/// Call to void __kmpc_begin_sharing_variables(void ***args,
@@ -341,8 +342,7 @@ class CheckVarsEscapingDeclContext final
if (!Attr)
return;
if (((Attr->getCaptureKind() != OMPC_map) &&
- !isOpenMPPrivate(
- static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
+ !isOpenMPPrivate(Attr->getCaptureKind())) ||
((Attr->getCaptureKind() == OMPC_map) &&
!FD->getType()->isAnyPointerType()))
return;
@@ -786,6 +786,8 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -801,6 +803,8 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -813,6 +817,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -862,6 +867,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -877,6 +884,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -889,6 +898,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
break;
}
llvm_unreachable(
@@ -1031,6 +1041,8 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -1046,6 +1058,8 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -1058,6 +1072,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -1113,6 +1128,8 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -1128,6 +1145,8 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -1140,6 +1159,7 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
break;
}
llvm_unreachable(
@@ -1444,8 +1464,7 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
// TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {WorkFn.getPointer(),
- /*RequiresOMPRuntime=*/Bld.getInt16(1)};
+ llvm::Value *Args[] = {WorkFn.getPointer()};
llvm::Value *Ret = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
@@ -1573,17 +1592,16 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
/// Build void __kmpc_kernel_prepare_parallel(
- /// void *outlined_function, int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
+ /// void *outlined_function);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_parallel: {
- /// Build bool __kmpc_kernel_parallel(void **outlined_function,
- /// int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
+ /// Build bool __kmpc_kernel_parallel(void **outlined_function);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
auto *FnTy =
llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
@@ -1738,6 +1756,16 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
break;
}
+ case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
+ // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
+ // UseSharedMemory);
+ llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(
+ FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
+ break;
+ }
case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
// Build void __kmpc_data_sharing_pop_stack(void *a);
llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
@@ -1915,19 +1943,6 @@ unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
llvm_unreachable("Unknown flags are requested.");
}
-bool CGOpenMPRuntimeNVPTX::tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) {
- // Emit the function in OldGD with the body from NewGD, if NewGD is defined.
- auto *NewFD = cast<FunctionDecl>(NewGD.getDecl());
- if (NewFD->isDefined()) {
- CGM.emitOpenMPDeviceFunctionRedefinition(OldGD, NewGD, OrigAddr);
- return true;
- }
- return false;
-}
-
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, "_", "$") {
if (!CGM.getLangOpts().OpenMPIsDevice)
@@ -2208,7 +2223,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
GlobalRecCastAddr = Phi;
I->getSecond().GlobalRecordAddr = Phi;
I->getSecond().IsInSPMDModeFlag = IsSPMD;
- } else if (IsInTTDRegion) {
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
assert(GlobalizedRecords.back().Records.size() < 2 &&
"Expected less than 2 globalized records: one for target and one "
"for teams.");
@@ -2281,12 +2296,16 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
} else {
// TODO: allow the usage of shared memory to be controlled by
// the user, for now, default to global.
+ bool UseSharedMemory =
+ IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
llvm::Value *GlobalRecordSizeArg[] = {
llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ IsInTTDRegion
+ ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
+ : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
GlobalRecordSizeArg);
GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
GlobalRecValue, GlobalRecPtrTy);
@@ -2433,7 +2452,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
CGF.EmitBlock(ExitBB);
- } else if (IsInTTDRegion) {
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
assert(GlobalizedRecords.back().RegionCounter > 0 &&
"region counter must be > 0.");
--GlobalizedRecords.back().RegionCounter;
@@ -2546,7 +2565,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
// Prepare for parallel region. Indicate the outlined function.
- llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
+ llvm::Value *Args[] = {ID};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
Args);
@@ -4754,6 +4773,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
switch (A->getAllocatorType()) {
// Use the default allocator here as by default local vars are
// threadlocal.
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
case OMPAllocateDeclAttr::OMPThreadMemAlloc:
case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
@@ -4920,6 +4940,7 @@ bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
switch(A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
// Not supported, fallback to the default mem space.
case OMPAllocateDeclAttr::OMPThreadMemAlloc:
@@ -4962,7 +4983,7 @@ static CudaArch getCudaArch(CodeGenModule &CGM) {
/// Check to see if target architecture supports unified addressing which is
/// a restriction for OpenMP requires clause "unified_shared_memory".
-void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
+void CGOpenMPRuntimeNVPTX::processRequiresDirective(
const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
@@ -4990,6 +5011,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
case CudaArch::SM_70:
case CudaArch::SM_72:
case CudaArch::SM_75:
+ case CudaArch::SM_80:
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX700:
@@ -5010,6 +5032,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -5017,7 +5040,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
}
}
}
- CGOpenMPRuntime::checkArchForUnifiedAddressing(D);
+ CGOpenMPRuntime::processRequiresDirective(D);
}
/// Get number of SMs and number of blocks per SM.
@@ -5047,6 +5070,7 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
case CudaArch::SM_70:
case CudaArch::SM_72:
case CudaArch::SM_75:
+ case CudaArch::SM_80:
return {84, 32};
case CudaArch::GFX600:
case CudaArch::GFX601:
@@ -5068,6 +5092,7 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -5077,7 +5102,8 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
}
void CGOpenMPRuntimeNVPTX::clear() {
- if (!GlobalizedRecords.empty()) {
+ if (!GlobalizedRecords.empty() &&
+ !CGM.getLangOpts().OpenMPCUDATargetParallel) {
ASTContext &C = CGM.getContext();
llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index 4159af0a622f..c52ae43817c7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -193,18 +193,6 @@ protected:
/// Full/Lightweight runtime mode. Used for better optimization.
unsigned getDefaultLocationReserved2Flags() const override;
- /// Tries to emit declare variant function for \p OldGD from \p NewGD.
- /// \param OrigAddr LLVM IR value for \p OldGD.
- /// \param IsForDefinition true, if requested emission for the definition of
- /// \p OldGD.
- /// \returns true, was able to emit a definition function for \p OldGD, which
- /// points to \p NewGD.
- /// NVPTX backend does not support global aliases, so just use the function,
- /// emitted for \p NewGD instead of \p OldGD.
- bool tryEmitDeclareVariant(const GlobalDecl &NewGD, const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) override;
-
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
void clear() override;
@@ -395,7 +383,7 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) override;
+ void processRequiresDirective(const OMPRequiresDecl *D) override;
/// Returns default address space for the constant firstprivates, __constant__
/// address space by default.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 4de64a32f2ac..4e5d1d3f16f6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -385,7 +385,8 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
Run = FieldEnd;
continue;
}
- llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
+ llvm::Type *Type =
+ Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
@@ -405,15 +406,17 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
return;
}
- // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
- // has legal integer width, and its bitfield offset is naturally aligned, it
- // is better to make the bitfield a separate storage component so as it can be
- // accessed directly with lower cost.
+ // Check if OffsetInRecord (the size in bits of the current run) is better
+ // as a single field run. When OffsetInRecord has legal integer width, and
+ // its bitfield offset is naturally aligned, it is better to make the
+ // bitfield a separate storage component so that it can be accessed directly
+ // with lower cost.
auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
uint64_t StartBitOffset) {
if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
return false;
- if (!DataLayout.isLegalInteger(OffsetInRecord))
+ if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
+ !DataLayout.fitsInLegalInteger(OffsetInRecord))
return false;
// Make sure StartBitOffset is naturally aligned if it is treated as an
// IType integer.
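An illustrative struct for the tightened check, assuming -ffine-grained-bitfield-accesses; this is a sketch of the intent, not a test case from the patch.

struct S {
  unsigned a : 3;  // 3-bit run: below 8 bits, stays in shared storage
  int sep;         // non-bitfield member ends the first run
  unsigned b : 16; // 16-bit, naturally aligned run: can get its own storage unit
};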
@@ -729,8 +732,8 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
-CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
- llvm::StructType *Ty) {
+std::unique_ptr<CGRecordLayout>
+CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
CGRecordLowering Builder(*this, D, /*Packed=*/false);
Builder.lower(/*NonVirtualBaseType=*/false);
@@ -757,9 +760,9 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
// but we may need to recursively layout D while laying D out as a base type.
Ty->setBody(Builder.FieldTypes, Builder.Packed);
- CGRecordLayout *RL =
- new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
- Builder.IsZeroInitializableAsBase);
+ auto RL = std::make_unique<CGRecordLayout>(
+ Ty, BaseTy, (bool)Builder.IsZeroInitializable,
+ (bool)Builder.IsZeroInitializableAsBase);
RL->NonVirtualBases.swap(Builder.NonVirtualBases);
RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index 138459c68dbf..672909849bb7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
+#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
@@ -18,12 +19,14 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
@@ -246,6 +249,12 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPFlushDirectiveClass:
EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
break;
+ case Stmt::OMPDepobjDirectiveClass:
+ EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
+ break;
+ case Stmt::OMPScanDirectiveClass:
+ EmitOMPScanDirective(cast<OMPScanDirective>(*S));
+ break;
case Stmt::OMPOrderedDirectiveClass:
EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
break;
@@ -601,6 +610,13 @@ void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
}
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
+ bool nomerge = false;
+ for (const auto *A : S.getAttrs())
+ if (A->getKind() == attr::NoMerge) {
+ nomerge = true;
+ break;
+ }
+ SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
EmitStmt(S.getSubStmt(), S.getAttrs());
}
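A source-level sketch of what sets the flag above, assuming the clang::nomerge statement attribute spelling; handle_error is a placeholder.

void handle_error(int);

void f(bool a, bool b) {
  if (a)
    [[clang::nomerge]] handle_error(0); // keep this call site distinct
  if (b)
    [[clang::nomerge]] handle_error(0); // ... from this one, e.g. for backtraces
}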
@@ -721,8 +737,8 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
EmitBlock(LoopHeader.getBlock());
const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
- SourceLocToDebugLoc(R.getBegin()),
+ LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
+ WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
// Create an exit block for when the condition fails, which will
@@ -823,7 +839,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
EmitBlock(LoopCond.getBlock());
const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
+ LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -881,7 +897,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
EmitBlock(CondBlock);
const SourceRange &R = S.getSourceRange();
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -982,7 +998,7 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
EmitBlock(CondBlock);
const SourceRange &R = S.getSourceRange();
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -1054,6 +1070,19 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
EmitBranchThroughCleanup(ReturnBlock);
}
+namespace {
+// RAII struct used to save and restore a return statement's result expression.
+struct SaveRetExprRAII {
+ SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
+ : OldRetExpr(CGF.RetExpr), CGF(CGF) {
+ CGF.RetExpr = RetExpr;
+ }
+ ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
+ const Expr *OldRetExpr;
+ CodeGenFunction &CGF;
+};
+} // namespace
+
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
@@ -1079,20 +1108,28 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
- // Treat block literals in a return expression as if they appeared
- // in their own scope. This permits a small, easily-implemented
- // exception to our over-conservative rules about not jumping to
- // statements following block literals with non-trivial cleanups.
- RunCleanupsScope cleanupScope(*this);
- if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
- enterFullExpression(fe);
- RV = fe->getSubExpr();
- }
+ // Record the result expression of the return statement. The recorded
+ // expression is used to determine whether a block capture's lifetime should
+ // end at the end of the full expression as opposed to the end of the scope
+ // enclosing the block expression.
+ //
+ // This permits a small, easily-implemented exception to our over-conservative
+ // rules about not jumping to statements following block literals with
+ // non-trivial cleanups.
+ SaveRetExprRAII SaveRetExpr(RV, *this);
+ RunCleanupsScope cleanupScope(*this);
+ if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
+ RV = EWC->getSubExpr();
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
- if (getLangOpts().ElideConstructors &&
- S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
+ // Check if the NRVO candidate was not globalized in OpenMP mode.
+ if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
+ S.getNRVOCandidate()->isNRVOVariable() &&
+ (!getLangOpts().OpenMP ||
+ !CGM.getOpenMPRuntime()
+ .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
+ .isValid())) {
// Apply the named return value optimization for this return statement,
// which means doing nothing: the appropriate result has already been
// constructed into the NRVO variable.
@@ -2091,8 +2128,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
ArgTypes.push_back(Dest.getAddress(*this).getType());
Args.push_back(Dest.getPointer(*this));
@@ -2116,8 +2154,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -2203,21 +2242,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
Constraints += InputConstraint;
}
- // Append the "input" part of inout constraints last.
- for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
- ArgTypes.push_back(InOutArgTypes[i]);
- Args.push_back(InOutArgs[i]);
- }
- Constraints += InOutConstraints;
-
// Labels
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
@@ -2225,7 +2258,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
IsGCCAsmGoto = GS->isAsmGoto();
if (IsGCCAsmGoto) {
- for (auto *E : GS->labels()) {
+ for (const auto *E : GS->labels()) {
JumpDest Dest = getJumpDestForLabel(E->getLabel());
Transfer.push_back(Dest.getBlock());
llvm::BlockAddress *BA =
@@ -2236,19 +2269,31 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Constraints += ',';
Constraints += 'X';
}
- StringRef Name = "asm.fallthrough";
- Fallthrough = createBasicBlock(Name);
+ Fallthrough = createBasicBlock("asm.fallthrough");
}
}
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
StringRef Clobber = S.getClobber(i);
if (Clobber == "memory")
ReadOnly = ReadNone = false;
- else if (Clobber != "cc")
+ else if (Clobber != "cc") {
Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
+ if (CGM.getCodeGenOpts().StackClashProtector &&
+ getTarget().isSPRegName(Clobber)) {
+ CGM.getDiags().Report(S.getAsmLoc(),
+ diag::warn_stack_clash_protection_inline_asm);
+ }
+ }
if (!Constraints.empty())
Constraints += ',';
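A sketch of the inline asm the new diagnostic targets, assuming x86-64 and -fstack-clash-protection: naming the stack pointer as a clobber defeats the probing scheme, so the frontend now warns.

void touch_stack(void) {
  __asm__ volatile("sub $128, %%rsp\n\t"
                   "add $128, %%rsp"
                   :
                   :
                   : "rsp");
}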
@@ -2287,9 +2332,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (IsGCCAsmGoto) {
llvm::CallBrInst *Result =
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+ EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
ReadNone, S, ResultRegTypes, *this, RegResults);
- EmitBlock(Fallthrough);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index dc3899f0e4ea..cfd5eda8cc80 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -18,14 +18,22 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
+static const VarDecl *getBaseDecl(const Expr *Ref);
+
namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
@@ -53,7 +61,8 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
return CGF.LambdaCaptureFields.lookup(VD) ||
(CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
- (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
+ (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
+ cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
}
public:
@@ -214,6 +223,12 @@ public:
if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
CGF.EmitVarDecl(*OED);
}
+ } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
+ for (const Expr *E : UDP->varlists()) {
+ const Decl *D = getBaseDecl(E);
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
}
}
if (!isOpenMPSimdDirective(S.getDirectiveKind()))
@@ -365,26 +380,28 @@ static QualType getCanonicalParamType(ASTContext &C, QualType T) {
}
namespace {
- /// Contains required data for proper outlined function codegen.
- struct FunctionOptions {
- /// Captured statement for which the function is generated.
- const CapturedStmt *S = nullptr;
- /// true if cast to/from UIntPtr is required for variables captured by
- /// value.
- const bool UIntPtrCastRequired = true;
- /// true if only casted arguments must be registered as local args or VLA
- /// sizes.
- const bool RegisterCastedArgsOnly = false;
- /// Name of the generated function.
- const StringRef FunctionName;
- explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
- bool RegisterCastedArgsOnly,
- StringRef FunctionName)
- : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
- RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
- FunctionName(FunctionName) {}
- };
-}
+/// Contains required data for proper outlined function codegen.
+struct FunctionOptions {
+ /// Captured statement for which the function is generated.
+ const CapturedStmt *S = nullptr;
+ /// true if cast to/from UIntPtr is required for variables captured by
+ /// value.
+ const bool UIntPtrCastRequired = true;
+ /// true if only casted arguments must be registered as local args or VLA
+ /// sizes.
+ const bool RegisterCastedArgsOnly = false;
+ /// Name of the generated function.
+ const StringRef FunctionName;
+ /// Location of the non-debug version of the outlined function.
+ SourceLocation Loc;
+ explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
+ bool RegisterCastedArgsOnly, StringRef FunctionName,
+ SourceLocation Loc)
+ : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
+ RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
+ FunctionName(FunctionName), Loc(Loc) {}
+};
+} // namespace
static llvm::Function *emitOutlinedFunctionPrologue(
CodeGenFunction &CGF, FunctionArgList &Args,
@@ -485,7 +502,9 @@ static llvm::Function *emitOutlinedFunctionPrologue(
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
- FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
+ FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
+ FO.UIntPtrCastRequired ? FO.Loc
+ : CD->getBody()->getBeginLoc());
unsigned Cnt = CD->getContextParamPosition();
I = FO.S->captures().begin();
for (const FieldDecl *FD : RD->fields()) {
@@ -560,7 +579,8 @@ static llvm::Function *emitOutlinedFunctionPrologue(
}
llvm::Function *
-CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
+CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
+ SourceLocation Loc) {
assert(
CapturedStmtInfo &&
"CapturedStmtInfo should be set when generating the captured function");
@@ -577,7 +597,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
if (NeedWrapperFunction)
Out << "_debug__";
FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
- Out.str());
+ Out.str(), Loc);
llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
VLASizes, CXXThisValue, FO);
CodeGenFunction::OMPPrivateScope LocalScope(*this);
@@ -600,7 +620,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
/*RegisterCastedArgsOnly=*/true,
- CapturedStmtInfo->getHelperName());
+ CapturedStmtInfo->getHelperName(), Loc);
CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
Args.clear();
@@ -632,8 +652,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
}
CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
}
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
- F, CallArgs);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
WrapperCGF.FinishFunction();
return WrapperF;
}
@@ -747,11 +766,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
getLangOpts().OpenMPIsDevice &&
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
bool FirstprivateIsLastprivate = false;
- llvm::DenseSet<const VarDecl *> Lastprivates;
+ llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
for (const auto *D : C->varlists())
- Lastprivates.insert(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
+ Lastprivates.try_emplace(
+ cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
+ C->getKind());
}
llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
@@ -761,8 +781,8 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
bool MustEmitFirstprivateCopy =
CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
- auto IRef = C->varlist_begin();
- auto InitsRef = C->inits().begin();
+ const auto *IRef = C->varlist_begin();
+ const auto *InitsRef = C->inits().begin();
for (const Expr *IInit : C->private_copies()) {
const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
bool ThisFirstprivateIsLastprivate =
@@ -853,14 +873,34 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
});
} else {
Address OriginalAddr = OriginalLVal.getAddress(*this);
- IsRegistered = PrivateScope.addPrivate(
- OrigVD, [this, VDInit, OriginalAddr, VD]() {
+ IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
+ ThisFirstprivateIsLastprivate,
+ OrigVD, &Lastprivates, IRef]() {
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable (for proper handling of captured global variables).
setAddrOfLocalVar(VDInit, OriginalAddr);
EmitDecl(*VD);
LocalDeclMap.erase(VDInit);
+ if (ThisFirstprivateIsLastprivate &&
+ Lastprivates[OrigVD->getCanonicalDecl()] ==
+ OMPC_LASTPRIVATE_conditional) {
+ // Create/init special variable for lastprivate conditionals.
+ Address VDAddr =
+ CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ llvm::Value *V = EmitLoadOfScalar(
+ MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
+ AlignmentSource::Decl),
+ (*IRef)->getExprLoc());
+ EmitStoreOfScalar(V,
+ MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl));
+ LocalDeclMap.erase(VD);
+ setAddrOfLocalVar(VD, VDAddr);
+ return VDAddr;
+ }
return GetAddrOfLocalVar(VD);
});
}
@@ -990,8 +1030,8 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
!getLangOpts().OpenMPSimd)
break;
- auto IRef = C->varlist_begin();
- auto IDestRef = C->destination_exprs().begin();
+ const auto *IRef = C->varlist_begin();
+ const auto *IDestRef = C->destination_exprs().begin();
for (const Expr *IInit : C->private_copies()) {
// Keep the address of the original variable for future update at the end
// of the loop.
@@ -1013,7 +1053,15 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
// for 'firstprivate' clause.
if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
+ OrigVD]() {
+ if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
+ Address VDAddr =
+ CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
+ OrigVD);
+ setAddrOfLocalVar(VD, VDAddr);
+ return VDAddr;
+ }
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -1099,7 +1147,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
PrivateAddr =
Address(Builder.CreateLoad(PrivateAddr),
- getNaturalTypeAlignment(RefTy->getPointeeType()));
+ CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
// Store the last value to the private copy in the last iteration.
if (C->getKind() == OMPC_LASTPRIVATE_conditional)
CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
@@ -1122,7 +1170,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
void CodeGenFunction::EmitOMPReductionClauseInit(
const OMPExecutableDirective &D,
- CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
if (!HaveInsertPoint())
return;
SmallVector<const Expr *, 4> Shareds;
@@ -1130,32 +1178,36 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
SmallVector<const Expr *, 4> ReductionOps;
SmallVector<const Expr *, 4> LHSs;
SmallVector<const Expr *, 4> RHSs;
+ OMPTaskDataTy Data;
+ SmallVector<const Expr *, 4> TaskLHSs;
+ SmallVector<const Expr *, 4> TaskRHSs;
for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Shareds.emplace_back(Ref);
- Privates.emplace_back(*IPriv);
- ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
+ if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
+ continue;
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ if (C->getModifier() == OMPC_REDUCTION_task) {
+ Data.ReductionVars.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
}
- ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
+ ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
unsigned Count = 0;
- auto ILHS = LHSs.begin();
- auto IRHS = RHSs.begin();
- auto IPriv = Privates.begin();
+ auto *ILHS = LHSs.begin();
+ auto *IRHS = RHSs.begin();
+ auto *IPriv = Privates.begin();
for (const Expr *IRef : Shareds) {
const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// Emit private VarDecl with reduction init.
- RedCG.emitSharedLValue(*this, Count);
+ RedCG.emitSharedOrigLValue(*this, Count);
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
@@ -1222,6 +1274,118 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
++IPriv;
++Count;
}
+ if (!Data.ReductionVars.empty()) {
+ Data.IsReductionWithTaskMod = true;
+ Data.IsWorksharingReduction =
+ isOpenMPWorksharingDirective(D.getDirectiveKind());
+ llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
+ *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
+ const Expr *TaskRedRef = nullptr;
+ switch (D.getDirectiveKind()) {
+ case OMPD_parallel:
+ TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_for:
+ TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_sections:
+ TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_for:
+ TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_master:
+ TaskRedRef =
+ cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_sections:
+ TaskRedRef =
+ cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_target_parallel:
+ TaskRedRef =
+ cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_target_parallel_for:
+ TaskRedRef =
+ cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_distribute_parallel_for:
+ TaskRedRef =
+ cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
+ .getTaskReductionRefExpr();
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
+ .getTaskReductionRefExpr();
+ break;
+ case OMPD_simd:
+ case OMPD_for_simd:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_parallel_for_simd:
+ case OMPD_task:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_ordered:
+ case OMPD_atomic:
+ case OMPD_teams:
+ case OMPD_target:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ case OMPD_target_data:
+ case OMPD_target_enter_data:
+ case OMPD_target_exit_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_distribute:
+ case OMPD_target_update:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_distribute_simd:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_declare_simd:
+ case OMPD_requires:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_unknown:
+ default:
+ llvm_unreachable("Enexpected directive with task reductions.");
+ }
+
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
+ EmitVarDecl(*VD);
+ EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
+ /*Volatile=*/false, TaskRedRef->getType());
+ }
}
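A source-level sketch of the 'task' reduction modifier that the initialization emitted above (and the matching emitTaskReductionFini) supports; 'work' and 'n' are placeholders.

int work(int);

int sum_work(int n) {
  int sum = 0;
  #pragma omp parallel for reduction(task, + : sum)
  for (int i = 0; i < n; ++i) {
    #pragma omp task in_reduction(+ : sum)
    sum += work(i);
  }
  return sum;
}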
void CodeGenFunction::EmitOMPReductionClauseFinal(
@@ -1233,14 +1397,25 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
llvm::SmallVector<const Expr *, 8> RHSExprs;
llvm::SmallVector<const Expr *, 8> ReductionOps;
bool HasAtLeastOneReduction = false;
+ bool IsReductionWithTaskMod = false;
for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ // Do not emit for inscan reductions.
+ if (C->getModifier() == OMPC_REDUCTION_inscan)
+ continue;
HasAtLeastOneReduction = true;
Privates.append(C->privates().begin(), C->privates().end());
LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ IsReductionWithTaskMod =
+ IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
}
if (HasAtLeastOneReduction) {
+ if (IsReductionWithTaskMod) {
+ CGM.getOpenMPRuntime().emitTaskReductionFini(
+ *this, D.getBeginLoc(),
+ isOpenMPWorksharingDirective(D.getDirectiveKind()));
+ }
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
ReductionKind == OMPD_simd;
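Inscan reductions are skipped in the loop above because the scan directive finalizes them; a source-level sketch of that pattern (OpenMP 5.0), with 'in', 'out', and 'n' as placeholders.

void inclusive_scan(const int *in, int *out, int n) {
  int sum = 0;
  #pragma omp parallel for reduction(inscan, + : sum)
  for (int i = 0; i < n; ++i) {
    sum += in[i];
    #pragma omp scan inclusive(sum)
    out[i] = sum;
  }
}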
@@ -1288,6 +1463,63 @@ typedef llvm::function_ref<void(CodeGenFunction &,
CodeGenBoundParametersTy;
} // anonymous namespace
+static void
+checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S) {
+ if (CGF.getLangOpts().OpenMP < 50)
+ return;
+ llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+  // Privates should not be analyzed since they are not captured at all.
+  // Task reductions may be skipped - tasks are ignored.
+  // Firstprivates do not return a value but may be passed by reference - no
+  // need to check for updated lastprivate conditional.
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ }
+ }
+ CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
+ CGF, S, PrivateDecls);
+}
+
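The helper above collects candidate scalars for the OpenMP 5.0 conditional lastprivate tracking. A minimal user-level sketch of the feature it serves (illustrative only, not part of this patch):

#include <cstdio>

int main() {
  int last = -1;
  // With the 'conditional' modifier (OpenMP 5.0), 'last' receives the value
  // from the last iteration that actually assigned it (i == 90), not from
  // the final iteration i == 99.
  #pragma omp parallel for lastprivate(conditional: last)
  for (int i = 0; i < 100; ++i)
    if (i % 10 == 0)
      last = i;
  std::printf("last = %d\n", last); // prints: last = 90
  return 0;
}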
static void emitCommonOMPParallelDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &S,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -1334,9 +1566,97 @@ static void emitEmptyBoundParameters(CodeGenFunction &,
const OMPExecutableDirective &,
llvm::SmallVectorImpl<llvm::Value *> &) {}
-void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
+ CodeGenFunction &CGF, const VarDecl *VD) {
+ CodeGenModule &CGM = CGF.CGM;
+ auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+
+ if (!VD)
+ return Address::invalid();
+ const VarDecl *CVD = VD->getCanonicalDecl();
+ if (!CVD->hasAttr<OMPAllocateDeclAttr>())
+ return Address::invalid();
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ // Use the default allocation.
+ if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
+ !AA->getAllocator())
+ return Address::invalid();
+ llvm::Value *Size;
+ CharUnits Align = CGM.getContext().getDeclAlign(CVD);
+ if (CVD->getType()->isVariablyModifiedType()) {
+ Size = CGF.getTypeSize(CVD->getType());
+ // Align the size: ((size + align - 1) / align) * align
+ Size = CGF.Builder.CreateNUWAdd(
+ Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
+ Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
+ Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
+ } else {
+ CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
+ Size = CGM.getSize(Sz.alignTo(Align));
+ }
+
+ assert(AA->getAllocator() &&
+ "Expected allocator expression for non-default allocator.");
+ llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
+  // According to the standard, the original allocator type is an enum (integer).
+ // Convert to pointer type, if required.
+ if (Allocator->getType()->isIntegerTy())
+ Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
+ else if (Allocator->getType()->isPointerTy())
+ Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
+ CGM.VoidPtrTy);
+
+ llvm::Value *Addr = OMPBuilder.CreateOMPAlloc(
+ CGF.Builder, Size, Allocator,
+ getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
+ llvm::CallInst *FreeCI =
+ OMPBuilder.CreateOMPFree(CGF.Builder, Addr, Allocator);
+
+ CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr,
+ CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
+ getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
+ return Address(Addr, Align);
+}
+
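For reference, the size rounding emitted above for variably modified types matches this host-side arithmetic (a sketch; roundUpToAlignment is our own illustrative name):

#include <cstddef>
#include <cstdio>

// Round Size up to the next multiple of Align: ((size + align - 1) / align) * align.
// Align is a power of 2 in practice, but the formula only requires Align > 0.
static std::size_t roundUpToAlignment(std::size_t Size, std::size_t Align) {
  return ((Size + Align - 1) / Align) * Align;
}

int main() {
  std::printf("%zu\n", roundUpToAlignment(13, 8));  // 16
  std::printf("%zu\n", roundUpToAlignment(16, 8));  // 16
  std::printf("%zu\n", roundUpToAlignment(1, 16));  // 16
  return 0;
}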
+Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
+ CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGM.getLangOpts().OpenMPUseTLS &&
+ CGM.getContext().getTargetInfo().isTLSSupported())
+ return VDAddr;
+
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+
+ llvm::Type *VarTy = VDAddr.getElementType();
+ llvm::Value *Data =
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
+ llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
+ std::string Suffix = getNameWithSeparators({"cache", ""});
+ llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
+
+ llvm::CallInst *ThreadPrivateCacheCall =
+ OMPBuilder.CreateCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
+
+ return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
+}
- if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
+std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
+ ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
+ SmallString<128> Buffer;
+ llvm::raw_svector_ostream OS(Buffer);
+ StringRef Sep = FirstSeparator;
+ for (StringRef Part : Parts) {
+ OS << Sep << Part;
+ Sep = Separator;
+ }
+ return OS.str().str();
+}
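To make the resulting value names predictable, here is a stand-alone model of the same joining logic (joinWithSeparators is an illustrative stand-in, not a clang API):

#include <cstdio>
#include <string>
#include <vector>

// Mirrors getNameWithSeparators: the first part is prefixed with
// FirstSeparator, every subsequent part with Separator.
static std::string joinWithSeparators(const std::vector<std::string> &Parts,
                                      const std::string &FirstSeparator,
                                      const std::string &Separator) {
  std::string Result;
  const std::string *Sep = &FirstSeparator;
  for (const std::string &Part : Parts) {
    Result += *Sep;
    Result += Part;
    Sep = &Separator;
  }
  return Result;
}

int main() {
  // "x" and ".void.addr" joined with "." twice -> ".x..void.addr",
  // matching the names built for the CreateOMPAlloc call above.
  std::printf("%s\n",
              joinWithSeparators({"x", ".void.addr"}, ".", ".").c_str());
  return 0;
}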
+void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
// Check if we have any if clause associated with the directive.
llvm::Value *IfCond = nullptr;
if (const auto *C = S.getSingleClause<OMPIfClause>())
@@ -1357,15 +1677,7 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
    // The cleanup callback that finalizes all variables at the given location,
// thus calls destructors etc.
auto FiniCB = [this](InsertPointTy IP) {
- CGBuilderTy::InsertPointGuard IPG(Builder);
- assert(IP.getBlock()->end() != IP.getPoint() &&
- "OpenMP IR Builder should cause terminated block!");
- llvm::BasicBlock *IPBB = IP.getBlock();
- llvm::BasicBlock *DestBB = IPBB->splitBasicBlock(IP.getPoint());
- IPBB->getTerminator()->eraseFromParent();
- Builder.SetInsertPoint(IPBB);
- CodeGenFunction::JumpDest Dest = getJumpDestInCurrentScope(DestBB);
- EmitBranchThroughCleanup(Dest);
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
};
// Privatization callback that performs appropriate action for
@@ -1387,32 +1699,17 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
auto BodyGenCB = [ParallelRegionBodyStmt,
this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
llvm::BasicBlock &ContinuationBB) {
- auto OldAllocaIP = AllocaInsertPt;
- AllocaInsertPt = &*AllocaIP.getPoint();
-
- auto OldReturnBlock = ReturnBlock;
- ReturnBlock = getJumpDestInCurrentScope(&ContinuationBB);
-
- llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
- CodeGenIPBB->splitBasicBlock(CodeGenIP.getPoint());
- llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator();
- CodeGenIPBBTI->removeFromParent();
-
- Builder.SetInsertPoint(CodeGenIPBB);
-
- EmitStmt(ParallelRegionBodyStmt);
-
- Builder.Insert(CodeGenIPBBTI);
-
- AllocaInsertPt = OldAllocaIP;
- ReturnBlock = OldReturnBlock;
+ OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
+ ContinuationBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
+ CodeGenIP, ContinuationBB);
};
CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
- Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
- FiniCB, IfCond, NumThreads,
- ProcBind, S.hasCancel()));
+ Builder.restoreIP(OMPBuilder.CreateParallel(Builder, BodyGenCB, PrivCB,
+ FiniCB, IfCond, NumThreads,
+ ProcBind, S.hasCancel()));
return;
}
@@ -1436,10 +1733,16 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
- emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(*this, S,
- [](CodeGenFunction &) { return nullptr; });
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
@@ -1506,6 +1809,27 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
getProfileCount(D.getBody()));
EmitBlock(NextBB);
}
+
+ OMPPrivateScope InscanScope(*this);
+ EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
+ bool IsInscanRegion = InscanScope.Privatize();
+ if (IsInscanRegion) {
+    // Need to remember the blocks before and after the scan directive
+    // to dispatch them correctly depending on the clause used in
+    // this directive, inclusive or exclusive. For the inclusive scan the
+    // natural order of the blocks is used; for the exclusive clause the
+    // blocks must be executed in reverse order.
+ OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
+ OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
+    // No need to allocate the inscan exit block; in simd mode it is selected
+    // in the codegen for the scan directive.
+ if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
+ OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
+ OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
+ EmitBranch(OMPScanDispatch);
+ EmitBlock(OMPBeforeScanBlock);
+ }
+
// Emit loop variables for C++ range loops.
const Stmt *Body =
D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
@@ -1515,13 +1839,17 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
Body, /*TryImperfectlyNestedLoops=*/true),
D.getCollapsedNumber());
+ // Jump to the dispatcher at the end of the loop body.
+ if (IsInscanRegion)
+ EmitBranch(OMPScanExitBlock);
+
// The end (updates/cleanups).
EmitBlock(Continue.getBlock());
BreakContinueStack.pop_back();
}
void CodeGenFunction::EmitOMPInnerLoop(
- const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+ const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
@@ -1531,8 +1859,19 @@ void CodeGenFunction::EmitOMPInnerLoop(
auto CondBlock = createBasicBlock("omp.inner.for.cond");
EmitBlock(CondBlock);
const SourceRange R = S.getSourceRange();
- LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
+
+ // If attributes are attached, push to the basic block with them.
+ const auto &OMPED = cast<OMPExecutableDirective>(S);
+ const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
+ const Stmt *SS = ICS->getCapturedStmt();
+ const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
+ if (AS)
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
+ AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
+ else
+ LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -1671,7 +2010,7 @@ static void emitAlignedClause(CodeGenFunction &CGF,
"alignment is not power of 2");
if (Alignment != 0) {
llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
- CGF.EmitAlignmentAssumption(
+ CGF.emitAlignmentAssumption(
PtrValue, E, /*No second loc needed*/ SourceLocation(),
llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
}
@@ -1835,6 +2174,18 @@ void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
LoopStack.setParallel(!IsMonotonic);
LoopStack.setVectorizeEnable();
emitSimdlenSafelenClause(*this, D, IsMonotonic);
+ if (const auto *C = D.getSingleClause<OMPOrderClause>())
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ LoopStack.setParallel(/*Enable=*/true);
+ if ((D.getDirectiveKind() == OMPD_simd ||
+ (getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(D.getDirectiveKind()))) &&
+ llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ }))
+ // Disable parallel access in case of prefix sum.
+ LoopStack.setParallel(/*Enable=*/false);
}
void CodeGenFunction::EmitOMPSimdFinal(
@@ -1886,7 +2237,6 @@ void CodeGenFunction::EmitOMPSimdFinal(
static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
const OMPLoopDirective &S,
CodeGenFunction::JumpDest LoopExit) {
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
}
@@ -1917,12 +2267,14 @@ static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
BodyCodeGen(CGF);
};
const Expr *IfCond = nullptr;
- for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
- if (CGF.getLangOpts().OpenMP >= 50 &&
- (C->getNameModifier() == OMPD_unknown ||
- C->getNameModifier() == OMPD_simd)) {
- IfCond = C->getCondition();
- break;
+ if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (CGF.getLangOpts().OpenMP >= 50 &&
+ (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_simd)) {
+ IfCond = C->getCondition();
+ break;
+ }
}
}
if (IfCond) {
@@ -2007,10 +2359,8 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPInnerLoop(
S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(
- CGF, S);
- CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S,
+ CodeGenFunction::JumpDest());
},
[](CodeGenFunction &) {});
});
@@ -2031,11 +2381,19 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
+ OMPFirstScanLoop = true;
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
emitOMPSimdRegion(CGF, S, Action);
};
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPOuterLoop(
@@ -2103,10 +2461,14 @@ void CodeGenFunction::EmitOMPOuterLoop(
[&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
// Generate !llvm.loop.parallel metadata for loads and stores for loops
// with dynamic/guided scheduling and without ordered clause.
- if (!isOpenMPSimdDirective(S.getDirectiveKind()))
+ if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
CGF.LoopStack.setParallel(!IsMonotonic);
- else
+ if (const auto *C = S.getSingleClause<OMPOrderClause>())
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ CGF.LoopStack.setParallel(/*Enable=*/true);
+ } else {
CGF.EmitOMPSimdInit(S, IsMonotonic);
+ }
},
[&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
&LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -2612,6 +2974,14 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
/* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
+ bool IsMonotonic =
+ Ordered ||
+ ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
+ ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
+ !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+        ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
+ ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
+ ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
/* Chunked */ Chunk != nullptr) ||
StaticChunkedOne) &&
@@ -2620,9 +2990,13 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
emitCommonSimdLoop(
*this, S,
- [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- if (isOpenMPSimdDirective(S.getDirectiveKind()))
- CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
+ [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
+ if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ CGF.EmitOMPSimdInit(S, IsMonotonic);
+ } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ CGF.LoopStack.setParallel(/*Enable=*/true);
+ }
},
[IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
&S, ScheduleKind, LoopExit,
@@ -2663,10 +3037,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
: S.getCond(),
StaticChunkedOne ? S.getDistInc() : S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime()
- .initLastprivateConditionalCounter(CGF, S);
- CGF.EmitOMPLoopBody(S, LoopExit);
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
},
[](CodeGenFunction &) {});
});
@@ -2678,11 +3049,6 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
} else {
- const bool IsMonotonic =
- Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
- ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
- ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
- ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments(
@@ -2755,16 +3121,233 @@ emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
return {LBVal, UBVal};
}
+/// Emits the code for the directive with inscan reductions.
+/// The code is the following:
+/// \code
+/// size num_iters = <num_iters>;
+/// <type> buffer[num_iters];
+/// #pragma omp ...
+/// for (i: 0..<num_iters>) {
+/// <input phase>;
+/// buffer[i] = red;
+/// }
+/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
+///   for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
+///     buffer[cnt] op= buffer[cnt-pow(2,k)];
+/// #pragma omp ...
+/// for (0..<num_iters>) {
+/// red = InclusiveScan ? buffer[i] : buffer[i-1];
+/// <scan phase>;
+/// }
+/// \endcode
+static void emitScanBasedDirective(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
+ llvm::function_ref<void(CodeGenFunction &)> FirstGen,
+ llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
+ llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
+ NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayTemps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ assert(C->getModifier() == OMPC_REDUCTION_inscan &&
+ "Only inscan reductions are expected.");
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayTemps.append(C->copy_array_temps().begin(),
+ C->copy_array_temps().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ {
+    // Emit buffers for each reduction variable.
+    // ReductionCodeGen is required to correctly emit the code for array
+    // reductions.
+ ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
+ unsigned Count = 0;
+ auto *ITA = CopyArrayTemps.begin();
+ for (const Expr *IRef : Privates) {
+ const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+ // Emit variably modified arrays, used for arrays/array sections
+ // reductions.
+ if (PrivateVD->getType()->isVariablyModifiedType()) {
+ RedCG.emitSharedOrigLValue(CGF, Count);
+ RedCG.emitAggregateType(CGF, Count);
+ }
+ CodeGenFunction::OpaqueValueMapping DimMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
+ ->getSizeExpr()),
+ RValue::get(OMPScanNumIterations));
+ // Emit temp buffer.
+ CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
+ ++ITA;
+ ++Count;
+ }
+ }
+ CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
+ {
+ // Emit loop with input phase:
+ // #pragma omp ...
+ // for (i: 0..<num_iters>) {
+ // <input phase>;
+ // buffer[i] = red;
+ // }
+ CGF.OMPFirstScanLoop = true;
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ FirstGen(CGF);
+ }
+ // Emit prefix reduction:
+  // for (int k = 0; k != ceil(log2(n)); ++k)
+ llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
+ llvm::Value *Arg =
+ CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
+ llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
+ F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
+ LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
+ LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
+ llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
+ OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
+ CGF.EmitBlock(LoopBB);
+ auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
+ // size pow2k = 1;
+ auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
+ Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
+ // for (size i = n - 1; i >= 2 ^ k; --i)
+ // tmp[i] op= tmp[i-pow2k];
+ llvm::BasicBlock *InnerLoopBB =
+ CGF.createBasicBlock("omp.inner.log.scan.body");
+ llvm::BasicBlock *InnerExitBB =
+ CGF.createBasicBlock("omp.inner.log.scan.exit");
+ llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerLoopBB);
+ auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ IVal->addIncoming(NMin1, LoopBB);
+ {
+ CodeGenFunction::OMPPrivateScope PrivScope(CGF);
+ auto *ILHS = LHSs.begin();
+ auto *IRHS = RHSs.begin();
+ for (const Expr *CopyArrayElem : CopyArrayElems) {
+ const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ Address LHSAddr = Address::invalid();
+ {
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IVal));
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
+ Address RHSAddr = Address::invalid();
+ {
+ llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(OffsetIVal));
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
+ ++ILHS;
+ ++IRHS;
+ }
+ PrivScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitReduction(
+ CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
+ {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
+ }
+ llvm::Value *NextIVal =
+ CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
+ CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerExitBB);
+ llvm::Value *Next =
+ CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
+ Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
+ // pow2k <<= 1;
+ llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
+ Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
+ llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
+ CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
+ auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
+ CGF.EmitBlock(ExitBB);
+
+ CGF.OMPFirstScanLoop = false;
+ SecondGen(CGF);
+}
+
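The control flow emitted above performs a log2(n)-pass in-place scan over the per-iteration buffer. A host-side model of the same computation (a sketch; prefixScanInPlace is our own name):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// After the input phase has filled Buffer[i] with each iteration's
// contribution, ceil(log2(n)) passes combine Buffer[i] with Buffer[i - 2^k],
// yielding an inclusive prefix reduction in place.
static void prefixScanInPlace(std::vector<long> &Buffer) {
  const std::size_t N = Buffer.size();
  if (N < 2)
    return;
  const unsigned Steps =
      static_cast<unsigned>(std::ceil(std::log2(static_cast<double>(N))));
  std::size_t Pow2K = 1;
  for (unsigned K = 0; K != Steps; ++K) {
    // Walk downward so each element still reads the previous pass's values.
    for (std::size_t I = N - 1; I >= Pow2K; --I)
      Buffer[I] += Buffer[I - Pow2K];
    Pow2K <<= 1;
  }
}

int main() {
  std::vector<long> Buf = {1, 2, 3, 4, 5, 6, 7, 8};
  prefixScanInPlace(Buf);
  for (long V : Buf)
    std::printf("%ld ", V); // 1 3 6 10 15 21 28 36
  std::printf("\n");
  return 0;
}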
+static bool emitWorksharingDirective(CodeGenFunction &CGF,
+ const OMPLoopDirective &S,
+ bool HasCancel) {
+ bool HasLastprivates;
+ if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ })) {
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, S.getDirectiveKind(), HasCancel);
+ (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ // Emit an implicit barrier at the end.
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
+ OMPD_for);
+ };
+ const auto &&SecondGen = [&S, HasCancel,
+ &HasLastprivates](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, S.getDirectiveKind(), HasCancel);
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ };
+ emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
+ } else {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
+ HasCancel);
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ }
+ return HasLastprivates;
+}
+
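A user-level example that takes the scan-based branch above (OpenMP 5.0 inscan syntax; illustrative only, not part of this patch):

#include <cstdio>

int main() {
  int x = 0;
  int out[8];
  // Inclusive prefix sum over 1..8. Statements before the scan directive form
  // the input phase, statements after it form the scan phase.
  #pragma omp parallel
  #pragma omp for reduction(inscan, +: x)
  for (int i = 0; i < 8; ++i) {
    x += i + 1;                 // input phase
    #pragma omp scan inclusive(x)
    out[i] = x;                 // scan phase
  }
  for (int i = 0; i < 8; ++i)
    std::printf("%d ", out[i]); // 1 3 6 10 15 21 28 36
  std::printf("\n");
  return 0;
}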
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
- OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
- HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
- emitForLoopBounds,
- emitDispatchForLoopBounds);
+ HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
S.hasCancel());
@@ -2773,17 +3356,19 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
- HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
- emitForLoopBounds,
- emitDispatchForLoopBounds);
+ HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
@@ -2791,6 +3376,8 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
@@ -2808,7 +3395,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, CapturedStmt, CS,
&HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
- ASTContext &C = CGF.getContext();
+ const ASTContext &C = CGF.getContext();
QualType KmpInt32Ty =
C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
@@ -2830,11 +3417,13 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
- BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
- OK_Ordinary, S.getBeginLoc(), FPOptions());
+ BinaryOperator *Cond = BinaryOperator::Create(
+ C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary,
+ S.getBeginLoc(), FPOptionsOverride());
// Increment for loop counter.
- UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
- S.getBeginLoc(), true);
+ UnaryOperator *Inc = UnaryOperator::Create(
+ C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
+ S.getBeginLoc(), true, FPOptionsOverride());
auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
@@ -2847,7 +3436,6 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// break;
// }
// .omp.sections.exit:
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
llvm::SwitchInst *SwitchStmt =
CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
@@ -2905,7 +3493,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// IV = LB;
CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
// while (idx <= UB) { BODY; ++idx; }
- CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
+ CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
@@ -2949,6 +3537,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
EmitSections(S);
}
@@ -2957,6 +3547,8 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
OMPD_sections);
}
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
@@ -2995,6 +3587,8 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
CopyprivateVars, DestExprs,
@@ -3007,6 +3601,8 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
*this, S.getBeginLoc(),
S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
}
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
@@ -3018,11 +3614,75 @@ static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
}
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
+ const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt();
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
+ CodeGenIP, FiniBB);
+ };
+
+ CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
+ Builder.restoreIP(OMPBuilder.CreateMaster(Builder, BodyGenCB, FiniCB));
+
+ return;
+ }
OMPLexicalScope Scope(*this, S, OMPD_unknown);
emitMaster(*this, S);
}
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
+ const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt();
+ const Expr *Hint = nullptr;
+ if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
+ Hint = HintClause->getHint();
+
+ // TODO: This is slightly different from what's currently being done in
+ // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
+ // about typing is final.
+ llvm::Value *HintInst = nullptr;
+ if (Hint)
+ HintInst =
+ Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
+ CodeGenIP, FiniBB);
+ };
+
+ CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
+ Builder.restoreIP(OMPBuilder.CreateCritical(
+ Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
+ HintInst));
+
+ return;
+ }
+
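A user-level example of the critical construct with a hint clause handled above (assumes an OpenMP 5.0 omp.h providing omp_sync_hint_contended; illustrative only):

#include <omp.h>
#include <cstdio>

int main() {
  int sum = 0;
  #pragma omp parallel for
  for (int i = 0; i < 100; ++i) {
    // Named critical with a synchronization hint; the hint expression is the
    // integer value handed to the runtime (see the cast above).
    #pragma omp critical(acc) hint(omp_sync_hint_contended)
    sum += i;
  }
  std::printf("%d\n", sum); // 4950
  return 0;
}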
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
@@ -3042,12 +3702,16 @@ void CodeGenFunction::EmitOMPParallelForDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
- CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
- emitDispatchForLoopBounds);
+ (void)emitWorksharingDirective(CGF, S, S.hasCancel());
};
- emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelForSimdDirective(
@@ -3056,11 +3720,16 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
- emitDispatchForLoopBounds);
+ (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelMasterDirective(
@@ -3086,10 +3755,16 @@ void CodeGenFunction::EmitOMPParallelMasterDirective(
emitMaster(CGF, S);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
- emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(*this, S,
- [](CodeGenFunction &) { return nullptr; });
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelSectionsDirective(
@@ -3100,8 +3775,14 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
Action.Enter(CGF);
CGF.EmitSections(S);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPTaskBasedDirective(
@@ -3188,33 +3869,28 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
SmallVector<const Expr *, 4> LHSs;
SmallVector<const Expr *, 4> RHSs;
for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Data.ReductionVars.emplace_back(Ref);
- Data.ReductionCopies.emplace_back(*IPriv);
- Data.ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
- }
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
*this, S.getBeginLoc(), LHSs, RHSs, Data);
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (const Expr *IRef : C->varlists())
- Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
CapturedRegion](CodeGenFunction &CGF,
PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
+ llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty()) {
llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
@@ -3241,6 +3917,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
+ FirstprivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
for (const Expr *E : Data.LastprivateVars) {
@@ -3271,13 +3948,21 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
}
if (Data.Reductions) {
+ OMPPrivateScope FirstprivateScope(CGF);
+ for (const auto &Pair : FirstprivatePtrs) {
+ Address Replacement(CGF.Builder.CreateLoad(Pair.second),
+ CGF.getContext().getDeclAlign(Pair.first));
+ FirstprivateScope.addPrivate(Pair.first,
+ [Replacement]() { return Replacement; });
+ }
+ (void)FirstprivateScope.Privatize();
OMPLexicalScope LexScope(CGF, S, CapturedRegion);
- ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
- Data.ReductionOps);
+ ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
+ Data.ReductionCopies, Data.ReductionOps);
llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
- RedCG.emitSharedLValue(CGF, Cnt);
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// FIXME: This must removed once the runtime library is fixed.
// Emit required threadprivate variables for
@@ -3322,9 +4007,9 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// privatized earlier.
OMPPrivateScope InRedScope(CGF);
if (!InRedVars.empty()) {
- ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
+ ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
- RedCG.emitSharedLValue(CGF, Cnt);
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// The taskgroup descriptor variable is always implicit firstprivate and
// privatized already during processing of the firstprivates.
@@ -3333,9 +4018,13 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// initializer/combiner/finalizer.
CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
- llvm::Value *ReductionsPtr =
- CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
- TaskgroupDescriptors[Cnt]->getExprLoc());
+ llvm::Value *ReductionsPtr;
+ if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
+ ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
+ TRExpr->getExprLoc());
+ } else {
+ ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
@@ -3448,9 +4137,11 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
}
(void)TargetScope.Privatize();
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (const Expr *IRef : C->varlists())
- Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
@@ -3537,6 +4228,8 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
SharedsTy, CapturedStruct, IfCond,
Data);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}
@@ -3562,21 +4255,13 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
SmallVector<const Expr *, 4> RHSs;
OMPTaskDataTy Data;
for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Data.ReductionVars.emplace_back(Ref);
- Data.ReductionCopies.emplace_back(*IPriv);
- Data.ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
- }
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
llvm::Value *ReductionDesc =
CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
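The rewritten clause walk above gathers the operands of task_reduction; a user-level example of the construct (OpenMP 5.0 syntax; illustrative only):

#include <cstdio>

int main() {
  int sum = 0;
  #pragma omp parallel
  #pragma omp single
  {
    // Tasks participating via in_reduction contribute to the taskgroup's
    // task_reduction; the result is combined when the taskgroup completes.
    #pragma omp taskgroup task_reduction(+: sum)
    {
      for (int i = 1; i <= 100; ++i) {
        #pragma omp task in_reduction(+: sum)
        sum += i;
      }
    }
  }
  std::printf("%d\n", sum); // 5050
  return 0;
}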
@@ -3593,6 +4278,9 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
+ llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
+ ? llvm::AtomicOrdering::NotAtomic
+ : llvm::AtomicOrdering::AcquireRelease;
CGM.getOpenMPRuntime().emitFlush(
*this,
[&S]() -> ArrayRef<const Expr *> {
@@ -3601,7 +4289,233 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
FlushClause->varlist_end());
return llvm::None;
}(),
- S.getBeginLoc());
+ S.getBeginLoc(), AO);
+}
+
+void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
+ const auto *DO = S.getSingleClause<OMPDepobjClause>();
+ LValue DOLVal = EmitLValue(DO->getDepobj());
+ if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
+ OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
+ DC->getModifier());
+ Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
+ Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
+ *this, Dependencies, DC->getBeginLoc());
+ EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
+ return;
+ }
+ if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
+ CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
+ return;
+ }
+ if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
+ CGM.getOpenMPRuntime().emitUpdateClause(
+ *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
+ return;
+ }
+}
+
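A user-level example of the depobj construct handled above (requires an OpenMP 5.0 runtime providing omp_depend_t; illustrative only):

#include <omp.h>
#include <cstdio>

int main() {
  int x = 0;
  omp_depend_t dep;
  #pragma omp parallel
  #pragma omp single
  {
    // Build a standalone dependence object, use it from a task, then
    // destroy it once it is no longer needed.
    #pragma omp depobj(dep) depend(inout: x)
    #pragma omp task depend(depobj: dep)
    x = 42;
    #pragma omp taskwait
    #pragma omp depobj(dep) destroy
  }
  std::printf("%d\n", x); // 42
  return 0;
}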
+void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
+ if (!OMPParentLoopDirectiveForScan)
+ return;
+ const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
+ bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayTemps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
+ if (C->getModifier() != OMPC_REDUCTION_inscan)
+ continue;
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayTemps.append(C->copy_array_temps().begin(),
+ C->copy_array_temps().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ if (ParentDir.getDirectiveKind() == OMPD_simd ||
+ (getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
+    // For the simd directive and simd-based directives in simd-only mode, use
+    // the following codegen:
+ // int x = 0;
+ // #pragma omp simd reduction(inscan, +: x)
+ // for (..) {
+ // <first part>
+ // #pragma omp scan inclusive(x)
+ // <second part>
+ // }
+ // is transformed to:
+ // int x = 0;
+ // for (..) {
+ // int x_priv = 0;
+ // <first part>
+ // x = x_priv + x;
+ // x_priv = x;
+ // <second part>
+ // }
+ // and
+ // int x = 0;
+ // #pragma omp simd reduction(inscan, +: x)
+ // for (..) {
+ // <first part>
+ // #pragma omp scan exclusive(x)
+ // <second part>
+ // }
+ // to
+ // int x = 0;
+ // for (..) {
+ // int x_priv = 0;
+ // <second part>
+ // int temp = x;
+ // x = x_priv + x;
+ // x_priv = temp;
+ // <first part>
+ // }
+ llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
+ EmitBranch(IsInclusive
+ ? OMPScanReduce
+ : BreakContinueStack.back().ContinueBlock.getBlock());
+ EmitBlock(OMPScanDispatch);
+ {
+ // New scope for correct construction/destruction of temp variables for
+ // exclusive scan.
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
+ EmitBlock(OMPScanReduce);
+ if (!IsInclusive) {
+ // Create temp var and copy LHS value to this temp value.
+ // TMP = LHS;
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *TempExpr = CopyArrayTemps[I];
+ EmitAutoVarDecl(
+ *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
+ LValue DestLVal = EmitLValue(TempExpr);
+ LValue SrcLVal = EmitLValue(LHSs[I]);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ CGM.getOpenMPRuntime().emitReduction(
+ *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
+ {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ LValue DestLVal;
+ LValue SrcLVal;
+ if (IsInclusive) {
+ DestLVal = EmitLValue(RHSs[I]);
+ SrcLVal = EmitLValue(LHSs[I]);
+ } else {
+ const Expr *TempExpr = CopyArrayTemps[I];
+ DestLVal = EmitLValue(RHSs[I]);
+ SrcLVal = EmitLValue(TempExpr);
+ }
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
+ OMPScanExitBlock = IsInclusive
+ ? BreakContinueStack.back().ContinueBlock.getBlock()
+ : OMPScanReduce;
+ EmitBlock(OMPAfterScanBlock);
+ return;
+ }
+ if (!IsInclusive) {
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ EmitBlock(OMPScanExitBlock);
+ }
+ if (OMPFirstScanLoop) {
+ // Emit buffer[i] = red; at the end of the input phase.
+ const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
+ .getIterationVariable()
+ ->IgnoreParenImpCasts();
+ LValue IdxLVal = EmitLValue(IVExpr);
+ llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
+ IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ OpaqueValueMapping IdxMapping(
+ *this,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IdxVal));
+ LValue DestLVal = EmitLValue(CopyArrayElem);
+ LValue SrcLVal = EmitLValue(OrigExpr);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ if (IsInclusive) {
+ EmitBlock(OMPScanExitBlock);
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ }
+ EmitBlock(OMPScanDispatch);
+ if (!OMPFirstScanLoop) {
+ // Emit red = buffer[i]; at the entrance to the scan phase.
+ const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
+ .getIterationVariable()
+ ->IgnoreParenImpCasts();
+ LValue IdxLVal = EmitLValue(IVExpr);
+ llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
+ IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
+ llvm::BasicBlock *ExclusiveExitBB = nullptr;
+ if (!IsInclusive) {
+ llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
+ ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
+ llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
+ Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
+ EmitBlock(ContBB);
+ // Use idx - 1 iteration for exclusive scan.
+ IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
+ }
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ OpaqueValueMapping IdxMapping(
+ *this,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IdxVal));
+ LValue SrcLVal = EmitLValue(CopyArrayElem);
+ LValue DestLVal = EmitLValue(OrigExpr);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ if (!IsInclusive) {
+ EmitBlock(ExclusiveExitBB);
+ }
+ }
+ EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
+ : OMPAfterScanBlock);
+ EmitBlock(OMPAfterScanBlock);
}
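A user-level exclusive-scan example matching the simd-only lowering described in the comment above (OpenMP 5.0 syntax; illustrative only):

#include <cstdio>

int main() {
  int x = 0;
  int out[8];
  // Exclusive scan: each iteration reads the running total before adding its
  // own contribution, so the phases appear in the opposite order.
  #pragma omp simd reduction(inscan, +: x)
  for (int i = 0; i < 8; ++i) {
    out[i] = x;                 // scan phase comes first for 'exclusive'
    #pragma omp scan exclusive(x)
    x += i + 1;                 // input phase
  }
  for (int i = 0; i < 8; ++i)
    std::printf("%d ", out[i]); // 0 1 3 6 10 15 21 28
  std::printf("\n");
  return 0;
}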
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
@@ -3790,7 +4704,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
+ RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
@@ -3843,11 +4757,12 @@ void CodeGenFunction::EmitOMPDistributeDirective(
}
static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
- const CapturedStmt *S) {
+ const CapturedStmt *S,
+ SourceLocation Loc) {
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
CGF.CapturedStmtInfo = &CapStmtInfo;
- llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
+ llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
Fn->setDoesNotRecurse();
return Fn;
}
@@ -3867,7 +4782,8 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
if (C) {
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
- llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
+ llvm::Function *OutlinedFn =
+ emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
OutlinedFn, CapturedVars);
} else {
@@ -3918,16 +4834,22 @@ convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
return ComplexVal;
}
-static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
LValue LVal, RValue RVal) {
- if (LVal.isGlobalReg()) {
+ if (LVal.isGlobalReg())
CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
- } else {
- CGF.EmitAtomicStore(RVal, LVal,
- IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic,
- LVal.isVolatile(), /*isInit=*/false);
- }
+ else
+ CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
+}
+
+static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, LValue LVal,
+ SourceLocation Loc) {
+ if (LVal.isGlobalReg())
+ return CGF.EmitLoadOfLValue(LVal, Loc);
+ return CGF.EmitAtomicLoad(
+ LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
+ LVal.isVolatile());
}
void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
@@ -3948,7 +4870,7 @@ void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
}
}
-static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
const Expr *X, const Expr *V,
SourceLocation Loc) {
// v = x;
@@ -3956,34 +4878,54 @@ static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
LValue VLValue = CGF.EmitLValue(V);
- RValue Res = XLValue.isGlobalReg()
- ? CGF.EmitLoadOfLValue(XLValue, Loc)
- : CGF.EmitAtomicLoad(
- XLValue, Loc,
- IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic,
- XLValue.isVolatile());
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the read or capture clause is specified and the acquire, acq_rel, or
+ // seq_cst clause is specified then the strong flush on exit from the atomic
+ // operation is also an acquire flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Acquire);
+ break;
+ case llvm::AtomicOrdering::Monotonic:
+ case llvm::AtomicOrdering::Release:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}
-static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
- const Expr *X, const Expr *E,
- SourceLocation Loc) {
+static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, const Expr *X,
+ const Expr *E, SourceLocation Loc) {
// x = expr;
assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
- emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
@@ -4104,10 +5046,10 @@ std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
return Res;
}
-static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
- const Expr *X, const Expr *E,
- const Expr *UE, bool IsXLHSInRHSPart,
- SourceLocation Loc) {
+static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, const Expr *X,
+ const Expr *E, const Expr *UE,
+ bool IsXLHSInRHSPart, SourceLocation Loc) {
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic update' must be a binary operator.");
const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
@@ -4120,9 +5062,6 @@ static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- llvm::AtomicOrdering AO = IsSeqCst
- ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
@@ -4134,12 +5073,25 @@ static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
};
(void)CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static RValue convertToType(CodeGenFunction &CGF, RValue Value,
@@ -4159,7 +5111,8 @@ static RValue convertToType(CodeGenFunction &CGF, RValue Value,
llvm_unreachable("Must be a scalar or complex.");
}
-static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO,
bool IsPostfixUpdate, const Expr *V,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
@@ -4170,9 +5123,6 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
LValue VLValue = CGF.EmitLValue(V);
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- llvm::AtomicOrdering AO = IsSeqCst
- ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
QualType NewVValType;
if (UE) {
// 'x' is updated with some additional value.
@@ -4200,6 +5150,7 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
};
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
if (Res.first) {
// 'atomicrmw' instruction was generated.
if (IsPostfixUpdate) {
@@ -4226,6 +5177,7 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
Loc, Gen);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
if (Res.first) {
// 'atomicrmw' instruction was generated.
NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
@@ -4233,32 +5185,54 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
}
// Emit post-update store to 'v' of old/new 'x' value.
CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ // If the read or capture clause is specified and the acquire, acq_rel, or
+ // seq_cst clause is specified then the strong flush on exit from the atomic
+ // operation is also an acquire flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Acquire);
+ break;
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::AcquireRelease);
+ break;
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
- bool IsSeqCst, bool IsPostfixUpdate,
+ llvm::AtomicOrdering AO, bool IsPostfixUpdate,
const Expr *X, const Expr *V, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
- emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
+ emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
break;
case OMPC_write:
- emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
+ emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
break;
case OMPC_unknown:
case OMPC_update:
- emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
+ emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
break;
case OMPC_capture:
- emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
+ emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
case OMPC_if:
@@ -4277,12 +5251,17 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_collapse:
case OMPC_default:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_shared:
case OMPC_linear:
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_proc_bind:
case OMPC_schedule:
case OMPC_ordered:
@@ -4308,6 +5287,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -4317,38 +5297,76 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
- bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
+ llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
+ bool MemOrderingSpecified = false;
+ if (S.getSingleClause<OMPSeqCstClause>()) {
+ AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPAcqRelClause>()) {
+ AO = llvm::AtomicOrdering::AcquireRelease;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPAcquireClause>()) {
+ AO = llvm::AtomicOrdering::Acquire;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPReleaseClause>()) {
+ AO = llvm::AtomicOrdering::Release;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPRelaxedClause>()) {
+ AO = llvm::AtomicOrdering::Monotonic;
+ MemOrderingSpecified = true;
+ }
OpenMPClauseKind Kind = OMPC_unknown;
for (const OMPClause *C : S.clauses()) {
- // Find first clause (skip seq_cst clause, if it is first).
- if (C->getClauseKind() != OMPC_seq_cst) {
+    // Find first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
+ // if it is first).
+ if (C->getClauseKind() != OMPC_seq_cst &&
+ C->getClauseKind() != OMPC_acq_rel &&
+ C->getClauseKind() != OMPC_acquire &&
+ C->getClauseKind() != OMPC_release &&
+ C->getClauseKind() != OMPC_relaxed) {
Kind = C->getClauseKind();
break;
}
}
-
- const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
- if (const auto *FE = dyn_cast<FullExpr>(CS))
- enterFullExpression(FE);
- // Processing for statements under 'atomic capture'.
- if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
- for (const Stmt *C : Compound->body()) {
- if (const auto *FE = dyn_cast<FullExpr>(C))
- enterFullExpression(FE);
+ if (!MemOrderingSpecified) {
+ llvm::AtomicOrdering DefaultOrder =
+ CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
+ if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
+ DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
+ (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
+ Kind == OMPC_capture)) {
+ AO = DefaultOrder;
+ } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
+ if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
+ AO = llvm::AtomicOrdering::Release;
+ } else if (Kind == OMPC_read) {
+ assert(Kind == OMPC_read && "Unexpected atomic kind.");
+ AO = llvm::AtomicOrdering::Acquire;
+ }
}
}
- auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
+ const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
+
+ auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
PrePostActionTy &) {
CGF.EmitStopPoint(CS);
- emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
- S.getV(), S.getExpr(), S.getUpdateExpr(),
- S.isXLHSInRHSPart(), S.getBeginLoc());
+ emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
+ S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
+ S.getBeginLoc());
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
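As a hedged illustration (not part of the patch) of the clause-to-ordering mapping implemented above, the comments below note the llvm::AtomicOrdering each form selects and the ordering requested for the accompanying flush:

    void atomic_orderings(int &x, int &v) {
    #pragma omp atomic read acquire    // Acquire load, followed by an acquire flush
      v = x;
    #pragma omp atomic write release   // Release store, plus a release flush
      x = 1;
    #pragma omp atomic update relaxed  // Monotonic update, no flush emitted
      x += 2;
    #pragma omp atomic capture seq_cst // SequentiallyConsistent update; the flush
      v = x++;                         // is emitted with AcquireRelease ordering
    }

When no memory-order clause is present, the ordering falls back to the default installed by a 'requires atomic_default_mem_order(...)' directive, as handled at the top of EmitOMPAtomicDirective.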
@@ -4370,6 +5388,8 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
return;
}
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
llvm::Function *Fn = nullptr;
llvm::Constant *FnID = nullptr;
@@ -4384,9 +5404,10 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
}
// Check if we have any device clause associated with the directive.
- const Expr *Device = nullptr;
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
+ nullptr, OMPC_DEVICE_unknown);
if (auto *C = S.getSingleClause<OMPDeviceClause>())
- Device = C->getDevice();
+ Device.setPointerAndInt(C->getDevice(), C->getModifier());
// Check if we have an if clause whose conditional always evaluates to false
// or if we do not have any targets specified. If so the target region is not
@@ -4856,7 +5877,8 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
break;
}
}
- if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
// TODO: This check is necessary as we only generate `omp parallel` through
// the OpenMPIRBuilder for now.
if (S.getCancelRegion() == OMPD_parallel) {
@@ -4865,7 +5887,7 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
IfCondition = EmitScalarExpr(IfCond,
/*IgnoreResultAssign=*/true);
return Builder.restoreIP(
- OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
+ OMPBuilder.CreateCancel(Builder, IfCondition, S.getCancelRegion()));
}
}
@@ -4876,7 +5898,8 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
if (Kind == OMPD_parallel || Kind == OMPD_task ||
- Kind == OMPD_target_parallel)
+ Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
+ Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
return ReturnBlock;
assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
@@ -4888,9 +5911,8 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
}
void CodeGenFunction::EmitOMPUseDevicePtrClause(
- const OMPClause &NC, OMPPrivateScope &PrivateScope,
+ const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
- const auto &C = cast<OMPUseDevicePtrClause>(NC);
auto OrigVarIt = C.varlist_begin();
auto InitIt = C.inits().begin();
for (const Expr *PvtVarIt : C.private_copies()) {
@@ -4951,6 +5973,60 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
}
}
+static const VarDecl *getBaseDecl(const Expr *Ref) {
+ const Expr *Base = Ref->IgnoreParenImpCasts();
+ while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = ASE->getBase()->IgnoreParenImpCasts();
+ return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
+}
+
+void CodeGenFunction::EmitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const Expr *Ref : C.varlists()) {
+ const VarDecl *OrigVD = getBaseDecl(Ref);
+ if (!Processed.insert(OrigVD).second)
+ continue;
+ // In order to identify the right initializer we need to match the
+    // declaration used by the mapping logic. In some cases we may get an
+    // OMPCapturedExprDecl that refers to the original declaration.
+ const ValueDecl *MatchingVD = OrigVD;
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
+      // OMPCapturedExprDecls are used to privatize fields of the current
+ // structure.
+ const auto *ME = cast<MemberExpr>(OED->getInit());
+ assert(isa<CXXThisExpr>(ME->getBase()) &&
+ "Base should be the current struct!");
+ MatchingVD = ME->getMemberDecl();
+ }
+
+ // If we don't have information about the current list item, move on to
+ // the next one.
+ auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
+ if (InitAddrIt == CaptureDeviceAddrMap.end())
+ continue;
+
+ Address PrivAddr = InitAddrIt->getSecond();
+    // For declrefs and variable length arrays we need to load the pointer for
+ // correct mapping, since the pointer to the data was passed to the runtime.
+ if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
+ MatchingVD->getType()->isArrayType())
+ PrivAddr =
+ EmitLoadOfPointer(PrivAddr, getContext()
+ .getPointerType(OrigVD->getType())
+ ->castAs<PointerType>());
+ llvm::Type *RealTy =
+ ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
+ ->getPointerTo();
+ PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
+
+ (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
+ }
+}
+
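A minimal usage sketch (not part of the patch) of the new use_device_addr handling; device_fn is a hypothetical device-aware callee and OpenMP 5.0 compilation flags are assumed:

    extern void device_fn(int *p, int n); // hypothetical; expects a device address

    void run(void) {
      int x[100];
    #pragma omp target data map(to : x) use_device_addr(x)
      {
        // Inside the region, 'x' designates the corresponding device address
        // that EmitOMPUseDeviceAddrClause privatizes from CaptureDeviceAddrMap.
        device_fn(x, 100);
      }
    }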
// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
const OMPTargetDataDirective &S) {
@@ -4995,9 +6071,13 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
Info.CaptureDeviceAddrMap);
+ for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
+ CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
+ Info.CaptureDeviceAddrMap);
(void)PrivateScope.Privatize();
RCG(CGF);
} else {
+ OMPLexicalScope Scope(CGF, S, OMPD_unknown);
RCG(CGF);
}
};
@@ -5222,7 +6302,11 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
- Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ Address CapturedStruct = Address::invalid();
+ {
+ OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
+ CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ }
QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
@@ -5322,8 +6406,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
CGF.EmitOMPInnerLoop(
S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S](CodeGenFunction &CGF) {
- CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S,
+ CodeGenFunction::JumpDest());
},
[](CodeGenFunction &) {});
});
@@ -5376,11 +6460,15 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
}
void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
EmitOMPTaskLoopBasedDirective(S);
}
void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
const OMPTaskLoopSimdDirective &S) {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S);
EmitOMPTaskLoopBasedDirective(S);
}
@@ -5391,6 +6479,8 @@ void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -5401,6 +6491,8 @@ void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -5413,10 +6505,12 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
Action.Enter(CGF);
CGF.EmitOMPTaskLoopBasedDirective(S);
};
- OMPLexicalScope Scope(CGF, S, llvm::None, /*EmitPreInitStmt=*/false);
+ OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
emitEmptyBoundParameters);
}
@@ -5433,6 +6527,8 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
emitEmptyBoundParameters);
}
@@ -5461,19 +6557,43 @@ void CodeGenFunction::EmitOMPTargetUpdateDirective(
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const OMPExecutableDirective &D) {
+ if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
+ EmitOMPScanDirective(*SD);
+ return;
+ }
if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
return;
auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ OMPPrivateScope GlobalsScope(CGF);
+ if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
+      // Capture global firstprivates to avoid a crash.
+ for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+          const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || VD->hasLocalStorage())
+ continue;
+ if (!CGF.LocalDeclMap.count(VD)) {
+ LValue GlobLVal = CGF.EmitLValue(Ref);
+ GlobalsScope.addPrivate(
+ VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ }
+ }
+ }
+ }
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
+ (void)GlobalsScope.Privatize();
+ ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
} else {
- OMPPrivateScope LoopGlobals(CGF);
if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
for (const Expr *E : LD->counters()) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- LoopGlobals.addPrivate(
+ GlobalsScope.addPrivate(
VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
}
if (isa<OMPCapturedExprDecl>(VD)) {
@@ -5497,14 +6617,20 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
}
}
}
- LoopGlobals.Privatize();
+ (void)GlobalsScope.Privatize();
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}
};
- OMPSimdLexicalScope Scope(*this, D);
- CGM.getOpenMPRuntime().emitInlinedDirective(
- *this,
- isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
- : D.getDirectiveKind(),
- CodeGen);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
+ OMPSimdLexicalScope Scope(*this, D);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this,
+ isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
+ : D.getDirectiveKind(),
+ CodeGen);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, D);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index e97f7e41499d..65b3b0c5f53d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -363,8 +363,10 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
: FPT->getReturnType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
- Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+ (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect ||
+ hasAggregateEvaluationKind(ResultType)))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified(),
+ /*IsUnused=*/false, /*IsExternallyDestructed=*/true);
// Now emit our call.
llvm::CallBase *CallOrInvoke;
@@ -617,29 +619,178 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD) {
maybeEmitThunk(GD, Thunk, /*ForVTable=*/false);
}
-void CodeGenVTables::addVTableComponent(
- ConstantArrayBuilder &builder, const VTableLayout &layout,
- unsigned idx, llvm::Constant *rtti, unsigned &nextVTableThunkIndex) {
- auto &component = layout.vtable_components()[idx];
+void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
+ llvm::Constant *component,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage,
+ bool isCompleteDtor) const {
+ // No need to get the offset of a nullptr.
+ if (component->isNullValue())
+ return builder.add(llvm::ConstantInt::get(CGM.Int32Ty, 0));
+
+ auto *globalVal =
+ cast<llvm::GlobalValue>(component->stripPointerCastsAndAliases());
+ llvm::Module &module = CGM.getModule();
+
+ // We don't want to copy the linkage of the vtable exactly because we still
+ // want the stub/proxy to be emitted for properly calculating the offset.
+ // Examples where there would be no symbol emitted are available_externally
+ // and private linkages.
+ auto stubLinkage = vtableHasLocalLinkage ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::ExternalLinkage;
+
+ llvm::Constant *target;
+ if (auto *func = dyn_cast<llvm::Function>(globalVal)) {
+ target = getOrCreateRelativeStub(func, stubLinkage, isCompleteDtor);
+ } else {
+ llvm::SmallString<16> rttiProxyName(globalVal->getName());
+ rttiProxyName.append(".rtti_proxy");
+
+ // The RTTI component may not always be emitted in the same linkage unit as
+ // the vtable. As a general case, we can make a dso_local proxy to the RTTI
+ // that points to the actual RTTI struct somewhere. This will result in a
+ // GOTPCREL relocation when taking the relative offset to the proxy.
+ llvm::GlobalVariable *proxy = module.getNamedGlobal(rttiProxyName);
+ if (!proxy) {
+ proxy = new llvm::GlobalVariable(module, globalVal->getType(),
+ /*isConstant=*/true, stubLinkage,
+ globalVal, rttiProxyName);
+ proxy->setDSOLocal(true);
+ proxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ if (!proxy->hasLocalLinkage()) {
+ proxy->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ proxy->setComdat(module.getOrInsertComdat(rttiProxyName));
+ }
+ }
+ target = proxy;
+ }
+
+ builder.addRelativeOffsetToPosition(CGM.Int32Ty, target,
+ /*position=*/vtableAddressPoint);
+}
+
+llvm::Function *CodeGenVTables::getOrCreateRelativeStub(
+ llvm::Function *func, llvm::GlobalValue::LinkageTypes stubLinkage,
+ bool isCompleteDtor) const {
+ // A complete object destructor can later be substituted in the vtable for an
+ // appropriate base object destructor when optimizations are enabled. This can
+ // happen for child classes that don't have their own destructor. In the case
+ // where a parent virtual destructor is not guaranteed to be in the same
+ // linkage unit as the child vtable, it's possible for an external reference
+ // for this destructor to be substituted into the child vtable, preventing it
+ // from being in rodata. If this function is a complete virtual destructor, we
+ // can just force a stub to be emitted for it.
+ if (func->isDSOLocal() && !isCompleteDtor)
+ return func;
+
+ llvm::SmallString<16> stubName(func->getName());
+ stubName.append(".stub");
+
+ // Instead of taking the offset between the vtable and virtual function
+ // directly, we emit a dso_local stub that just contains a tail call to the
+ // original virtual function and take the offset between that and the
+ // vtable. We do this because there are some cases where the original
+ // function that would've been inserted into the vtable is not dso_local
+ // which may require some kind of dynamic relocation which prevents the
+ // vtable from being readonly. On x86_64, taking the offset between the
+ // function and the vtable gets lowered to the offset between the PLT entry
+ // for the function and the vtable which gives us a PLT32 reloc. On AArch64,
+ // right now only CALL26 and JUMP26 instructions generate PLT relocations,
+ // so we manifest them with stubs that are just jumps to the original
+ // function.
+ auto &module = CGM.getModule();
+ llvm::Function *stub = module.getFunction(stubName);
+ if (stub) {
+ assert(stub->isDSOLocal() &&
+ "The previous definition of this stub should've been dso_local.");
+ return stub;
+ }
+
+ stub = llvm::Function::Create(func->getFunctionType(), stubLinkage, stubName,
+ module);
+
+  // Propagate function attributes.
+ stub->setAttributes(func->getAttributes());
+
+ stub->setDSOLocal(true);
+ stub->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ if (!stub->hasLocalLinkage()) {
+ stub->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ stub->setComdat(module.getOrInsertComdat(stubName));
+ }
+
+ // Fill the stub with a tail call that will be optimized.
+ llvm::BasicBlock *block =
+ llvm::BasicBlock::Create(module.getContext(), "entry", stub);
+ llvm::IRBuilder<> block_builder(block);
+ llvm::SmallVector<llvm::Value *, 8> args;
+ for (auto &arg : stub->args())
+ args.push_back(&arg);
+ llvm::CallInst *call = block_builder.CreateCall(func, args);
+ call->setAttributes(func->getAttributes());
+ call->setTailCall();
+ if (call->getType()->isVoidTy())
+ block_builder.CreateRetVoid();
+ else
+ block_builder.CreateRet(call);
- auto addOffsetConstant = [&](CharUnits offset) {
- builder.add(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
- CGM.Int8PtrTy));
- };
+ return stub;
+}
+
+bool CodeGenVTables::useRelativeLayout() const {
+ return CGM.getTarget().getCXXABI().isItaniumFamily() &&
+ CGM.getItaniumVTableContext().isRelativeLayout();
+}
+
+llvm::Type *CodeGenVTables::getVTableComponentType() const {
+ if (useRelativeLayout())
+ return CGM.Int32Ty;
+ return CGM.Int8PtrTy;
+}
+
+static void AddPointerLayoutOffset(const CodeGenModule &CGM,
+ ConstantArrayBuilder &builder,
+ CharUnits offset) {
+ builder.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
+ CGM.Int8PtrTy));
+}
+
+static void AddRelativeLayoutOffset(const CodeGenModule &CGM,
+ ConstantArrayBuilder &builder,
+ CharUnits offset) {
+ builder.add(llvm::ConstantInt::get(CGM.Int32Ty, offset.getQuantity()));
+}
+
+void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
+ const VTableLayout &layout,
+ unsigned componentIndex,
+ llvm::Constant *rtti,
+ unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage) {
+ auto &component = layout.vtable_components()[componentIndex];
+
+ auto addOffsetConstant =
+ useRelativeLayout() ? AddRelativeLayoutOffset : AddPointerLayoutOffset;
switch (component.getKind()) {
case VTableComponent::CK_VCallOffset:
- return addOffsetConstant(component.getVCallOffset());
+ return addOffsetConstant(CGM, builder, component.getVCallOffset());
case VTableComponent::CK_VBaseOffset:
- return addOffsetConstant(component.getVBaseOffset());
+ return addOffsetConstant(CGM, builder, component.getVBaseOffset());
case VTableComponent::CK_OffsetToTop:
- return addOffsetConstant(component.getOffsetToTop());
+ return addOffsetConstant(CGM, builder, component.getOffsetToTop());
case VTableComponent::CK_RTTI:
- return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
+ if (useRelativeLayout())
+ return addRelativeComponent(builder, rtti, vtableAddressPoint,
+ vtableHasLocalLinkage,
+ /*isCompleteDtor=*/false);
+ else
+ return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
@@ -673,11 +824,21 @@ void CodeGenVTables::addVTableComponent(
? MD->hasAttr<CUDADeviceAttr>()
: (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
if (!CanEmitMethod)
- return builder.addNullPointer(CGM.Int8PtrTy);
+ return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy));
// Method is acceptable, continue processing as usual.
}
auto getSpecialVirtualFn = [&](StringRef name) -> llvm::Constant * {
+ // FIXME(PR43094): When merging comdat groups, lld can select a local
+ // symbol as the signature symbol even though it cannot be accessed
+ // outside that symbol's TU. The relative vtables ABI would make
+ // __cxa_pure_virtual and __cxa_deleted_virtual local symbols, and
+ // depending on link order, the comdat groups could resolve to the one
+ // with the local symbol. As a temporary solution, fill these components
+ // with zero. We shouldn't be calling these in the first place anyway.
+ if (useRelativeLayout())
+ return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+
+    // For NVPTX devices in OpenMP emit special functions as null pointers,
// otherwise linking ends up with unresolved references.
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsDevice &&
@@ -698,19 +859,20 @@ void CodeGenVTables::addVTableComponent(
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
if (!PureVirtualFn)
PureVirtualFn =
- getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
+ getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
fnPtr = PureVirtualFn;
// Deleted virtual member functions.
} else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
if (!DeletedVirtualFn)
DeletedVirtualFn =
- getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
+ getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
fnPtr = DeletedVirtualFn;
// Thunks.
} else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
- layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
+ layout.vtable_thunks()[nextVTableThunkIndex].first ==
+ componentIndex) {
auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;
nextVTableThunkIndex++;
@@ -722,13 +884,19 @@ void CodeGenVTables::addVTableComponent(
fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
}
- fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy);
- builder.add(fnPtr);
- return;
+ if (useRelativeLayout()) {
+ return addRelativeComponent(
+ builder, fnPtr, vtableAddressPoint, vtableHasLocalLinkage,
+ component.getKind() == VTableComponent::CK_CompleteDtorPointer);
+ } else
+ return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy));
}
case VTableComponent::CK_UnusedFunctionPointer:
- return builder.addNullPointer(CGM.Int8PtrTy);
+ if (useRelativeLayout())
+ return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int32Ty));
+ else
+ return builder.addNullPointer(CGM.Int8PtrTy);
}
llvm_unreachable("Unexpected vtable component kind");
@@ -736,34 +904,41 @@ void CodeGenVTables::addVTableComponent(
llvm::Type *CodeGenVTables::getVTableType(const VTableLayout &layout) {
SmallVector<llvm::Type *, 4> tys;
- for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
- tys.push_back(llvm::ArrayType::get(CGM.Int8PtrTy, layout.getVTableSize(i)));
- }
+ llvm::Type *componentType = getVTableComponentType();
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
+ tys.push_back(llvm::ArrayType::get(componentType, layout.getVTableSize(i)));
return llvm::StructType::get(CGM.getLLVMContext(), tys);
}
void CodeGenVTables::createVTableInitializer(ConstantStructBuilder &builder,
const VTableLayout &layout,
- llvm::Constant *rtti) {
+ llvm::Constant *rtti,
+ bool vtableHasLocalLinkage) {
+ llvm::Type *componentType = getVTableComponentType();
+
+ const auto &addressPoints = layout.getAddressPointIndices();
unsigned nextVTableThunkIndex = 0;
- for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
- auto vtableElem = builder.beginArray(CGM.Int8PtrTy);
- size_t thisIndex = layout.getVTableOffset(i);
- size_t nextIndex = thisIndex + layout.getVTableSize(i);
- for (unsigned i = thisIndex; i != nextIndex; ++i) {
- addVTableComponent(vtableElem, layout, i, rtti, nextVTableThunkIndex);
+ for (unsigned vtableIndex = 0, endIndex = layout.getNumVTables();
+ vtableIndex != endIndex; ++vtableIndex) {
+ auto vtableElem = builder.beginArray(componentType);
+
+ size_t vtableStart = layout.getVTableOffset(vtableIndex);
+ size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex);
+ for (size_t componentIndex = vtableStart; componentIndex < vtableEnd;
+ ++componentIndex) {
+ addVTableComponent(vtableElem, layout, componentIndex, rtti,
+ nextVTableThunkIndex, addressPoints[vtableIndex],
+ vtableHasLocalLinkage);
}
vtableElem.finishAndAddTo(builder);
}
}
-llvm::GlobalVariable *
-CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
- const BaseSubobject &Base,
- bool BaseIsVirtual,
- llvm::GlobalVariable::LinkageTypes Linkage,
- VTableAddressPointsMapTy& AddressPoints) {
+llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
+ const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy &AddressPoints) {
if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
DI->completeClassData(Base.getBase());
@@ -780,7 +955,15 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
.mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
Base.getBase(), Out);
- StringRef Name = OutName.str();
+ SmallString<256> Name(OutName);
+
+ bool UsingRelativeLayout = getItaniumVTableContext().isRelativeLayout();
+ bool VTableAliasExists =
+ UsingRelativeLayout && CGM.getModule().getNamedAlias(Name);
+ if (VTableAliasExists) {
+ // We previously made the vtable hidden and changed its name.
+ Name.append(".local");
+ }
llvm::Type *VTType = getVTableType(*VTLayout);
@@ -807,7 +990,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
auto components = builder.beginStruct();
- createVTableInitializer(components, *VTLayout, RTTI);
+ createVTableInitializer(components, *VTLayout, RTTI,
+ VTable->hasLocalLinkage());
components.finishAndSetAsInitializer(VTable);
// Set properties only after the initializer has been set to ensure that the
@@ -817,9 +1001,68 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
CGM.EmitVTableTypeMetadata(RD, VTable, *VTLayout.get());
+ if (UsingRelativeLayout && !VTable->isDSOLocal())
+ GenerateRelativeVTableAlias(VTable, OutName);
+
return VTable;
}
+// If the VTable is not dso_local, then we cannot mark it as needing no
+// relocations, so it cannot be moved into rodata. This frequently occurs for
+// classes that are meant to be public from a DSO (like in libc++). For cases
+// like these, we can make the vtable hidden or private and create a public
+// alias with the same visibility and linkage as the original vtable type.
+void CodeGenVTables::GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
+ llvm::StringRef AliasNameRef) {
+ assert(getItaniumVTableContext().isRelativeLayout() &&
+ "Can only use this if the relative vtable ABI is used");
+ assert(!VTable->isDSOLocal() && "This should be called only if the vtable is "
+ "not guaranteed to be dso_local");
+
+  // If the vtable is available_externally, we don't need to (and shouldn't)
+  // generate an alias for it in the first place, since the vtable won't
+  // actually be emitted in this compilation unit.
+ if (VTable->hasAvailableExternallyLinkage())
+ return;
+
+  // Create a new string in the event the alias is already the name of the
+  // vtable. Using the reference directly could lead to use of an invalidated
+  // value in the module's StringMap once the vtable is renamed below.
+ llvm::SmallString<256> AliasName(AliasNameRef);
+ VTable->setName(AliasName + ".local");
+
+ auto Linkage = VTable->getLinkage();
+ assert(llvm::GlobalAlias::isValidLinkage(Linkage) &&
+ "Invalid vtable alias linkage");
+
+ llvm::GlobalAlias *VTableAlias = CGM.getModule().getNamedAlias(AliasName);
+ if (!VTableAlias) {
+ VTableAlias = llvm::GlobalAlias::create(VTable->getValueType(),
+ VTable->getAddressSpace(), Linkage,
+ AliasName, &CGM.getModule());
+ } else {
+ assert(VTableAlias->getValueType() == VTable->getValueType());
+ assert(VTableAlias->getLinkage() == Linkage);
+ }
+ VTableAlias->setVisibility(VTable->getVisibility());
+ VTableAlias->setUnnamedAddr(VTable->getUnnamedAddr());
+
+ // Both of these imply dso_local for the vtable.
+ if (!VTable->hasComdat()) {
+ // If this is in a comdat, then we shouldn't make the linkage private due to
+ // an issue in lld where private symbols can be used as the key symbol when
+    // choosing the prevalent group. This leads to "relocation refers to a
+ // symbol in a discarded section".
+ VTable->setLinkage(llvm::GlobalValue::PrivateLinkage);
+ } else {
+ // We should at least make this hidden since we don't want to expose it.
+ VTable->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ }
+
+ VTableAlias->setAliasee(VTable);
+}
+
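A hedged illustration of the resulting symbol arrangement for a class whose vtable is not dso_local; the class and the IR shown in the comments are illustrative approximations, not output copied from the compiler:

    struct X {            // any polymorphic class exported from a DSO
      virtual ~X();
      virtual int f();
    };
    // Roughly, GenerateRelativeVTableAlias then emits:
    //   @_ZTV1X.local = private constant { [N x i32] } ...  ; the real table
    //                                                       ; (hidden instead of
    //                                                       ; private in a comdat)
    //   @_ZTV1X = alias ..., @_ZTV1X.local                  ; keeps the original
    //                                                       ; name, linkage and
    //                                                       ; visibility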
static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
const CXXRecordDecl *RD) {
return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
@@ -1012,6 +1255,26 @@ void CodeGenModule::EmitDeferredVTables() {
DeferredVTables.clear();
}
+bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
+ if (!getCodeGenOpts().LTOVisibilityPublicStd)
+ return false;
+
+ const DeclContext *DC = RD;
+ while (1) {
+ auto *D = cast<Decl>(DC);
+ DC = DC->getParent();
+ if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
+ if (auto *ND = dyn_cast<NamespaceDecl>(D))
+ if (const IdentifierInfo *II = ND->getIdentifier())
+ if (II->isStr("std") || II->isStr("stdext"))
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
LinkageInfo LV = RD->getLinkageAndVisibility();
if (!isExternallyVisible(LV.getLinkage()))
@@ -1028,22 +1291,7 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
return false;
}
- if (getCodeGenOpts().LTOVisibilityPublicStd) {
- const DeclContext *DC = RD;
- while (1) {
- auto *D = cast<Decl>(DC);
- DC = DC->getParent();
- if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
- if (auto *ND = dyn_cast<NamespaceDecl>(D))
- if (const IdentifierInfo *II = ND->getIdentifier())
- if (II->isStr("std") || II->isStr("stdext"))
- return false;
- break;
- }
- }
- }
-
- return true;
+ return !HasLTOVisibilityPublicStd(RD);
}
llvm::GlobalObject::VCallVisibility
@@ -1132,9 +1380,10 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
}
}
- if (getCodeGenOpts().VirtualFunctionElimination) {
+ if (getCodeGenOpts().VirtualFunctionElimination ||
+ getCodeGenOpts().WholeProgramVTables) {
llvm::GlobalObject::VCallVisibility TypeVis = GetVCallVisibilityLevel(RD);
if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
- VTable->addVCallVisibilityMetadata(TypeVis);
+ VTable->setVCallVisibilityMetadata(TypeVis);
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
index a47841bfc6c3..bdfc075ee305 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
@@ -62,16 +62,39 @@ class CodeGenVTables {
bool ForVTable);
void addVTableComponent(ConstantArrayBuilder &builder,
- const VTableLayout &layout, unsigned idx,
- llvm::Constant *rtti,
- unsigned &nextVTableThunkIndex);
+ const VTableLayout &layout, unsigned componentIndex,
+ llvm::Constant *rtti, unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage);
+
+ /// Add a 32-bit offset to a component relative to the vtable when using the
+ /// relative vtables ABI. The array builder points to the start of the vtable.
+ void addRelativeComponent(ConstantArrayBuilder &builder,
+ llvm::Constant *component,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage,
+ bool isCompleteDtor) const;
+
+ /// Create a dso_local stub that will be used for a relative reference in the
+ /// relative vtable layout. This stub will just be a tail call to the original
+ /// function and propagate any function attributes from the original. If the
+ /// original function is already dso_local, the original is returned instead
+ /// and a stub is not created.
+ llvm::Function *
+ getOrCreateRelativeStub(llvm::Function *func,
+ llvm::GlobalValue::LinkageTypes stubLinkage,
+ bool isCompleteDtor) const;
+
+ bool useRelativeLayout() const;
+
+ llvm::Type *getVTableComponentType() const;
public:
/// Add vtable components for the given vtable layout to the given
/// global initializer.
void createVTableInitializer(ConstantStructBuilder &builder,
- const VTableLayout &layout,
- llvm::Constant *rtti);
+ const VTableLayout &layout, llvm::Constant *rtti,
+ bool vtableHasLocalLinkage);
CodeGenVTables(CodeGenModule &CGM);
@@ -124,6 +147,13 @@ public:
/// arrays of pointers, with one struct element for each vtable in the vtable
/// group.
llvm::Type *getVTableType(const VTableLayout &layout);
+
+ /// Generate a public facing alias for the vtable and make the vtable either
+ /// hidden or private. The alias will have the original linkage and visibility
+ /// of the vtable. This is used for cases under the relative vtables ABI
+ /// when a vtable may not be dso_local.
+ void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
+ llvm::StringRef AliasNameRef);
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
index 9fd07bdb187d..70e6fed3f4f6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
@@ -170,7 +170,8 @@ class LValue {
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- GlobalReg // This is a register l-value, use getGlobalReg()
+ GlobalReg, // This is a register l-value, use getGlobalReg()
+ MatrixElt // This is a matrix element, use getVector*
} LVType;
llvm::Value *V;
@@ -254,6 +255,7 @@ public:
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
bool isGlobalReg() const { return LVType == GlobalReg; }
+ bool isMatrixElt() const { return LVType == MatrixElt; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -337,8 +339,26 @@ public:
Address getVectorAddress() const {
return Address(getVectorPointer(), getAlignment());
}
- llvm::Value *getVectorPointer() const { assert(isVectorElt()); return V; }
- llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+ llvm::Value *getVectorPointer() const {
+ assert(isVectorElt());
+ return V;
+ }
+ llvm::Value *getVectorIdx() const {
+ assert(isVectorElt());
+ return VectorIdx;
+ }
+
+ Address getMatrixAddress() const {
+ return Address(getMatrixPointer(), getAlignment());
+ }
+ llvm::Value *getMatrixPointer() const {
+ assert(isMatrixElt());
+ return V;
+ }
+ llvm::Value *getMatrixIdx() const {
+ assert(isMatrixElt());
+ return VectorIdx;
+ }
// extended vector elements.
Address getExtVectorAddress() const {
@@ -430,6 +450,18 @@ public:
return R;
}
+ static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx,
+ QualType type, LValueBaseInfo BaseInfo,
+ TBAAAccessInfo TBAAInfo) {
+ LValue R;
+ R.LVType = MatrixElt;
+ R.V = matAddress.getPointer();
+ R.VectorIdx = Idx;
+ R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
+ BaseInfo, TBAAInfo);
+ return R;
+ }
+
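A small source-level sketch of an access that produces the new MatrixElt l-value kind; it assumes Clang's matrix extension (-fenable-matrix), and the names are illustrative:

    typedef float m4x4_t __attribute__((matrix_type(4, 4)));

    float update(m4x4_t m, unsigned r, unsigned c) {
      m[r][c] = 1.0f;  // the subscript yields a MatrixElt l-value, built with
      return m[r][c];  // LValue::MakeMatrixElt and a single flattened index
    }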
RValue asAggregateRValue(CodeGenFunction &CGF) const {
return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
index 6b6a116cf259..d3a16a1d5acc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -16,7 +16,9 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenABITypes.h"
+#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -25,6 +27,11 @@
using namespace clang;
using namespace CodeGen;
+void CodeGen::addDefaultFunctionDefinitionAttributes(CodeGenModule &CGM,
+ llvm::AttrBuilder &attrs) {
+ CGM.addDefaultFunctionDefinitionAttributes(attrs);
+}
+
const CGFunctionInfo &
CodeGen::arrangeObjCMessageSendSignature(CodeGenModule &CGM,
const ObjCMethodDecl *MD,
@@ -63,6 +70,30 @@ CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM,
info, {}, args);
}
+ImplicitCXXConstructorArgs
+CodeGen::getImplicitCXXConstructorArgs(CodeGenModule &CGM,
+ const CXXConstructorDecl *D) {
+ // We have to create a dummy CodeGenFunction here to pass to
+ // getImplicitConstructorArgs(). In some cases (base and delegating
+ // constructor calls), getImplicitConstructorArgs() can reach into the
+ // CodeGenFunction to find parameters of the calling constructor to pass on to
+ // the called constructor, but that can't happen here because we're asking for
+ // the args for a complete, non-delegating constructor call.
+ CodeGenFunction CGF(CGM, /* suppressNewContext= */ true);
+ CGCXXABI::AddedStructorArgs addedArgs =
+ CGM.getCXXABI().getImplicitConstructorArgs(CGF, D, Ctor_Complete,
+ /* ForVirtualBase= */ false,
+ /* Delegating= */ false);
+ ImplicitCXXConstructorArgs implicitArgs;
+ for (const auto &arg : addedArgs.Prefix) {
+ implicitArgs.Prefix.push_back(arg.Value);
+ }
+ for (const auto &arg : addedArgs.Suffix) {
+ implicitArgs.Suffix.push_back(arg.Value);
+ }
+ return implicitArgs;
+}
+
llvm::FunctionType *
CodeGen::convertFreeFunctionType(CodeGenModule &CGM, const FunctionDecl *FD) {
assert(FD != nullptr && "Expected a non-null function declaration!");
@@ -84,3 +115,16 @@ unsigned CodeGen::getLLVMFieldNumber(CodeGenModule &CGM,
const FieldDecl *FD) {
return CGM.getTypes().getCGRecordLayout(RD).getLLVMFieldNo(FD);
}
+
+llvm::Value *CodeGen::getCXXDestructorImplicitParam(
+ CodeGenModule &CGM, llvm::BasicBlock *InsertBlock,
+ llvm::BasicBlock::iterator InsertPoint, const CXXDestructorDecl *D,
+ CXXDtorType Type, bool ForVirtualBase, bool Delegating) {
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CGF.CurCodeDecl = D;
+ CGF.CurFuncDecl = D;
+ CGF.CurFn = InsertBlock->getParent();
+ CGF.Builder.SetInsertPoint(InsertBlock, InsertPoint);
+ return CGM.getCXXABI().getCXXDestructorImplicitParam(
+ CGF, D, Type, ForVirtualBase, Delegating);
+}
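A hedged sketch of how an out-of-tree client might use the two new CodeGenABITypes entry points above. It is a fragment under stated assumptions (an existing CodeGenModule, constructor/destructor declarations, and an insertion point), not a complete translation unit:

    #include "clang/AST/DeclCXX.h"
    #include "clang/CodeGen/CodeGenABITypes.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/BasicBlock.h"

    static void useImplicitArgs(clang::CodeGen::CodeGenModule &CGM,
                                const clang::CXXConstructorDecl *Ctor,
                                const clang::CXXDestructorDecl *Dtor,
                                llvm::BasicBlock *InsertBlock,
                                llvm::BasicBlock::iterator InsertPoint) {
      using namespace clang;
      // ABI-added arguments for a complete, non-delegating constructor call:
      // Prefix goes right after 'this', Suffix after the explicit arguments.
      CodeGen::ImplicitCXXConstructorArgs Implicit =
          CodeGen::getImplicitCXXConstructorArgs(CGM, Ctor);
      llvm::SmallVector<llvm::Value *, 8> Args;
      // Args.push_back(This);  // 'this' would come first (assumed available)
      Args.append(Implicit.Prefix.begin(), Implicit.Prefix.end());
      // ... explicit constructor arguments would go here ...
      Args.append(Implicit.Suffix.begin(), Implicit.Suffix.end());

      // Implicit parameter (e.g. the VTT under the Itanium ABI, when one is
      // needed) for a complete destructor call emitted at the insertion point.
      llvm::Value *DtorImplicitParam = CodeGen::getCXXDestructorImplicitParam(
          CGM, InsertBlock, InsertPoint, Dtor, Dtor_Complete,
          /*ForVirtualBase=*/false, /*Delegating=*/false);
      (void)Args;
      (void)DtorImplicitParam;
    }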
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index 20ebaf3578d6..55925110708e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -32,8 +32,8 @@
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/RemarkStreamer.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Pass.h"
@@ -86,15 +86,15 @@ namespace clang {
const CodeGenOptions CodeGenOpts) {
handleAllErrors(
std::move(E),
- [&](const RemarkSetupFileError &E) {
+ [&](const LLVMRemarkSetupFileError &E) {
Diags.Report(diag::err_cannot_open_file)
<< CodeGenOpts.OptRecordFile << E.message();
},
- [&](const RemarkSetupPatternError &E) {
+ [&](const LLVMRemarkSetupPatternError &E) {
Diags.Report(diag::err_drv_optimization_remark_pattern)
<< E.message() << CodeGenOpts.OptRecordPasses;
},
- [&](const RemarkSetupFormatError &E) {
+ [&](const LLVMRemarkSetupFormatError &E) {
Diags.Report(diag::err_drv_optimization_remark_format)
<< CodeGenOpts.OptRecordFormat;
});
@@ -246,7 +246,7 @@ namespace clang {
for (auto &LM : LinkModules) {
if (LM.PropagateAttrs)
for (Function &F : *LM.Module)
- Gen->CGM().AddDefaultFnAttrs(F);
+ Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
CurLinkModule = LM.Module.get();
@@ -309,7 +309,7 @@ namespace clang {
CodeGenOpts, this));
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupOptimizationRemarks(
+ setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
CodeGenOpts.DiagnosticsHotnessThreshold);
@@ -633,8 +633,9 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
void BackendConsumer::UnsupportedDiagHandler(
const llvm::DiagnosticInfoUnsupported &D) {
- // We only support errors.
- assert(D.getSeverity() == llvm::DS_Error);
+ // We only support warnings or errors.
+ assert(D.getSeverity() == llvm::DS_Error ||
+ D.getSeverity() == llvm::DS_Warning);
StringRef Filename;
unsigned Line, Column;
@@ -652,7 +653,11 @@ void BackendConsumer::UnsupportedDiagHandler(
DiagnosticPrinterRawOStream DP(MsgStream);
D.print(DP);
}
- Diags.Report(Loc, diag::err_fe_backend_unsupported) << MsgStream.str();
+
+ auto DiagType = D.getSeverity() == llvm::DS_Error
+ ? diag::err_fe_backend_unsupported
+ : diag::warn_fe_backend_unsupported;
+ Diags.Report(Loc, DiagType) << MsgStream.str();
if (BadDebugInfo)
// If we were not able to translate the file:line:col information
@@ -994,7 +999,7 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile,
+ CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, std::string(InFile),
std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
BEConsumer = Result.get();
@@ -1153,7 +1158,7 @@ void CodeGenAction::ExecuteAction() {
std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupOptimizationRemarks(
+ setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
CodeGenOpts.DiagnosticsHotnessThreshold);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 7040fe452e49..4a7c84562dee 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -32,6 +32,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
@@ -64,67 +65,36 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
- PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
- CGM.getCodeGenOpts(), CGM.getLangOpts())) {
+ SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
+ DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
+ ShouldEmitLifetimeMarkers(
+ shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
- llvm::FastMathFlags FMF;
- if (CGM.getLangOpts().FastMath)
- FMF.setFast();
- if (CGM.getLangOpts().FiniteMathOnly) {
- FMF.setNoNaNs();
- FMF.setNoInfs();
- }
- if (CGM.getCodeGenOpts().NoNaNsFPMath) {
- FMF.setNoNaNs();
- }
- if (CGM.getCodeGenOpts().NoSignedZeros) {
- FMF.setNoSignedZeros();
- }
- if (CGM.getCodeGenOpts().ReciprocalMath) {
- FMF.setAllowReciprocal();
- }
- if (CGM.getCodeGenOpts().Reassociate) {
- FMF.setAllowReassoc();
- }
- Builder.setFastMathFlags(FMF);
+ SetFastMathFlags(CurFPFeatures);
SetFPModel();
}
CodeGenFunction::~CodeGenFunction() {
assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
- // If there are any unclaimed block infos, go ahead and destroy them
- // now. This can happen if IR-gen gets clever and skips evaluating
- // something.
- if (FirstBlockInfo)
- destroyBlockInfos(FirstBlockInfo);
-
if (getLangOpts().OpenMP && CurFn)
CGM.getOpenMPRuntime().functionFinished(*this);
-}
-
-// Map the LangOption for rounding mode into
-// the corresponding enum in the IR.
-static llvm::fp::RoundingMode ToConstrainedRoundingMD(
- LangOptions::FPRoundingModeKind Kind) {
- switch (Kind) {
- case LangOptions::FPR_ToNearest: return llvm::fp::rmToNearest;
- case LangOptions::FPR_Downward: return llvm::fp::rmDownward;
- case LangOptions::FPR_Upward: return llvm::fp::rmUpward;
- case LangOptions::FPR_TowardZero: return llvm::fp::rmTowardZero;
- case LangOptions::FPR_Dynamic: return llvm::fp::rmDynamic;
- }
- llvm_unreachable("Unsupported FP RoundingMode");
+ // If we have an OpenMPIRBuilder we want to finalize functions (incl.
+ // outlining etc) at some point. Doing it once the function codegen is done
+ // seems to be a reasonable spot. We do it here, as opposed to the deletion
+ // time of the CodeGenModule, because we have to ensure the IR has not yet
+ // been "emitted" to the outside, thus, modifications are still sensible.
+ if (CGM.getLangOpts().OpenMPIRBuilder)
+ CGM.getOpenMPRuntime().getOMPBuilder().finalize();
}
// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
-static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
- LangOptions::FPExceptionModeKind Kind) {
+llvm::fp::ExceptionBehavior
+clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
switch (Kind) {
case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
@@ -135,81 +105,79 @@ static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
}
void CodeGenFunction::SetFPModel() {
- auto fpRoundingMode = ToConstrainedRoundingMD(
- getLangOpts().getFPRoundingMode());
+ llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
auto fpExceptionBehavior = ToConstrainedExceptMD(
getLangOpts().getFPExceptionMode());
- if (fpExceptionBehavior == llvm::fp::ebIgnore &&
- fpRoundingMode == llvm::fp::rmToNearest)
- // Constrained intrinsics are not used.
- ;
- else {
- Builder.setIsFPConstrained(true);
- Builder.setDefaultConstrainedRounding(fpRoundingMode);
- Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
- }
-}
-
-CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo) {
- return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
- /* forPointeeType= */ true);
-}
-
-CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo,
- bool forPointeeType) {
- if (TBAAInfo)
- *TBAAInfo = CGM.getTBAAAccessInfo(T);
-
- // Honor alignment typedef attributes even on incomplete types.
- // We also honor them straight for C++ class types, even as pointees;
- // there's an expressivity gap here.
- if (auto TT = T->getAs<TypedefType>()) {
- if (auto Align = TT->getDecl()->getMaxAlignment()) {
- if (BaseInfo)
- *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
- return getContext().toCharUnitsFromBits(Align);
- }
- }
+ Builder.setDefaultConstrainedRounding(RM);
+ Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
+ Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
+ RM != llvm::RoundingMode::NearestTiesToEven);
+}
- if (BaseInfo)
- *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
+ llvm::FastMathFlags FMF;
+ FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
+ FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
+ FMF.setNoInfs(FPFeatures.getNoHonorInfs());
+ FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
+ FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
+ FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
+ FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
+ Builder.setFastMathFlags(FMF);
+}
- CharUnits Alignment;
- if (T->isIncompleteType()) {
- Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
- } else {
- // For C++ class pointees, we don't know whether we're pointing at a
- // base or a complete object, so we generally need to use the
- // non-virtual alignment.
- const CXXRecordDecl *RD;
- if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
- Alignment = CGM.getClassPointerAlignment(RD);
- } else {
- Alignment = getContext().getTypeAlignInChars(T);
- if (T.getQualifiers().hasUnaligned())
- Alignment = CharUnits::One();
- }
+CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
+ FPOptions FPFeatures)
+ : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
+ CGF.CurFPFeatures = FPFeatures;
- // Cap to the global maximum type alignment unless the alignment
- // was somehow explicit on the type.
- if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
- if (Alignment.getQuantity() > MaxAlign &&
- !getContext().isAlignmentRequired(T))
- Alignment = CharUnits::fromQuantity(MaxAlign);
- }
- }
- return Alignment;
+ if (OldFPFeatures == FPFeatures)
+ return;
+
+ FMFGuard.emplace(CGF.Builder);
+
+ llvm::RoundingMode NewRoundingBehavior =
+ static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
+ CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
+ auto NewExceptionBehavior =
+ ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
+ FPFeatures.getFPExceptionMode()));
+ CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
+
+ CGF.SetFastMathFlags(FPFeatures);
+
+ assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
+ isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
+ isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
+ (NewExceptionBehavior == llvm::fp::ebIgnore &&
+ NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
+ "FPConstrained should be enabled on entire function");
+
+ auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
+ auto OldValue =
+ CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
+ auto NewValue = OldValue & Value;
+ if (OldValue != NewValue)
+ CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
+ };
+ mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
+ mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
+ mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
+ mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
+ FPFeatures.getAllowReciprocal() &&
+ FPFeatures.getAllowApproxFunc() &&
+ FPFeatures.getNoSignedZero());
+}
+
+CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
+ CGF.CurFPFeatures = OldFPFeatures;
}
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
+ CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
TBAAInfo);
}
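The CGFPOptionsRAII scope above is intended as a guard for pragma-driven floating-point state; a rough usage sketch inside a CodeGenFunction member (FPFeatures is assumed to come from the expression or pragma state, which is not shown in this hunk):

    {
      CGFPOptionsRAII FPOptsRAII(*this, FPFeatures);
      // ... emit the FP operations; the builder temporarily carries the scoped
      // fast-math flags and constrained rounding/exception defaults ...
    } // destructor restores CurFPFeatures; FMFGuard pops the fast-math flags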
@@ -220,8 +188,8 @@ LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
- /* forPointeeType= */ true);
+ CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
+ /* forPointeeType= */ true);
return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}
@@ -259,11 +227,13 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Enum:
case Type::ObjCObjectPointer:
case Type::Pipe:
+ case Type::ExtInt:
return TEK_Scalar;
// Complexes.
@@ -486,13 +456,15 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// Scan function arguments for vector width.
for (llvm::Argument &A : CurFn->args())
if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Update vector width based on return type.
if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Add the required-vector-width attribute. This contains the max width from:
// 1. min-vector-width attribute used in the source program.
@@ -799,40 +771,54 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
SanOpts.Mask &= ~SanitizerKind::Null;
- if (D) {
- // Apply xray attributes to the function (as a string, for now)
- if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
- if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
- XRayInstrKind::Function)) {
- if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
- Fn->addFnAttr("function-instrument", "xray-always");
- if (XRayAttr->neverXRayInstrument())
- Fn->addFnAttr("function-instrument", "xray-never");
- if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
- if (ShouldXRayInstrumentFunction())
- Fn->addFnAttr("xray-log-args",
- llvm::utostr(LogArgs->getArgumentCount()));
- }
- } else {
- if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
- Fn->addFnAttr(
- "xray-instruction-threshold",
- llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
+ // Apply xray attributes to the function (as a string, for now)
+ if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
+ if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionEntry) ||
+ CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionExit)) {
+ if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("function-instrument", "xray-always");
+ if (XRayAttr->neverXRayInstrument())
+ Fn->addFnAttr("function-instrument", "xray-never");
+ if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
+ if (ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("xray-log-args",
+ llvm::utostr(LogArgs->getArgumentCount()));
}
+ } else {
+ if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
+ Fn->addFnAttr(
+ "xray-instruction-threshold",
+ llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
+ }
- unsigned Count, Offset;
- if (const auto *Attr = D->getAttr<PatchableFunctionEntryAttr>()) {
- Count = Attr->getCount();
- Offset = Attr->getOffset();
- } else {
- Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
- Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
- }
- if (Count && Offset <= Count) {
- Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
- if (Offset)
- Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
- }
+ if (ShouldXRayInstrumentFunction()) {
+ if (CGM.getCodeGenOpts().XRayIgnoreLoops)
+ Fn->addFnAttr("xray-ignore-loops");
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionExit))
+ Fn->addFnAttr("xray-skip-exit");
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionEntry))
+ Fn->addFnAttr("xray-skip-entry");
+ }
+
+ unsigned Count, Offset;
+ if (const auto *Attr =
+ D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
+ Count = Attr->getCount();
+ Offset = Attr->getOffset();
+ } else {
+ Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
+ Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
+ }
+ if (Count && Offset <= Count) {
+ Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
+ if (Offset)
+ Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
}
// Add no-jump-tables value.
@@ -847,6 +833,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (CGM.getCodeGenOpts().ProfileSampleAccurate)
Fn->addFnAttr("profile-sample-accurate");
+ if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
+ Fn->addFnAttr("use-sample-profile");
+
if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
Fn->addFnAttr("cfi-canonical-jump-table");
@@ -894,14 +883,26 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// If we're in C++ mode and the function name is "main", it is guaranteed
// to be norecurse by the standard (3.6.1.3 "The function main shall not be
// used within a program").
- if (getLangOpts().CPlusPlus)
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (FD->isMain())
- Fn->addFnAttr(llvm::Attribute::NoRecurse);
-
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ //
+ // OpenCL C 2.0 v2.2-11 s6.9.i:
+ // Recursion is not supported.
+ //
+ // SYCL v1.2.1 s3.10:
+ // kernels cannot include RTTI information, exception classes,
+ // recursive code, virtual functions or make use of C++ libraries that
+ // are not compiled for the device.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
+ getLangOpts().SYCLIsDevice ||
+ (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
+ Fn->addFnAttr(llvm::Attribute::NoRecurse);
+ }
+
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ Builder.setIsFPConstrained(FD->usesFPIntrin());
if (FD->usesFPIntrin())
Fn->addFnAttr(llvm::Attribute::StrictFP);
+ }
// If a custom alignment is used, force realigning to this alignment on
// any main function which certainly will need it.
@@ -1026,7 +1027,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
ReturnValuePointer = Address(Addr, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
- ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
+ ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1983,6 +1984,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
@@ -1991,6 +1993,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ExtInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
@@ -2146,21 +2149,47 @@ void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
protection.Inst->eraseFromParent();
}
-void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
QualType Ty, SourceLocation Loc,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue) {
- llvm::Value *TheCheck;
- llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
- CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+ if (Alignment->getType() != IntPtrTy)
+ Alignment =
+ Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
+ if (OffsetValue && OffsetValue->getType() != IntPtrTy)
+ OffsetValue =
+ Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
+ llvm::Value *TheCheck = nullptr;
if (SanOpts.has(SanitizerKind::Alignment)) {
- EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
- OffsetValue, TheCheck, Assumption);
+ llvm::Value *PtrIntValue =
+ Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+ if (OffsetValue) {
+ bool IsOffsetZero = false;
+ if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
+ IsOffsetZero = CI->isZero();
+
+ if (!IsOffsetZero)
+ PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+ }
+
+ llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
+ llvm::Value *Mask =
+ Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
+ llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
+ TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
}
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
+
+ if (!SanOpts.has(SanitizerKind::Alignment))
+ return;
+ emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ OffsetValue, TheCheck, Assumption);
}
-void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
const Expr *E,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
@@ -2170,7 +2199,7 @@ void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
QualType Ty = E->getType();
SourceLocation Loc = E->getExprLoc();
- EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
OffsetValue);
}
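To make the sanitizer path above concrete, this is the value chain it emits for, say, a 16-byte alignment assumption with a non-zero 4-byte offset (purely illustrative):

    // ptrint    = ptrtoint(PtrValue)
    // offsetptr = ptrint - 4             // only when OffsetValue is non-zero
    // maskedptr = offsetptr & 15         // Alignment - 1
    // maskcond  = (maskedptr == 0)       // TheCheck, passed on to
    //                                    // emitAlignmentAssumptionCheck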
@@ -2437,13 +2466,13 @@ void CodeGenFunction::EmitMultiVersionResolver(
// Loc), the diagnostic will additionally point a "Note:" to this location.
// It should be the location where the __attribute__((assume_aligned))
// was written e.g.
-void CodeGenFunction::EmitAlignmentAssumptionCheck(
+void CodeGenFunction::emitAlignmentAssumptionCheck(
llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
SourceLocation SecondaryLoc, llvm::Value *Alignment,
llvm::Value *OffsetValue, llvm::Value *TheCheck,
llvm::Instruction *Assumption) {
assert(Assumption && isa<llvm::CallInst>(Assumption) &&
- cast<llvm::CallInst>(Assumption)->getCalledValue() ==
+ cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
llvm::Intrinsic::getDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
llvm::Intrinsic::assume) &&
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index 3d8bc93eb965..d794f4f0fa81 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
@@ -36,6 +37,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -75,7 +77,11 @@ class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
+class OMPUseDevicePtrClause;
+class OMPUseDeviceAddrClause;
class ReturnsNonNullAttr;
+class SVETypeFlags;
+class OMPExecutableDirective;
namespace analyze_os_log {
class OSLogBufferLayout;
@@ -118,6 +124,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
+ SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
SANITIZER_CHECK(MissingReturn, missing_return, 0) \
SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
@@ -258,6 +265,9 @@ public:
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
+ // For EH/SEH outlined funclets, this field points to parent's CGF
+ CodeGenFunction *ParentCGF = nullptr;
+
typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
LoopInfoStack LoopStack;
CGBuilderTy Builder;
@@ -332,6 +342,10 @@ public:
/// This is invalid if sret is not in use.
Address ReturnValuePointer = Address::invalid();
+  /// If a return statement is being visited, this holds the return statement's
+ /// result expression.
+ const Expr *RetExpr = nullptr;
+
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
if (CurLexicalScope)
@@ -485,6 +499,9 @@ public:
/// region.
bool IsInPreservedAIRegion = false;
+ /// True if the current statement has nomerge attribute.
+ bool InNoMergeAttributedStmt = false;
+
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
@@ -533,9 +550,6 @@ public:
unsigned NextCleanupDestIndex = 1;
- /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
- CGBlockInfo *FirstBlockInfo = nullptr;
-
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock = nullptr;
@@ -560,11 +574,49 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
+ /// Parent loop-based directive for scan directive.
+ const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
+ llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
+ llvm::BasicBlock *OMPAfterScanBlock = nullptr;
+ llvm::BasicBlock *OMPScanExitBlock = nullptr;
+ llvm::BasicBlock *OMPScanDispatch = nullptr;
+ bool OMPFirstScanLoop = false;
+
+ /// Manages parent directive for scan directives.
+ class ParentLoopDirectiveForScanRegion {
+ CodeGenFunction &CGF;
+ const OMPExecutableDirective *ParentLoopDirectiveForScan;
+
+ public:
+ ParentLoopDirectiveForScanRegion(
+ CodeGenFunction &CGF,
+ const OMPExecutableDirective &ParentLoopDirectiveForScan)
+ : CGF(CGF),
+ ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
+ CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
+ }
+ ~ParentLoopDirectiveForScanRegion() {
+ CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
+ }
+ };
+
template <class T>
typename DominatingValue<T>::saved_type saveValueInCond(T value) {
return DominatingValue<T>::save(*this, value);
}
+ class CGFPOptionsRAII {
+ public:
+ CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
+ ~CGFPOptionsRAII();
+
+ private:
+ CodeGenFunction &CGF;
+ FPOptions OldFPFeatures;
+ Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+ };
+ FPOptions CurFPFeatures;
+
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
@@ -1541,6 +1593,169 @@ public:
CallArgList OldCXXInheritedCtorInitExprArgs;
};
+  // Helper class for the OpenMP IR Builder. Allows reusability of code used
+  // for region body and finalization codegen callbacks. This class will also
+  // contain privatization functions used by the privatization callbacks.
+  //
+  // TODO: this is a temporary class for things that are being moved out of
+  // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
+  // utility functions for use with the OMPBuilder. Once the move to use the
+  // OMPBuilder is done, everything here will either become part of
+  // CodeGenFunction directly, or a new helper class that will contain
+  // functions used by both this and the OMPBuilder.
+
+ struct OMPBuilderCBHelpers {
+
+ OMPBuilderCBHelpers() = delete;
+ OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
+ OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;
+
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ /// Cleanup action for allocate support.
+ class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
+
+ private:
+ llvm::CallInst *RTLFnCI;
+
+ public:
+ OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
+ RLFnCI->removeFromParent();
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ CGF.Builder.Insert(RTLFnCI);
+ }
+ };
+
+ /// Returns address of the threadprivate variable for the current
+    /// thread. This also creates any necessary OMP runtime calls.
+    ///
+    /// \param VD VarDecl for the threadprivate variable.
+    /// \param VDAddr Address of the VarDecl.
+ /// \param Loc The location where the barrier directive was encountered
+ static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc);
+
+    /// Gets the OpenMP-specific address of the local variable \p VD.
+ static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD);
+ /// Get the platform-specific name separator.
+    /// \param Parts different parts of the final name that need separation
+    /// \param FirstSeparator First separator used between the initial two
+    /// parts of the name.
+    /// \param Separator separator used between all of the remaining
+    /// consecutive parts of the name.
+ static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
+ StringRef FirstSeparator = ".",
+ StringRef Separator = ".");
+ /// Emit the Finalization for an OMP region
+ /// \param CGF The Codegen function this belongs to
+ /// \param IP Insertion point for generating the finalization code.
+ static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
+ CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
+ assert(IP.getBlock()->end() != IP.getPoint() &&
+ "OpenMP IR Builder should cause terminated block!");
+
+ llvm::BasicBlock *IPBB = IP.getBlock();
+ llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
+ assert(DestBB && "Finalization block should have one successor!");
+
+ // erase and replace with cleanup branch.
+ IPBB->getTerminator()->eraseFromParent();
+ CGF.Builder.SetInsertPoint(IPBB);
+ CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB);
+ CGF.EmitBranchThroughCleanup(Dest);
+ }
+
+ /// Emit the body of an OMP region
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param CodeGenIP Insertion point for generating the body code.
+ /// \param FiniBB The finalization basic block
+ static void EmitOMPRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
+ if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
+ CodeGenIPBBTI->eraseFromParent();
+
+ CGF.Builder.SetInsertPoint(CodeGenIPBB);
+
+ CGF.EmitStmt(RegionBodyStmt);
+
+ if (CGF.Builder.saveIP().isSet())
+ CGF.Builder.CreateBr(&FiniBB);
+ }
+
+ /// RAII for preserving necessary info during Outlined region body codegen.
+ class OutlinedRegionBodyRAII {
+
+ llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+ CodeGenFunction::JumpDest OldReturnBlock;
+ CGBuilderTy::InsertPoint IP;
+ CodeGenFunction &CGF;
+
+ public:
+ OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+ llvm::BasicBlock &RetBB)
+ : CGF(cgf) {
+ assert(AllocaIP.isSet() &&
+ "Must specify Insertion point for allocas of outlined function");
+ OldAllocaIP = CGF.AllocaInsertPt;
+ CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+ IP = CGF.Builder.saveIP();
+
+ OldReturnBlock = CGF.ReturnBlock;
+ CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
+ }
+
+ ~OutlinedRegionBodyRAII() {
+ CGF.AllocaInsertPt = OldAllocaIP;
+ CGF.ReturnBlock = OldReturnBlock;
+ CGF.Builder.restoreIP(IP);
+ }
+ };
+
+ /// RAII for preserving necessary info during inlined region body codegen.
+ class InlinedRegionBodyRAII {
+
+ llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+ CodeGenFunction &CGF;
+
+ public:
+ InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+ llvm::BasicBlock &FiniBB)
+ : CGF(cgf) {
+      // The alloca insertion block should be in the entry block of the
+      // containing function, so this expects either an empty AllocaIP (in
+      // which case the old alloca insertion point is reused) or a new
+      // AllocaIP in the same block as the old one.
+ assert((!AllocaIP.isSet() ||
+ CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
+ "Insertion point should be in the entry block of containing "
+ "function!");
+ OldAllocaIP = CGF.AllocaInsertPt;
+ if (AllocaIP.isSet())
+ CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+
+ // TODO: Remove the call, after making sure the counter is not used by
+ // the EHStack.
+ // Since this is an inlined region, it should not modify the
+ // ReturnBlock, and should reuse the one for the enclosing outlined
+      // region. So, the JumpDest being returned by the function is discarded.
+ (void)CGF.getJumpDestInCurrentScope(&FiniBB);
+ }
+
+ ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
+ };
+ };
+
private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
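A simplified sketch of how the OMPBuilderCBHelpers above can be wired up as body and finalization callbacks from a CodeGenFunction member; the lambda shapes are assumptions modeled on the OpenMPIRBuilder callback types, and BodyStmt stands in for the region's statement:

    using InsertPointTy = OMPBuilderCBHelpers::InsertPointTy;
    auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                         llvm::BasicBlock &ContinuationBB) {
      OMPBuilderCBHelpers::OutlinedRegionBodyRAII Guard(*this, AllocaIP,
                                                        ContinuationBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, BodyStmt, CodeGenIP,
                                             ContinuationBB);
    };
    auto FiniCB = [&](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };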
@@ -1772,7 +1987,6 @@ public:
/// information about the block, including the block invoke function, the
/// captured variables, etc.
llvm::Value *EmitBlockLiteral(const BlockExpr *);
- static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
@@ -2155,13 +2369,6 @@ public:
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
- CharUnits getNaturalTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr,
- bool forPointeeType = false);
- CharUnits getNaturalPointeeTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr);
Address EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo = nullptr,
@@ -2264,8 +2471,9 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
- AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name),
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
+ Address *Alloca = nullptr) {
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -2594,7 +2802,8 @@ public:
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
/// Situations in which we might emit a check for the suitability of a
- /// pointer or glvalue.
+ /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
+ /// compiler-rt.
enum TypeCheckKind {
/// Checking the operand of a load. Must be suitably sized and aligned.
TCK_Load,
@@ -2826,7 +3035,7 @@ public:
PeepholeProtection protectFromPeepholes(RValue rvalue);
void unprotectFromPeepholes(PeepholeProtection protection);
- void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
+ void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
SourceLocation Loc,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
@@ -2834,13 +3043,14 @@ public:
llvm::Value *TheCheck,
llvm::Instruction *Assumption);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
+ void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
SourceLocation Loc, SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue = nullptr);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
- SourceLocation AssumptionLoc, llvm::Value *Alignment,
+ void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
+ SourceLocation AssumptionLoc,
+ llvm::Value *Alignment,
llvm::Value *OffsetValue = nullptr);
//===--------------------------------------------------------------------===//
@@ -2983,7 +3193,8 @@ public:
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
Address GenerateCapturedStmtArgument(const CapturedStmt &S);
- llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
+ llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
+ SourceLocation Loc);
void GenerateOpenMPCapturedVars(const CapturedStmt &S,
SmallVectorImpl<llvm::Value *> &CapturedVars);
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
@@ -3037,7 +3248,10 @@ public:
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
void EmitOMPUseDevicePtrClause(
- const OMPClause &C, OMPPrivateScope &PrivateScope,
+ const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ void EmitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
/// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
@@ -3091,7 +3305,8 @@ public:
/// proper codegen in internal captured statement.
///
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
- OMPPrivateScope &PrivateScope);
+ OMPPrivateScope &PrivateScope,
+ bool ForInscan = false);
/// Emit final update of reduction values to original variables at
/// the end of the directive.
///
@@ -3149,6 +3364,8 @@ public:
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
void EmitOMPFlushDirective(const OMPFlushDirective &S);
+ void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
+ void EmitOMPScanDirective(const OMPScanDirective &S);
void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
void EmitOMPTargetDirective(const OMPTargetDirective &S);
@@ -3250,8 +3467,8 @@ public:
/// \param PostIncGen Generator for post-increment code (required for ordered
/// loop directives).
void EmitOMPInnerLoop(
- const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
- const Expr *IncExpr,
+ const OMPExecutableDirective &S, bool RequiresCleanup,
+ const Expr *LoopCond, const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
@@ -3517,6 +3734,7 @@ public:
LValue EmitUnaryOpLValue(const UnaryOperator *E);
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
+ LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
bool IsLowerBound = true);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
@@ -3722,6 +3940,8 @@ public:
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue);
+ RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue);
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -3757,6 +3977,13 @@ public:
llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch);
+ llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue,
+ llvm::Triple::ArchType Arch);
+ llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
+ QualType RTy);
+ llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
+ QualType RTy);
llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
unsigned LLVMIntrinsic,
@@ -3775,12 +4002,62 @@ public:
SmallVectorImpl<llvm::Value*> &O,
const char *name,
unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
+ const llvm::ElementCount &Count);
llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
+ /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
+ /// access builtin. Only required if it can't be inferred from the base
+ /// pointer operand.
+ llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+
+ SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Type *getEltType(SVETypeFlags TypeFlags);
+ llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
+ llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
+ llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
+ llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
+ llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
+ llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
+ llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags,
+ llvm::ArrayRef<llvm::Value *> Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
+ llvm::ScalableVectorType *VTy);
+ llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID, bool IsZExtReturn);
+ llvm::Value *EmitSVEMaskedStore(const CallExpr *,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops, unsigned IntID);
+ llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch);
llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -3794,6 +4071,9 @@ public:
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E);
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+ llvm::AtomicOrdering &AO,
+ llvm::SyncScope::ID &SSID);
private:
enum class MSVCIntrin;
@@ -3924,6 +4204,10 @@ public:
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
+ /// Build all the stores needed to initialize an aggregate at Dest with the
+ /// value Val.
+ void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void EmitExtendGCLifetime(llvm::Value *object);
@@ -3974,6 +4258,9 @@ public:
/// Call atexit() with function dtorStub.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
+ /// Call unatexit() with function dtorStub.
+ llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub);
+
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
/// possible to prove that an initialization will be done exactly
@@ -3997,12 +4284,12 @@ public:
ArrayRef<llvm::Function *> CXXThreadLocals,
ConstantAddress Guard = ConstantAddress::invalid());
- /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+ /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
/// variables.
- void GenerateCXXGlobalDtorsFunc(
+ void GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsAndObjects);
+ llvm::Constant *>> &DtorsOrStermFinalizers);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -4013,14 +4300,6 @@ public:
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
- void enterFullExpression(const FullExpr *E) {
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
- if (EWC->getNumObjects() == 0)
- return;
- enterNonTrivialFullExpression(E);
- }
- void enterNonTrivialFullExpression(const FullExpr *E);
-
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
RValue EmitAtomicExpr(AtomicExpr *E);
@@ -4175,6 +4454,9 @@ public:
/// SetFPModel - Control floating point behavior via fp-model settings.
void SetFPModel();
+ /// Set the codegen fast-math flags.
+ void SetFastMathFlags(FPOptions FPFeatures);
+
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -4195,7 +4477,7 @@ private:
///
/// \param AI - The first function argument of the expansion.
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
- SmallVectorImpl<llvm::Value *>::iterator &AI);
+ llvm::Function::arg_iterator &AI);
/// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
@@ -4411,10 +4693,15 @@ inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
// Otherwise, it should be an alloca instruction, as set up in save().
auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign());
}
} // end namespace CodeGen
+
+// Map the LangOption for floating point exception behavior into
+// the corresponding enum in the IR.
+llvm::fp::ExceptionBehavior
+ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index a735bdd814ed..4ae8ce7e5ccf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -38,6 +38,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -83,6 +84,7 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
@@ -110,6 +112,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
HalfTy = llvm::Type::getHalfTy(LLVMContext);
+ BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
@@ -219,14 +222,6 @@ void CodeGenModule::createOpenMPRuntime() {
OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
break;
}
-
- // The OpenMP-IR-Builder should eventually replace the above runtime codegens
- // but we are not there yet so they both reside in CGModule for now and the
- // OpenMP-IR-Builder is opt-in only.
- if (LangOpts.OpenMPIRBuilder) {
- OMPBuilder.reset(new llvm::OpenMPIRBuilder(TheModule));
- OMPBuilder->initialize();
- }
}
void CodeGenModule::createCUDARuntime() {
@@ -408,7 +403,7 @@ void CodeGenModule::Release() {
checkAliases();
emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
- EmitCXXGlobalDtorFunc();
+ EmitCXXGlobalCleanUpFunc();
registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
@@ -447,6 +442,10 @@ void CodeGenModule::Release() {
CodeGenFunction(*this).EmitCfiCheckStub();
}
emitAtAvailableLinkGuard();
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ !Context.getTargetInfo().getTriple().isOSEmscripten()) {
+ EmitMainVoidAlias();
+ }
emitLLVMUsed();
if (SanStats)
SanStats->finish();
@@ -483,6 +482,14 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
CodeGenOpts.DwarfVersion);
}
+
+ if (Context.getLangOpts().SemanticInterposition)
+ // Require various optimization to respect semantic interposition.
+ getModule().setSemanticInterposition(1);
+ else if (Context.getLangOpts().ExplicitNoSemanticInterposition)
+ // Allow dso_local on applicable targets.
+ getModule().setSemanticInterposition(0);
+
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
@@ -513,7 +520,7 @@ void CodeGenModule::Release() {
"StrictVTablePointersRequirement",
llvm::MDNode::get(VMContext, Ops));
}
- if (DebugInfo)
+ if (getModuleDebugInfo())
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
// (and warn about it, too).
@@ -549,6 +556,14 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
+ if (CodeGenOpts.WholeProgramVTables) {
+ // Indicate whether VFE was enabled for this module, so that the
+ // vcall_visibility metadata added under whole program vtables is handled
+ // appropriately in the optimizer.
+ getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
+ CodeGenOpts.VirtualFunctionElimination);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
getModule().addModuleFlag(llvm::Module::Override,
"CFI Canonical Jump Tables",
@@ -574,7 +589,8 @@ void CodeGenModule::Release() {
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
- CodeGenOpts.FlushDenorm ? 1 : 0);
+ CodeGenOpts.FP32DenormalMode.Output !=
+ llvm::DenormalMode::IEEE);
}
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
@@ -630,8 +646,8 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
- if (DebugInfo)
- DebugInfo->finalize();
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->finalize();
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
@@ -639,7 +655,9 @@ void CodeGenModule::Release() {
if (!getCodeGenOpts().RecordCommandLine.empty())
EmitCommandLineMetadata();
- EmitTargetMetadata();
+ getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
+
+ EmitBackendOptionsMetadata(getCodeGenOpts());
}
void CodeGenModule::EmitOpenCLMetadata() {
@@ -659,6 +677,19 @@ void CodeGenModule::EmitOpenCLMetadata() {
OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}
+void CodeGenModule::EmitBackendOptionsMetadata(
+ const CodeGenOptions CodeGenOpts) {
+ switch (getTriple().getArch()) {
+ default:
+ break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
+ CodeGenOpts.SmallDataLimit);
+ break;
+ }
+}
+
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
@@ -678,6 +709,19 @@ llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
if (!TBAA)
return TBAAAccessInfo();
+ if (getLangOpts().CUDAIsDevice) {
+ // As CUDA builtin surface/texture types are replaced, skip generating TBAA
+ // access info.
+ if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ }
+ }
return TBAA->getAccessInfo(AccessType);
}
@@ -863,7 +907,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
return true;
- // Otherwise don't assue it is local.
+ // Otherwise don't assume it is local.
return false;
}
@@ -919,9 +963,9 @@ static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
.Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
-static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
- CodeGenOptions::TLSModel M) {
- switch (M) {
+llvm::GlobalVariable::ThreadLocalMode
+CodeGenModule::GetDefaultLLVMTLSModel() const {
+ switch (CodeGenOpts.getDefaultTLSModel()) {
case CodeGenOptions::GeneralDynamicTLSModel:
return llvm::GlobalVariable::GeneralDynamicTLSModel;
case CodeGenOptions::LocalDynamicTLSModel:
@@ -938,7 +982,7 @@ void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalValue::ThreadLocalMode TLM;
- TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
+ TLM = GetDefaultLLVMTLSModel();
// Override the TLS model if it is explicitly specified.
if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
@@ -1004,23 +1048,19 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
MangleContext &MC = CGM.getCXXABI().getMangleContext();
- if (MC.shouldMangleDeclName(ND)) {
- llvm::raw_svector_ostream Out(Buffer);
- if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
- MC.mangleCXXCtor(D, GD.getCtorType(), Out);
- else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
- MC.mangleCXXDtor(D, GD.getDtorType(), Out);
- else
- MC.mangleName(ND, Out);
- } else {
+ if (MC.shouldMangleDeclName(ND))
+ MC.mangleName(GD.getWithDecl(ND), Out);
+ else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
const auto *FD = dyn_cast<FunctionDecl>(ND);
if (FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
- llvm::raw_svector_ostream Out(Buffer);
Out << "__regcall3__" << II->getName();
+ } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
+ Out << "__device_stub__" << II->getName();
} else {
Out << II->getName();
}
@@ -1043,7 +1083,7 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
}
}
- return Out.str();
+ return std::string(Out.str());
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
@@ -1108,11 +1148,25 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const auto *ND = cast<NamedDecl>(GD.getDecl());
std::string MangledName = getMangledNameImpl(*this, GD, ND);
- // Adjust kernel stub mangling as we may need to be able to differentiate
- // them from the kernel itself (e.g., for HIP).
- if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
- if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
- MangledName = getCUDARuntime().getDeviceStubName(MangledName);
+  // Ensure either that we have different ABIs between host and device
+  // compilations (say, host compilation follows the MSVC ABI while device
+  // compilation follows the Itanium C++ ABI) or, if they follow the same ABI,
+  // that kernel names after mangling are the same after name stubbing. The
+  // latter check is very important, as the device kernel name mangled in the
+  // host compilation is used to resolve the device binaries to be executed.
+  // Inconsistent naming results in undefined behavior. Even though we cannot
+  // check that naming directly between host and device compilations, the host-
+  // and device-mangling in the host compilation can help catch certain cases.
+ assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
+ getLangOpts().CUDAIsDevice ||
+ (getContext().getAuxTargetInfo() &&
+ (getContext().getAuxTargetInfo()->getCXXABI() !=
+ getContext().getTargetInfo().getCXXABI())) ||
+ getCUDARuntime().getDeviceSideName(ND) ==
+ getMangledNameImpl(
+ *this,
+ GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
+ ND));
auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
@@ -1364,7 +1418,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string typeName;
if (isPipe)
typeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getAsString(Policy);
else
@@ -1378,7 +1432,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string baseTypeName;
if (isPipe)
baseTypeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getCanonicalType()
.getAsString(Policy);
@@ -1500,6 +1554,9 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
+ if (CodeGenOpts.StackClashProtector)
+ B.addAttribute("probe-stack", "inline-asm");
+
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
@@ -1864,15 +1921,6 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// default, only if it is invoked by a new-expression or delete-expression.
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
-
- // A sane operator new returns a non-aliasing pointer.
- // FIXME: Also add NonNull attribute to the return value
- // for the non-nothrow forms?
- auto Kind = FD->getDeclName().getCXXOverloadedOperator();
- if (getCodeGenOpts().AssumeSaneOperatorNew &&
- (Kind == OO_New || Kind == OO_Array_New))
- F->addAttribute(llvm::AttributeList::ReturnIndex,
- llvm::Attribute::NoAlias);
}
if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
@@ -2389,13 +2437,8 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
return true;
}
-ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
- const CXXUuidofExpr* E) {
- // Sema has verified that IIDSource has a __declspec(uuid()), and that its
- // well-formed.
- StringRef Uuid = E->getUuidStr();
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
+ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
+ StringRef Name = getMangledName(GD);
// The UUID descriptor should be pointer aligned.
CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
@@ -2404,8 +2447,30 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
return ConstantAddress(GV, Alignment);
- llvm::Constant *Init = EmitUuidofInitializer(Uuid);
- assert(Init && "failed to initialize as constant");
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init;
+
+ APValue &V = GD->getAsAPValue();
+ if (!V.isAbsent()) {
+ // If possible, emit the APValue version of the initializer. In particular,
+ // this gets the type of the constant right.
+ Init = Emitter.emitForInitializer(
+ GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
+ } else {
+ // As a fallback, directly construct the constant.
+ // FIXME: This may get padding wrong under esoteric struct layout rules.
+ // MSVC appears to create a complete type 'struct __s_GUID' that it
+ // presumably uses to represent these constants.
+ MSGuidDecl::Parts Parts = GD->getParts();
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantInt::get(Int32Ty, Parts.Part1),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part2),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part3),
+ llvm::ConstantDataArray::getRaw(
+ StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
+ Int8Ty)};
+ Init = llvm::ConstantStruct::getAnon(Fields);
+ }
auto *GV = new llvm::GlobalVariable(
getModule(), Init->getType(),
@@ -2413,7 +2478,16 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- return ConstantAddress(GV, Alignment);
+
+ llvm::Constant *Addr = GV;
+ if (!V.isAbsent()) {
+ Emitter.finalize(GV);
+ } else {
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ Addr = llvm::ConstantExpr::getBitCast(
+ GV, Ty->getPointerTo(GV->getAddressSpace()));
+ }
+ return ConstantAddress(Addr, Alignment);
}
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2475,7 +2549,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
!Global->hasAttr<CUDASharedAttr>() &&
- !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
+ !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
+ !Global->getType()->isCUDADeviceBuiltinTextureType())
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2568,11 +2643,6 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return;
}
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && isa<FunctionDecl>(Global) && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/false))
- return;
-
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
@@ -2755,8 +2825,8 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
- // but a function that calls itself is clearly not equivalent to the real
- // implementation.
+ // but a function that calls itself through asm label/`__builtin_` trickery is
+ // clearly not equivalent to the real implementation.
// This happens in glibc's btowc and in some configure checks.
return !isTriviallyRecursive(F);
}
@@ -2778,50 +2848,6 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
EmitGlobalFunctionDefinition(GD, GV);
}
-void CodeGenModule::emitOpenMPDeviceFunctionRedefinition(
- GlobalDecl OldGD, GlobalDecl NewGD, llvm::GlobalValue *GV) {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- OpenMPRuntime && "Expected OpenMP device mode.");
- const auto *D = cast<FunctionDecl>(OldGD.getDecl());
-
- // Compute the function info and LLVM type.
- const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(OldGD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
-
- // Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty)) {
- GV = cast<llvm::GlobalValue>(GetOrCreateLLVMFunction(
- getMangledName(OldGD), Ty, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/true, /*IsThunk=*/false, llvm::AttributeList(),
- ForDefinition));
- SetFunctionAttributes(OldGD, cast<llvm::Function>(GV),
- /*IsIncompleteFunction=*/false,
- /*IsThunk=*/false);
- }
- // We need to set linkage and visibility on the function before
- // generating code for it because various parts of IR generation
- // want to propagate this information down (e.g. to local static
- // declarations).
- auto *Fn = cast<llvm::Function>(GV);
- setFunctionLinkage(OldGD, Fn);
-
- // FIXME: this is redundant with part of
- // setFunctionDefinitionAttributes
- setGVProperties(Fn, OldGD);
-
- MaybeHandleStaticInExternC(D, Fn);
-
- maybeSetTrivialComdat(*D, *Fn);
-
- CodeGenFunction(*this).GenerateCode(NewGD, Fn, FI);
-
- setNonAliasAttributes(OldGD, Fn);
- SetLLVMFunctionAttributesForDefinition(D, Fn);
-
- if (D->hasAttr<AnnotateAttr>())
- AddGlobalAnnotations(D, Fn);
-}
-
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
@@ -3136,14 +3162,9 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
EmitGlobal(GDDef);
}
}
- // Check if this must be emitted as declare variant and emit reference to
- // the the declare variant function.
- if (LangOpts.OpenMP && OpenMPRuntime)
- (void)OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true);
if (FD->isMultiVersion()) {
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA && TA->isDefaultVersion())
+ if (FD->hasAttr<TargetAttr>())
UpdateMultiVersionNames(GD, FD);
if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD, Ty, FD);
@@ -3183,7 +3204,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
- (Entry->getType()->getElementType() == Ty)) {
+ (Entry->getValueType() == Ty)) {
return Entry;
}
@@ -3232,7 +3253,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
- F, Entry->getType()->getElementType()->getPointerTo());
+ F, Entry->getValueType()->getPointerTo());
addGlobalValReplacement(Entry, BC);
}
@@ -3291,7 +3312,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// Make sure the result is of the requested type.
if (!IsIncompleteFunction) {
- assert(F->getType()->getElementType() == Ty);
+ assert(F->getFunctionType() == Ty);
return F;
}
@@ -3307,6 +3328,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
bool ForVTable,
bool DontDefer,
ForDefinition_t IsForDefinition) {
+ assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
+ "consteval function should never be emitted");
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -3582,7 +3605,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
if (Init) {
auto *InitType = Init->getType();
- if (GV->getType()->getElementType() != InitType) {
+ if (GV->getValueType() != InitType) {
// The type of the initializer does not match the definition.
// This happens when an initializer has a different type from
// the type of the global (because of padding at the end of a
@@ -3625,26 +3648,29 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
llvm::Constant *
-CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
- ForDefinition_t IsForDefinition) {
+CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXMethodDecl>(D)) {
- auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
- cast<CXXMethodDecl>(D));
+
+ if (isa<CXXMethodDecl>(D)) {
+ auto FInfo =
+ &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
auto Ty = getTypes().GetFunctionType(*FInfo);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else if (isa<FunctionDecl>(D)) {
+ }
+
+ if (isa<FunctionDecl>(D)) {
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else
- return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr,
- IsForDefinition);
+ }
+
+ return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
}
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
@@ -3655,7 +3681,7 @@ llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
if (GV) {
// Check if the variable has the right type.
- if (GV->getType()->getElementType() == Ty)
+ if (GV->getValueType() == Ty)
return GV;
// Because C++ name mangling, the only way we can end up with an already
@@ -3929,12 +3955,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!getLangOpts().CUDAIsDevice &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
+ bool IsCUDADeviceShadowVar =
+ getLangOpts().CUDAIsDevice &&
+ (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType());
// HIP pinned shadow of initialized host-side global variables are also
// left undefined.
- bool IsHIPPinnedShadowVar =
- getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
if (getLangOpts().CUDA &&
- (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
+ (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
+ Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ else if (D->hasAttr<LoaderUninitializedAttr>())
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3993,7 +4023,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// "extern int x[];") and then a definition of a different type (e.g.
// "int x[10];"). This also happens when an initializer has a different type
// from the type of the global (this happens with unions).
- if (!GV || GV->getType()->getElementType() != InitType ||
+ if (!GV || GV->getValueType() != InitType ||
GV->getType()->getAddressSpace() !=
getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
@@ -4040,34 +4070,56 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// global variables become internal definitions. These have to
// be internal in order to prevent name conflicts with global
// host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- D->hasAttr<HIPPinnedShadowAttr>()) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
Linkage = llvm::GlobalValue::InternalLinkage;
-
- // Shadow variables and their properties must be registered
- // with CUDA runtime.
- unsigned Flags = 0;
- if (!D->hasDefinition())
- Flags |= CGCUDARuntime::ExternDeviceVar;
- if (D->hasAttr<CUDAConstantAttr>())
- Flags |= CGCUDARuntime::ConstantDeviceVar;
- // Extern global variables will be registered in the TU where they are
- // defined.
+ // Shadow variables and their properties must be registered with the CUDA
+ // runtime. Skip extern global variables, which will be registered in the
+ // TU where they are defined.
if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceVar(D, *GV, Flags);
- } else if (D->hasAttr<CUDASharedAttr>())
+ getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
+ D->hasAttr<CUDAConstantAttr>());
+ } else if (D->hasAttr<CUDASharedAttr>()) {
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
// can't really be used to access their device-side
// counterparts. It's not clear yet whether it's nvcc's bug or
// a feature, but we've got to do the same for compatibility.
Linkage = llvm::GlobalValue::InternalLinkage;
+ } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()) {
+ // Builtin surfaces and textures and their template arguments are
+ // also registered with CUDA runtime.
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ const ClassTemplateSpecializationDecl *TD =
+ cast<ClassTemplateSpecializationDecl>(
+ D->getType()->getAs<RecordType>()->getDecl());
+ const TemplateArgumentList &Args = TD->getTemplateArgs();
+ if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
+ assert(Args.size() == 2 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin surface type.");
+ auto SurfType = Args[1].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceSurf(D, *GV, !D->hasDefinition(),
+ SurfType.getSExtValue());
+ } else {
+ assert(Args.size() == 3 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin texture type.");
+ auto TexType = Args[1].getAsIntegral();
+ auto Normalized = Args[2].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceTex(D, *GV, !D->hasDefinition(),
+ TexType.getSExtValue(),
+ Normalized.getZExtValue());
+ }
+ }
}
}
- if (!IsHIPPinnedShadowVar)
- GV->setInitializer(Init);
- if (emitter) emitter->finalize(GV);
+ GV->setInitializer(Init);
+ if (emitter)
+ emitter->finalize(GV);
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
@@ -4082,17 +4134,24 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
- // On Darwin, if the normal linkage of a C++ thread_local variable is
- // LinkOnce or Weak, we keep the normal linkage to prevent multiple
- // copies within a linkage unit; otherwise, the backing variable has
- // internal linkage and all accesses should just be calls to the
- // Itanium-specified entry point, which has the normal linkage of the
- // variable. This is to preserve the ability to change the implementation
- // behind the scenes.
- if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
+ // function is only defined alongside the variable, not also alongside
+ // callers. Normally, all accesses to a thread_local go through the
+ // thread-wrapper in order to ensure initialization has occurred, so the
+ // underlying variable is never used except via the thread-wrapper and can
+ // be converted to internal linkage.
+ //
+ // However, if the variable has the 'constinit' attribute, it _can_ be
+ // referenced directly, without calling the thread-wrapper, so the linkage
+ // must not be changed.
+ //
+ // Additionally, if the variable isn't plain external linkage, e.g. if it's
+ // weak or linkonce, the de-duplication semantics are important to preserve,
+ // so we don't change the linkage.
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ Linkage == llvm::GlobalValue::ExternalLinkage &&
Context.getTargetInfo().getTriple().isOSDarwin() &&
- !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
- !llvm::GlobalVariable::isWeakLinkage(Linkage))
+ !D->hasAttr<ConstInitAttr>())
Linkage = llvm::GlobalValue::InternalLinkage;
GV->setLinkage(Linkage);
@@ -4435,11 +4494,6 @@ void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::GlobalValue *GV) {
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true))
- return;
-
const auto *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
@@ -4447,7 +4501,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty))
+ if (!GV || (GV->getValueType() != Ty))
GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
/*DontDefer=*/true,
ForDefinition));
@@ -4471,7 +4525,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
maybeSetTrivialComdat(*D, *Fn);
- CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
@@ -4523,8 +4577,9 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
}
// Create the new alias itself, but don't set a name yet.
+ unsigned AS = Aliasee->getType()->getPointerAddressSpace();
auto *GA =
- llvm::GlobalAlias::create(DeclTy, 0, LT, "", Aliasee, &getModule());
+ llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
if (Entry) {
if (GA->getAliasee() == Entry) {
@@ -5272,6 +5327,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (D->isTemplated())
return;
+ // Consteval functions should never be emitted.
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConsteval())
+ return;
+
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
@@ -5307,17 +5367,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::ClassTemplateSpecialization: {
const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
- if (DebugInfo &&
- Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
- Spec->hasDefinition())
- DebugInfo->completeTemplateDefinition(*Spec);
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (Spec->getSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition &&
+ Spec->hasDefinition())
+ DI->completeTemplateDefinition(*Spec);
} LLVM_FALLTHROUGH;
case Decl::CXXRecord:
- if (DebugInfo) {
+ if (CGDebugInfo *DI = getModuleDebugInfo())
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
- DebugInfo->completeUnusedClass(cast<CXXRecordDecl>(*D));
- }
+ DI->completeUnusedClass(cast<CXXRecordDecl>(*D));
// Emit any static data members, they may be definitions.
for (auto *I : cast<CXXRecordDecl>(D)->decls())
if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
@@ -5338,15 +5398,15 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(*D));
- return;
+ break;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
- return;
+ break;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
- return;
+ break;
case Decl::CXXConstructor:
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
@@ -5529,10 +5589,10 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
case Decl::CXXConstructor:
case Decl::CXXDestructor: {
if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
- return;
+ break;
SourceManager &SM = getContext().getSourceManager();
if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
- return;
+ break;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
DeferredEmptyCoverageMappingDecls[D] = true;
@@ -5598,6 +5658,17 @@ void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
}
}
+void CodeGenModule::EmitMainVoidAlias() {
+ // In order to transition away from "__original_main" gracefully, emit an
+ // alias for "main" in the no-argument case so that libc can detect when
+ // new-style no-argument main is in use.
+ if (llvm::Function *F = getModule().getFunction("main")) {
+ if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
+ F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
+ addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
+ }
+}
+
/// Turns the given pointer into a constant.
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
@@ -5712,21 +5783,6 @@ void CodeGenModule::EmitCommandLineMetadata() {
CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
}
-void CodeGenModule::EmitTargetMetadata() {
- // Warning, new MangledDeclNames may be appended within this loop.
- // We rely on MapVector insertions adding new elements to the end
- // of the container.
- // FIXME: Move this loop into the one target that needs it, and only
- // loop over those declarations for which we couldn't emit the target
- // metadata when we emitted the declaration.
- for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
- auto Val = *(MangledDeclNames.begin() + I);
- const Decl *D = Val.first.getDecl()->getMostRecentDecl();
- llvm::GlobalValue *GV = GetGlobalValue(Val.second);
- getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
- }
-}
-
void CodeGenModule::EmitCoverageFile() {
if (getCodeGenOpts().CoverageDataFile.empty() &&
getCodeGenOpts().CoverageNotesFile.empty())
@@ -5749,39 +5805,14 @@ void CodeGenModule::EmitCoverageFile() {
}
}
-llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
- // Sema has checked that all uuid strings are of the form
- // "12345678-1234-1234-1234-1234567890ab".
- assert(Uuid.size() == 36);
- for (unsigned i = 0; i < 36; ++i) {
- if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
- else assert(isHexDigit(Uuid[i]));
- }
-
- // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
- const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
-
- llvm::Constant *Field3[8];
- for (unsigned Idx = 0; Idx < 8; ++Idx)
- Field3[Idx] = llvm::ConstantInt::get(
- Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
-
- llvm::Constant *Fields[4] = {
- llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
- llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
- };
-
- return llvm::ConstantStruct::getAnon(Fields);
-}
-
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
bool ForEH) {
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice)
+ if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
+ (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ getTriple().isNVPTX()))
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
@@ -5925,3 +5956,99 @@ CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
"__translate_sampler_initializer"),
{C});
}
+
+CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
+ QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
+ return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
+ /* forPointeeType= */ true);
+}
+
+CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ bool forPointeeType) {
+ if (TBAAInfo)
+ *TBAAInfo = getTBAAAccessInfo(T);
+
+ // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
+ // that doesn't return the information we need to compute BaseInfo.
+
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ bool AlignForArray = T->isArrayType();
+
+ // Analyze the base element type, so we don't get confused by incomplete
+ // array types.
+ T = getContext().getBaseElementType(T);
+
+ if (T->isIncompleteType()) {
+ // We could try to replicate the logic from
+ // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
+ // type is incomplete, so it's impossible to test. We could try to reuse
+ // getTypeAlignIfKnown, but that doesn't return the information we need
+ // to set BaseInfo. So just ignore the possibility that the alignment is
+ // greater than one.
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+ return CharUnits::One();
+ }
+
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+
+ CharUnits Alignment;
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && !AlignForArray && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ if (T.getQualifiers().hasUnaligned())
+ Alignment = CharUnits::One();
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ return Alignment;
+}
+
+bool CodeGenModule::stopAutoInit() {
+ unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
+ if (StopAfter) {
+ // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
+ // used
+ if (NumAutoVarInit >= StopAfter) {
+ return true;
+ }
+ if (!NumAutoVarInit) {
+ unsigned DiagID = getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
+ "number of times ftrivial-auto-var-init=%1 gets applied.");
+ getDiags().Report(DiagID)
+ << StopAfter
+ << (getContext().getLangOpts().getTrivialAutoVarInit() ==
+ LangOptions::TrivialAutoVarInitKind::Zero
+ ? "zero"
+ : "pattern");
+ }
+ ++NumAutoVarInit;
+ }
+ return false;
+}
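The new stopAutoInit() hook above is intended to be consulted each time a trivial auto-variable initializer is about to be emitted, so that -ftrivial-auto-var-init-stop-after can cap how often the zero/pattern store is generated. A minimal sketch of a call site, assuming the internal CodeGenModule.h header; the function name emitTrivialAutoVarInit is illustrative and not taken from this diff:

    #include "CodeGenModule.h"  // clang/lib/CodeGen internal header (assumed)

    // Sketch: skip the zero/pattern initialization once the configured
    // -ftrivial-auto-var-init-stop-after limit has been reached.
    static void emitTrivialAutoVarInit(clang::CodeGen::CodeGenModule &CGM) {
      // stopAutoInit() counts each use, emits the one-time warning on the
      // first call, and returns true once the limit is exceeded.
      if (CGM.stopAutoInit())
        return;
      // ... emit the zero or pattern store for the local variable here ...
    }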
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index 115e754bb392..a6c4a1f7b278 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -26,6 +26,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SanitizerBlacklist.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
@@ -301,6 +302,7 @@ private:
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
const CodeGenOptions &CodeGenOpts;
+ unsigned NumAutoVarInit = 0;
llvm::Module &TheModule;
DiagnosticsEngine &Diags;
const TargetInfo &Target;
@@ -322,7 +324,6 @@ private:
std::unique_ptr<CGObjCRuntime> ObjCRuntime;
std::unique_ptr<CGOpenCLRuntime> OpenCLRuntime;
std::unique_ptr<CGOpenMPRuntime> OpenMPRuntime;
- std::unique_ptr<llvm::OpenMPIRBuilder> OMPBuilder;
std::unique_ptr<CGCUDARuntime> CUDARuntime;
std::unique_ptr<CGDebugInfo> DebugInfo;
std::unique_ptr<ObjCEntrypoints> ObjCData;
@@ -395,6 +396,10 @@ private:
/// emitted when the translation unit is complete.
CtorList GlobalDtors;
+ /// A unique trailing identifier appended to sinit/sterm function names when
+ /// UseSinitAndSterm of CXXABI is set to true.
+ std::string GlobalUniqueModuleId;
+
/// An ordered map of canonical GlobalDecls to their mangled names.
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
@@ -463,9 +468,11 @@ private:
SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
/// Global destructor functions and arguments that need to run on termination.
+ /// When UseSinitAndSterm is set, it instead contains sterm finalizer
+ /// functions, which also run on unloading a shared library.
std::vector<
std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant *>>
- CXXGlobalDtors;
+ CXXGlobalDtorsOrStermFinalizers;
/// The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
@@ -589,9 +596,6 @@ public:
return *OpenMPRuntime;
}
- /// Return a pointer to the configured OpenMPIRBuilder, if any.
- llvm::OpenMPIRBuilder *getOpenMPIRBuilder() { return OMPBuilder.get(); }
-
/// Return a reference to the configured CUDA runtime.
CGCUDARuntime &getCUDARuntime() {
assert(CUDARuntime != nullptr);
@@ -788,6 +792,9 @@ public:
/// variable declaration D.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const;
+ /// Get LLVM TLS mode from CodeGenOptions.
+ llvm::GlobalVariable::ThreadLocalMode GetDefaultLLVMTLSModel() const;
+
static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
switch (V) {
case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
@@ -810,11 +817,10 @@ public:
llvm::GlobalValue::LinkageTypes Linkage,
unsigned Alignment);
- llvm::Function *
- CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
- const CGFunctionInfo &FI,
- SourceLocation Loc = SourceLocation(),
- bool TLS = false);
+ llvm::Function *CreateGlobalInitOrCleanUpFunction(
+ llvm::FunctionType *ty, const Twine &name, const CGFunctionInfo &FI,
+ SourceLocation Loc = SourceLocation(), bool TLS = false,
+ bool IsExternalLinkage = false);
/// Return the AST address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
@@ -855,8 +861,8 @@ public:
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
- /// Get the address of a uuid descriptor .
- ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+ /// Get the address of a GUID.
+ ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD);
/// Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
@@ -868,6 +874,17 @@ public:
/// Returns the assumed alignment of an opaque pointer to the given class.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD);
+ /// Returns the minimum object size for an object of the given class type
+ /// (or a class derived from it).
+ CharUnits getMinimumClassObjectSize(const CXXRecordDecl *CD);
+
+ /// Returns the minimum object size for an object of the given type.
+ CharUnits getMinimumObjectSize(QualType Ty) {
+ if (CXXRecordDecl *RD = Ty->getAsCXXRecordDecl())
+ return getMinimumClassObjectSize(RD);
+ return getContext().getTypeSizeInChars(Ty);
+ }
+
/// Returns the assumed alignment of a virtual base of a class.
CharUnits getVBaseAlignment(CharUnits DerivedAlign,
const CXXRecordDecl *Derived,
@@ -1012,6 +1029,9 @@ public:
/// for the uninstrumented functions.
void EmitDeferredUnusedCoverageMappings();
+ /// Emit an alias for "main" if it has no arguments (needed for wasm).
+ void EmitMainVoidAlias();
+
/// Tell the consumer that this variable has been instantiated.
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
@@ -1029,8 +1049,14 @@ public:
/// Add a destructor and object to add to the C++ global destructor function.
void AddCXXDtorEntry(llvm::FunctionCallee DtorFn, llvm::Constant *Object) {
- CXXGlobalDtors.emplace_back(DtorFn.getFunctionType(), DtorFn.getCallee(),
- Object);
+ CXXGlobalDtorsOrStermFinalizers.emplace_back(DtorFn.getFunctionType(),
+ DtorFn.getCallee(), Object);
+ }
+
+ /// Add an sterm finalizer to the C++ global cleanup function.
+ void AddCXXStermFinalizerEntry(llvm::FunctionCallee DtorFn) {
+ CXXGlobalDtorsOrStermFinalizers.emplace_back(DtorFn.getFunctionType(),
+ DtorFn.getCallee(), nullptr);
}
/// Create or return a runtime function declaration with the specified type
@@ -1155,7 +1181,11 @@ public:
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
- void AddDefaultFnAttrs(llvm::Function &F);
+ void addDefaultFunctionDefinitionAttributes(llvm::Function &F);
+
+ /// Like the overload taking a `Function &`, but intended specifically
+ /// for frontends that want to build on Clang's target-configuration logic.
+ void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs);
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
@@ -1282,16 +1312,16 @@ public:
/// \param D Requires declaration
void EmitOMPRequiresDecl(const OMPRequiresDecl *D);
- /// Emits the definition of \p OldGD function with body from \p NewGD.
- /// Required for proper handling of declare variant directive on the GPU.
- void emitOpenMPDeviceFunctionRedefinition(GlobalDecl OldGD, GlobalDecl NewGD,
- llvm::GlobalValue *GV);
-
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
bool HasHiddenLTOVisibility(const CXXRecordDecl *RD);
+ /// Returns whether the given record has public std LTO visibility
+ /// and therefore may not participate in (single-module) CFI and whole-program
+ /// vtable optimization.
+ bool HasLTOVisibilityPublicStd(const CXXRecordDecl *RD);
+
/// Returns the vcall visibility of the given type. This is the scope in which
/// a virtual function call could be made which ends up being dispatched to a
/// member function of this class. This scope can be wider than the visibility
@@ -1367,6 +1397,15 @@ public:
/// \param QT is the clang QualType of the null pointer.
llvm::Constant *getNullPointer(llvm::PointerType *T, QualType QT);
+ CharUnits getNaturalTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ bool forPointeeType = false);
+ CharUnits getNaturalPointeeTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr);
+ bool stopAutoInit();
+
private:
llvm::Constant *GetOrCreateLLVMFunction(
StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable,
@@ -1417,8 +1456,8 @@ private:
/// Emit the function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
- /// Emit the function that destroys C++ globals.
- void EmitCXXGlobalDtorFunc();
+ /// Emit the function that performs cleanup associated with C++ globals.
+ void EmitCXXGlobalCleanUpFunc();
/// Emit the function that initializes the specified global (if PerformInit is
/// true) and registers its destructor.
@@ -1489,8 +1528,9 @@ private:
/// Emit the Clang commandline as llvm.commandline metadata.
void EmitCommandLineMetadata();
- /// Emits target specific Metadata for global declarations.
- void EmitTargetMetadata();
+ /// Emit the module flag metadata used to pass options controlling the
+ /// backend to LLVM.
+ void EmitBackendOptionsMetadata(const CodeGenOptions CodeGenOpts);
/// Emits OpenCL specific Metadata e.g. OpenCL version.
void EmitOpenCLMetadata();
@@ -1499,9 +1539,6 @@ private:
/// .gcda files in a way that persists in .bc files.
void EmitCoverageFile();
- /// Emits the initializer for a uuidof string.
- llvm::Constant *EmitUuidofInitializer(StringRef uuidstr);
-
/// Determine whether the definition must be emitted; if this returns \c
/// false, the definition can be emitted lazily if it's used.
bool MustBeEmitted(const ValueDecl *D);
@@ -1516,11 +1553,12 @@ private:
/// function.
void SimplifyPersonality();
- /// Helper function for ConstructAttributeList and AddDefaultFnAttrs.
- /// Constructs an AttrList for a function with the given properties.
- void ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
- bool AttrOnCallSite,
- llvm::AttrBuilder &FuncAttrs);
+ /// Helper function for ConstructAttributeList and
+ /// addDefaultFunctionDefinitionAttributes. Builds a set of function
+ /// attributes to add to a function with the given properties.
+ void getDefaultFunctionAttributes(StringRef Name, bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs);
llvm::Metadata *CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
StringRef Suffix);
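The renamed CXXGlobalDtorsOrStermFinalizers container above holds both kinds of cleanup entries: AddCXXDtorEntry stores the destructor together with its object operand, while AddCXXStermFinalizerEntry stores an sterm finalizer with a null object. A hedged sketch of registering one of each (the wrapper function is illustrative; the two member functions are the ones declared above):

    #include "CodeGenModule.h"  // clang/lib/CodeGen internal header (assumed)

    void registerCleanups(clang::CodeGen::CodeGenModule &CGM,
                          llvm::FunctionCallee DtorFn, llvm::Constant *Obj,
                          llvm::FunctionCallee StermFinalizer) {
      // Ordinary C++ global destructor: function plus the object to destroy.
      CGM.AddCXXDtorEntry(DtorFn, Obj);
      // Sterm finalizer (UseSinitAndSterm targets): no object operand is stored.
      CGM.AddCXXStermFinalizerEntry(StermFinalizer);
    }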
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index bad796bf92dc..e810f608ab78 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -52,9 +52,10 @@ void CodeGenPGO::setFuncName(llvm::Function *Fn) {
enum PGOHashVersion : unsigned {
PGO_HASH_V1,
PGO_HASH_V2,
+ PGO_HASH_V3,
// Keep this set to the latest hash version.
- PGO_HASH_LATEST = PGO_HASH_V2
+ PGO_HASH_LATEST = PGO_HASH_V3
};
namespace {
@@ -122,7 +123,7 @@ public:
BinaryOperatorGE,
BinaryOperatorEQ,
BinaryOperatorNE,
- // The preceding values are available with PGO_HASH_V2.
+ // The preceding values are available since PGO_HASH_V2.
// Keep this last. It's for the static assert that follows.
LastHashType
@@ -144,7 +145,9 @@ static PGOHashVersion getPGOHashVersion(llvm::IndexedInstrProfReader *PGOReader,
CodeGenModule &CGM) {
if (PGOReader->getVersion() <= 4)
return PGO_HASH_V1;
- return PGO_HASH_V2;
+ if (PGOReader->getVersion() <= 5)
+ return PGO_HASH_V2;
+ return PGO_HASH_V3;
}
/// A RecursiveASTVisitor that fills a map of statements to PGO counters.
@@ -288,7 +291,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return PGOHash::BinaryOperatorLAnd;
if (BO->getOpcode() == BO_LOr)
return PGOHash::BinaryOperatorLOr;
- if (HashVersion == PGO_HASH_V2) {
+ if (HashVersion >= PGO_HASH_V2) {
switch (BO->getOpcode()) {
default:
break;
@@ -310,7 +313,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
}
}
- if (HashVersion == PGO_HASH_V2) {
+ if (HashVersion >= PGO_HASH_V2) {
switch (S->getStmtClass()) {
default:
break;
@@ -747,13 +750,21 @@ uint64_t PGOHash::finalize() {
return Working;
// Check for remaining work in Working.
- if (Working)
- MD5.update(Working);
+ if (Working) {
+ // Keep the buggy behavior from v1 and v2 for backward-compatibility. This
+ // is buggy because it converts a uint64_t into an array of uint8_t.
+ if (HashVersion < PGO_HASH_V3) {
+ MD5.update({(uint8_t)Working});
+ } else {
+ using namespace llvm::support;
+ uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
+ MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
+ }
+ }
// Finalize the MD5 and return the hash.
llvm::MD5::MD5Result Result;
MD5.final(Result);
- using namespace llvm::support;
return Result.low();
}
@@ -1051,8 +1062,7 @@ llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
if (!PGO.haveRegionCounts())
return nullptr;
Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
- assert(CondCount.hasValue() && "missing expected loop condition count");
- if (*CondCount == 0)
+ if (!CondCount || *CondCount == 0)
return nullptr;
return createProfileWeights(LoopCount,
std::max(*CondCount, LoopCount) - LoopCount);
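The PGO_HASH_V3 change above only alters how a leftover partial word is folded into the MD5 at finalization time: v1/v2 hashed just the low byte of the 64-bit working value, while v3 hashes all eight bytes in a fixed little-endian order. A standalone sketch of the two behaviors (the helper name is illustrative; the MD5 and endian calls mirror the hunk above):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Endian.h"
    #include "llvm/Support/MD5.h"
    #include <cstdint>

    // Fold a leftover 64-bit working value into an MD5, the way
    // PGOHash::finalize() does for the old and new hash versions.
    uint64_t hashRemainder(uint64_t Working, bool IsV3) {
      llvm::MD5 MD5;
      if (Working) {
        if (!IsV3) {
          // v1/v2 behavior, kept for backward compatibility: only the low
          // byte of Working ends up in the hash.
          MD5.update({(uint8_t)Working});
        } else {
          // v3 behavior: hash all eight bytes with a fixed byte order.
          using namespace llvm::support;
          uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
          MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
        }
      }
      llvm::MD5::MD5Result Result;
      MD5.final(Result);
      return Result.low();
    }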
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
index a3778b549910..dda8c66b6db2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
@@ -40,8 +40,8 @@ private:
uint64_t CurrentRegionCount;
public:
- CodeGenPGO(CodeGenModule &CGM)
- : CGM(CGM), FuncNameVar(nullptr), NumValueSites({{0}}),
+ CodeGenPGO(CodeGenModule &CGModule)
+ : CGM(CGModule), FuncNameVar(nullptr), NumValueSites({{0}}),
NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
index 7d730cb1ed15..f4ebe6885675 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -141,6 +141,34 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
case BuiltinType::UInt128:
return getTypeInfo(Context.Int128Ty);
+ case BuiltinType::UShortFract:
+ return getTypeInfo(Context.ShortFractTy);
+ case BuiltinType::UFract:
+ return getTypeInfo(Context.FractTy);
+ case BuiltinType::ULongFract:
+ return getTypeInfo(Context.LongFractTy);
+
+ case BuiltinType::SatUShortFract:
+ return getTypeInfo(Context.SatShortFractTy);
+ case BuiltinType::SatUFract:
+ return getTypeInfo(Context.SatFractTy);
+ case BuiltinType::SatULongFract:
+ return getTypeInfo(Context.SatLongFractTy);
+
+ case BuiltinType::UShortAccum:
+ return getTypeInfo(Context.ShortAccumTy);
+ case BuiltinType::UAccum:
+ return getTypeInfo(Context.AccumTy);
+ case BuiltinType::ULongAccum:
+ return getTypeInfo(Context.LongAccumTy);
+
+ case BuiltinType::SatUShortAccum:
+ return getTypeInfo(Context.SatShortAccumTy);
+ case BuiltinType::SatUAccum:
+ return getTypeInfo(Context.SatAccumTy);
+ case BuiltinType::SatULongAccum:
+ return getTypeInfo(Context.SatLongAccumTy);
+
// Treat all other builtin types as distinct types. This includes
// treating wchar_t, char16_t, and char32_t as distinct from their
// "underlying types".
@@ -181,6 +209,15 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
return createScalarTypeNode(OutName, getChar(), Size);
}
+ if (const auto *EIT = dyn_cast<ExtIntType>(Ty)) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ // Don't specify signed/unsigned since integer types can alias despite sign
+ // differences.
+ Out << "_ExtInt(" << EIT->getNumBits() << ')';
+ return createScalarTypeNode(OutName, getChar(), Size);
+ }
+
// For now, handle any other kind of type conservatively.
return getChar();
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
index ed4b773afd13..20a3263c0b1a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -35,8 +35,8 @@ struct CodeGenTypeCache {
/// i8, i16, i32, and i64
llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
- /// float, double
- llvm::Type *HalfTy, *FloatTy, *DoubleTy;
+ /// half, bfloat, float, double
+ llvm::Type *HalfTy, *BFloatTy, *FloatTy, *DoubleTy;
/// int
llvm::IntegerType *IntTy;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index a458811d7a30..d431c0263666 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -36,8 +36,6 @@ CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
}
CodeGenTypes::~CodeGenTypes() {
- llvm::DeleteContainerSeconds(CGRecordLayouts);
-
for (llvm::FoldingSet<CGFunctionInfo>::iterator
I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
delete &*I++;
@@ -83,19 +81,26 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
+ if (T->isConstantMatrixType()) {
+ const Type *Ty = Context.getCanonicalType(T).getTypePtr();
+ const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
+ return llvm::ArrayType::get(ConvertType(MT->getElementType()),
+ MT->getNumRows() * MT->getNumColumns());
+ }
+
llvm::Type *R = ConvertType(T);
- // If this is a non-bool type, don't map it.
- if (!R->isIntegerTy(1))
- return R;
+ // If this is a bool type, or an ExtIntType in a bitfield representation,
+ // map this integer to the target-specified size.
+ if ((ForBitField && T->isExtIntType()) || R->isIntegerTy(1))
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
- // Otherwise, return an integer of the target-specified size.
- return llvm::IntegerType::get(getLLVMContext(),
- (unsigned)Context.getTypeSize(T));
+ // Else, don't map it.
+ return R;
}
-
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
@@ -295,6 +300,8 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
else
return llvm::Type::getInt16Ty(VMContext);
}
+ if (&format == &llvm::APFloat::BFloat())
+ return llvm::Type::getBFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEsingle())
return llvm::Type::getFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEdouble())
@@ -383,6 +390,20 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
const Type *Ty = T.getTypePtr();
+ // For the device-side compilation, CUDA device builtin surface/texture types
+ // may be represented by different types.
+ if (Context.getLangOpts().CUDAIsDevice) {
+ if (T->isCUDADeviceBuiltinSurfaceType()) {
+ if (auto *Ty = CGM.getTargetCodeGenInfo()
+ .getCUDADeviceBuiltinSurfaceDeviceType())
+ return Ty;
+ } else if (T->isCUDADeviceBuiltinTextureType()) {
+ if (auto *Ty = CGM.getTargetCodeGenInfo()
+ .getCUDADeviceBuiltinTextureDeviceType())
+ return Ty;
+ }
+ }
+
// RecordTypes are cached and processed specially.
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
return ConvertRecordDeclType(RT->getDecl());
@@ -479,6 +500,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
Context.getLangOpts().NativeHalfType ||
!Context.getTargetInfo().useFP16ConversionIntrinsics());
break;
+ case BuiltinType::BFloat16:
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
@@ -511,23 +533,99 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::OCLReserveID:
ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
break;
-
- // TODO: real CodeGen support for SVE types requires more infrastructure
- // to be added first. Report an error until then.
-#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
-#include "clang/Basic/AArch64SVEACLETypes.def"
- {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "cannot yet generate code for SVE type '%0'");
- auto *BT = cast<BuiltinType>(Ty);
- auto Name = BT->getName(CGM.getContext().getPrintingPolicy());
- CGM.getDiags().Report(DiagID) << Name;
- // Return something safe.
- ResultType = llvm::IntegerType::get(getLLVMContext(), 32);
- break;
- }
-
+#define GET_SVE_INT_VEC(BITS, ELTS) \
+ llvm::ScalableVectorType::get( \
+ llvm::IntegerType::get(getLLVMContext(), BITS), ELTS);
+ case BuiltinType::SveInt8:
+ case BuiltinType::SveUint8:
+ return GET_SVE_INT_VEC(8, 16);
+ case BuiltinType::SveInt8x2:
+ case BuiltinType::SveUint8x2:
+ return GET_SVE_INT_VEC(8, 32);
+ case BuiltinType::SveInt8x3:
+ case BuiltinType::SveUint8x3:
+ return GET_SVE_INT_VEC(8, 48);
+ case BuiltinType::SveInt8x4:
+ case BuiltinType::SveUint8x4:
+ return GET_SVE_INT_VEC(8, 64);
+ case BuiltinType::SveInt16:
+ case BuiltinType::SveUint16:
+ return GET_SVE_INT_VEC(16, 8);
+ case BuiltinType::SveInt16x2:
+ case BuiltinType::SveUint16x2:
+ return GET_SVE_INT_VEC(16, 16);
+ case BuiltinType::SveInt16x3:
+ case BuiltinType::SveUint16x3:
+ return GET_SVE_INT_VEC(16, 24);
+ case BuiltinType::SveInt16x4:
+ case BuiltinType::SveUint16x4:
+ return GET_SVE_INT_VEC(16, 32);
+ case BuiltinType::SveInt32:
+ case BuiltinType::SveUint32:
+ return GET_SVE_INT_VEC(32, 4);
+ case BuiltinType::SveInt32x2:
+ case BuiltinType::SveUint32x2:
+ return GET_SVE_INT_VEC(32, 8);
+ case BuiltinType::SveInt32x3:
+ case BuiltinType::SveUint32x3:
+ return GET_SVE_INT_VEC(32, 12);
+ case BuiltinType::SveInt32x4:
+ case BuiltinType::SveUint32x4:
+ return GET_SVE_INT_VEC(32, 16);
+ case BuiltinType::SveInt64:
+ case BuiltinType::SveUint64:
+ return GET_SVE_INT_VEC(64, 2);
+ case BuiltinType::SveInt64x2:
+ case BuiltinType::SveUint64x2:
+ return GET_SVE_INT_VEC(64, 4);
+ case BuiltinType::SveInt64x3:
+ case BuiltinType::SveUint64x3:
+ return GET_SVE_INT_VEC(64, 6);
+ case BuiltinType::SveInt64x4:
+ case BuiltinType::SveUint64x4:
+ return GET_SVE_INT_VEC(64, 8);
+ case BuiltinType::SveBool:
+ return GET_SVE_INT_VEC(1, 16);
+#undef GET_SVE_INT_VEC
+#define GET_SVE_FP_VEC(TY, ISFP16, ELTS) \
+ llvm::ScalableVectorType::get( \
+ getTypeForFormat(getLLVMContext(), \
+ Context.getFloatTypeSemantics(Context.TY), \
+ /* UseNativeHalf = */ ISFP16), \
+ ELTS);
+ case BuiltinType::SveFloat16:
+ return GET_SVE_FP_VEC(HalfTy, true, 8);
+ case BuiltinType::SveFloat16x2:
+ return GET_SVE_FP_VEC(HalfTy, true, 16);
+ case BuiltinType::SveFloat16x3:
+ return GET_SVE_FP_VEC(HalfTy, true, 24);
+ case BuiltinType::SveFloat16x4:
+ return GET_SVE_FP_VEC(HalfTy, true, 32);
+ case BuiltinType::SveFloat32:
+ return GET_SVE_FP_VEC(FloatTy, false, 4);
+ case BuiltinType::SveFloat32x2:
+ return GET_SVE_FP_VEC(FloatTy, false, 8);
+ case BuiltinType::SveFloat32x3:
+ return GET_SVE_FP_VEC(FloatTy, false, 12);
+ case BuiltinType::SveFloat32x4:
+ return GET_SVE_FP_VEC(FloatTy, false, 16);
+ case BuiltinType::SveFloat64:
+ return GET_SVE_FP_VEC(DoubleTy, false, 2);
+ case BuiltinType::SveFloat64x2:
+ return GET_SVE_FP_VEC(DoubleTy, false, 4);
+ case BuiltinType::SveFloat64x3:
+ return GET_SVE_FP_VEC(DoubleTy, false, 6);
+ case BuiltinType::SveFloat64x4:
+ return GET_SVE_FP_VEC(DoubleTy, false, 8);
+ case BuiltinType::SveBFloat16:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 8);
+ case BuiltinType::SveBFloat16x2:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 16);
+ case BuiltinType::SveBFloat16x3:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 24);
+ case BuiltinType::SveBFloat16x4:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 32);
+#undef GET_SVE_FP_VEC
case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -560,7 +658,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = ConvertTypeForMem(ETy);
if (PointeeType->isVoidTy())
PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
- unsigned AS = Context.getTargetAddressSpace(ETy);
+
+ unsigned AS = PointeeType->isFunctionTy()
+ ? getDataLayout().getProgramAddressSpace()
+ : Context.getTargetAddressSpace(ETy);
+
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
@@ -605,8 +707,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::ExtVector:
case Type::Vector: {
const VectorType *VT = cast<VectorType>(Ty);
- ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
- VT->getNumElements());
+ ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
+ }
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
+ ResultType =
+ llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
+ MT->getNumRows() * MT->getNumColumns());
break;
}
case Type::FunctionNoProto:
@@ -692,6 +801,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
break;
}
+ case Type::ExtInt: {
+ const auto &EIT = cast<ExtIntType>(Ty);
+ ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
+ break;
+ }
}
assert(ResultType && "Didn't convert a type?");
@@ -749,8 +863,8 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
}
// Layout fields.
- CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
- CGRecordLayouts[Key] = Layout;
+ std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
+ CGRecordLayouts[Key] = std::move(Layout);
// We're done laying out this struct.
bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
@@ -776,17 +890,18 @@ const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
const Type *Key = Context.getTagDeclType(RD).getTypePtr();
- const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
- if (!Layout) {
- // Compute the type information.
- ConvertRecordDeclType(RD);
+ auto I = CGRecordLayouts.find(Key);
+ if (I != CGRecordLayouts.end())
+ return *I->second;
+ // Compute the type information.
+ ConvertRecordDeclType(RD);
- // Now try again.
- Layout = CGRecordLayouts.lookup(Key);
- }
+ // Now try again.
+ I = CGRecordLayouts.find(Key);
- assert(Layout && "Unable to find record layout information for type");
- return *Layout;
+ assert(I != CGRecordLayouts.end() &&
+ "Unable to find record layout information for type");
+ return *I->second;
}
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index 03102329507e..f8f7542e4c83 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -75,7 +75,7 @@ class CodeGenTypes {
llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
/// Maps clang struct type with corresponding record layout info.
- llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+ llvm::DenseMap<const Type*, std::unique_ptr<CGRecordLayout>> CGRecordLayouts;
/// Contains the LLVM IR type for any converted RecordDecl.
llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
@@ -134,7 +134,7 @@ public:
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertTypeForMem(QualType T, bool ForBitField = false);
/// GetFunctionType - Get the LLVM function type for \arg Info.
llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
@@ -272,8 +272,8 @@ public:
RequiredArgs args);
/// Compute a new LLVM record layout object for the given record.
- CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
- llvm::StructType *Ty);
+ std::unique_ptr<CGRecordLayout> ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
/// addRecordTypeName - Compute a name from the given record decl with an
/// optional suffix and name the given LLVM type using it.
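ConvertTypeForMem gains an optional ForBitField flag in this change: _Bool still maps to a target-sized integer, a constant matrix is lowered to a flat array, and an _ExtInt is only widened to its storage size when it is being laid out as a bit-field. A small usage sketch (the wrapper is illustrative; ConvertTypeForMem and its parameter come from the declaration above):

    #include "CodeGenTypes.h"  // clang/lib/CodeGen internal header (assumed)

    llvm::Type *memTypeFor(clang::CodeGen::CodeGenTypes &CGT,
                           clang::QualType T, bool InBitField) {
      // For a plain _ExtInt(17) this yields i17; when InBitField is true the
      // type is widened to the target-specified storage size instead.
      return CGT.ConvertTypeForMem(T, /*ForBitField=*/InBitField);
    }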
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
index 121acbac4fa9..188b82e56f53 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
@@ -110,6 +110,8 @@ public:
llvm::Constant *tryEmitAbstract(const APValue &value, QualType T);
llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T);
+ llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE);
+
llvm::Constant *emitNullForMemory(QualType T) {
return emitNullForMemory(CGM, T);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
index 2d63d88020be..24e3ca19709c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
@@ -128,8 +128,14 @@ void ConstantAggregateBuilderBase::addSize(CharUnits size) {
llvm::Constant *
ConstantAggregateBuilderBase::getRelativeOffset(llvm::IntegerType *offsetType,
llvm::Constant *target) {
+ return getRelativeOffsetToPosition(offsetType, target,
+ Builder.Buffer.size() - Begin);
+}
+
+llvm::Constant *ConstantAggregateBuilderBase::getRelativeOffsetToPosition(
+ llvm::IntegerType *offsetType, llvm::Constant *target, size_t position) {
// Compute the address of the relative-address slot.
- auto base = getAddrOfCurrentPosition(offsetType);
+ auto base = getAddrOfPosition(offsetType, position);
// Subtract.
base = llvm::ConstantExpr::getPtrToInt(base, Builder.CGM.IntPtrTy);
@@ -145,6 +151,20 @@ ConstantAggregateBuilderBase::getRelativeOffset(llvm::IntegerType *offsetType,
}
llvm::Constant *
+ConstantAggregateBuilderBase::getAddrOfPosition(llvm::Type *type,
+ size_t position) {
+ // Make a global variable. We will replace this with a GEP to this
+ // position after installing the initializer.
+ auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true,
+ llvm::GlobalVariable::PrivateLinkage,
+ nullptr, "");
+ Builder.SelfReferences.emplace_back(dummy);
+ auto &entry = Builder.SelfReferences.back();
+ (void)getGEPIndicesTo(entry.Indices, position + Begin);
+ return dummy;
+}
+
+llvm::Constant *
ConstantAggregateBuilderBase::getAddrOfCurrentPosition(llvm::Type *type) {
// Make a global variable. We will replace this with a GEP to this
// position after installing the initializer.
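
getAddrOfPosition and getRelativeOffsetToPosition generalize the existing current-position helpers: a placeholder global is recorded as a self-reference for an arbitrary earlier slot, and the value stored is essentially the target address minus that slot's address, truncated to the 32-bit offset type. A small sketch of that arithmetic on ordinary objects (plain C++, not the LLVM constant-expression machinery):

    #include <cstdint>
    #include <cstdio>

    // A relative slot stores (target - slot) as a 32-bit value; the target is
    // later recovered as slot_address + stored_offset.  (Assumes both objects
    // live within a 32-bit span, which is what the builder relies on too.)
    static std::int32_t makeRelativeOffset(const void *Slot, const void *Target) {
      return static_cast<std::int32_t>(reinterpret_cast<std::intptr_t>(Target) -
                                       reinterpret_cast<std::intptr_t>(Slot));
    }

    static const void *resolveRelativeOffset(const void *Slot, std::int32_t Off) {
      return static_cast<const char *>(Slot) + Off;
    }

    int main() {
      static std::int32_t Table[4] = {0, 0, 0, 0};
      static int Target = 42;
      Table[2] = makeRelativeOffset(&Table[2], &Target);          // fill the slot
      std::printf("%d\n", *static_cast<const int *>(
                              resolveRelativeOffset(&Table[2], Table[2])));
    }
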
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
index bdecff39c88f..78b268f423cb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -13,10 +13,13 @@
#include "CoverageMappingGen.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/ProfileData/Coverage/CoverageMappingReader.h"
#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h"
@@ -24,6 +27,10 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+// This selects the coverage mapping format defined when `InstrProfData.inc`
+// is textually included.
+#define COVMAP_V3
+
using namespace clang;
using namespace CodeGen;
using namespace llvm::coverage;
@@ -901,6 +908,18 @@ struct CounterCoverageMappingBuilder
terminateRegion(S);
}
+ void VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) {
+ extendRegion(S);
+ Visit(S->getBody());
+ }
+
+ void VisitCoreturnStmt(const CoreturnStmt *S) {
+ extendRegion(S);
+ if (S->getOperand())
+ Visit(S->getOperand());
+ terminateRegion(S);
+ }
+
void VisitCXXThrowExpr(const CXXThrowExpr *E) {
extendRegion(E);
if (E->getSubExpr())
@@ -1272,17 +1291,11 @@ struct CounterCoverageMappingBuilder
}
};
-std::string getCoverageSection(const CodeGenModule &CGM) {
- return llvm::getInstrProfSectionName(
- llvm::IPSK_covmap,
- CGM.getContext().getTargetInfo().getTriple().getObjectFormat());
-}
-
std::string normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
llvm::sys::fs::make_absolute(Path);
llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- return Path.str().str();
+ return std::string(Path);
}
} // end anonymous namespace
@@ -1317,30 +1330,71 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
}
}
-void CoverageMappingModuleGen::addFunctionMappingRecord(
- llvm::GlobalVariable *NamePtr, StringRef NameValue, uint64_t FuncHash,
- const std::string &CoverageMapping, bool IsUsed) {
+static std::string getInstrProfSection(const CodeGenModule &CGM,
+ llvm::InstrProfSectKind SK) {
+ return llvm::getInstrProfSectionName(
+ SK, CGM.getContext().getTargetInfo().getTriple().getObjectFormat());
+}
+
+void CoverageMappingModuleGen::emitFunctionMappingRecord(
+ const FunctionInfo &Info, uint64_t FilenamesRef) {
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- if (!FunctionRecordTy) {
+
+ // Assign a name to the function record. This is used to merge duplicates.
+ std::string FuncRecordName = "__covrec_" + llvm::utohexstr(Info.NameHash);
+
+ // A dummy description for a function included-but-not-used in a TU can be
+ // replaced by a full description provided by a different TU. The two kinds
+ // of descriptions play distinct roles; therefore, assign them different names
+ // to prevent `linkonce_odr` merging.
+ if (Info.IsUsed)
+ FuncRecordName += "u";
+
+ // Create the function record type.
+ const uint64_t NameHash = Info.NameHash;
+ const uint64_t FuncHash = Info.FuncHash;
+ const std::string &CoverageMapping = Info.CoverageMapping;
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) LLVMType,
- llvm::Type *FunctionRecordTypes[] = {
- #include "llvm/ProfileData/InstrProfData.inc"
- };
- FunctionRecordTy =
- llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
- /*isPacked=*/true);
- }
+ llvm::Type *FunctionRecordTypes[] = {
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto *FunctionRecordTy =
+ llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
+ /*isPacked=*/true);
- #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
+ // Create the function record constant.
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
llvm::Constant *FunctionRecordVals[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
- FunctionRecords.push_back(llvm::ConstantStruct::get(
- FunctionRecordTy, makeArrayRef(FunctionRecordVals)));
+ auto *FuncRecordConstant = llvm::ConstantStruct::get(
+ FunctionRecordTy, makeArrayRef(FunctionRecordVals));
+
+ // Create the function record global.
+ auto *FuncRecord = new llvm::GlobalVariable(
+ CGM.getModule(), FunctionRecordTy, /*isConstant=*/true,
+ llvm::GlobalValue::LinkOnceODRLinkage, FuncRecordConstant,
+ FuncRecordName);
+ FuncRecord->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ FuncRecord->setSection(getInstrProfSection(CGM, llvm::IPSK_covfun));
+ FuncRecord->setAlignment(llvm::Align(8));
+ if (CGM.supportsCOMDAT())
+ FuncRecord->setComdat(CGM.getModule().getOrInsertComdat(FuncRecordName));
+
+ // Make sure the data doesn't get deleted.
+ CGM.addUsedGlobal(FuncRecord);
+}
+
+void CoverageMappingModuleGen::addFunctionMappingRecord(
+ llvm::GlobalVariable *NamePtr, StringRef NameValue, uint64_t FuncHash,
+ const std::string &CoverageMapping, bool IsUsed) {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ const uint64_t NameHash = llvm::IndexedInstrProf::ComputeHash(NameValue);
+ FunctionRecords.push_back({NameHash, FuncHash, CoverageMapping, IsUsed});
+
if (!IsUsed)
FunctionNames.push_back(
llvm::ConstantExpr::getBitCast(NamePtr, llvm::Type::getInt8PtrTy(Ctx)));
- CoverageMappings.push_back(CoverageMapping);
if (CGM.getCodeGenOpts().DumpCoverageMapping) {
// Dump the coverage mapping data for this function by decoding the
@@ -1385,37 +1439,22 @@ void CoverageMappingModuleGen::emit() {
FilenameRefs[I] = FilenameStrs[I];
}
- std::string FilenamesAndCoverageMappings;
- llvm::raw_string_ostream OS(FilenamesAndCoverageMappings);
- CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
-
- // Stream the content of CoverageMappings to OS while keeping
- // memory consumption under control.
- size_t CoverageMappingSize = 0;
- for (auto &S : CoverageMappings) {
- CoverageMappingSize += S.size();
- OS << S;
- S.clear();
- S.shrink_to_fit();
- }
- CoverageMappings.clear();
- CoverageMappings.shrink_to_fit();
-
- size_t FilenamesSize = OS.str().size() - CoverageMappingSize;
- // Append extra zeroes if necessary to ensure that the size of the filenames
- // and coverage mappings is a multiple of 8.
- if (size_t Rem = OS.str().size() % 8) {
- CoverageMappingSize += 8 - Rem;
- OS.write_zeros(8 - Rem);
+ std::string Filenames;
+ {
+ llvm::raw_string_ostream OS(Filenames);
+ CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
}
- auto *FilenamesAndMappingsVal =
- llvm::ConstantDataArray::getString(Ctx, OS.str(), false);
+ auto *FilenamesVal =
+ llvm::ConstantDataArray::getString(Ctx, Filenames, false);
+ const int64_t FilenamesRef = llvm::IndexedInstrProf::ComputeHash(Filenames);
- // Create the deferred function records array
- auto RecordsTy =
- llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
- auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
+ // Emit the function records.
+ for (const FunctionInfo &Info : FunctionRecords)
+ emitFunctionMappingRecord(Info, FilenamesRef);
+ const unsigned NRecords = 0;
+ const size_t FilenamesSize = Filenames.size();
+ const unsigned CoverageMappingSize = 0;
llvm::Type *CovDataHeaderTypes[] = {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
@@ -1430,18 +1469,16 @@ void CoverageMappingModuleGen::emit() {
CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
// Create the coverage data record
- llvm::Type *CovDataTypes[] = {CovDataHeaderTy, RecordsTy,
- FilenamesAndMappingsVal->getType()};
+ llvm::Type *CovDataTypes[] = {CovDataHeaderTy, FilenamesVal->getType()};
auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
- llvm::Constant *TUDataVals[] = {CovDataHeaderVal, RecordsVal,
- FilenamesAndMappingsVal};
+ llvm::Constant *TUDataVals[] = {CovDataHeaderVal, FilenamesVal};
auto CovDataVal =
llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
auto CovData = new llvm::GlobalVariable(
- CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage,
+ CGM.getModule(), CovDataTy, true, llvm::GlobalValue::PrivateLinkage,
CovDataVal, llvm::getCoverageMappingVarName());
- CovData->setSection(getCoverageSection(CGM));
+ CovData->setSection(getInstrProfSection(CGM, llvm::IPSK_covmap));
CovData->setAlignment(llvm::Align(8));
// Make sure the data doesn't get deleted.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
index 3bf51f590479..5d79d1e65670 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
@@ -47,17 +47,27 @@ class CodeGenModule;
/// Organizes the cross-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingModuleGen {
+ /// Information needed to emit a coverage record for a function.
+ struct FunctionInfo {
+ uint64_t NameHash;
+ uint64_t FuncHash;
+ std::string CoverageMapping;
+ bool IsUsed;
+ };
+
CodeGenModule &CGM;
CoverageSourceInfo &SourceInfo;
llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
- std::vector<llvm::Constant *> FunctionRecords;
std::vector<llvm::Constant *> FunctionNames;
- llvm::StructType *FunctionRecordTy;
- std::vector<std::string> CoverageMappings;
+ std::vector<FunctionInfo> FunctionRecords;
+
+ /// Emit a function record.
+ void emitFunctionMappingRecord(const FunctionInfo &Info,
+ uint64_t FilenamesRef);
public:
CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
- : CGM(CGM), SourceInfo(SourceInfo), FunctionRecordTy(nullptr) {}
+ : CGM(CGM), SourceInfo(SourceInfo) {}
CoverageSourceInfo &getSourceInfo() const {
return SourceInfo;
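
With COVMAP_V3 selected above, function records are no longer packed into a per-TU array: each FunctionInfo becomes its own linkonce_odr, hidden-visibility global named __covrec_<hex of the name hash> (with a trailing 'u' for used functions) in the covfun section, while the per-TU covmap global keeps only the header plus the filename blob that records reference by hash. A hedged sketch of just the record-naming rule, with a plain struct mirroring FunctionInfo:

    #include <cstdint>
    #include <sstream>
    #include <string>

    struct FunctionInfo {          // mirrors the private struct added above
      std::uint64_t NameHash;
      std::uint64_t FuncHash;
      std::string CoverageMapping;
      bool IsUsed;
    };

    // Symbol name under which duplicate records from different TUs get merged.
    std::string funcRecordName(const FunctionInfo &Info) {
      std::ostringstream OS;
      OS << "__covrec_" << std::uppercase << std::hex << Info.NameHash;
      std::string Name = OS.str();
      if (Info.IsUsed)             // keep used and unused descriptions distinct
        Name += 'u';
      return Name;
    }
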
diff --git a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
index 0ed67aabcd62..3a640d6117d6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
@@ -85,11 +85,6 @@ enum CleanupKind : unsigned {
NormalAndEHCleanup = EHCleanup | NormalCleanup,
- InactiveCleanup = 0x4,
- InactiveEHCleanup = EHCleanup | InactiveCleanup,
- InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
- InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup,
-
LifetimeMarker = 0x8,
NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
};
@@ -158,9 +153,10 @@ public:
/// Generation flags.
class Flags {
enum {
- F_IsForEH = 0x1,
+ F_IsForEH = 0x1,
F_IsNormalCleanupKind = 0x2,
- F_IsEHCleanupKind = 0x4
+ F_IsEHCleanupKind = 0x4,
+ F_HasExitSwitch = 0x8,
};
unsigned flags;
@@ -179,8 +175,10 @@ public:
/// cleanup.
bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
- };
+ bool hasExitSwitch() const { return flags & F_HasExitSwitch; }
+ void setHasExitSwitch() { flags |= F_HasExitSwitch; }
+ };
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index b5b8702c551e..80de2a6e3950 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -203,7 +203,7 @@ public:
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
- AddedStructorArgs
+ AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
@@ -222,10 +222,17 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
- AddedStructorArgs
- addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) override;
+ AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
+
+ llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
@@ -516,6 +523,22 @@ private:
}
bool canCallMismatchedFunctionType() const override { return false; }
};
+
+class XLCXXABI final : public ItaniumCXXABI {
+public:
+ explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
+ : ItaniumCXXABI(CGM) {}
+
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr) override;
+
+ bool useSinitAndSterm() const override { return true; }
+
+private:
+ void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
+ llvm::Constant *addr);
+};
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
@@ -546,6 +569,9 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::WebAssembly:
return new WebAssemblyCXXABI(CGM);
+ case TargetCXXABI::XL:
+ return new XLCXXABI(CGM);
+
case TargetCXXABI::GenericItanium:
if (CGM.getContext().getTargetInfo().getTriple().getArch()
== llvm::Triple::le32) {
@@ -670,6 +696,10 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
CGM.HasHiddenLTOVisibility(RD);
+ bool ShouldEmitWPDInfo =
+ CGM.getCodeGenOpts().WholeProgramVTables &&
+ // Don't insert type tests if we are forcing public std visibility.
+ !CGM.HasLTOVisibilityPublicStd(RD);
llvm::Value *VirtualFn = nullptr;
{
@@ -677,16 +707,17 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *TypeId = nullptr;
llvm::Value *CheckResult = nullptr;
- if (ShouldEmitCFICheck || ShouldEmitVFEInfo) {
- // If doing CFI or VFE, we will need the metadata node to check against.
+ if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
+ // If doing CFI, VFE or WPD, we will need the metadata node to check
+ // against.
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
}
- llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
-
if (ShouldEmitVFEInfo) {
+ llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
+
// If doing VFE, load from the vtable with a type.checked.load intrinsic
// call. Note that we use the GEP to calculate the address to load from
// and pass 0 as the offset to the intrinsic. This is because every
@@ -702,18 +733,30 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
- if (ShouldEmitCFICheck) {
+ if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
+ llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
CheckResult = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test),
{Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
}
- VFPAddr =
- Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
- VirtualFn = Builder.CreateAlignedLoad(VFPAddr, CGF.getPointerAlign(),
- "memptr.virtualfn");
+
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VirtualFn = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative,
+ {VTableOffset->getType()}),
+ {VTable, VTableOffset});
+ VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
+ } else {
+ llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
+ VFPAddr = CGF.Builder.CreateBitCast(
+ VFPAddr, FTy->getPointerTo()->getPointerTo());
+ VirtualFn = CGF.Builder.CreateAlignedLoad(
+ VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
+ }
}
assert(VirtualFn && "Virtual function pointer not created!");
- assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || CheckResult) &&
+ assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
+ CheckResult) &&
"Check result required but not created!");
if (ShouldEmitCFICheck) {
@@ -984,11 +1027,16 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
llvm::Constant *MemPtr[2];
if (MD->isVirtual()) {
uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
-
- const ASTContext &Context = getContext();
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
+ uint64_t VTableOffset;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Multiply by 4-byte relative offsets.
+ VTableOffset = Index * 4;
+ } else {
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(0));
+ VTableOffset = Index * PointerWidth.getQuantity();
+ }
if (UseARMMethodPtrABI) {
// ARM C++ ABI 3.2.1:
@@ -1402,8 +1450,19 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Value *Value =
CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
- // Load the type info.
- Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Load the type info.
+ Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
+ Value = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
+ {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
+
+ // Setup to dereference again since this is a proxy we accessed.
+ Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
+ } else {
+ // Load the type info.
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ }
return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}
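
The isRelativeLayout() branches above reflect the relative vtable ABI: entries are 32-bit offsets from the vtable's address point, and llvm.load.relative(ptr, offset) loads the 32-bit value at ptr + offset and adds it back to ptr. A plain-C++ emulation of that lookup (a hypothetical helper, not the intrinsic itself):

    #include <cstdint>
    #include <cstring>

    // Emulates llvm.load.relative.i32: read the 32-bit value stored at
    // Base + Offset and add it to Base, yielding the absolute address that the
    // relative vtable entry refers to.
    static const void *loadRelative(const void *Base, std::int32_t Offset) {
      std::int32_t Rel;
      std::memcpy(&Rel, static_cast<const char *>(Base) + Offset, sizeof(Rel));
      return static_cast<const char *>(Base) + Rel;
    }

A virtual call then passes 4 * VTableIndex as the offset and typeid uses -4, matching the hunks above and the 8-byte address-point adjustment in the RTTI builder below.
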
@@ -1459,28 +1518,37 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy,
QualType DestTy) {
- llvm::Type *PtrDiffLTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
-
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
- // Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
- ClassDecl);
+ llvm::Value *OffsetToTop;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Get the vtable pointer.
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
- // Get the offset-to-top from the vtable.
- llvm::Value *OffsetToTop =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
- OffsetToTop =
- CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
- "offset.to.top");
+ // Get the offset-to-top from the vtable.
+ OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
+ OffsetToTop = CGF.Builder.CreateAlignedLoad(
+ OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
+ } else {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ // Get the vtable pointer.
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
+
+ // Get the offset-to-top from the vtable.
+ OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop = CGF.Builder.CreateAlignedLoad(
+ OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
+ }
// Finally, add the offset to the pointer.
llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
-
return CGF.Builder.CreateBitCast(Value, DestLTy);
}
@@ -1501,17 +1569,22 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
CharUnits VBaseOffsetOffset =
CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
BaseClassDecl);
-
llvm::Value *VBaseOffsetPtr =
CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
"vbase.offset.ptr");
- VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
- CGM.PtrDiffTy->getPointerTo());
-
- llvm::Value *VBaseOffset =
- CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
- "vbase.offset");
+ llvm::Value *VBaseOffset;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VBaseOffsetPtr =
+ CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
+ VBaseOffset = CGF.Builder.CreateAlignedLoad(
+ VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
+ } else {
+ VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
+ CGM.PtrDiffTy->getPointerTo());
+ VBaseOffset = CGF.Builder.CreateAlignedLoad(
+ VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
+ }
return VBaseOffset;
}
@@ -1531,7 +1604,7 @@ void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
}
}
-CGCXXABI::AddedStructorArgs
+CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
@@ -1545,9 +1618,9 @@ ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
ArgTys.insert(ArgTys.begin() + 1,
Context.getPointerType(Context.VoidPtrTy));
- return AddedStructorArgs::prefix(1);
+ return AddedStructorArgCounts::prefix(1);
}
- return AddedStructorArgs{};
+ return AddedStructorArgCounts{};
}
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -1613,9 +1686,9 @@ void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
-CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
+CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
- bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ bool ForVirtualBase, bool Delegating) {
if (!NeedsVTTParameter(GlobalDecl(D, Type)))
return AddedStructorArgs{};
@@ -1623,8 +1696,14 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
- Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
- return AddedStructorArgs::prefix(1); // Added one arg.
+ return AddedStructorArgs::prefix({{VTT, VTTTy}});
+}
+
+llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
+ CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) {
+ GlobalDecl GD(DD, Type);
+ return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
@@ -1633,7 +1712,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
bool Delegating, Address This,
QualType ThisTy) {
GlobalDecl GD(DD, Type);
- llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
+ llvm::Value *VTT =
+ getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
CGCallee Callee;
@@ -1660,10 +1740,11 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
// Create and set the initializer.
- ConstantInitBuilder Builder(CGM);
- auto Components = Builder.beginStruct();
- CGVT.createVTableInitializer(Components, VTLayout, RTTI);
- Components.finishAndSetAsInitializer(VTable);
+ ConstantInitBuilder builder(CGM);
+ auto components = builder.beginStruct();
+ CGVT.createVTableInitializer(components, VTLayout, RTTI,
+ llvm::GlobalValue::isLocalLinkage(Linkage));
+ components.finishAndSetAsInitializer(VTable);
// Set the correct linkage.
VTable->setLinkage(Linkage);
@@ -1687,6 +1768,9 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
if (!VTable->isDeclarationForLinker())
CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
+
+ if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
+ CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
@@ -1776,7 +1860,9 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
// Use pointer alignment for the vtable. Otherwise we would align them based
// on the size of the initializer which doesn't make sense as only single
// values are read.
- unsigned PAlign = CGM.getTarget().getPointerAlign(0);
+ unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
+ ? 32
+ : CGM.getTarget().getPointerAlign(0);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage,
@@ -1793,9 +1879,9 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- Ty = Ty->getPointerTo()->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
@@ -1806,10 +1892,21 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
} else {
CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
- llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
- auto *VFuncLoad =
- CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+ llvm::Value *VFuncLoad;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
+ llvm::Value *Load = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
+ {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
+ VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
+ } else {
+ VTable =
+ CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
+ llvm::Value *VTableSlotPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ VFuncLoad =
+ CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
+ }
// Add !invariant.load md to virtual function load to indicate that
// function didn't change inside vtable.
@@ -1818,11 +1915,14 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
// the same virtual function loads from the same vtable load, which won't
// happen without enabled devirtualization with -fstrict-vtable-pointers.
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
- CGM.getCodeGenOpts().StrictVTablePointers)
- VFuncLoad->setMetadata(
- llvm::LLVMContext::MD_invariant_load,
- llvm::MDNode::get(CGM.getLLVMContext(),
- llvm::ArrayRef<llvm::Metadata *>()));
+ CGM.getCodeGenOpts().StrictVTablePointers) {
+ if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
+ VFuncLoadInstr->setMetadata(
+ llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGM.getLLVMContext(),
+ llvm::ArrayRef<llvm::Metadata *>()));
+ }
+ }
VFunc = VFuncLoad;
}
@@ -1939,21 +2039,28 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
- llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+ llvm::Value *Offset;
llvm::Value *OffsetPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+ if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Load the adjustment offset from the vtable as a 32-bit int.
+ OffsetPtr =
+ CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
+ Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
+ } else {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
- OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
-
- // Load the adjustment offset from the vtable.
- llvm::Value *Offset =
- CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ OffsetPtr =
+ CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+ // Load the adjustment offset from the vtable.
+ Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ }
// Adjust our pointer.
ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
} else {
@@ -2438,7 +2545,7 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
std::string GlobalInitFnName =
std::string("__GLOBAL_init_") + llvm::to_string(Priority);
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
+ llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction(
FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
SourceLocation());
ASTContext &Ctx = getContext();
@@ -2592,14 +2699,15 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
- SourceLocation(),
- /*TLS=*/true);
+ InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
+ SourceLocation(),
+ /*TLS=*/true);
llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
llvm::GlobalVariable::InternalLinkage,
llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
Guard->setThreadLocal(true);
+ Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
CharUnits GuardAlign = CharUnits::One();
Guard->setAlignment(GuardAlign.getAsAlign());
@@ -3008,6 +3116,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
return false;
case BuiltinType::Dependent:
@@ -3200,9 +3309,11 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm_unreachable("Pipe types shouldn't get here");
case Type::Builtin:
+ case Type::ExtInt:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
case Type::Atomic:
// FIXME: GCC treats block pointers as fundamental types?!
@@ -3277,17 +3388,32 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
break;
}
- llvm::Constant *VTable =
- CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+ llvm::Constant *VTable = nullptr;
+
+ // Check if the alias exists. If it doesn't, then get or create the global.
+ if (CGM.getItaniumVTableContext().isRelativeLayout())
+ VTable = CGM.getModule().getNamedAlias(VTableName);
+ if (!VTable)
+ VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+
CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
llvm::Type *PtrDiffTy =
- CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
// The vtable address point is 2.
- llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- VTable =
- llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // The vtable address point is 8 bytes after its start:
+ // 4 for the offset to top + 4 for the relative offset to rtti.
+ llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
+ VTable =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
+ } else {
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
+ Two);
+ }
VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
@@ -3438,6 +3564,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Builtin:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
case Type::BlockPointer:
// Itanium C++ ABI 2.9.5p4:
@@ -3453,7 +3580,10 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm_unreachable("Undeduced type shouldn't get here");
case Type::Pipe:
- llvm_unreachable("Pipe type shouldn't get here");
+ break;
+
+ case Type::ExtInt:
+ break;
case Type::ConstantArray:
case Type::IncompleteArray:
@@ -4401,3 +4531,70 @@ void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
ItaniumCXXABI::emitBeginCatch(CGF, C);
}
+
+/// Register a global destructor as best as we know how.
+void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr) {
+ if (D.getTLSKind() != VarDecl::TLS_None)
+ llvm::report_fatal_error("thread local storage not yet implemented on AIX");
+
+ // Create __dtor function for the var decl.
+ llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
+
+ // Register above __dtor with atexit().
+ CGF.registerGlobalDtorWithAtExit(dtorStub);
+
+ // Emit __finalize function to unregister __dtor and (as appropriate) call
+ // __dtor.
+ emitCXXStermFinalizer(D, dtorStub, addr);
+}
+
+void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
+ llvm::Constant *addr) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ getMangleContext().mangleDynamicStermFinalizer(&D, Out);
+ }
+
+ // Create the finalization action associated with a variable.
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
+ FTy, FnName.str(), FI, D.getLocation());
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
+ FunctionArgList());
+
+ // The unatexit subroutine unregisters __dtor functions that were previously
+ // registered by the atexit subroutine. If the referenced function is found,
+ // the unatexit returns a value of 0, meaning that the cleanup is still
+ // pending (and we should call the __dtor function).
+ llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
+
+ llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
+
+ llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
+
+ // Check if unatexit returns a value of 0. If it does, jump to
+ // DestructCallBlock, otherwise jump to EndBlock directly.
+ CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
+
+ CGF.EmitBlock(DestructCallBlock);
+
+ // Emit the call to dtorStub.
+ llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
+
+ // Make sure the call and the callee agree on calling convention.
+ CI->setCallingConv(dtorStub->getCallingConv());
+
+ CGF.EmitBlock(EndBlock);
+
+ CGF.FinishFunction();
+
+ CGM.AddCXXStermFinalizerEntry(StermFinalizer);
+}
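
XLCXXABI registers the destructor stub with atexit() and additionally emits a __sterm finalizer that calls unatexit() and runs the stub only if it was still registered, i.e. unatexit() returned 0. A rough user-level sketch of that handshake; unatexit() is AIX-specific and is declared here purely for illustration (it will not link elsewhere):

    #include <cstdio>
    #include <cstdlib>

    extern "C" int unatexit(void (*)(void));  // AIX-only; declared as an assumption

    static void dtorStub() { std::puts("running destructor"); }

    static void stermFinalizer() {
      // unatexit() returns 0 when dtorStub was still registered, meaning the
      // cleanup is still pending and the finalizer must run it itself.
      if (unatexit(dtorStub) == 0)
        dtorStub();
    }

    int main() {
      std::atexit(dtorStub);  // what registerGlobalDtorWithAtExit arranges
      stermFinalizer();       // what the emitted __sterm finalizer does
    }
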
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index aff46135705a..45c6cb6b2e0d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -206,7 +206,7 @@ public:
// lacks a definition for the destructor, non-base destructors must always
// delegate to or alias the base destructor.
- AddedStructorArgs
+ AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
@@ -253,10 +253,17 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
- AddedStructorArgs
- addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) override;
+ AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
+
+ llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
@@ -1261,10 +1268,10 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
}
}
-CGCXXABI::AddedStructorArgs
+CGCXXABI::AddedStructorArgCounts
MicrosoftCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
- AddedStructorArgs Added;
+ AddedStructorArgCounts Added;
// TODO: 'for base' flag
if (isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() == Dtor_Deleting) {
@@ -1553,9 +1560,9 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
}
}
-CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
+CGCXXABI::AddedStructorArgs MicrosoftCXXABI::getImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
- bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ bool ForVirtualBase, bool Delegating) {
assert(Type == Ctor_Complete || Type == Ctor_Base);
// Check if we need a 'most_derived' parameter.
@@ -1570,13 +1577,16 @@ CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
} else {
MostDerivedArg = llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
}
- RValue RV = RValue::get(MostDerivedArg);
if (FPT->isVariadic()) {
- Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
- return AddedStructorArgs::prefix(1);
+ return AddedStructorArgs::prefix({{MostDerivedArg, getContext().IntTy}});
}
- Args.add(RV, getContext().IntTy);
- return AddedStructorArgs::suffix(1);
+ return AddedStructorArgs::suffix({{MostDerivedArg, getContext().IntTy}});
+}
+
+llvm::Value *MicrosoftCXXABI::getCXXDestructorImplicitParam(
+ CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) {
+ return nullptr;
}
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
@@ -1605,8 +1615,11 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
}
+ llvm::Value *Implicit =
+ getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase,
+ Delegating); // = nullptr
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
- /*ImplicitParam=*/nullptr,
+ /*ImplicitParam=*/Implicit,
/*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
// Complete object handler should continue to be the remaining
@@ -1621,6 +1634,15 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
if (!CGM.getCodeGenOpts().LTOUnit)
return;
+ // TODO: Should VirtualFunctionElimination also be supported here?
+ // See similar handling in CodeGenModule::EmitVTableTypeMetadata.
+ if (CGM.getCodeGenOpts().WholeProgramVTables) {
+ llvm::GlobalObject::VCallVisibility TypeVis =
+ CGM.GetVCallVisibilityLevel(RD);
+ if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
+ VTable->setVCallVisibilityMetadata(TypeVis);
+ }
+
// The location of the first virtual function pointer in the virtual table,
// aka the "address point" on Itanium. This is at offset 0 if RTTI is
// disabled, or sizeof(void*) if RTTI is enabled.
@@ -1681,10 +1703,11 @@ void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
[](const VTableComponent &VTC) { return VTC.isRTTIKind(); }))
RTTI = getMSCompleteObjectLocator(RD, *Info);
- ConstantInitBuilder Builder(CGM);
- auto Components = Builder.beginStruct();
- CGVT.createVTableInitializer(Components, VTLayout, RTTI);
- Components.finishAndSetAsInitializer(VTable);
+ ConstantInitBuilder builder(CGM);
+ auto components = builder.beginStruct();
+ CGVT.createVTableInitializer(components, VTLayout, RTTI,
+ VTable->hasLocalLinkage());
+ components.finishAndSetAsInitializer(VTable);
emitVTableTypeMetadata(*Info, RD, VTable);
}
@@ -2341,7 +2364,7 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
if (!NonComdatInits.empty()) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
SourceLocation(), /*TLS=*/true);
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
@@ -2515,7 +2538,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GuardVar->setComdat(
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
if (D.getTLSKind())
- GuardVar->setThreadLocal(true);
+ CGM.setTLSMode(GuardVar, D);
if (GI && !HasPerVariableGuard)
GI->Guard = GuardVar;
}
@@ -3913,7 +3936,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
- getMangleContext().mangleCXXCtor(CD, CT, Out);
+ getMangleContext().mangleName(GlobalDecl(CD, CT), Out);
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
@@ -4000,7 +4023,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
// Insert any ABI-specific implicit constructor arguments.
- AddedStructorArgs ExtraArgs =
+ AddedStructorArgCounts ExtraArgs =
addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false, Args);
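
The renames above split the old AddedStructorArgs in two: buildStructorSignature keeps reporting only how many implicit arguments were added at each end (AddedStructorArgCounts), while getImplicitConstructorArgs now returns the actual value/type pairs for the caller to splice into the CallArgList itself. A simplified sketch of the two result shapes these call sites imply (field names are approximations, not the exact CGCXXABI definitions):

    #include <utility>
    #include <vector>

    struct Value;                 // opaque stand-ins for llvm::Value and QualType
    struct TypeRef;

    struct AddedStructorArgCounts {          // "how many": used for signatures
      unsigned Prefix = 0, Suffix = 0;
      static AddedStructorArgCounts prefix(unsigned N) { return {N, 0}; }
      static AddedStructorArgCounts suffix(unsigned N) { return {0, N}; }
    };

    struct AddedStructorArgs {               // "which ones": used at call sites
      struct Arg { Value *V; TypeRef *Ty; };
      std::vector<Arg> Prefix, Suffix;
      static AddedStructorArgs prefix(std::vector<Arg> A) {
        AddedStructorArgs R;
        R.Prefix = std::move(A);
        return R;
      }
      static AddedStructorArgs suffix(std::vector<Arg> A) {
        AddedStructorArgs R;
        R.Suffix = std::move(A);
        return R;
      }
    };
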
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 284e8022a3c4..0c7e5f4598f8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -173,8 +173,8 @@ public:
// Prepare CGDebugInfo to emit debug info for a clang module.
auto *DI = Builder->getModuleDebugInfo();
StringRef ModuleName = llvm::sys::path::filename(MainFileName);
- DI->setPCHDescriptor({ModuleName, "", OutputFileName,
- ASTFileSignature{{{~0U, ~0U, ~0U, ~0U, ~1U}}}});
+ DI->setPCHDescriptor(
+ {ModuleName, "", OutputFileName, ASTFileSignature::createDISentinel()});
DI->setModuleMap(MMap);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp b/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
index 3410c7f21533..26ac8b63a9ba 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
@@ -8,6 +8,7 @@
#include "PatternInit.h"
#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Type.h"
@@ -33,17 +34,15 @@ llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
constexpr bool NegativeNaN = true;
constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
if (Ty->isIntOrIntVectorTy()) {
- unsigned BitWidth = cast<llvm::IntegerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getBitWidth();
+ unsigned BitWidth =
+ cast<llvm::IntegerType>(Ty->getScalarType())->getBitWidth();
if (BitWidth <= 64)
return llvm::ConstantInt::get(Ty, IntValue);
return llvm::ConstantInt::get(
Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, IntValue)));
}
if (Ty->isPtrOrPtrVectorTy()) {
- auto *PtrTy = cast<llvm::PointerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+ auto *PtrTy = cast<llvm::PointerType>(Ty->getScalarType());
unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
PtrTy->getAddressSpace());
if (PtrWidth > 64)
@@ -54,8 +53,7 @@ llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
}
if (Ty->isFPOrFPVectorTy()) {
unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
- (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getFltSemantics());
+ Ty->getScalarType()->getFltSemantics());
llvm::APInt Payload(64, NaNPayload);
if (BitWidth >= 64)
Payload = llvm::APInt::getSplat(BitWidth, Payload);
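
All three branches now go through Type::getScalarType(), which returns the element type of a vector and the type itself otherwise, instead of spelling the isVectorTy() check out by hand. A one-line illustration against the LLVM API (assuming LLVM headers are available):

    #include "llvm/IR/Type.h"

    // For i32 and <4 x i32> alike this yields 32: getScalarType() strips the
    // vector wrapper and is a no-op on scalar types.  (Valid for integer and
    // integer-vector types only.)
    unsigned scalarIntegerWidth(llvm::Type *Ty) {
      return Ty->getScalarType()->getIntegerBitWidth();
    }
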
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
index 24ae6c6e362f..cdf83370c41f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -13,6 +13,7 @@
#include "CodeGenModule.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constants.h"
@@ -30,16 +31,16 @@ static bool isAsanHwasanOrMemTag(const SanitizerSet& SS) {
void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
SourceLocation Loc, StringRef Name,
QualType Ty, bool IsDynInit,
- bool IsBlacklisted) {
+ bool IsExcluded) {
if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
- IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
+ IsExcluded |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
llvm::Metadata *LocDescr = nullptr;
llvm::Metadata *GlobalName = nullptr;
llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- if (!IsBlacklisted) {
+ if (!IsExcluded) {
// Don't generate source location and global name if it is blacklisted -
// it won't be instrumented anyway.
LocDescr = getLocationMetadata(Loc);
@@ -52,7 +53,7 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt1Ty(VMContext), IsBlacklisted))};
+ llvm::Type::getInt1Ty(VMContext), IsExcluded))};
llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
llvm::NamedMDNode *AsanGlobals =
@@ -68,12 +69,12 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
llvm::raw_string_ostream OS(QualName);
D.printQualifiedName(OS);
- bool IsBlacklisted = false;
+ bool IsExcluded = false;
for (auto Attr : D.specific_attrs<NoSanitizeAttr>())
if (Attr->getMask() & SanitizerKind::Address)
- IsBlacklisted = true;
+ IsExcluded = true;
reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
- IsBlacklisted);
+ IsExcluded);
}
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
index 7ffac4360d9c..440a54590acc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
@@ -40,7 +40,7 @@ public:
bool IsDynInit = false);
void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
StringRef Name, QualType Ty, bool IsDynInit = false,
- bool IsBlacklisted = false);
+ bool IsExcluded = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
void disableSanitizerForInstruction(llvm::Instruction *I);
private:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
index 8bce93b71c0c..3d7421ac2e16 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -694,7 +694,7 @@ swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
// Try to split the vector type in half.
if (numElts >= 4 && isPowerOf2(numElts)) {
if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
- return {llvm::VectorType::get(eltTy, numElts / 2), 2};
+ return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
}
return {eltTy, numElts};
@@ -747,7 +747,8 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// Add the right number of vectors of this size.
auto numVecs = numElts >> logCandidateNumElts;
- components.append(numVecs, llvm::VectorType::get(eltTy, candidateNumElts));
+ components.append(numVecs,
+ llvm::FixedVectorType::get(eltTy, candidateNumElts));
numElts -= (numVecs << logCandidateNumElts);
if (numElts == 0) return;
@@ -757,7 +758,7 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// This only needs to be separately checked if it's not a power of 2.
if (numElts > 2 && !isPowerOf2(numElts) &&
isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
- components.push_back(llvm::VectorType::get(eltTy, numElts));
+ components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
return;
}
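
These hunks track LLVM's split of vector types into fixed and scalable variants: code that builds a vector with a known, constant lane count now asks for llvm::FixedVectorType explicitly. A brief sketch of the replacement pattern, mirroring the splitLegalVectorType change (assumes LLVM headers):

    #include "llvm/IR/DerivedTypes.h"

    // Build the "half as many lanes" type used when splitting a legal vector.
    // FixedVectorType::get makes the constant lane count explicit and can never
    // produce a scalable vector.
    llvm::Type *halfWidthVector(llvm::Type *EltTy, unsigned NumElts) {
      return llvm::FixedVectorType::get(EltTy, NumElts / 2);
    }
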
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 12e17ac751b4..9cd63ebe29ee 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -28,6 +29,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
@@ -96,6 +98,17 @@ Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Address::invalid();
}
+bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
+ return true;
+
+ return false;
+}
+
ABIInfo::~ABIInfo() {}
/// Does the given lowering require more than the given number of
@@ -384,7 +397,7 @@ static Address emitMergePHI(CodeGenFunction &CGF,
return Address(PHI, Align);
}
-TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+TargetCodeGenInfo::~TargetCodeGenInfo() = default;
// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
@@ -486,11 +499,15 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
// Constant arrays of empty records count as empty, strip them off.
// Constant arrays of zero length always count as empty.
+ bool WasArray = false;
if (AllowArrays)
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
if (AT->getSize() == 0)
return true;
FT = AT->getElementType();
+ // The [[no_unique_address]] special case below does not apply to
+ // arrays of C++ empty records, so we need to remember this fact.
+ WasArray = true;
}
const RecordType *RT = FT->getAs<RecordType>();
@@ -501,7 +518,14 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
//
// FIXME: We should use a predicate for whether this behavior is true in the
// current ABI.
- if (isa<CXXRecordDecl>(RT->getDecl()))
+ //
+ // The exception to the above rule are fields marked with the
+ // [[no_unique_address]] attribute (since C++20). Those do count as empty
+ // according to the Itanium ABI. The exception applies only to records,
+ // not arrays of records, so we must also check whether we stripped off an
+ // array type above.
+ if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
return false;
return isEmptyRecord(Context, FT, AllowArrays);
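
The WasArray flag exists because the C++20 [[no_unique_address]] carve-out applies only to a field of empty class type, not to an array of empty records, so stripping an array level must disable the exception. A short C++20 example of the two cases being distinguished:

    struct Empty {};

    struct WithAttr {
      [[no_unique_address]] Empty E;  // counts as an empty field under the new rule
      int X;                          // on Itanium, E shares storage with X
    };

    struct WithArray {
      Empty E[2];  // an array of empty records gets no [[no_unique_address]] break
      int X;
    };
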
@@ -681,7 +705,7 @@ public:
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
@@ -700,8 +724,16 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() >
+ Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
+ ? Context.Int128Ty
+ : Context.LongLongTy))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
@@ -715,8 +747,15 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() >
+ getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
+ ? getContext().Int128Ty
+ : getContext().LongLongTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
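
For _ExtInt, the default ABI now extends values narrower than int (via isPromotableIntegerTypeForABI) and switches to an indirect pass once the width exceeds the largest native integer, __int128 where available and long long otherwise. A condensed sketch of that decision, with the limits passed in as parameters since they are target properties:

    enum class PassKind { Extend, Direct, Indirect };

    // NumBits: width of the _ExtInt.  IntBits and MaxNativeBits stand in for the
    // target's 'int' width and its widest native integer (128 with __int128).
    PassKind classifyExtInt(unsigned NumBits, unsigned IntBits = 32,
                            unsigned MaxNativeBits = 128) {
      if (NumBits > MaxNativeBits)
        return PassKind::Indirect;  // too wide: natural-align indirect pass
      if (NumBits < IntBits)
        return PassKind::Extend;    // narrower than 'int': promoted and extended
      return PassKind::Direct;
    }
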
@@ -726,11 +765,19 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
//===----------------------------------------------------------------------===//
class WebAssemblyABIInfo final : public SwiftABIInfo {
+public:
+ enum ABIKind {
+ MVP = 0,
+ ExperimentalMV = 1,
+ };
+
+private:
DefaultABIInfo defaultInfo;
+ ABIKind Kind;
public:
- explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
- : SwiftABIInfo(CGT), defaultInfo(CGT) {}
+ explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
+ : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
private:
ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -761,8 +808,9 @@ private:
class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
- explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
+ explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ WebAssemblyABIInfo::ABIKind K)
+ : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -813,6 +861,20 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
// though watch out for things like bitfields.
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, fully expand all other aggregates
+ if (Kind == ABIKind::ExperimentalMV) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ assert(RT);
+ bool HasBitField = false;
+ for (auto *Field : RT->getDecl()->fields()) {
+ if (Field->isBitField()) {
+ HasBitField = true;
+ break;
+ }
+ }
+ if (!HasBitField)
+ return ABIArgInfo::getExpand();
+ }
}
// Otherwise just do the default thing.
@@ -832,6 +894,9 @@ ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
// ABIArgInfo::getDirect().
if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, return all other aggregates
+ if (Kind == ABIKind::ExperimentalMV)
+ return ABIArgInfo::getDirect();
}
}
@@ -871,8 +936,8 @@ class PNaClABIInfo : public ABIInfo {
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -906,10 +971,15 @@ ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
} else if (Ty->isFloatingType()) {
// Floating-point types don't go inreg.
return ABIArgInfo::getDirect();
+ } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ // Treat extended integers as integers if <= 64 bits wide; otherwise pass indirectly.
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty);
+ return ABIArgInfo::getDirect();
}
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
@@ -920,12 +990,19 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (isAggregateTypeForABI(RetTy))
return getNaturalAlignIndirect(RetTy);
+ // Treat extended integers as integers if <= 64 bits wide; otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return ABIArgInfo::getDirect();
+ }
+
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
@@ -943,7 +1020,8 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
.Cases("y", "&y", "^Ym", true)
.Default(false);
if (IsMMXCons && Ty->isVectorTy()) {
- if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
+ if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
+ 64) {
// Invalid MMX constraint
return nullptr;
}
@@ -1112,7 +1190,7 @@ public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
bool RetSmallStructInRegABI, bool Win32StructABI,
unsigned NumRegisterParameters, bool SoftFloatABI)
- : TargetCodeGenInfo(new X86_32ABIInfo(
+ : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
NumRegisterParameters, SoftFloatABI)) {}
@@ -1412,8 +1490,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
- return ABIArgInfo::getDirect(llvm::VectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
@@ -1470,15 +1548,19 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectReturnResult(RetTy, State);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
-static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}
-static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return 0;
@@ -1487,16 +1569,16 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases())
- if (!isRecordWithSSEVectorType(Context, I.getType()))
+ if (!isRecordWithSIMDVectorType(Context, I.getType()))
return false;
for (const auto *i : RD->fields()) {
QualType FT = i->getType();
- if (isSSEVectorType(Context, FT))
+ if (isSIMDVectorType(Context, FT))
return true;
- if (isRecordWithSSEVectorType(Context, FT))
+ if (isRecordWithSIMDVectorType(Context, FT))
return true;
}
@@ -1517,8 +1599,8 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
- isRecordWithSSEVectorType(getContext(), Ty)))
+ if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
+ isRecordWithSIMDVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
@@ -1661,7 +1743,7 @@ void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) c
isHomogeneousAggregate(Ty, Base, NumElts)) {
if (State.FreeSSERegs >= NumElts) {
State.FreeSSERegs -= NumElts;
- Args[I].info = ABIArgInfo::getDirect();
+ Args[I].info = ABIArgInfo::getDirectInReg();
State.IsPreassigned.set(I);
}
}
@@ -1676,6 +1758,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
Ty = useFirstFieldIfTransparentUnion(Ty);
+ TypeInfo TI = getContext().getTypeInfo(Ty);
// Check with the C++ ABI first.
const RecordType *RT = Ty->getAs<RecordType>();
@@ -1725,7 +1808,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool NeedsPadding = false;
bool InReg;
if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ unsigned SizeInRegs = (TI.Width + 31) / 32;
SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
if (InReg)
@@ -1735,14 +1818,19 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
}
llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
+ // Pass over-aligned aggregates on Windows indirectly. This behavior was
+ // added in MSVC 2015.
+ if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
// Don't do this for the MCU if there are still free integer registers
// (see X86_64 ABI for full explanation).
- if (getContext().getTypeSize(Ty) <= 4 * 32 &&
- (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
+ if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
+ canExpandIndirectArgument(Ty))
return ABIArgInfo::getExpandWithPadding(
IsFastCall || IsVectorCall || IsRegCall, PaddingType);
@@ -1750,14 +1838,24 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Windows, vectors are passed directly if registers are available, or
+ // indirectly if not. This avoids the need to align argument memory. Pass
+ // user-defined vector types larger than 512 bits indirectly for simplicity.
+ if (IsWin32StructABI) {
+ if (TI.Width <= 512 && State.FreeSSERegs > 0) {
+ --State.FreeSSERegs;
+ return ABIArgInfo::getDirectInReg();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
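+ // For instance (hypothetical 32-bit Windows prototype):
+ //   void f(__m128 a, __m128 b, __m128 c, __m128 d);
+ // a, b, and c take the three available XMM registers; d is passed indirectly.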
+
// On Darwin, some vectors are passed in memory, we handle this by passing
// it as an i8/i16/i32/i64.
if (IsDarwinVectorABI) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
+ if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
+ (TI.Width == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), TI.Width));
}
if (IsX86_MMXType(CGT.ConvertType(Ty)))
@@ -1772,12 +1870,21 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool InReg = shouldPrimitiveUseInReg(Ty, State);
- if (Ty->isPromotableIntegerType()) {
+ if (isPromotableIntegerTypeForABI(Ty)) {
if (InReg)
return ABIArgInfo::getExtendInReg(Ty);
return ABIArgInfo::getExtend(Ty);
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() <= 64) {
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
@@ -1787,9 +1894,10 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI);
if (IsMCUABI)
State.FreeRegs = 3;
- else if (State.CC == llvm::CallingConv::X86_FastCall)
+ else if (State.CC == llvm::CallingConv::X86_FastCall) {
State.FreeRegs = 2;
- else if (State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.FreeSSERegs = 3;
+ } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
State.FreeRegs = 2;
State.FreeSSERegs = 6;
} else if (FI.getHasRegParm())
@@ -1797,6 +1905,11 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
else if (State.CC == llvm::CallingConv::X86_RegCall) {
State.FreeRegs = 5;
State.FreeSSERegs = 8;
+ } else if (IsWin32StructABI) {
+ // Since MSVC 2015, the first three SSE vectors have been passed in
+ // registers. The rest are passed indirectly.
+ State.FreeRegs = DefaultNumRegisterParameters;
+ State.FreeSSERegs = 3;
} else
State.FreeRegs = DefaultNumRegisterParameters;
@@ -1843,16 +1956,25 @@ X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
CharUnits &StackOffset, ABIArgInfo &Info,
QualType Type) const {
// Arguments are always 4-byte-aligned.
- CharUnits FieldAlign = CharUnits::fromQuantity(4);
+ CharUnits WordSize = CharUnits::fromQuantity(4);
+ assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
- assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
- Info = ABIArgInfo::getInAlloca(FrameFields.size());
- FrameFields.push_back(CGT.ConvertTypeForMem(Type));
- StackOffset += getContext().getTypeSizeInChars(Type);
+ // sret pointers and other indirect arguments require an extra level of
+ // pointer indirection, unless they are byval. Most arguments are byval and
+ // do not need this indirection.
+ bool IsIndirect = false;
+ if (Info.isIndirect() && !Info.getIndirectByVal())
+ IsIndirect = true;
+ Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
+ llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
+ if (IsIndirect)
+ LLTy = LLTy->getPointerTo(0);
+ FrameFields.push_back(LLTy);
+ StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
// Insert padding bytes to respect alignment.
CharUnits FieldEnd = StackOffset;
- StackOffset = FieldEnd.alignTo(FieldAlign);
+ StackOffset = FieldEnd.alignTo(WordSize);
if (StackOffset != FieldEnd) {
CharUnits NumBytes = StackOffset - FieldEnd;
llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
@@ -1866,16 +1988,12 @@ static bool isArgInAlloca(const ABIArgInfo &Info) {
switch (Info.getKind()) {
case ABIArgInfo::InAlloca:
return true;
- case ABIArgInfo::Indirect:
- assert(Info.getIndirectByVal());
- return true;
case ABIArgInfo::Ignore:
return false;
+ case ABIArgInfo::Indirect:
case ABIArgInfo::Direct:
case ABIArgInfo::Extend:
- if (Info.getInReg())
- return false;
- return true;
+ return !Info.getInReg();
case ABIArgInfo::Expand:
case ABIArgInfo::CoerceAndExpand:
// These are aggregate types which are never passed in registers when
@@ -1909,8 +2027,7 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
// Put the sret parameter into the inalloca struct if it's in memory.
if (Ret.isIndirect() && !Ret.getInReg()) {
- CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
- addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
+ addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
// On Windows, the hidden sret parameter is always returned in eax.
Ret.setInAllocaSRet(IsWin32StructABI);
}
@@ -2207,7 +2324,7 @@ public:
if (info.isDirect()) {
llvm::Type *ty = info.getCoerceToType();
if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
- return (vectorTy->getBitWidth() > 128);
+ return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
}
return false;
}
@@ -2280,7 +2397,7 @@ private:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
+ : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -2361,8 +2478,110 @@ public:
}
}
}
+
+ void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const override;
};
+static void initFeatureMaps(const ASTContext &Ctx,
+ llvm::StringMap<bool> &CallerMap,
+ const FunctionDecl *Caller,
+ llvm::StringMap<bool> &CalleeMap,
+ const FunctionDecl *Callee) {
+ if (CalleeMap.empty() && CallerMap.empty()) {
+ // The caller is potentially nullptr in the case where the call isn't in a
+ // function. In this case, getFunctionFeatureMap ensures we just get the
+ // TU-level setting (since it cannot be modified by 'target').
+ Ctx.getFunctionFeatureMap(CallerMap, Caller);
+ Ctx.getFunctionFeatureMap(CalleeMap, Callee);
+ }
+}
+
+static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap,
+ QualType Ty, StringRef Feature,
+ bool IsArgument) {
+ bool CallerHasFeat = CallerMap.lookup(Feature);
+ bool CalleeHasFeat = CalleeMap.lookup(Feature);
+ if (!CallerHasFeat && !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Mixing calling conventions here is very clearly an error.
+ if (!CallerHasFeat || !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::err_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Else, both caller and callee have the required feature, so there is no need
+ // to diagnose.
+ return false;
+}
+
+static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap, QualType Ty,
+ bool IsArgument) {
+ uint64_t Size = Ctx.getTypeSize(Ty);
+ if (Size > 256)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
+ "avx512f", IsArgument);
+
+ if (Size > 128)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
+ IsArgument);
+
+ return false;
+}
+
+void X86_64TargetCodeGenInfo::checkFunctionCallABI(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args) const {
+ llvm::StringMap<bool> CallerMap;
+ llvm::StringMap<bool> CalleeMap;
+ unsigned ArgIndex = 0;
+
+ // We need to loop through the actual call arguments rather than the
+ // function's parameters, in case the call is variadic.
+ for (const CallArg &Arg : Args) {
+ // The "avx" feature changes how vectors larger than 128 bits are passed.
+ // "avx512f" additionally changes how vectors larger than 256 bits are
+ // passed. Like GCC, we warn when a function is called with an argument
+ // where this will change. Unlike GCC, we also error when it is an obvious
+ // ABI mismatch, that is, when the caller and callee features are
+ // mismatched. Unfortunately, we cannot emit this diagnostic in Sema, since
+ // the callee can still change its ABI with the target attribute after this
+ // call is seen.
+ if (Arg.getType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Arg.getType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ QualType Ty = Arg.getType();
+ // The CallArg seems to have desugared the type already, so for clearer
+ // diagnostics, replace it with the type in the FunctionDecl if possible.
+ if (ArgIndex < Callee->getNumParams())
+ Ty = Callee->getParamDecl(ArgIndex)->getType();
+
+ if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Ty, /*IsArgument*/ true))
+ return;
+ }
+ ++ArgIndex;
+ }
+
+ // Always check the return type, as we have no good way of knowing in
+ // codegen whether the value is used, tail-called, etc.
+ if (Callee->getReturnType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Callee->getReturnType(),
+ /*IsArgument*/ false);
+ }
+}
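+ // Roughly the pattern being diagnosed (hypothetical flags and code), in a
+ // TU compiled without -mavx:
+ //   typedef double v4df __attribute__((vector_size(32)));
+ //   v4df make(void);                                        // 256-bit return
+ //   __attribute__((target("avx"))) v4df use(void) { return make(); }
+ // The caller has "avx" but the callee does not, so the call is an error;
+ // if neither had it, only a warning would be emitted.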
+
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// If the argument does not end in .lib, automatically add the suffix.
// If the argument contains a space, enclose it in quotes.
@@ -2424,7 +2643,7 @@ class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
+ : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
@@ -2731,6 +2950,15 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
return;
}
+ if (const auto *EITy = Ty->getAs<ExtIntType>()) {
+ if (EITy->getNumBits() <= 64)
+ Current = Integer;
+ else if (EITy->getNumBits() <= 128)
+ Lo = Hi = Integer;
+ // Larger values need to get passed in memory.
+ return;
+ }
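+ // e.g. (hypothetical widths): _ExtInt(24) classifies as a single Integer
+ // eightbyte, _ExtInt(96) as an Integer pair, and _ExtInt(192) stays Memory.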
+
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
@@ -2905,8 +3133,11 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (Ty->isExtIntType())
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
return getNaturalAlignIndirect(Ty);
@@ -2938,13 +3169,14 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
// the argument in the free register. This does not seem to happen currently,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
- if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
+ !Ty->isExtIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -3001,11 +3233,11 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// Don't pass vXi128 vectors in their native type, the backend can't
// legalize them.
if (passInt128VectorsInMem() &&
- IRType->getVectorElementType()->isIntegerTy(128)) {
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
// Use a vXi64 vector.
uint64_t Size = getContext().getTypeSize(Ty);
- return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),
- Size / 64);
+ return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
+ Size / 64);
}
return IRType;
@@ -3020,8 +3252,8 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// Return a LLVM IR vector type based on the size of 'Ty'.
- return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
- Size / 64);
+ return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
+ Size / 64);
}
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
@@ -3155,7 +3387,8 @@ GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// case.
if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
- return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+ return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
+ 2);
return llvm::Type::getDoubleTy(getVMContext());
}
@@ -3326,7 +3559,7 @@ classifyReturnType(QualType RetTy) const {
RetTy = EnumTy->getDecl()->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
- RetTy->isPromotableIntegerType())
+ isPromotableIntegerTypeForABI(RetTy))
return ABIArgInfo::getExtend(RetTy);
}
break;
@@ -3471,7 +3704,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
Ty = EnumTy->getDecl()->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
- Ty->isPromotableIntegerType())
+ isPromotableIntegerTypeForABI(Ty))
return ABIArgInfo::getExtend(Ty);
}
@@ -3627,14 +3860,15 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
} else {
FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
}
- } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
+ } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
+ getContext().getCanonicalType(FI.getReturnType()
+ ->getAs<ComplexType>()
+ ->getElementType()) ==
+ getContext().LongDoubleTy)
// Complex Long Double Type is passed in Memory when Regcall
// calling convention is used.
- const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
- if (getContext().getCanonicalType(CT->getElementType()) ==
- getContext().LongDoubleTy)
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- } else
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ else
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
}
@@ -4021,14 +4255,25 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
// Clang matches them for compatibility.
- return ABIArgInfo::getDirect(
- llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
default:
break;
}
}
+ if (Ty->isExtIntType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
+ // anyway as long as they fit, so we don't have to check for a power of 2.
+ if (Width <= 64)
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
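+ // Minimal sketch of the rule above (hypothetical declarations):
+ //   void f(_ExtInt(37) a);   // fits in 8 bytes -> passed directly
+ //   void g(_ExtInt(128) b);  // wider than 8 bytes -> passed by reference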
+
return ABIArgInfo::getDirect();
}
@@ -4118,6 +4363,224 @@ Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
/*allowHigherAlign*/ false);
}
+static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address, bool Is64Bit,
+ bool IsAIX) {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all PPC ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-67 are various 4-byte or 8-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
+
+ // 68-76 are various 4-byte special-purpose registers:
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 68, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
+
+ // AIX does not utilize the rest of the registers.
+ if (IsAIX)
+ return false;
+
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
+
+ if (!Is64Bit)
+ return false;
+
+ // TODO: Need to verify whether these registers are used on 64-bit AIX with
+ // Power8 or later CPUs.
+ // 64-bit only registers:
+ // 114: tfhar
+ // 115: tfiar
+ // 116: texasr
+ AssignToArrayRange(Builder, Address, Eight8, 114, 116);
+
+ return false;
+}
+
+// AIX
+namespace {
+/// AIXABIInfo - The AIX XCOFF ABI information.
+class AIXABIInfo : public ABIInfo {
+ const bool Is64Bit;
+ const unsigned PtrByteSize;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
+ const bool Is64Bit;
+
+public:
+ AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
+ Is64Bit(Is64Bit) {}
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+} // namespace
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 32/64 bits.
+bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ if (!Is64Bit)
+ return false;
+
+ // For 64-bit mode, in addition to the usual promotable integer types, we
+ // also need to extend all 32-bit types, since the ABI requires promotion to
+ // 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
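+ // For example (hypothetical 64-bit AIX arguments): 'int' and 'unsigned int'
+ // are sign-/zero-extended to 64 bits here, while 'long long' is already
+ // full-width and is passed directly.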
+
+ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (RetTy->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // TODO: Evaluate if AIX power alignment rule would have an impact on the
+ // alignment here.
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ // TODO: Evaluate if AIX power alignment rule would have an impact on the
+ // alignment here.
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ CharUnits CCAlign = getParamTypeAlignment(Ty);
+ CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
+
+ return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
+ /*Realign*/ TyAlign > CCAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ // If the structure contains a vector type, the alignment is 16.
+ if (isRecordWithSIMDVectorType(getContext(), Ty))
+ return CharUnits::fromQuantity(16);
+
+ return CharUnits::fromQuantity(PtrByteSize);
+}
+
+Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.second = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true);
+}
+
+bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
+}
+
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
@@ -4150,8 +4613,8 @@ class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
bool RetSmallStructInRegABI)
- : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI,
- RetSmallStructInRegABI)) {}
+ : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
+ CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
static bool isStructReturnInRegABI(const llvm::Triple &Triple,
const CodeGenOptions &Opts);
@@ -4167,7 +4630,7 @@ public:
}
CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements
+ // Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
@@ -4395,42 +4858,8 @@ bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 4-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Four8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-76 are various 4-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 64, 76);
-
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
-
- // 109: vrsave
- // 110: vscr
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- AssignToArrayRange(Builder, Address, Four8, 109, 113);
-
- return false;
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
+ /*IsAIX*/ false);
}
// PowerPC-64
@@ -4541,8 +4970,8 @@ public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
bool SoftFloatABI)
- : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
- SoftFloatABI)) {}
+ : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
+ CGT, Kind, HasQPX, SoftFloatABI)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -4577,7 +5006,7 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
- if (Ty->isPromotableIntegerType())
+ if (isPromotableIntegerTypeForABI(Ty))
return true;
// In addition to the usual promotable integer types, we also need to
@@ -4591,6 +5020,10 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
break;
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
return false;
}
@@ -4808,6 +5241,10 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
@@ -4880,6 +5317,10 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
}
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
if (isAggregateTypeForABI(RetTy)) {
// ELFv2 homogeneous aggregates are returned as array types.
const Type *Base = nullptr;
@@ -4965,66 +5406,19 @@ Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
TypeInfo, SlotSize, /*AllowHigher*/ true);
}
-static bool
-PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Eight8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-67 are various 8-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- AssignToArrayRange(Builder, Address, Eight8, 64, 67);
-
- // 68-76 are various 4-byte special-purpose registers:
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 68, 76);
-
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
-
- // 109: vrsave
- // 110: vscr
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- // 114: tfhar
- // 115: tfiar
- // 116: texasr
- AssignToArrayRange(Builder, Address, Eight8, 109, 116);
-
- return false;
-}
-
bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
-
- return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
}
bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
-
- return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
}
//===----------------------------------------------------------------------===//
@@ -5095,12 +5489,16 @@ private:
bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
unsigned elts) const override;
+
+ bool allowBFloatArgsAndRet() const override {
+ return getTarget().hasBFloat16Type();
+ }
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
+ : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
@@ -5118,9 +5516,11 @@ public:
if (!FD)
return;
- CodeGenOptions::SignReturnAddressScope Scope = CGM.getCodeGenOpts().getSignReturnAddress();
- CodeGenOptions::SignReturnAddressKeyValue Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
- bool BranchTargetEnforcement = CGM.getCodeGenOpts().BranchTargetEnforcement;
+ LangOptions::SignReturnAddressScopeKind Scope =
+ CGM.getLangOpts().getSignReturnAddressScope();
+ LangOptions::SignReturnAddressKeyKind Key =
+ CGM.getLangOpts().getSignReturnAddressKey();
+ bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement;
if (const auto *TA = FD->getAttr<TargetAttr>()) {
ParsedTargetAttr Attr = TA->parse();
if (!Attr.BranchProtection.empty()) {
@@ -5136,14 +5536,14 @@ public:
}
auto *Fn = cast<llvm::Function>(GV);
- if (Scope != CodeGenOptions::SignReturnAddressScope::None) {
+ if (Scope != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address",
- Scope == CodeGenOptions::SignReturnAddressScope::All
+ Scope == LangOptions::SignReturnAddressScopeKind::All
? "all"
: "non-leaf");
Fn->addFnAttr("sign-return-address-key",
- Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ Key == LangOptions::SignReturnAddressKeyKind::AKey
? "a_key"
: "b_key");
}
@@ -5197,13 +5597,13 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
- llvm::Type *ResType =
- llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
- llvm::Type *ResType =
- llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
@@ -5214,7 +5614,11 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() && isDarwinPCS()
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
}
@@ -5291,7 +5695,11 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() && isDarwinPCS()
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
}
@@ -5690,11 +6098,14 @@ public:
private:
ABIKind Kind;
+ bool IsFloatABISoftFP;
public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
: SwiftABIInfo(CGT), Kind(_Kind) {
setCCs();
+ IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
+ CGT.getCodeGenOpts().FloatABI == ""; // default
}
bool isEABI() const {
@@ -5725,6 +6136,10 @@ public:
ABIKind getABIKind() const { return Kind; }
+ bool allowBFloatArgsAndRet() const override {
+ return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
+ }
+
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
unsigned functionCallConv) const;
@@ -5765,7 +6180,7 @@ private:
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+ : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
const ARMABIInfo &getABIInfo() const {
return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -5920,7 +6335,7 @@ ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64 || Size == 128) {
- llvm::Type *ResType = llvm::VectorType::get(
+ auto *ResType = llvm::FixedVectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
return ABIArgInfo::getDirect(ResType);
}
@@ -5936,7 +6351,7 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
// FP16 vectors should be converted to integer vectors
if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
uint64_t Size = getContext().getTypeSize(VT);
- llvm::Type *NewVecTy = llvm::VectorType::get(
+ auto *NewVecTy = llvm::FixedVectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
@@ -5964,25 +6379,18 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (isIllegalVectorType(Ty))
return coerceIllegalVector(Ty);
- // _Float16 and __fp16 get passed as if it were an int or float, but with
- // the top 16 bits unspecified. This is not done for OpenCL as it handles the
- // half type natively, and does not need to interwork with AAPCS code.
- if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
- !getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsAAPCS_VFP ?
- llvm::Type::getFloatTy(getVMContext()) :
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
-
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
Ty = EnumTy->getDecl()->getIntegerType();
}
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
@@ -6164,31 +6572,27 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
// Large vector types should be returned via memory.
if (getContext().getTypeSize(RetTy) > 128)
return getNaturalAlignIndirect(RetTy);
- // FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() &&
+ // TODO: FP16/BF16 vectors should be converted to integer vectors.
+ // This check is similar to isIllegalVectorType; consider refactoring.
+ if ((!getTarget().hasLegalHalfType() &&
(VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType()))
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
return coerceIllegalVector(RetTy);
}
- // _Float16 and __fp16 get returned as if it were an int or float, but with
- // the top 16 bits unspecified. This is not done for OpenCL as it handles the
- // half type natively, and does not need to interwork with AAPCS code.
- if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
- !getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsAAPCS_VFP ?
- llvm::Type::getFloatTy(getVMContext()) :
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
-
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
}
// Are we following APCS?
@@ -6264,12 +6668,17 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType> ()) {
- // On targets that don't support FP16, FP16 is expanded into float, and we
- // don't want the ABI to depend on whether or not FP16 is supported in
- // hardware. Thus return false to coerce FP16 vectors into integer vectors.
- if (!getTarget().hasLegalHalfType() &&
+ // On targets that don't support half, fp16 or bfloat, they are expanded
+ // into float, and we don't want the ABI to depend on whether or not they
+ // are supported in hardware. Thus return true to coerce vectors of these
+ // types into integer vectors.
+ // We do not depend on hasLegalHalfType for bfloat as it is a
+ // separate IR type.
+ if ((!getTarget().hasLegalHalfType() &&
(VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType()))
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
return true;
if (isAndroid()) {
// Android shipped using Clang 3.1, which supported a slightly different
@@ -6321,6 +6730,7 @@ bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
} else {
if (const VectorType *VT = Ty->getAs<VectorType>())
return (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isBFloat16Type() ||
VT->getElementType()->isHalfType());
return false;
}
@@ -6426,9 +6836,14 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
namespace {
+class NVPTXTargetCodeGenInfo;
+
class NVPTXABIInfo : public ABIInfo {
+ NVPTXTargetCodeGenInfo &CGInfo;
+
public:
- NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
+ : ABIInfo(CGT), CGInfo(Info) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -6436,36 +6851,87 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+ bool isUnsupportedType(QualType T) const;
+ ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
bool shouldEmitStaticExternCAliases() const override;
+ llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
+ // On the device side, a surface reference is represented as an object
+ // handle in a 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
+ // On the device side, a texture reference is represented as an object
+ // handle in a 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+ bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
private:
- // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
+ // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
// resulting MDNode to the nvvm.annotations MDNode.
- static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
+ static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
+ int Operand);
+
+ static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) {
+ llvm::Value *Handle = nullptr;
+ llvm::Constant *C =
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ // Lookup `addrspacecast` through the constant pointer if any.
+ if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
+ C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
+ if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
+ // Load the handle from the specific global variable using
+ // `nvvm.texsurf.handle.internal` intrinsic.
+ Handle = CGF.EmitRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
+ {GV->getType()}),
+ {GV}, "texsurf_handle");
+ } else
+ Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
+ CGF.EmitStoreOfScalar(Handle, Dst);
+ }
};
/// Checks if the type is unsupported directly by the current target.
-static bool isUnsupportedType(ASTContext &Context, QualType T) {
+bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
+ ASTContext &Context = getContext();
if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
return true;
if (!Context.getTargetInfo().hasFloat128Type() &&
(T->isFloat128Type() ||
(T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
return true;
+ if (const auto *EIT = T->getAs<ExtIntType>())
+ return EIT->getNumBits() >
+ (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
- Context.getTypeSize(T) > 64)
+ Context.getTypeSize(T) > 64U)
return true;
if (const auto *AT = T->getAsArrayTypeUnsafe())
- return isUnsupportedType(Context, AT->getElementType());
+ return isUnsupportedType(AT->getElementType());
const auto *RT = T->getAs<RecordType>();
if (!RT)
return false;
@@ -6474,24 +6940,23 @@ static bool isUnsupportedType(ASTContext &Context, QualType T) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const CXXBaseSpecifier &I : CXXRD->bases())
- if (isUnsupportedType(Context, I.getType()))
+ if (isUnsupportedType(I.getType()))
return true;
for (const FieldDecl *I : RD->fields())
- if (isUnsupportedType(Context, I->getType()))
+ if (isUnsupportedType(I->getType()))
return true;
return false;
}
/// Coerce the given type into an array with maximum allowed size of elements.
-static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
- llvm::LLVMContext &LLVMContext,
- unsigned MaxSize) {
+ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
+ unsigned MaxSize) const {
// Alignment and Size are measured in bits.
- const uint64_t Size = Context.getTypeSize(Ty);
- const uint64_t Alignment = Context.getTypeAlign(Ty);
+ const uint64_t Size = getContext().getTypeSize(Ty);
+ const uint64_t Alignment = getContext().getTypeAlign(Ty);
const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
- llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
+ llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
const uint64_t NumElements = (Size + Div - 1) / Div;
return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
@@ -6501,9 +6966,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
if (getContext().getLangOpts().OpenMP &&
- getContext().getLangOpts().OpenMPIsDevice &&
- isUnsupportedType(getContext(), RetTy))
- return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);
+ getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
+ return coerceToIntArrayWithLimit(RetTy, 64);
// note: this is different from default ABI
if (!RetTy->isScalarType())
@@ -6513,8 +6977,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6523,11 +6987,29 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Return aggregates type as indirect by value
- if (isAggregateTypeForABI(Ty))
+ if (isAggregateTypeForABI(Ty)) {
+ // Under CUDA device compilation, tex/surf builtin types are replaced with
+ // object types and passed directly.
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
+ if (Ty->isCUDADeviceBuiltinTextureType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinTextureDeviceType());
+ }
return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if ((EIT->getNumBits() > 128) ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -6552,6 +7034,17 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
if (GV->isDeclaration())
return;
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD) {
+ if (M.getLangOpts().CUDA) {
+ if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
+ addNVVMMetadata(GV, "surface", 1);
+ else if (VD->getType()->isCUDADeviceBuiltinTextureType())
+ addNVVMMetadata(GV, "texture", 1);
+ return;
+ }
+ }
+
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
@@ -6600,16 +7093,16 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
}
}
-void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
- int Operand) {
- llvm::Module *M = F->getParent();
+void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
+ StringRef Name, int Operand) {
+ llvm::Module *M = GV->getParent();
llvm::LLVMContext &Ctx = M->getContext();
// Get "nvvm.annotations" metadata node
llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
// Append metadata to nvvm.annotations
@@ -6629,12 +7122,13 @@ namespace {
class SystemZABIInfo : public SwiftABIInfo {
bool HasVector;
+ bool IsSoftFloatABI;
public:
- SystemZABIInfo(CodeGenTypes &CGT, bool HV)
- : SwiftABIInfo(CGT), HasVector(HV) {}
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
+ : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
- bool isPromotableIntegerType(QualType Ty) const;
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
bool isCompoundType(QualType Ty) const;
bool isVectorArgumentType(QualType Ty) const;
bool isFPArgumentType(QualType Ty) const;
@@ -6664,21 +7158,26 @@ public:
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
- : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
};
}
-bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
- if (Ty->isPromotableIntegerType())
+ if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
return true;
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
// 32-bit values must also be promoted.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
@@ -6704,6 +7203,9 @@ bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
}
bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (IsSoftFloatABI)
+ return false;
+
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Float:
@@ -6717,7 +7219,9 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
- if (const RecordType *RT = Ty->getAsStructureType()) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
QualType Found;
@@ -6743,6 +7247,10 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
if (getContext().getLangOpts().CPlusPlus &&
FD->isZeroLengthBitField(getContext()))
continue;
+ // Like isSingleElementStruct(), ignore C++20 empty data members.
+ if (FD->hasAttr<NoUniqueAddressAttr>() &&
+ isEmptyRecord(getContext(), FD->getType(), true))
+ continue;
// Unlike isSingleElementStruct(), arrays do not count.
// Nested structures still do though.
@@ -6789,7 +7297,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
} else {
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
- InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
+ InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
IsVector = ArgTy->isVectorTy();
UnpaddedSize = TyInfo.first;
DirectAlign = TyInfo.second;
@@ -6922,8 +7430,8 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return getNaturalAlignIndirect(RetTy);
- return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6932,7 +7440,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
- if (isPromotableIntegerType(Ty))
+ if (isPromotableIntegerTypeForABI(Ty))
return ABIArgInfo::getExtend(Ty);
// Handle vector types and vector-like structure types. Note that
@@ -6982,10 +7490,49 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
namespace {
+class MSP430ABIInfo : public DefaultABIInfo {
+ static ABIArgInfo complexArgInfo() {
+ ABIArgInfo Info = ABIArgInfo::getDirect();
+ Info.setCanBeFlattened(false);
+ return Info;
+ }
+
+public:
+ MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ }
+
+ ABIArgInfo classifyArgumentType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyArgumentType(RetTy);
+ }
+
+ // Just copy the original implementations because
+ // DefaultABIInfo::classify{Return,Argument}Type() are not virtual.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+ }
+};
+
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
@@ -7044,8 +7591,8 @@ class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
unsigned SizeOfUnwindException;
public:
MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
- : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
- SizeOfUnwindException(IsO32 ? 24 : 32) {}
+ : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 29;
@@ -7227,6 +7774,13 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(Ty);
+
// All integral types are promoted to the GPR width.
if (Ty->isIntegralOrEnumerationType())
return extendType(Ty);
@@ -7311,7 +7865,14 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (RetTy->isPromotableIntegerType())
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(RetTy);
+
+ if (isPromotableIntegerTypeForABI(RetTy))
return ABIArgInfo::getExtend(RetTy);
if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
@@ -7430,7 +7991,7 @@ namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AVRTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -7519,50 +8080,97 @@ void TCETargetCodeGenInfo::setTargetAttributes(
namespace {
-class HexagonABIInfo : public ABIInfo {
-
-
+class HexagonABIInfo : public DefaultABIInfo {
public:
- HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
private:
-
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+ Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
- :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 29;
}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &GCM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ }
};
-}
+} // namespace
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ unsigned RegsLeft = 6;
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
+ I.info = classifyArgumentType(I.type, &RegsLeft);
+}
+
+static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
+ assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
+ " through registers");
+
+ if (*RegsLeft == 0)
+ return false;
+
+ if (Size <= 32) {
+ (*RegsLeft)--;
+ return true;
+ }
+
+ if (2 <= (*RegsLeft & (~1U))) {
+ *RegsLeft = (*RegsLeft & (~1U)) - 2;
+ return true;
+ }
+
+ // The next available register was r5, but the candidate was wider than 32
+ // bits, so it has to go on the stack. However, we still consume r5.
+ if (*RegsLeft == 1)
+ *RegsLeft = 0;
+
+ return false;
}
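HexagonAdjustRegsLeft above models the six argument registers r0-r5: a value of up to 32 bits consumes one register, a 33-64 bit value consumes an even-aligned register pair, and a stranded odd register is burned once a pair no longer fits. A standalone rework of the same bookkeeping, not part of the patch (the pointer parameter becomes a reference purely for brevity):

#include <cassert>
#include <cstdint>
#include <iostream>

static bool adjustRegsLeft(uint64_t Size, unsigned &RegsLeft) {
  assert(Size <= 64 && "values wider than 64 bits never use registers");
  if (RegsLeft == 0)
    return false;
  if (Size <= 32) {            // one 32-bit register
    --RegsLeft;
    return true;
  }
  if (2 <= (RegsLeft & ~1u)) { // an even-aligned pair is still available
    RegsLeft = (RegsLeft & ~1u) - 2;
    return true;
  }
  if (RegsLeft == 1)           // a lone r5 cannot hold a 64-bit value; it is burned
    RegsLeft = 0;
  return false;
}

int main() {
  unsigned Regs = 6;                             // r0..r5
  std::cout << adjustRegsLeft(32, Regs);         // 1: r0, five registers left
  std::cout << adjustRegsLeft(64, Regs);         // 1: pair r2:r3 (r1 is skipped), two left
  std::cout << adjustRegsLeft(64, Regs);         // 1: pair r4:r5, none left
  std::cout << adjustRegsLeft(32, Regs) << '\n'; // 0: this argument goes on the stack
}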
-ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
+ unsigned *RegsLeft) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 64)
+ HexagonAdjustRegsLeft(Size, RegsLeft);
+
+ if (Size > 64 && Ty->isExtIntType())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect();
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -7573,63 +8181,304 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIgnore();
uint64_t Size = getContext().getTypeSize(Ty);
+ unsigned Align = getContext().getTypeAlign(Ty);
+
if (Size > 64)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (HexagonAdjustRegsLeft(Size, RegsLeft))
+ Align = Size <= 32 ? 32 : 64;
+ if (Size <= Align) {
// Pass in the smallest viable integer type.
- else if (Size > 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
- else if (Size > 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- else if (Size > 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- else
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (!llvm::isPowerOf2_64(Size))
+ Size = llvm::NextPowerOf2(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
}
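Both the argument and return paths now replace the old 8/16/32/64 ladder with llvm::isPowerOf2_64/llvm::NextPowerOf2, coercing a small aggregate to the smallest integer type whose power-of-two bit width holds it. The rounding itself, restated with only the standard library (the function name is invented for illustration):

#include <cstdint>
#include <iostream>

// Round a bit-size (already known to be <= 64 here) up to a power of two,
// leaving exact powers of two untouched.
static uint64_t roundToCoercedBits(uint64_t Size) {
  if (Size == 0 || (Size & (Size - 1)) == 0)
    return Size;
  uint64_t P = 1;
  while (P < Size)
    P <<= 1;
  return P;
}

int main() {
  std::cout << roundToCoercedBits(24) << '\n'; // 32: a 3-byte struct is coerced to i32
  std::cout << roundToCoercedBits(40) << '\n'; // 64: a 5-byte packed struct becomes i64
  std::cout << roundToCoercedBits(16) << '\n'; // 16: already a power of two, stays i16
}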
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- // Large vector types should be returned via memory.
- if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
- return getNaturalAlignIndirect(RetTy);
+ const TargetInfo &T = CGT.getTarget();
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->getAs<VectorType>()) {
+ // HVX vectors are returned in vector registers or register pairs.
+ if (T.hasFeature("hvx")) {
+ assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
+ uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
+ if (Size == VecSize || Size == 2*VecSize)
+ return ABIArgInfo::getDirectInReg();
+ }
+ // Large vector types should be returned via memory.
+ if (Size > 64)
+ return getNaturalAlignIndirect(RetTy);
+ }
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (Size > 64 && RetTy->isExtIntType())
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
}
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
- // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // Aggregates <= 8 bytes are returned in registers; other aggregates
// are returned indirectly.
- uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 64) {
// Return in the smallest viable integer type.
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ if (!llvm::isPowerOf2_64(Size))
+ Size = llvm::NextPowerOf2(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
}
-
return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
+Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // Load the overflow area pointer.
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 4) {
+ // Alignment should be a power of 2.
+ assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
+
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+
+ // Add offset to the current pointer to access the argument.
+ __overflow_area_pointer =
+ CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
+ llvm::Value *AsInt =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ // Create a mask that should be "AND"ed
+ // with (overflow_arg_area + align - 1).
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
+ "__overflow_area_pointer.align");
+ }
+
+ // Get the type of the argument from memory and bitcast
+ // overflow area pointer to the argument type.
+ llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
+ Address AddrTyped = CGF.Builder.CreateBitCast(
+ Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
+ llvm::PointerType::getUnqual(PTy));
+
+ // Round up to the minimum stack alignment for varargs, which is 4 bytes.
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+
+ __overflow_area_pointer = CGF.Builder.CreateGEP(
+ __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "__overflow_area_pointer.next");
+ CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
+
+ return AddrTyped;
+}
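The over-alignment handling above is the usual `(ptr + align - 1) & -align` round-up, applied to the overflow-area pointer through ptrtoint/and/inttoptr. The same arithmetic on plain integers, as a quick standalone check (not part of the patch):

#include <cstdint>
#include <iostream>

// Align Addr up to Align, which must be a power of two; for such values
// ~(Align - 1) and -Align are the same mask.
static uint64_t alignUp(uint64_t Addr, uint64_t Align) {
  return (Addr + Align - 1) & ~(Align - 1);
}

int main() {
  std::cout << std::hex;
  std::cout << alignUp(0x1004, 8) << '\n';  // 1008
  std::cout << alignUp(0x1008, 8) << '\n';  // 1008: already aligned, unchanged
  std::cout << alignUp(0x1001, 16) << '\n'; // 1010
}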
+
+Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+ CGBuilderTy &Builder = CGF.Builder;
+ Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ Address AddrTyped = Builder.CreateBitCast(
+ Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
+
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr = Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
+
+ if (ArgSize > 8)
+ return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Here we have to check whether the argument is in the register area or
+ // in the overflow area.
+ // If the saved register area pointer + argsize rounded up to alignment >
+ // saved register area end pointer, the argument is in the overflow area.
+ unsigned RegsLeft = 6;
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ (void)classifyArgumentType(Ty, &RegsLeft);
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ // Get the rounded size of the argument. GCC does not allow varargs of
+ // size < 4 bytes; we follow the same logic here.
+ ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+ int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+
+ // Argument may be in saved register area
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Load the current saved register area pointer.
+ Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 0, "__current_saved_reg_area_pointer_p");
+ llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
+ __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
+
+ // Load the saved register area end pointer.
+ Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 1, "__saved_reg_area_end_pointer_p");
+ llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
+ __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
+
+ // If the size of the argument is > 4 bytes, check whether the stack
+ // location is aligned to 8 bytes.
+ if (ArgAlign > 4) {
+
+ llvm::Value *__current_saved_reg_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
+ CGF.Int32Ty);
+
+ __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
+ __current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer_int =
+ CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer =
+ CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
+ __current_saved_reg_area_pointer->getType(),
+ "align_current_saved_reg_area_pointer");
+ }
+
+ llvm::Value *__new_saved_reg_area_pointer =
+ CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__new_saved_reg_area_pointer");
+
+ llvm::Value *UsingStack = 0;
+ UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
+ __saved_reg_area_end_pointer);
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
+
+ // Argument in saved register area.
+ // Implement the block where the argument is in the saved register area.
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Type *PTy = CGF.ConvertType(Ty);
+ llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
+ __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
+
+ CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ CGF.EmitBranch(ContBlock);
+
+ // Argument in overflow area.
+ // Implement the block where the argument is in the overflow area.
+ CGF.EmitBlock(OnStackBlock);
+
+ // Load the overflow area pointer
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ // Align the overflow area pointer according to the alignment of the argument
+ if (ArgAlign > 4) {
+ llvm::Value *__overflow_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAdd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAnd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ __overflow_area_pointer_int, __overflow_area_pointer->getType(),
+ "align_overflow_area_pointer");
+ }
+
+ // Get the pointer for the next argument in the overflow area and store it
+ // to the overflow area pointer.
+ llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
+ __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__overflow_area_pointer.next");
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __overflow_area_pointer_p);
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ // Bitcast the overflow area pointer to the type of argument.
+ llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
+ __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
+
+ CGF.EmitBranch(ContBlock);
+
+ // Get the correct pointer from which to load the variable argument.
+ // Implement the ContBlock.
+ CGF.EmitBlock(ContBlock);
+
+ llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
+ ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
+ ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
+
+ return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
+}
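The Linux/Musl lowering above walks a three-field va_list: field 0 is the current position in the register save area, field 1 is the end of that area, and field 2 is the overflow (stack) area pointer. A rough sketch of that layout and of the in-register test the generated IR performs; the struct below is inferred from the GEP indices and value names above, not copied from any ABI header:

#include <cstddef>

// Illustrative mirror of the va_list layout implied by the struct GEPs above.
struct HexagonLinuxVaList {
  char *CurrentSavedRegAreaPointer; // field 0: next unread register slot
  char *SavedRegAreaEndPointer;     // field 1: one past the register save area
  char *OverflowAreaPointer;        // field 2: next argument in the overflow area
};

// ArgSize is already rounded up to 4 or 8 bytes, as in the code above. The IR
// branches to the on-stack path when the advanced pointer passes the end
// pointer, i.e. the argument is read from registers only while it still fits.
static bool nextArgComesFromRegisters(const HexagonLinuxVaList &AP,
                                      std::size_t ArgSize) {
  return AP.CurrentSavedRegAreaPointer + ArgSize <= AP.SavedRegAreaEndPointer;
}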
+
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- // FIXME: Someone needs to audit that this handle alignment correctly.
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4),
- /*AllowHigherAlign*/ true);
+
+ if (getTarget().getTriple().isMusl())
+ return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+
+ return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}
//===----------------------------------------------------------------------===//
@@ -7740,7 +8589,13 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
Ty = EnumTy->getDecl()->getIntegerType();
bool InReg = shouldUseInReg(Ty, State);
- if (Ty->isPromotableIntegerType()) {
+
+ // Don't pass >64 bit integers in registers.
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectResult(Ty, /*ByVal=*/true, State);
+
+ if (isPromotableIntegerTypeForABI(Ty)) {
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getExtend(Ty);
@@ -7754,7 +8609,7 @@ namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
};
}
@@ -7794,7 +8649,7 @@ private:
EltTys, (STy->getName() + ".coerce").str(), STy->isPacked());
return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
}
- // Arrary types.
+ // Array types.
if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
auto T = ATy->getElementType();
auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
@@ -8022,7 +8877,7 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
@@ -8058,23 +8913,13 @@ static bool requiresAMDGPUProtectedVisibility(const Decl *D,
(isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
(isa<VarDecl>(D) &&
(D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- D->hasAttr<HIPPinnedShadowAttr>()));
-}
-
-static bool requiresAMDGPUDefaultVisibility(const Decl *D,
- llvm::GlobalValue *GV) {
- if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
- return false;
-
- return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUDefaultVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- GV->setDSOLocal(false);
- } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ if (requiresAMDGPUProtectedVisibility(D, GV)) {
GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
GV->setDSOLocal(true);
}
@@ -8099,6 +8944,10 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
(M.getTriple().getOS() == llvm::Triple::AMDHSA))
F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
+ if (IsHIPKernel)
+ F->addFnAttr("uniform-work-group-size", "true");
+
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
unsigned Min = 0;
@@ -8123,9 +8972,13 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
assert(Max == 0 && "Max must be zero");
} else if (IsOpenCLKernel || IsHIPKernel) {
// By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value.
+ // --gpu-max-threads-per-block=n or its default value for HIP.
+ const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
+ const unsigned DefaultMaxWorkGroupSize =
+ IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
+ : M.getLangOpts().GPUMaxThreadsPerBlock;
std::string AttrVal =
- std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
+ std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
}
@@ -8287,7 +9140,7 @@ namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
};
} // end anonymous namespace
@@ -8456,6 +9309,10 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
if (Size < 64 && Ty->isIntegerType())
return ABIArgInfo::getExtend(Ty);
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return ABIArgInfo::getExtend(Ty);
+
// Other non-aggregates go in registers.
if (!isAggregateTypeForABI(Ty))
return ABIArgInfo::getDirect();
@@ -8549,7 +9406,7 @@ namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 14;
@@ -8642,7 +9499,7 @@ private:
class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARCTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
};
@@ -8705,11 +9562,15 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getDirect(Result, 0, nullptr, false);
}
- return Ty->isPromotableIntegerType() ?
- (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
- ABIArgInfo::getExtend(Ty)) :
- (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
- ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectByValue(Ty);
+
+ return isPromotableIntegerTypeForABI(Ty)
+ ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
+ : ABIArgInfo::getExtend(Ty))
+ : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
+ : ABIArgInfo::getDirect());
}
ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
@@ -8833,11 +9694,15 @@ public:
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
mutable TypeStringCache TSC;
+ void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &M) const;
+
public:
XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
- :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
- void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
+ : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
+ void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef>
+ &MangledDeclNames) const override;
};
} // End anonymous namespace.
@@ -8998,11 +9863,13 @@ StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
-void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const {
+void XCoreTargetCodeGenInfo::emitTargetMD(
+ const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &CGM) const {
SmallStringEnc Enc;
if (getTypeString(Enc, D, CGM, TSC)) {
llvm::LLVMContext &Ctx = CGM.getModule().getContext();
@@ -9014,6 +9881,21 @@ void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
}
}
+void XCoreTargetCodeGenInfo::emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
+ // Warning, new MangledDeclNames may be appended within this loop.
+ // We rely on MapVector insertions adding new elements to the end
+ // of the container.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
+ if (GV) {
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ emitTargetMD(D, GV, CGM);
+ }
+ }
+}
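The index-based loop above matters because emitting metadata for one global can append new mangled names while the loop runs, and llvm::MapVector adds new entries at the end; holding iterators or using a range-for would not be safe under that growth. The same pattern on a plain std::vector, as an illustration only:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Work = {"a", "b"};
  // Re-evaluating Work.size() and Work[I] each iteration stays correct even if
  // push_back reallocates; iterators captured before the push_back would not.
  for (std::size_t I = 0; I != Work.size(); ++I) {
    if (Work[I] == "a")
      Work.push_back("c"); // a new entry discovered while processing "a"
    std::cout << Work[I] << '\n';
  }
  // Prints: a b c
}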
//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//
@@ -9022,7 +9904,7 @@ namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
unsigned getOpenCLKernelCallingConv() const override;
};
@@ -9347,7 +10229,8 @@ static bool appendType(SmallStringEnc &Enc, QualType QType,
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
if (!D)
return false;
@@ -9741,6 +10624,15 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return extendType(Ty);
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() < XLen && !MustUseStack)
+ return extendType(Ty);
+ if (EIT->getNumBits() > 128 ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
return ABIArgInfo::getDirect();
}
@@ -9812,7 +10704,7 @@ class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
unsigned FLen)
- : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}
+ : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -9838,6 +10730,56 @@ public:
} // namespace
//===----------------------------------------------------------------------===//
+// VE ABI Implementation.
+//
+namespace {
+class VEABIInfo : public DefaultABIInfo {
+public:
+ VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ return DefaultABIInfo::classifyReturnType(Ty);
+}
+
+ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class VETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ VETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
+ // The VE ABI requires that the arguments of variadic and prototype-less
+ // functions be passed in both registers and memory.
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
@@ -9889,8 +10831,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
+ case llvm::Triple::wasm64: {
+ WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
+ if (getTarget().getABI() == "experimental-mv")
+ Kind = WebAssemblyABIInfo::ExperimentalMV;
+ return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
+ }
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -9918,6 +10864,9 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::ppc: {
+ if (Triple.isOSAIX())
+ return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
+
bool IsSoftFloat =
CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
bool RetSmallStructInRegABI =
@@ -9926,6 +10875,9 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
}
case llvm::Triple::ppc64:
+ if (Triple.isOSAIX())
+ return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
+
if (Triple.isOSBinFormatELF()) {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
if (getTarget().getABI() == "elfv2")
@@ -9935,8 +10887,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
IsSoftFloat));
- } else
- return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
+ }
+ return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
@@ -9969,8 +10921,9 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::systemz: {
- bool HasVector = getTarget().getABI() == "vector";
- return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
+ bool SoftFloat = CodeGenOpts.FloatABI == "soft";
+ bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
+ return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
}
case llvm::Triple::tce:
@@ -10028,6 +10981,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::spir:
case llvm::Triple::spir64:
return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
+ case llvm::Triple::ve:
+ return SetCGInfo(new VETargetCodeGenInfo(Types));
}
}
@@ -10111,9 +11066,9 @@ llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
Builder.SetInsertPoint(BB);
- unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
+ const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
- BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
+ BlockPtr->setAlignment(BlockAlign);
Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
llvm::SmallVector<llvm::Value *, 2> Args;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index e1e90e73cb58..1152cabce4a0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -43,11 +43,10 @@ class CGFunctionInfo;
/// codegeneration issues, like target-specific attributes, builtins and so
/// on.
class TargetCodeGenInfo {
- ABIInfo *Info;
+ std::unique_ptr<ABIInfo> Info = nullptr;
public:
- // WARNING: Acquires the ownership of ABIInfo.
- TargetCodeGenInfo(ABIInfo *info = nullptr) : Info(info) {}
+ TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info) : Info(std::move(Info)) {}
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
@@ -58,10 +57,18 @@ public:
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {}
- /// emitTargetMD - Provides a convenient hook to handle extra
- /// target-specific metadata for the given global.
- virtual void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const {}
+ /// emitTargetMetadata - Provides a convenient hook to handle extra
+ /// target-specific metadata for the given globals.
+ virtual void emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}
+
+ /// Performs any further codegen-related checks that need to be done on a
+ /// function call in a target-specific manner.
+ virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const {}
/// Determines the size of struct _Unwind_Exception on this platform,
/// in 8-bit units. The Itanium ABI defines this as:
@@ -315,6 +322,32 @@ public:
virtual bool shouldEmitStaticExternCAliases() const { return true; }
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
+
+ /// Return the device-side type for the CUDA device builtin surface type.
+ virtual llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const {
+ // By default, no change from the original one.
+ return nullptr;
+ }
+ /// Return the device-side type for the CUDA device builtin texture type.
+ virtual llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const {
+ // By default, no change from the original one.
+ return nullptr;
+ }
+
+ /// Emit the device-side copy of the builtin surface type.
+ virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
+ LValue Dst,
+ LValue Src) const {
+ // DO NOTHING by default.
+ return false;
+ }
+ /// Emit the device-side copy of the builtin texture type.
+ virtual bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF,
+ LValue Dst,
+ LValue Src) const {
+ // DO NOTHING by default.
+ return false;
+ }
};
} // namespace CodeGen
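To make the shape of the new hooks concrete, here is a hypothetical target override as it might be written alongside the other targets in TargetInfo.cpp. MyTargetCodeGenInfo and the choice of an i64 surface handle are invented for illustration; no in-tree target is being described:

namespace {
class MyTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MyTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}

  // Pretend this device represents a surface handle as a plain 64-bit integer.
  llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  // Call-site checks (for example, diagnosing ABI-incompatible caller/callee
  // feature sets) would be performed here.
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args) const override {}
};
} // namespace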
diff --git a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 7391d7132daf..80465c41d151 100644
--- a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -12,20 +12,26 @@
#include "clang/CrossTU/CrossTranslationUnit.h"
#include "clang/AST/ASTImporter.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CrossTU/CrossTUDiagnostic.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <fstream>
#include <sstream>
+#include <tuple>
namespace clang {
namespace cross_tu {
@@ -110,6 +116,17 @@ public:
return "Language dialect mismatch";
case index_error_code::load_threshold_reached:
return "Load threshold reached";
+ case index_error_code::invocation_list_ambiguous:
+ return "Invocation list file contains multiple references to the same "
+ "source file.";
+ case index_error_code::invocation_list_file_not_found:
+ return "Invocation list file is not found.";
+ case index_error_code::invocation_list_empty:
+ return "Invocation list file is empty.";
+ case index_error_code::invocation_list_wrong_format:
+ return "Invocation list file is in wrong format.";
+ case index_error_code::invocation_list_lookup_unsuccessful:
+ return "Invocation list file does not contain the requested source file.";
}
llvm_unreachable("Unrecognized index_error_code.");
}
@@ -129,8 +146,8 @@ std::error_code IndexError::convertToErrorCode() const {
}
llvm::Expected<llvm::StringMap<std::string>>
-parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir) {
- std::ifstream ExternalMapFile(IndexPath);
+parseCrossTUIndex(StringRef IndexPath) {
+ std::ifstream ExternalMapFile{std::string(IndexPath)};
if (!ExternalMapFile)
return llvm::make_error<IndexError>(index_error_code::missing_index_file,
IndexPath.str());
@@ -139,21 +156,26 @@ parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir) {
std::string Line;
unsigned LineNo = 1;
while (std::getline(ExternalMapFile, Line)) {
- const size_t Pos = Line.find(" ");
- if (Pos > 0 && Pos != std::string::npos) {
- StringRef LineRef{Line};
- StringRef LookupName = LineRef.substr(0, Pos);
- if (Result.count(LookupName))
+ StringRef LineRef{Line};
+ const size_t Delimiter = LineRef.find(" ");
+ if (Delimiter > 0 && Delimiter != std::string::npos) {
+ StringRef LookupName = LineRef.substr(0, Delimiter);
+
+ // Store paths with posix-style directory separator.
+ SmallVector<char, 32> FilePath;
+ llvm::Twine{LineRef.substr(Delimiter + 1)}.toVector(FilePath);
+ llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
+
+ bool InsertionOccured;
+ std::tie(std::ignore, InsertionOccured) =
+ Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
+ if (!InsertionOccured)
return llvm::make_error<IndexError>(
index_error_code::multiple_definitions, IndexPath.str(), LineNo);
- StringRef FileName = LineRef.substr(Pos + 1);
- SmallString<256> FilePath = CrossTUDir;
- llvm::sys::path::append(FilePath, FileName);
- Result[LookupName] = FilePath.str().str();
} else
return llvm::make_error<IndexError>(
index_error_code::invalid_index_format, IndexPath.str(), LineNo);
- LineNo++;
+ ++LineNo;
}
return Result;
}
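The on-disk index parsed above is plain text, one `<lookup-name> <file-path>` pair per line; with this change the paths are stored in posix form and are no longer re-rooted under CrossTUDir. A minimal standalone reader for the same format, for illustration only (the USR-style names in the example are invented, and the real function's error handling is not reproduced):

#include <iostream>
#include <map>
#include <sstream>
#include <stdexcept>
#include <string>

// Read "<lookup-name> <file-path>" lines, rejecting duplicates and bad lines.
static std::map<std::string, std::string> readIndex(std::istream &In) {
  std::map<std::string, std::string> Result;
  std::string Line;
  while (std::getline(In, Line)) {
    const auto Delimiter = Line.find(' ');
    if (Delimiter == 0 || Delimiter == std::string::npos)
      throw std::runtime_error("invalid index format: " + Line);
    std::string LookupName = Line.substr(0, Delimiter);
    if (!Result.emplace(LookupName, Line.substr(Delimiter + 1)).second)
      throw std::runtime_error("multiple definitions: " + LookupName);
  }
  return Result;
}

int main() {
  std::istringstream Index("c:@F@foo# /abs/path/foo.cpp\n"
                           "c:@F@bar# /abs/path/bar.ast\n");
  for (const auto &Entry : readIndex(Index))
    std::cout << Entry.first << " -> " << Entry.second << '\n';
}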
@@ -258,8 +280,8 @@ llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
// diagnostics.
++NumTripleMismatch;
return llvm::make_error<IndexError>(index_error_code::triple_mismatch,
- Unit->getMainFileName(), TripleTo.str(),
- TripleFrom.str());
+ std::string(Unit->getMainFileName()),
+ TripleTo.str(), TripleFrom.str());
}
const auto &LangTo = Context.getLangOpts();
@@ -288,7 +310,7 @@ llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
if (LangTo.CPlusPlus11 != LangFrom.CPlusPlus11 ||
LangTo.CPlusPlus14 != LangFrom.CPlusPlus14 ||
LangTo.CPlusPlus17 != LangFrom.CPlusPlus17 ||
- LangTo.CPlusPlus2a != LangFrom.CPlusPlus2a) {
+ LangTo.CPlusPlus20 != LangFrom.CPlusPlus20) {
++NumLangDialectMismatch;
return llvm::make_error<IndexError>(
index_error_code::lang_dialect_mismatch);
@@ -341,30 +363,13 @@ void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
}
}
-CrossTranslationUnitContext::ASTFileLoader::ASTFileLoader(
- const CompilerInstance &CI)
- : CI(CI) {}
-
-std::unique_ptr<ASTUnit>
-CrossTranslationUnitContext::ASTFileLoader::operator()(StringRef ASTFilePath) {
- // Load AST from ast-dump.
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
- TextDiagnosticPrinter *DiagClient =
- new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, &*DiagOpts, DiagClient));
-
- return ASTUnit::LoadFromASTFile(
- ASTFilePath, CI.getPCHContainerOperations()->getRawReader(),
- ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts());
-}
-
CrossTranslationUnitContext::ASTUnitStorage::ASTUnitStorage(
- const CompilerInstance &CI)
- : FileAccessor(CI), LoadGuard(const_cast<CompilerInstance &>(CI)
- .getAnalyzerOpts()
- ->CTUImportThreshold) {}
+ CompilerInstance &CI)
+ : Loader(CI, CI.getAnalyzerOpts()->CTUDir,
+ CI.getAnalyzerOpts()->CTUInvocationList),
+ LoadGuard(CI.getASTContext().getLangOpts().CPlusPlus
+ ? CI.getAnalyzerOpts()->CTUImportCppThreshold
+ : CI.getAnalyzerOpts()->CTUImportThreshold) {}
llvm::Expected<ASTUnit *>
CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFile(
@@ -380,8 +385,12 @@ CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFile(
index_error_code::load_threshold_reached);
}
- // Load the ASTUnit from the pre-dumped AST file specified by ASTFileName.
- std::unique_ptr<ASTUnit> LoadedUnit = FileAccessor(FileName);
+ auto LoadAttempt = Loader.load(FileName);
+
+ if (!LoadAttempt)
+ return LoadAttempt.takeError();
+
+ std::unique_ptr<ASTUnit> LoadedUnit = std::move(LoadAttempt.get());
// Need the raw pointer and the unique_ptr as well.
ASTUnit *Unit = LoadedUnit.get();
@@ -461,7 +470,7 @@ llvm::Error CrossTranslationUnitContext::ASTUnitStorage::ensureCTUIndexLoaded(
else
llvm::sys::path::append(IndexFile, IndexName);
- if (auto IndexMapping = parseCrossTUIndex(IndexFile, CrossTUDir)) {
+ if (auto IndexMapping = parseCrossTUIndex(IndexFile)) {
// Initialize member map.
NameFileMap = *IndexMapping;
return llvm::Error::success();
@@ -494,6 +503,193 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
return Unit;
}
+CrossTranslationUnitContext::ASTLoader::ASTLoader(
+ CompilerInstance &CI, StringRef CTUDir, StringRef InvocationListFilePath)
+ : CI(CI), CTUDir(CTUDir), InvocationListFilePath(InvocationListFilePath) {}
+
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::load(StringRef Identifier) {
+ llvm::SmallString<256> Path;
+ if (llvm::sys::path::is_absolute(Identifier, PathStyle)) {
+ Path = Identifier;
+ } else {
+ Path = CTUDir;
+ llvm::sys::path::append(Path, PathStyle, Identifier);
+ }
+
+ // The path is stored in the InvocationList member in posix style. To
+ // successfully lookup an entry based on filepath, it must be converted.
+ llvm::sys::path::native(Path, PathStyle);
+
+ // Normalize by removing relative path components.
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot*/ true, PathStyle);
+
+ if (Path.endswith(".ast"))
+ return loadFromDump(Path);
+ else
+ return loadFromSource(Path);
+}
+
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticPrinter *DiagClient =
+ new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &*DiagOpts, DiagClient));
+ return ASTUnit::LoadFromASTFile(
+ std::string(ASTDumpPath.str()),
+ CI.getPCHContainerOperations()->getRawReader(), ASTUnit::LoadEverything,
+ Diags, CI.getFileSystemOpts());
+}
+
+/// Load the AST from a source file, which is supposed to be located inside the
+/// YAML-formatted invocation list file under the filesystem path specified by
+/// \p InvocationList. The invocation list should contain absolute paths.
+/// \p SourceFilePath is the absolute path of the source file that contains the
+/// function definition the analysis is looking for. The index is built by the
+/// \p clang-extdef-mapping tool, which is also expected to generate
+/// absolute paths.
+///
+/// Proper diagnostic emission requires absolute paths, so even if a future
+/// change introduces the handling of relative paths, this must be taken into
+/// consideration.
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::loadFromSource(
+ StringRef SourceFilePath) {
+
+ if (llvm::Error InitError = lazyInitInvocationList())
+ return std::move(InitError);
+ assert(InvocationList);
+
+ auto Invocation = InvocationList->find(SourceFilePath);
+ if (Invocation == InvocationList->end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_lookup_unsuccessful);
+
+ const InvocationListTy::mapped_type &InvocationCommand = Invocation->second;
+
+ SmallVector<const char *, 32> CommandLineArgs(InvocationCommand.size());
+ std::transform(InvocationCommand.begin(), InvocationCommand.end(),
+ CommandLineArgs.begin(),
+ [](auto &&CmdPart) { return CmdPart.c_str(); });
+
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts{&CI.getDiagnosticOpts()};
+ auto *DiagClient = new ForwardingDiagnosticConsumer{CI.getDiagnosticClient()};
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID{
+ CI.getDiagnostics().getDiagnosticIDs()};
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine{DiagID, &*DiagOpts, DiagClient});
+
+ return std::unique_ptr<ASTUnit>(ASTUnit::LoadFromCommandLine(
+ CommandLineArgs.begin(), (CommandLineArgs.end()),
+ CI.getPCHContainerOperations(), Diags,
+ CI.getHeaderSearchOpts().ResourceDir));
+}
+
+llvm::Expected<InvocationListTy>
+parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
+ InvocationListTy InvocationList;
+
+ /// The LLVM YAML parser is used to extract information from the invocation
+ /// list file.
+ llvm::SourceMgr SM;
+ llvm::yaml::Stream InvocationFile(FileContent, SM);
+
+ /// Only the first document is processed.
+ llvm::yaml::document_iterator FirstInvocationFile = InvocationFile.begin();
+
+ /// There has to be at least one document available.
+ if (FirstInvocationFile == InvocationFile.end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_empty);
+
+ llvm::yaml::Node *DocumentRoot = FirstInvocationFile->getRoot();
+ if (!DocumentRoot)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ /// According to the specified format, the document must be a mapping, where
+ /// the keys are paths to source files and the values are sequences of
+ /// invocation parts.
+ auto *Mappings = dyn_cast<llvm::yaml::MappingNode>(DocumentRoot);
+ if (!Mappings)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ for (auto &NextMapping : *Mappings) {
+ /// The keys should be strings, each representing a source-file path.
+ auto *Key = dyn_cast<llvm::yaml::ScalarNode>(NextMapping.getKey());
+ if (!Key)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ SmallVector<char, 32> ValueStorage;
+ StringRef SourcePath = Key->getValue(ValueStorage);
+
+ // Store paths with PathStyle directory separator.
+ SmallVector<char, 32> NativeSourcePath;
+ llvm::Twine{SourcePath}.toVector(NativeSourcePath);
+ llvm::sys::path::native(NativeSourcePath, PathStyle);
+
+ StringRef InvocationKey{NativeSourcePath.begin(), NativeSourcePath.size()};
+
+ if (InvocationList.find(InvocationKey) != InvocationList.end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_ambiguous);
+
+ /// The values should be sequences of strings, each representing a part of
+ /// the invocation.
+ auto *Args = dyn_cast<llvm::yaml::SequenceNode>(NextMapping.getValue());
+ if (!Args)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ for (auto &Arg : *Args) {
+ auto *CmdString = dyn_cast<llvm::yaml::ScalarNode>(&Arg);
+ if (!CmdString)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+ /// Every conversion starts with empty working storage, as it is not
+ /// clear whether the YAML parser requires this.
+ ValueStorage.clear();
+ InvocationList[InvocationKey].emplace_back(
+ CmdString->getValue(ValueStorage));
+ }
+
+ if (InvocationList[InvocationKey].empty())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+ }
+
+ return InvocationList;
+}
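The invocation list consumed above is a YAML mapping from absolute source-file paths to the argv used to compile them. A short usage sketch, assuming it sits in this file where parseInvocationList and InvocationListTy are already visible and the needed LLVM Support headers are included; the paths and command lines are invented:

static void invocationListExample() {
  // Two entries: each key is a source path, each value the compile command.
  llvm::StringRef Content = "/tmp/main.cpp:\n"
                            "  - clang++\n"
                            "  - -c\n"
                            "  - /tmp/main.cpp\n"
                            "/tmp/lib.cpp:\n"
                            "  - clang++\n"
                            "  - -c\n"
                            "  - /tmp/lib.cpp\n";

  llvm::Expected<InvocationListTy> List =
      parseInvocationList(Content, llvm::sys::path::Style::posix);
  if (!List) {
    llvm::consumeError(List.takeError());
    return;
  }
  // (*List)["/tmp/main.cpp"] now holds {"clang++", "-c", "/tmp/main.cpp"}.
}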
+
+llvm::Error CrossTranslationUnitContext::ASTLoader::lazyInitInvocationList() {
+ /// Lazily initialize the invocation list member used for on-demand parsing.
+ if (InvocationList)
+ return llvm::Error::success();
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileContent =
+ llvm::MemoryBuffer::getFile(InvocationListFilePath);
+ if (!FileContent)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_file_not_found);
+ std::unique_ptr<llvm::MemoryBuffer> ContentBuffer = std::move(*FileContent);
+ assert(ContentBuffer && "If no error was produced after loading, the pointer "
+ "should not be nullptr.");
+
+ llvm::Expected<InvocationListTy> ExpectedInvocationList =
+ parseInvocationList(ContentBuffer->getBuffer(), PathStyle);
+
+ if (!ExpectedInvocationList)
+ return ExpectedInvocationList.takeError();
+
+ InvocationList = *ExpectedInvocationList;
+
+ return llvm::Error::success();
+}
+
template <typename T>
llvm::Expected<const T *>
CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
@@ -525,6 +721,9 @@ CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
assert(hasBodyOrInit(ToDecl) && "Imported Decl should have body or init.");
++NumGetCTUSuccess;
+ // Parent map is invalidated after changing the AST.
+ ToDecl->getASTContext().getParentMapContext().clear();
+
return ToDecl;
}
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
index ecfec52f459e..1bc286236a0e 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
@@ -51,4 +51,4 @@ getAsFileEvents(const std::vector<std::string> &Scan) {
return Events;
}
-} // namespace clang \ No newline at end of file
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
index 55731225e251..feb8b4ea861e 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
@@ -26,4 +26,4 @@ getAsFileEvents(const std::vector<std::string> &Scan);
/// \returns llvm::None if \p Path doesn't exist or can't get the status.
llvm::Optional<llvm::sys::fs::file_status> getFileStatus(llvm::StringRef Path);
-} // namespace clang \ No newline at end of file
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
index 200e540624a6..bc410822d7ae 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
@@ -18,4 +18,4 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
return llvm::make_error<llvm::StringError>(
"DirectoryWatcher is not implemented for this platform!",
llvm::inconvertibleErrorCode());
-} \ No newline at end of file
+}
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
index 7a60369a4da0..bdc389516289 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
@@ -14,10 +14,13 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/Path.h"
#include <CoreServices/CoreServices.h>
+#include <TargetConditionals.h>
using namespace llvm;
using namespace clang;
+#if TARGET_OS_OSX
+
static void stopFSEventStream(FSEventStreamRef);
namespace {
@@ -43,24 +46,32 @@ namespace {
class DirectoryWatcherMac : public clang::DirectoryWatcher {
public:
DirectoryWatcherMac(
- FSEventStreamRef EventStream,
+ dispatch_queue_t Queue, FSEventStreamRef EventStream,
std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)>
Receiver,
llvm::StringRef WatchedDirPath)
- : EventStream(EventStream), Receiver(Receiver),
+ : Queue(Queue), EventStream(EventStream), Receiver(Receiver),
WatchedDirPath(WatchedDirPath) {}
~DirectoryWatcherMac() override {
- stopFSEventStream(EventStream);
- EventStream = nullptr;
- // Now it's safe to use Receiver as the only other concurrent use would have
- // been in EventStream processing.
- Receiver(DirectoryWatcher::Event(
- DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""),
- false);
+ // FSEventStreamStop and Invalidate must be called after Start and
+ // SetDispatchQueue to follow the FSEvents API contract. The call to
+ // Receiver also goes through Queue so it cannot race with the initial scan.
+ dispatch_sync(Queue, ^{
+ stopFSEventStream(EventStream);
+ EventStream = nullptr;
+ Receiver(
+ DirectoryWatcher::Event(
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""),
+ false);
+ });
+
+ // Balance initial creation.
+ dispatch_release(Queue);
}
private:
+ dispatch_queue_t Queue;
FSEventStreamRef EventStream;
std::function<void(llvm::ArrayRef<Event>, bool)> Receiver;
const std::string WatchedDirPath;
@@ -173,7 +184,7 @@ FSEventStreamRef createFSEventStream(
if (::realpath(P.begin(), Buffer) != nullptr)
RealPath = Buffer;
else
- RealPath = Path;
+ RealPath = Path.str();
}
FSEventStreamContext Context;
@@ -217,11 +228,11 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
assert(EventStream && "EventStream expected to be non-null");
std::unique_ptr<DirectoryWatcher> Result =
- std::make_unique<DirectoryWatcherMac>(EventStream, Receiver, Path);
+ std::make_unique<DirectoryWatcherMac>(Queue, EventStream, Receiver, Path);
// We need to copy the data so the lifetime is ok after a const copy is made
// for the block.
- const std::string CopiedPath = Path;
+ const std::string CopiedPath = Path.str();
auto InitWork = ^{
// We need to start watching the directory before we start scanning in order
@@ -230,10 +241,6 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
// initial scan and handling events ONLY AFTER the scan finishes.
FSEventStreamSetDispatchQueue(EventStream, Queue);
FSEventStreamStart(EventStream);
- // We need to decrement the ref count for Queue as initialize() will return
- // and FSEvents has incremented it. Since we have to wait for FSEvents to
- // take ownership it's the easiest to do it here rather than main thread.
- dispatch_release(Queue);
Receiver(getAsFileEvents(scanDirectory(CopiedPath)), /*IsInitial=*/true);
};
@@ -245,3 +252,17 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
return Result;
}
+
+#else // TARGET_OS_OSX
+
+llvm::Expected<std::unique_ptr<DirectoryWatcher>>
+clang::DirectoryWatcher::create(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ bool WaitForInitialSync) {
+ return llvm::make_error<llvm::StringError>(
+ "DirectoryWatcher is not implemented for this platform!",
+ llvm::inconvertibleErrorCode());
+}
+
+#endif // TARGET_OS_OSX
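The reworked destructor above relies on a standard libdispatch pattern: every callback and the final invalidation run on one serial queue, so a dispatch_sync from the destructor cannot overlap with an in-flight event. A minimal sketch of that pattern, assuming macOS with libdispatch and blocks support (the Watcher type here is illustrative, not the DirectoryWatcher class):

#include <dispatch/dispatch.h>
#include <cstdio>

struct Watcher {
  dispatch_queue_t Queue =
      dispatch_queue_create("demo.serial", DISPATCH_QUEUE_SERIAL);

  void post(int N) {
    // Events are delivered asynchronously, but always in order on Queue.
    dispatch_async(Queue, ^{ std::printf("event %d\n", N); });
  }

  ~Watcher() {
    // Runs only after every previously queued block has finished, mirroring
    // the dispatch_sync teardown in DirectoryWatcherMac's destructor.
    dispatch_sync(Queue, ^{ std::printf("invalidated\n"); });
    dispatch_release(Queue); // balance dispatch_queue_create
  }
};

int main() {
  Watcher W;
  W.post(1);
  W.post(2);
} // ~Watcher drains the queue before main returns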
diff --git a/contrib/llvm-project/clang/lib/Driver/Action.cpp b/contrib/llvm-project/clang/lib/Driver/Action.cpp
index 0eb4c7257e7a..2ec063d873be 100644
--- a/contrib/llvm-project/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Action.cpp
@@ -43,6 +43,8 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-offload-unbundler";
case OffloadWrapperJobClass:
return "clang-offload-wrapper";
+ case StaticLibJobClass:
+ return "static-lib-linker";
}
llvm_unreachable("invalid class");
@@ -415,3 +417,8 @@ void OffloadWrapperJobAction::anchor() {}
OffloadWrapperJobAction::OffloadWrapperJobAction(ActionList &Inputs,
types::ID Type)
: JobAction(OffloadWrapperJobClass, Inputs, Type) {}
+
+void StaticLibJobAction::anchor() {}
+
+StaticLibJobAction::StaticLibJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(StaticLibJobClass, Inputs, Type) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
index 52477576b2eb..05ee5091396b 100644
--- a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
@@ -76,16 +76,29 @@ Compilation::getArgsForToolChain(const ToolChain *TC, StringRef BoundArch,
*TranslatedArgs, SameTripleAsHost, AllocatedArgs);
}
+ DerivedArgList *NewDAL = nullptr;
if (!OpenMPArgs) {
+ NewDAL = TC->TranslateXarchArgs(*TranslatedArgs, BoundArch,
+ DeviceOffloadKind, &AllocatedArgs);
+ } else {
+ NewDAL = TC->TranslateXarchArgs(*OpenMPArgs, BoundArch, DeviceOffloadKind,
+ &AllocatedArgs);
+ if (!NewDAL)
+ NewDAL = OpenMPArgs;
+ else
+ delete OpenMPArgs;
+ }
+
+ if (!NewDAL) {
Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch, DeviceOffloadKind);
if (!Entry)
Entry = TranslatedArgs;
} else {
- Entry = TC->TranslateArgs(*OpenMPArgs, BoundArch, DeviceOffloadKind);
+ Entry = TC->TranslateArgs(*NewDAL, BoundArch, DeviceOffloadKind);
if (!Entry)
- Entry = OpenMPArgs;
+ Entry = NewDAL;
else
- delete OpenMPArgs;
+ delete NewDAL;
}
// Add allocated arguments to the final DAL.
diff --git a/contrib/llvm-project/clang/lib/Driver/Distro.cpp b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
index 06707fefc9d0..4d58ad1ae78c 100644
--- a/contrib/llvm-project/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
@@ -11,9 +11,10 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/ADT/Triple.h"
using namespace clang::driver;
using namespace clang;
@@ -70,6 +71,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
.Case("disco", Distro::UbuntuDisco)
.Case("eoan", Distro::UbuntuEoan)
.Case("focal", Distro::UbuntuFocal)
+ .Case("groovy", Distro::UbuntuGroovy)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
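The codename lookup above is a plain llvm::StringSwitch: a first-match-wins table the driver uses in many places. A small self-contained sketch (the release numbers are just example values):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::StringRef Codename = "groovy";
  // The first .Case whose key equals the input wins; .Default covers the rest.
  llvm::StringRef Release = llvm::StringSwitch<llvm::StringRef>(Codename)
                                .Case("focal", "20.04")
                                .Case("groovy", "20.10")
                                .Default("unknown");
  llvm::outs() << Codename << " -> " << Release << "\n"; // groovy -> 20.10
}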
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index fb8335a3695d..ece8222dcf24 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -38,11 +38,12 @@
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
-#include "ToolChains/PS4CPU.h"
#include "ToolChains/PPCLinux.h"
+#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCVToolchain.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
+#include "ToolChains/VEToolchain.h"
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "clang/Basic/Version.h"
@@ -71,6 +72,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
@@ -99,7 +101,7 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
// exact same string ("a/../b/" and "b/" get different hashes, for example).
// Dir is bin/ or lib/, depending on where BinaryPath is.
- std::string Dir = llvm::sys::path::parent_path(BinaryPath);
+ std::string Dir = std::string(llvm::sys::path::parent_path(BinaryPath));
SmallString<128> P(Dir);
if (CustomResourceDir != "") {
@@ -115,7 +117,7 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
CLANG_VERSION_STRING);
}
- return P.str();
+ return std::string(P.str());
}
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
@@ -131,15 +133,21 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
TargetTriple(TargetTriple), CCCGenericGCCName(""), Saver(Alloc),
CheckInputsExist(true), GenReproducer(false),
SuppressMissingInputWarning(false) {
-
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
- Name = llvm::sys::path::filename(ClangExecutable);
- Dir = llvm::sys::path::parent_path(ClangExecutable);
+ Name = std::string(llvm::sys::path::filename(ClangExecutable));
+ Dir = std::string(llvm::sys::path::parent_path(ClangExecutable));
InstalledDir = Dir; // Provide a sensible default installed dir.
+ if ((!SysRoot.empty()) && llvm::sys::path::is_relative(SysRoot)) {
+ // Prepend InstalledDir if SysRoot is relative
+ SmallString<128> P(InstalledDir);
+ llvm::sys::path::append(P, SysRoot);
+ SysRoot = std::string(P);
+ }
+
#if defined(CLANG_CONFIG_FILE_SYSTEM_DIR)
SystemConfigDir = CLANG_CONFIG_FILE_SYSTEM_DIR;
#endif
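Many hunks in this file (and later in Multilib.cpp and SanitizerArgs.cpp) merely wrap path values in std::string(...). This tracks the upstream removal of the implicit StringRef-to-std::string conversion, so every place that stores a StringRef or SmallString into a std::string now spells the copy out. A minimal sketch of the explicit spellings, including the SmallString / path::append combination used for the relative-sysroot case above:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"
#include <string>

int main() {
  llvm::SmallString<128> P("/usr/lib/clang");
  llvm::sys::path::append(P, "11.0.0");  // in-place path join

  llvm::StringRef Ref = P.str();         // non-owning view
  std::string A = std::string(Ref);      // explicit copy (used to be implicit)
  std::string B = Ref.str();             // equivalent spelling
  std::string C = std::string(P);        // SmallString also converts explicitly
  return (A == B && B == C) ? 0 : 1;
}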
@@ -467,6 +475,26 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.getOS() == llvm::Triple::Minix)
return Target;
+ // On AIX, the env OBJECT_MODE may affect the resulting arch variant.
+ if (Target.isOSAIX()) {
+ if (Optional<std::string> ObjectModeValue =
+ llvm::sys::Process::GetEnv("OBJECT_MODE")) {
+ StringRef ObjectMode = *ObjectModeValue;
+ llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
+
+ if (ObjectMode.equals("64")) {
+ AT = Target.get64BitArchVariant().getArch();
+ } else if (ObjectMode.equals("32")) {
+ AT = Target.get32BitArchVariant().getArch();
+ } else {
+ D.Diag(diag::err_drv_invalid_object_mode) << ObjectMode;
+ }
+
+ if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
+ Target.setArch(AT);
+ }
+ }
+
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
options::OPT_m32, options::OPT_m16);
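For the AIX hunk above, the interesting pieces are llvm::sys::Process::GetEnv and the Triple arch-variant helpers. A standalone sketch of how OBJECT_MODE=64 flips a 32-bit triple (the triple string is only an example):

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

int main() {
  llvm::Triple T("powerpc-ibm-aix7.2.0.0");
  if (llvm::Optional<std::string> Mode =
          llvm::sys::Process::GetEnv("OBJECT_MODE")) {
    if (*Mode == "64")
      T.setArch(T.get64BitArchVariant().getArch()); // powerpc -> powerpc64
    else if (*Mode == "32")
      T.setArch(T.get32BitArchVariant().getArch());
    // Any other value is rejected with err_drv_invalid_object_mode above.
  }
  llvm::outs() << T.str() << "\n";
}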
@@ -769,7 +797,7 @@ bool Driver::readConfigFile(StringRef FileName) {
// Read options from config file.
llvm::SmallString<128> CfgFileName(FileName);
llvm::sys::path::native(CfgFileName);
- ConfigFile = CfgFileName.str();
+ ConfigFile = std::string(CfgFileName.str());
bool ContainErrors;
CfgOptions = std::make_unique<InputArgList>(
ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
@@ -826,8 +854,12 @@ bool Driver::loadConfigFile() {
std::vector<std::string> ConfigFiles =
CLOptions->getAllArgValues(options::OPT_config);
if (ConfigFiles.size() > 1) {
- Diag(diag::err_drv_duplicate_config);
- return true;
+ if (!std::all_of(
+ ConfigFiles.begin(), ConfigFiles.end(),
+ [ConfigFiles](std::string s) { return s == ConfigFiles[0]; })) {
+ Diag(diag::err_drv_duplicate_config);
+ return true;
+ }
}
if (!ConfigFiles.empty()) {
@@ -952,7 +984,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
while (!CompilerPath.empty()) {
std::pair<StringRef, StringRef> Split =
CompilerPath.split(llvm::sys::EnvPathSeparator);
- PrefixDirs.push_back(Split.first);
+ PrefixDirs.push_back(std::string(Split.first));
CompilerPath = Split.second;
}
}
@@ -1156,7 +1188,7 @@ static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
if (I != ASL.begin())
OS << ' ';
- Command::printArg(OS, *I, true);
+ llvm::sys::printArg(OS, *I, true);
}
OS << '\n';
}
@@ -1267,10 +1299,6 @@ void Driver::generateCompilationDiagnostics(
// Print the version of the compiler.
PrintVersion(C, llvm::errs());
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
- "crash backtrace, preprocessed source, and associated run script.";
-
// Suppress driver output and emit preprocessor output to temp file.
Mode = CPPMode;
CCGenDiagnostics = true;
@@ -1413,7 +1441,7 @@ void Driver::generateCompilationDiagnostics(
ScriptOS << "\n# Additional information: " << AdditionalInformation
<< "\n";
if (Report)
- Report->TemporaryFiles.push_back(Script.str());
+ Report->TemporaryFiles.push_back(std::string(Script.str()));
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
@@ -1448,7 +1476,8 @@ void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
// capacity if the tool does not support response files, there is a chance
// that things will just work without a response file, so we silently just
// skip it.
- if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
+ if (Cmd.getResponseFileSupport().ResponseKind ==
+ ResponseFileSupport::RF_None ||
llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(),
Cmd.getArguments()))
return;
@@ -1642,7 +1671,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// this code.
for (StringRef S : DiagnosticIDs::getDiagnosticFlags())
if (S.startswith(Cur))
- SuggestedCompletions.push_back(S);
+ SuggestedCompletions.push_back(std::string(S));
}
// Sort the autocomplete candidates so that shells print them out in a
@@ -1812,6 +1841,11 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_targets)) {
+ llvm::TargetRegistry::printRegisteredTargetsForVersion(llvm::outs());
+ return false;
+ }
+
return true;
}
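The new -print-targets handling just forwards to the TargetRegistry. The same listing can be produced directly, assuming the backends have been registered and that TargetRegistry.h still lives under llvm/Support at this LLVM version:

#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::InitializeAllTargetInfos(); // register whatever backends were built
  // Same call the driver makes for `clang -print-targets`.
  llvm::TargetRegistry::printRegisteredTargetsForVersion(llvm::outs());
}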
@@ -1849,6 +1883,7 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
bool IsFirst = true;
OA->doOnEachDependence(
[&](Action *A, const ToolChain *TC, const char *BoundArch) {
+ assert(TC && "Unknown host toolchain");
// E.g. for two CUDA device dependences whose bound arch is sm_20 and
// sm_35 this will generate:
// "cuda-device" (nvptx64-nvidia-cuda:sm_20) {#ID}, "cuda-device"
@@ -1856,13 +1891,9 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
if (!IsFirst)
os << ", ";
os << '"';
- if (TC)
- os << A->getOffloadingKindPrefix();
- else
- os << "host";
+ os << A->getOffloadingKindPrefix();
os << " (";
os << TC->getTriple().normalize();
-
if (BoundArch)
os << ":" << BoundArch;
os << ")";
@@ -2316,8 +2347,11 @@ class OffloadingActionBuilder final {
/// Append top level actions generated by the builder.
virtual void appendTopLevelActions(ActionList &AL) {}
- /// Append linker actions generated by the builder.
- virtual void appendLinkActions(ActionList &AL) {}
+ /// Append linker device actions generated by the builder.
+ virtual void appendLinkDeviceActions(ActionList &AL) {}
+
+ /// Append linker host action generated by the builder.
+ virtual Action* appendLinkHostActions(ActionList &AL) { return nullptr; }
/// Append linker actions generated by the builder.
virtual void appendLinkDependences(OffloadAction::DeviceDependences &DA) {}
@@ -2526,13 +2560,13 @@ class OffloadingActionBuilder final {
std::set<CudaArch> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
- if (!(A->getOption().matches(options::OPT_cuda_gpu_arch_EQ) ||
- A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ)))
+ if (!(A->getOption().matches(options::OPT_offload_arch_EQ) ||
+ A->getOption().matches(options::OPT_no_offload_arch_EQ)))
continue;
A->claim();
const StringRef ArchStr = A->getValue();
- if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ) &&
+ if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
ArchStr == "all") {
GpuArchs.clear();
continue;
@@ -2541,9 +2575,9 @@ class OffloadingActionBuilder final {
if (Arch == CudaArch::UNKNOWN) {
C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
Error = true;
- } else if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ))
+ } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
GpuArchs.insert(Arch);
- else if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ))
+ else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
GpuArchs.erase(Arch);
else
llvm_unreachable("Unexpected option.");
@@ -2700,9 +2734,7 @@ class OffloadingActionBuilder final {
// backend and assemble phases to output LLVM IR. Except for generating
// non-relocatable device coee, where we generate fat binary for device
// code and pass to host in Backend phase.
- if (CudaDeviceActions.empty() ||
- (CurPhase == phases::Backend && Relocatable) ||
- CurPhase == phases::Assemble)
+ if (CudaDeviceActions.empty())
return ABRT_Success;
assert(((CurPhase == phases::Link && Relocatable) ||
@@ -2719,10 +2751,15 @@ class OffloadingActionBuilder final {
// a fat binary containing all the code objects for different GPU's.
// The fat binary is then an input to the host action.
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ auto BackendAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Backend, CudaDeviceActions[I],
+ AssociatedOffloadKind);
+ auto AssembleAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Assemble, BackendAction, AssociatedOffloadKind);
// Create a link action to link device IR with device library
// and generate ISA.
ActionList AL;
- AL.push_back(CudaDeviceActions[I]);
+ AL.push_back(AssembleAction);
CudaDeviceActions[I] =
C.MakeAction<LinkJobAction>(AL, types::TY_Image);
@@ -2784,17 +2821,45 @@ class OffloadingActionBuilder final {
: ABRT_Success;
}
- void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
+ void appendLinkDeviceActions(ActionList &AL) override {
+ if (DeviceLinkerInputs.size() == 0)
+ return;
+
+ assert(DeviceLinkerInputs.size() == GpuArchList.size() &&
+ "Linker inputs and GPU arch list sizes do not match.");
+
// Append a new link action for each device.
unsigned I = 0;
for (auto &LI : DeviceLinkerInputs) {
+ // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
auto *DeviceLinkAction =
C.MakeAction<LinkJobAction>(LI, types::TY_Image);
- DA.add(*DeviceLinkAction, *ToolChains[0],
- CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ // Linking all inputs for the current GPU arch.
+ // LI contains all the inputs for the linker.
+ OffloadAction::DeviceDependences DeviceLinkDeps;
+ DeviceLinkDeps.add(*DeviceLinkAction, *ToolChains[0],
+ CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
+ DeviceLinkAction->getType()));
++I;
}
+ DeviceLinkerInputs.clear();
+
+ // Create a host object from all the device images by embedding them
+ // in a fat binary.
+ OffloadAction::DeviceDependences DDeps;
+ auto *TopDeviceLinkAction =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Object);
+ DDeps.add(*TopDeviceLinkAction, *ToolChains[0],
+ nullptr, AssociatedOffloadKind);
+
+ // Offload the host object to the host linker.
+ AL.push_back(C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
}
+
+ Action* appendLinkHostActions(ActionList &AL) override { return AL.back(); }
+
+ void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
};
/// OpenMP action builder. The host bitcode is passed to the device frontend
@@ -2922,7 +2987,7 @@ class OffloadingActionBuilder final {
OpenMPDeviceActions.clear();
}
- void appendLinkActions(ActionList &AL) override {
+ void appendLinkDeviceActions(ActionList &AL) override {
assert(ToolChains.size() == DeviceLinkerInputs.size() &&
"Toolchains and linker inputs sizes do not match.");
@@ -2941,6 +3006,14 @@ class OffloadingActionBuilder final {
DeviceLinkerInputs.clear();
}
+ Action* appendLinkHostActions(ActionList &AL) override {
+ // Create wrapper bitcode from the result of device link actions and compile
+ // it to an object which will be added to the host link command.
+ auto *BC = C.MakeAction<OffloadWrapperJobAction>(AL, types::TY_LLVM_BC);
+ auto *ASM = C.MakeAction<BackendJobAction>(BC, types::TY_PP_Asm);
+ return C.MakeAction<AssembleJobAction>(ASM, types::TY_Object);
+ }
+
void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
bool initialize() override {
@@ -3173,17 +3246,20 @@ public:
for (DeviceActionBuilder *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
- SB->appendLinkActions(DeviceAL);
+ SB->appendLinkDeviceActions(DeviceAL);
}
if (DeviceAL.empty())
return nullptr;
- // Create wrapper bitcode from the result of device link actions and compile
- // it to an object which will be added to the host link command.
- auto *BC = C.MakeAction<OffloadWrapperJobAction>(DeviceAL, types::TY_LLVM_BC);
- auto *ASM = C.MakeAction<BackendJobAction>(BC, types::TY_PP_Asm);
- return C.MakeAction<AssembleJobAction>(ASM, types::TY_Object);
+ // Let builders add host linking actions.
+ Action* HA = nullptr;
+ for (DeviceActionBuilder *SB : SpecializedBuilders) {
+ if (!SB->isValid())
+ continue;
+ HA = SB->appendLinkHostActions(DeviceAL);
+ }
+ return HA;
}
/// Processes the host linker action. This currently consists of replacing it
@@ -3271,8 +3347,7 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
types::ID InputType = I.first;
const Arg *InputArg = I.second;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
- types::getCompilationPhases(InputType, PL);
+ auto PL = types::getCompilationPhases(InputType);
LastPLSize = PL.size();
// If the first step comes after the final phase we are doing as part of
@@ -3317,11 +3392,9 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
// Add a separate precompile phase for the compile phase.
if (FinalPhase >= phases::Compile) {
const types::ID HeaderType = lookupHeaderTypeForSourceType(InputType);
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PCHPL;
- types::getCompilationPhases(HeaderType, PCHPL);
// Build the pipeline for the pch file.
Action *ClangClPch = C.MakeAction<InputAction>(*InputArg, HeaderType);
- for (phases::ID Phase : PCHPL)
+ for (phases::ID Phase : types::getCompilationPhases(HeaderType))
ClangClPch = ConstructPhaseAction(C, Args, Phase, ClangClPch);
assert(ClangClPch);
Actions.push_back(ClangClPch);
@@ -3404,13 +3477,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
types::ID InputType = I.first;
const Arg *InputArg = I.second;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
- types::getCompilationPhases(*this, Args, InputType, PL);
+ auto PL = types::getCompilationPhases(*this, Args, InputType);
if (PL.empty())
continue;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> FullPL;
- types::getCompilationPhases(InputType, FullPL);
+ auto FullPL = types::getCompilationPhases(InputType);
// Build the pipeline for this file.
Action *Current = C.MakeAction<InputAction>(*InputArg, InputType);
@@ -3493,7 +3564,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (!LinkerInputs.empty()) {
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
- Action *LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
+ Action *LA;
+ // Check if this Linker Job should emit a static library.
+ if (ShouldEmitStaticLibrary(Args)) {
+ LA = C.MakeAction<StaticLibJobAction>(LinkerInputs, types::TY_Image);
+ } else {
+ LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
+ }
LA = OffloadBuilder.processHostLinkAction(LA);
Actions.push_back(LA);
}
@@ -3504,15 +3581,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
C.MakeAction<IfsMergeJobAction>(MergerInputs, types::TY_Image));
if (Args.hasArg(options::OPT_emit_interface_stubs)) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhaseList;
- if (Args.hasArg(options::OPT_c)) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> CompilePhaseList;
- types::getCompilationPhases(types::TY_IFS_CPP, CompilePhaseList);
- llvm::copy_if(CompilePhaseList, std::back_inserter(PhaseList),
- [&](phases::ID Phase) { return Phase <= phases::Compile; });
- } else {
- types::getCompilationPhases(types::TY_IFS_CPP, PhaseList);
- }
+ auto PhaseList = types::getCompilationPhases(
+ types::TY_IFS_CPP,
+ Args.hasArg(options::OPT_c) ? phases::Compile : phases::LastPhase);
ActionList MergerInputs;
@@ -3674,7 +3745,10 @@ Action *Driver::ConstructPhaseAction(
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
- if (Args.hasArg(options::OPT_emit_llvm)) {
+ if (Args.hasArg(options::OPT_emit_llvm) ||
+ (TargetDeviceOffloadKind == Action::OFK_HIP &&
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false))) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
@@ -4594,8 +4668,19 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
// the unoptimized bitcode so that it does not get overwritten by the ".bc"
// optimized bitcode output.
- if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
- JA.getType() == types::TY_LLVM_BC)
+ auto IsHIPRDCInCompilePhase = [](const JobAction &JA,
+ const llvm::opt::DerivedArgList &Args) {
+ // The relocatable compilation in HIP implies -emit-llvm. Similarly, use a
+ // ".tmp.bc" suffix for the unoptimized bitcode (generated in the compile
+ // phase).
+ return isa<CompileJobAction>(JA) &&
+ JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false);
+ };
+ if (!AtTopLevel && JA.getType() == types::TY_LLVM_BC &&
+ (C.getArgs().hasArg(options::OPT_emit_llvm) ||
+ IsHIPRDCInCompilePhase(JA, C.getArgs())))
Suffixed += ".tmp";
Suffixed += '.';
Suffixed += Suffix;
@@ -4656,7 +4741,7 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
- return P.str().str();
+ return std::string(P);
}
return None;
};
@@ -4667,17 +4752,17 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
SmallString<128> R(ResourceDir);
llvm::sys::path::append(R, Name);
if (llvm::sys::fs::exists(Twine(R)))
- return R.str();
+ return std::string(R.str());
SmallString<128> P(TC.getCompilerRTPath());
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
- return P.str();
+ return std::string(P.str());
SmallString<128> D(Dir);
llvm::sys::path::append(D, "..", Name);
if (llvm::sys::fs::exists(Twine(D)))
- return D.str();
+ return std::string(D.str());
if (auto P = SearchPaths(TC.getLibraryPaths()))
return *P;
@@ -4685,7 +4770,7 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (auto P = SearchPaths(TC.getFilePaths()))
return *P;
- return Name;
+ return std::string(Name);
}
void Driver::generatePrefixedToolNames(
@@ -4702,13 +4787,11 @@ void Driver::generatePrefixedToolNames(
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
- ArrayRef<std::string> Names) {
- for (const auto &Name : Names) {
- llvm::sys::path::append(Dir, Name);
- if (llvm::sys::fs::can_execute(Twine(Dir)))
- return true;
- llvm::sys::path::remove_filename(Dir);
- }
+ const std::string &Name) {
+ llvm::sys::path::append(Dir, Name);
+ if (llvm::sys::fs::can_execute(Twine(Dir)))
+ return true;
+ llvm::sys::path::remove_filename(Dir);
return false;
}
@@ -4721,29 +4804,38 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
for (const auto &PrefixDir : PrefixDirs) {
if (llvm::sys::fs::is_directory(PrefixDir)) {
SmallString<128> P(PrefixDir);
- if (ScanDirForExecutable(P, TargetSpecificExecutables))
- return P.str();
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ if (ScanDirForExecutable(P, TargetSpecificExecutable))
+ return std::string(P.str());
} else {
SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
- return P.str();
+ return std::string(P.str());
}
}
const ToolChain::path_list &List = TC.getProgramPaths();
- for (const auto &Path : List) {
- SmallString<128> P(Path);
- if (ScanDirForExecutable(P, TargetSpecificExecutables))
- return P.str();
- }
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables) {
+ // For each candidate name of the tool, look in the program paths first,
+ // then fall back to the PATH. Names are tried in priority order, so a
+ // higher-priority name found on the PATH wins over a lower-priority name
+ // in the program paths: e.g. <triple>-gcc on the PATH is preferred to
+ // plain gcc in the program paths.
+ for (const auto &Path : List) {
+ SmallString<128> P(Path);
+ if (ScanDirForExecutable(P, TargetSpecificExecutable))
+ return std::string(P.str());
+ }
- // If all else failed, search the path.
- for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ // Fall back to the path
if (llvm::ErrorOr<std::string> P =
llvm::sys::findProgramByName(TargetSpecificExecutable))
return *P;
+ }
- return Name;
+ return std::string(Name);
}
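The reordered loops above change the lookup from "all names in the program paths, then all names on PATH" to "for each name, program paths then PATH", so a target-prefixed tool found anywhere beats an unprefixed one. A standalone sketch of that ordering (directory and tool names are invented):

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <vector>

static std::string findTool(const std::vector<std::string> &Names,
                            const std::vector<std::string> &ProgramDirs) {
  for (const std::string &Name : Names) {        // highest-priority name first
    for (const std::string &Dir : ProgramDirs) {
      llvm::SmallString<128> P(Dir);
      llvm::sys::path::append(P, Name);
      if (llvm::sys::fs::can_execute(llvm::Twine(P)))
        return std::string(P);
    }
    // Only fall back to $PATH for this name before trying the next one.
    if (llvm::ErrorOr<std::string> Hit = llvm::sys::findProgramByName(Name))
      return *Hit;
  }
  return Names.front(); // mirrors the driver's "return the bare name" fallback
}

int main() {
  llvm::outs() << findTool({"x86_64-linux-gnu-gcc", "gcc"}, {"/opt/cross/bin"})
               << "\n";
}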
std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
@@ -4754,7 +4846,7 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
return "";
}
- return Path.str();
+ return std::string(Path.str());
}
std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
@@ -4765,7 +4857,7 @@ std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
return "";
}
- return Path.str();
+ return std::string(Path.str());
}
std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
@@ -4787,7 +4879,7 @@ std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
}
- return Output.str();
+ return std::string(Output.str());
}
const ToolChain &Driver::getToolChain(const ArgList &Args,
@@ -4844,6 +4936,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
Target.getArch() == llvm::Triple::ppc64le)
TC = std::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
Args);
+ else if (Target.getArch() == llvm::Triple::ve)
+ TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
+
else
TC = std::make_unique<toolchains::Linux>(*this, Target, Args);
break;
@@ -4857,6 +4952,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = std::make_unique<toolchains::Solaris>(*this, Target, Args);
break;
case llvm::Triple::AMDHSA:
+ TC = std::make_unique<toolchains::ROCMToolChain>(*this, Target, Args);
+ break;
case llvm::Triple::AMDPAL:
case llvm::Triple::Mesa3D:
TC = std::make_unique<toolchains::AMDGPUToolChain>(*this, Target, Args);
@@ -4934,6 +5031,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::riscv64:
TC = std::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::ve:
+ TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
+ break;
default:
if (Target.getVendor() == llvm::Triple::Myriad)
TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
@@ -4985,6 +5085,13 @@ bool Driver::ShouldUseFlangCompiler(const JobAction &JA) const {
return true;
}
+bool Driver::ShouldEmitStaticLibrary(const ArgList &Args) const {
+ // Only emit static library if the flag is set explicitly.
+ if (Args.hasArg(options::OPT_emit_static_lib))
+ return true;
+ return false;
+}
+
/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and return the
/// grouped values as integers. Numbers which are not provided are set to 0.
///
diff --git a/contrib/llvm-project/clang/lib/Driver/Job.cpp b/contrib/llvm-project/clang/lib/Driver/Job.cpp
index 6d1e7e61ba1d..4808a9f4628d 100644
--- a/contrib/llvm-project/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Job.cpp
@@ -36,11 +36,11 @@ using namespace clang;
using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
- const char *Executable,
+ ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
- : Source(Source), Creator(Creator), Executable(Executable),
- Arguments(Arguments) {
+ : Source(Source), Creator(Creator), ResponseSupport(ResponseSupport),
+ Executable(Executable), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputFilenames.push_back(II.getFilename());
@@ -100,27 +100,9 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
return false;
}
-void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
- const bool Escape = Arg.find_first_of(" \"\\$") != StringRef::npos;
-
- if (!Quote && !Escape) {
- OS << Arg;
- return;
- }
-
- // Quote and escape. This isn't really complete, but good enough.
- OS << '"';
- for (const auto c : Arg) {
- if (c == '"' || c == '\\' || c == '$')
- OS << '\\';
- OS << c;
- }
- OS << '"';
-}
-
void Command::writeResponseFile(raw_ostream &OS) const {
// In a file list, we only write the set of inputs to the response file
- if (Creator.getResponseFilesSupport() == Tool::RF_FileList) {
+ if (ResponseSupport.ResponseKind == ResponseFileSupport::RF_FileList) {
for (const auto *Arg : InputFileList) {
OS << Arg << '\n';
}
@@ -149,7 +131,7 @@ void Command::buildArgvForResponseFile(
// When not a file list, all arguments are sent to the response file.
// This leaves us to set the argv to a single parameter, requesting the tool
// to read the response file.
- if (Creator.getResponseFilesSupport() != Tool::RF_FileList) {
+ if (ResponseSupport.ResponseKind != ResponseFileSupport::RF_FileList) {
Out.push_back(Executable);
Out.push_back(ResponseFileFlag.c_str());
return;
@@ -167,7 +149,7 @@ void Command::buildArgvForResponseFile(
Out.push_back(Arg);
} else if (FirstInput) {
FirstInput = false;
- Out.push_back(Creator.getResponseFileFlag());
+ Out.push_back(ResponseSupport.ResponseFlag);
Out.push_back(ResponseFile);
}
}
@@ -217,7 +199,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
// Always quote the exe.
OS << ' ';
- printArg(OS, Executable, /*Quote=*/true);
+ llvm::sys::printArg(OS, Executable, /*Quote=*/true);
ArrayRef<const char *> Args = Arguments;
SmallVector<const char *, 128> ArgsRespFile;
@@ -245,7 +227,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
if (!NewIncFlags.empty()) {
for (auto &F : NewIncFlags) {
OS << ' ';
- printArg(OS, F.c_str(), Quote);
+ llvm::sys::printArg(OS, F.c_str(), Quote);
}
i += NumArgs - 1;
continue;
@@ -259,20 +241,20 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
// Replace the input file name with the crashinfo's file name.
OS << ' ';
StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
- printArg(OS, ShortName.str(), Quote);
+ llvm::sys::printArg(OS, ShortName.str(), Quote);
continue;
}
}
OS << ' ';
- printArg(OS, Arg, Quote);
+ llvm::sys::printArg(OS, Arg, Quote);
}
if (CrashInfo && HaveCrashVFS) {
OS << ' ';
- printArg(OS, "-ivfsoverlay", Quote);
+ llvm::sys::printArg(OS, "-ivfsoverlay", Quote);
OS << ' ';
- printArg(OS, CrashInfo->VFSPath.str(), Quote);
+ llvm::sys::printArg(OS, CrashInfo->VFSPath.str(), Quote);
// The leftover modules from the crash are stored in
// <name>.cache/vfs/modules
@@ -287,7 +269,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
ModCachePath.append(RelModCacheDir.c_str());
OS << ' ';
- printArg(OS, ModCachePath, Quote);
+ llvm::sys::printArg(OS, ModCachePath, Quote);
}
if (ResponseFile != nullptr) {
@@ -295,7 +277,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
writeResponseFile(OS);
// Avoiding duplicated newline terminator, since FileLists are
// newline-separated.
- if (Creator.getResponseFilesSupport() != Tool::RF_FileList)
+ if (ResponseSupport.ResponseKind != ResponseFileSupport::RF_FileList)
OS << "\n";
OS << " (end of response file)";
}
@@ -305,7 +287,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
void Command::setResponseFile(const char *FileName) {
ResponseFile = FileName;
- ResponseFileFlag = Creator.getResponseFileFlag();
+ ResponseFileFlag = ResponseSupport.ResponseFlag;
ResponseFileFlag += FileName;
}
@@ -345,7 +327,7 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
// Save the response file in the appropriate encoding
if (std::error_code EC = writeFileWithEncoding(
- ResponseFile, RespContents, Creator.getResponseFileEncoding())) {
+ ResponseFile, RespContents, ResponseSupport.ResponseEncoding)) {
if (ErrMsg)
*ErrMsg = EC.message();
if (ExecutionFailed)
@@ -372,10 +354,11 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
}
CC1Command::CC1Command(const Action &Source, const Tool &Creator,
+ ResponseFileSupport ResponseSupport,
const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
- : Command(Source, Creator, Executable, Arguments, Inputs) {
+ : Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs) {
InProcess = true;
}
@@ -428,11 +411,13 @@ void CC1Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
}
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
+ ResponseFileSupport ResponseSupport,
const char *Executable_,
const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_)
- : Command(Source_, Creator_, Executable_, Arguments_, Inputs),
+ : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
+ Inputs),
Fallback(std::move(Fallback_)) {}
void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
@@ -469,9 +454,11 @@ int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
}
ForceSuccessCommand::ForceSuccessCommand(
- const Action &Source_, const Tool &Creator_, const char *Executable_,
+ const Action &Source_, const Tool &Creator_,
+ ResponseFileSupport ResponseSupport, const char *Executable_,
const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs)
- : Command(Source_, Creator_, Executable_, Arguments_, Inputs) {}
+ : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
+ Inputs) {}
void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
bool Quote, CrashReportInfo *CrashInfo) const {
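Command::printArg is removed because the same helper now lives in llvm::sys, shared with LLVM's own tools. A short sketch of the replacement call (the header is assumed to be llvm/Support/Program.h, where the upstream declaration resides):

#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  const char *Args[] = {"clang", "-DMSG=\"hello world\"", "a b.c"};
  for (const char *A : Args) {
    llvm::outs() << ' ';
    // Quotes and escapes the argument the same way the old Command::printArg did.
    llvm::sys::printArg(llvm::outs(), A, /*Quote=*/true);
  }
  llvm::outs() << '\n';
}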
diff --git a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
index 303047e05f78..5dd55553bcb5 100644
--- a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
@@ -46,7 +46,7 @@ static void normalizePathSegment(std::string &Segment) {
if (seg.front() != '/') {
Segment = "/" + seg.str();
} else {
- Segment = seg;
+ Segment = std::string(seg);
}
}
@@ -60,19 +60,19 @@ Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
}
Multilib &Multilib::gccSuffix(StringRef S) {
- GCCSuffix = S;
+ GCCSuffix = std::string(S);
normalizePathSegment(GCCSuffix);
return *this;
}
Multilib &Multilib::osSuffix(StringRef S) {
- OSSuffix = S;
+ OSSuffix = std::string(S);
normalizePathSegment(OSSuffix);
return *this;
}
Multilib &Multilib::includeSuffix(StringRef S) {
- IncludeSuffix = S;
+ IncludeSuffix = std::string(S);
normalizePathSegment(IncludeSuffix);
return *this;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 60fd932fbe6f..bcc9ffc7ff8f 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
using namespace clang;
@@ -27,7 +27,8 @@ using namespace llvm::opt;
static const SanitizerMask NeedsUbsanRt =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
- SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero |
+ SanitizerKind::ObjCCast;
static const SanitizerMask NeedsUbsanCxxRt =
SanitizerKind::Vptr | SanitizerKind::CFI;
static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
@@ -43,51 +44,53 @@ static const SanitizerMask SupportsCoverage =
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
SanitizerKind::MemTag | SanitizerKind::Memory |
SanitizerKind::KernelMemory | SanitizerKind::Leak |
- SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::Undefined | SanitizerKind::Integer | SanitizerKind::Bounds |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
SanitizerKind::DataFlow | SanitizerKind::Fuzzer |
SanitizerKind::FuzzerNoLink | SanitizerKind::FloatDivideByZero |
- SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack;
+ SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack |
+ SanitizerKind::Thread | SanitizerKind::ObjCCast;
static const SanitizerMask RecoverableByDefault =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
- SanitizerKind::FloatDivideByZero;
+ SanitizerKind::FloatDivideByZero | SanitizerKind::ObjCCast;
static const SanitizerMask Unrecoverable =
SanitizerKind::Unreachable | SanitizerKind::Return;
static const SanitizerMask AlwaysRecoverable =
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress;
-static const SanitizerMask LegacyFsanitizeRecoverMask =
- SanitizerKind::Undefined | SanitizerKind::Integer;
static const SanitizerMask NeedsLTO = SanitizerKind::CFI;
static const SanitizerMask TrappingSupported =
(SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
SanitizerKind::UnsignedIntegerOverflow | SanitizerKind::ImplicitConversion |
SanitizerKind::Nullability | SanitizerKind::LocalBounds |
- SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero |
+ SanitizerKind::ObjCCast;
static const SanitizerMask TrappingDefault = SanitizerKind::CFI;
static const SanitizerMask CFIClasses =
SanitizerKind::CFIVCall | SanitizerKind::CFINVCall |
SanitizerKind::CFIMFCall | SanitizerKind::CFIDerivedCast |
SanitizerKind::CFIUnrelatedCast;
static const SanitizerMask CompatibleWithMinimalRuntime =
- TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack;
+ TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack |
+ SanitizerKind::MemTag;
enum CoverageFeature {
CoverageFunc = 1 << 0,
CoverageBB = 1 << 1,
CoverageEdge = 1 << 2,
CoverageIndirCall = 1 << 3,
- CoverageTraceBB = 1 << 4, // Deprecated.
+ CoverageTraceBB = 1 << 4, // Deprecated.
CoverageTraceCmp = 1 << 5,
CoverageTraceDiv = 1 << 6,
CoverageTraceGep = 1 << 7,
- Coverage8bitCounters = 1 << 8, // Deprecated.
+ Coverage8bitCounters = 1 << 8, // Deprecated.
CoverageTracePC = 1 << 9,
CoverageTracePCGuard = 1 << 10,
CoverageNoPrune = 1 << 11,
CoverageInline8bitCounters = 1 << 12,
CoveragePCTable = 1 << 13,
CoverageStackDepth = 1 << 14,
+ CoverageInlineBoolFlag = 1 << 15,
};
/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
@@ -118,6 +121,19 @@ static std::string describeSanitizeArg(const llvm::opt::Arg *A,
/// Sanitizers set.
static std::string toString(const clang::SanitizerSet &Sanitizers);
+static void validateSpecialCaseListFormat(const Driver &D,
+ std::vector<std::string> &SCLFiles,
+ unsigned MalformedSCLErrorDiagID) {
+ if (SCLFiles.empty())
+ return;
+
+ std::string BLError;
+ std::unique_ptr<llvm::SpecialCaseList> SCL(
+ llvm::SpecialCaseList::create(SCLFiles, D.getVFS(), BLError));
+ if (!SCL.get())
+ D.Diag(MalformedSCLErrorDiagID) << BLError;
+}
+
static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
std::vector<std::string> &BlacklistFiles) {
struct Blacklist {
@@ -142,12 +158,41 @@ static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
clang::SmallString<64> Path(D.ResourceDir);
llvm::sys::path::append(Path, "share", BL.File);
if (D.getVFS().exists(Path))
- BlacklistFiles.push_back(Path.str());
+ BlacklistFiles.push_back(std::string(Path.str()));
else if (BL.Mask == SanitizerKind::CFI)
// If cfi_blacklist.txt cannot be found in the resource dir, driver
// should fail.
D.Diag(clang::diag::err_drv_no_such_file) << Path;
}
+ validateSpecialCaseListFormat(
+ D, BlacklistFiles, clang::diag::err_drv_malformed_sanitizer_blacklist);
+}
+
+/// Parse -f(no-)?sanitize-(coverage-)?(white|black)list argument's values,
+/// diagnosing any invalid file paths and validating special case list format.
+static void parseSpecialCaseListArg(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ std::vector<std::string> &SCLFiles,
+ llvm::opt::OptSpecifier SCLOptionID,
+ llvm::opt::OptSpecifier NoSCLOptionID,
+ unsigned MalformedSCLErrorDiagID) {
+ for (const auto *Arg : Args) {
+ // Match -fsanitize-(coverage-)?(white|black)list.
+ if (Arg->getOption().matches(SCLOptionID)) {
+ Arg->claim();
+ std::string SCLPath = Arg->getValue();
+ if (D.getVFS().exists(SCLPath)) {
+ SCLFiles.push_back(SCLPath);
+ } else {
+ D.Diag(clang::diag::err_drv_no_such_file) << SCLPath;
+ }
+ // Match -fno-sanitize-blacklist.
+ } else if (Arg->getOption().matches(NoSCLOptionID)) {
+ Arg->claim();
+ SCLFiles.clear();
+ }
+ }
+ validateSpecialCaseListFormat(D, SCLFiles, MalformedSCLErrorDiagID);
}
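Both helpers above lean on llvm::SpecialCaseList, which is also what consumes these files at compile time. A small sketch of the file format and the create/inSection API, using an in-memory buffer instead of a real path:

#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <string>

int main() {
  // Same "prefix:pattern" format accepted by -fsanitize-blacklist= and the
  // new -fsanitize-coverage-allowlist= / -blocklist= options.
  std::unique_ptr<llvm::MemoryBuffer> MB = llvm::MemoryBuffer::getMemBuffer(
      "src:*/third_party/*\n"
      "fun:*no_sanitize*\n");

  std::string Error;
  std::unique_ptr<llvm::SpecialCaseList> SCL =
      llvm::SpecialCaseList::create(MB.get(), Error);
  if (!SCL) {
    // This is the condition the malformed-list diagnostics above report.
    llvm::errs() << Error << "\n";
    return 1;
  }
  llvm::outs() << (SCL->inSection("", "src", "/x/third_party/y.c") ? "match"
                                                                   : "no match")
               << "\n"; // match
  llvm::outs() << (SCL->inSection("", "fun", "main") ? "match" : "no match")
               << "\n"; // no match
}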
/// Sets group bits for every group that has at least one representative already
@@ -186,16 +231,6 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) {
Arg->claim();
TrapRemove |= expandSanitizerGroups(parseArgValues(D, Arg, true));
- } else if (Arg->getOption().matches(
- options::OPT_fsanitize_undefined_trap_on_error)) {
- Arg->claim();
- TrappingKinds |=
- expandSanitizerGroups(SanitizerKind::UndefinedGroup & ~TrapRemove) &
- ~TrapRemove;
- } else if (Arg->getOption().matches(
- options::OPT_fno_sanitize_undefined_trap_on_error)) {
- Arg->claim();
- TrapRemove |= expandSanitizerGroups(SanitizerKind::UndefinedGroup);
}
}
@@ -412,9 +447,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerKind::Leak | SanitizerKind::Thread |
SanitizerKind::Memory | SanitizerKind::KernelAddress),
std::make_pair(SanitizerKind::SafeStack,
- SanitizerKind::Address | SanitizerKind::HWAddress |
- SanitizerKind::Leak | SanitizerKind::Thread |
- SanitizerKind::Memory | SanitizerKind::KernelAddress),
+ (TC.getTriple().isOSFuchsia() ? SanitizerMask()
+ : SanitizerKind::Leak) |
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Thread | SanitizerKind::Memory |
+ SanitizerKind::KernelAddress),
std::make_pair(SanitizerKind::KernelHWAddress,
SanitizerKind::Address | SanitizerKind::HWAddress |
SanitizerKind::Leak | SanitizerKind::Thread |
@@ -503,18 +540,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerMask DiagnosedUnrecoverableKinds;
SanitizerMask DiagnosedAlwaysRecoverableKinds;
for (const auto *Arg : Args) {
- const char *DeprecatedReplacement = nullptr;
- if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
- DeprecatedReplacement =
- "-fsanitize-recover=undefined,integer' or '-fsanitize-recover=all";
- RecoverableKinds |= expandSanitizerGroups(LegacyFsanitizeRecoverMask);
- Arg->claim();
- } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover)) {
- DeprecatedReplacement = "-fno-sanitize-recover=undefined,integer' or "
- "'-fno-sanitize-recover=all";
- RecoverableKinds &= ~expandSanitizerGroups(LegacyFsanitizeRecoverMask);
- Arg->claim();
- } else if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
+ if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
SanitizerMask Add = parseArgValues(D, Arg, true);
// Report error if user explicitly tries to recover from unrecoverable
// sanitizer.
@@ -543,10 +569,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds &= ~expandSanitizerGroups(Remove);
Arg->claim();
}
- if (DeprecatedReplacement) {
- D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
- << DeprecatedReplacement;
- }
}
RecoverableKinds &= Kinds;
RecoverableKinds &= ~Unrecoverable;
@@ -555,39 +577,17 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds &= ~TrappingKinds;
// Setup blacklist files.
- // Add default blacklist from resource directory.
- addDefaultBlacklists(D, Kinds, SystemBlacklistFiles);
- // Parse -f(no-)sanitize-blacklist options.
- for (const auto *Arg : Args) {
- if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) {
- Arg->claim();
- std::string BLPath = Arg->getValue();
- if (D.getVFS().exists(BLPath)) {
- UserBlacklistFiles.push_back(BLPath);
- } else {
- D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
- }
- } else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) {
- Arg->claim();
- UserBlacklistFiles.clear();
- SystemBlacklistFiles.clear();
- }
- }
- // Validate blacklists format.
- {
- std::string BLError;
- std::unique_ptr<llvm::SpecialCaseList> SCL(
- llvm::SpecialCaseList::create(UserBlacklistFiles, D.getVFS(), BLError));
- if (!SCL.get())
- D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
- }
- {
- std::string BLError;
- std::unique_ptr<llvm::SpecialCaseList> SCL(llvm::SpecialCaseList::create(
- SystemBlacklistFiles, D.getVFS(), BLError));
- if (!SCL.get())
- D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
- }
+ // Add default blacklist from resource directory for activated sanitizers, and
+ // validate special case lists format.
+ if (!Args.hasArgNoClaim(options::OPT_fno_sanitize_blacklist))
+ addDefaultBlacklists(D, Kinds, SystemBlacklistFiles);
+
+ // Parse -f(no-)?sanitize-blacklist options.
+ // This also validates special case lists format.
+ parseSpecialCaseListArg(D, Args, UserBlacklistFiles,
+ options::OPT_fsanitize_blacklist,
+ options::OPT_fno_sanitize_blacklist,
+ clang::diag::err_drv_malformed_sanitizer_blacklist);
// Parse -f[no-]sanitize-memory-track-origins[=level] options.
if (AllAddedKinds & SanitizerKind::Memory) {
@@ -720,8 +720,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< "-fsanitize-coverage=trace-pc-guard";
int InsertionPointTypes = CoverageFunc | CoverageBB | CoverageEdge;
- int InstrumentationTypes =
- CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters;
+ int InstrumentationTypes = CoverageTracePC | CoverageTracePCGuard |
+ CoverageInline8bitCounters |
+ CoverageInlineBoolFlag;
if ((CoverageFeatures & InsertionPointTypes) &&
!(CoverageFeatures & InstrumentationTypes)) {
D.Diag(clang::diag::warn_drv_deprecated_arg)
@@ -732,13 +733,29 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// trace-pc w/o func/bb/edge implies edge.
if (!(CoverageFeatures & InsertionPointTypes)) {
if (CoverageFeatures &
- (CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters))
+ (CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters |
+ CoverageInlineBoolFlag))
CoverageFeatures |= CoverageEdge;
if (CoverageFeatures & CoverageStackDepth)
CoverageFeatures |= CoverageFunc;
}
+ // Parse -fsanitize-coverage-(black|white)list options if coverage enabled.
+ // This also validates special case lists format.
+ // Here, OptSpecifier() acts as a never-matching command-line argument.
+ // So, there is no way to clear coverage lists but you can append to them.
+ if (CoverageFeatures) {
+ parseSpecialCaseListArg(
+ D, Args, CoverageAllowlistFiles,
+ options::OPT_fsanitize_coverage_allowlist, OptSpecifier(),
+ clang::diag::err_drv_malformed_sanitizer_coverage_whitelist);
+ parseSpecialCaseListArg(
+ D, Args, CoverageBlocklistFiles,
+ options::OPT_fsanitize_coverage_blocklist, OptSpecifier(),
+ clang::diag::err_drv_malformed_sanitizer_coverage_blacklist);
+ }
+
SharedRuntime =
Args.hasFlag(options::OPT_shared_libsan, options::OPT_static_libsan,
TC.getTriple().isAndroid() || TC.getTriple().isOSFuchsia() ||
@@ -830,8 +847,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
if (AllAddedKinds & SanitizerKind::SafeStack) {
- // SafeStack runtime is built into the system on Fuchsia.
- SafeStackRuntime = !TC.getTriple().isOSFuchsia();
+ // SafeStack runtime is built into the system on Android and Fuchsia.
+ SafeStackRuntime =
+ !TC.getTriple().isAndroid() && !TC.getTriple().isOSFuchsia();
}
LinkRuntimes =
@@ -864,6 +882,17 @@ static std::string toString(const clang::SanitizerSet &Sanitizers) {
return Res;
}
+static void addSpecialCaseListOpt(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const char *SCLOptFlag,
+ const std::vector<std::string> &SCLFiles) {
+ for (const auto &SCLPath : SCLFiles) {
+ SmallString<64> SCLOpt(SCLOptFlag);
+ SCLOpt += SCLPath;
+ CmdArgs.push_back(Args.MakeArgString(SCLOpt));
+ }
+}
+
static void addIncludeLinkerOption(const ToolChain &TC,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
@@ -903,45 +932,55 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// Do it even if Sanitizers.empty() since some forms of coverage don't require
// sanitizers.
std::pair<int, const char *> CoverageFlags[] = {
- std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"),
- std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"),
- std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"),
- std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
- std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
- std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
- std::make_pair(CoverageTraceDiv, "-fsanitize-coverage-trace-div"),
- std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
- std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
- std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
- std::make_pair(CoverageTracePCGuard, "-fsanitize-coverage-trace-pc-guard"),
- std::make_pair(CoverageInline8bitCounters, "-fsanitize-coverage-inline-8bit-counters"),
- std::make_pair(CoveragePCTable, "-fsanitize-coverage-pc-table"),
- std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune"),
- std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth")};
+ std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"),
+ std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"),
+ std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"),
+ std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
+ std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
+ std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
+ std::make_pair(CoverageTraceDiv, "-fsanitize-coverage-trace-div"),
+ std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
+ std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
+ std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
+ std::make_pair(CoverageTracePCGuard,
+ "-fsanitize-coverage-trace-pc-guard"),
+ std::make_pair(CoverageInline8bitCounters,
+ "-fsanitize-coverage-inline-8bit-counters"),
+ std::make_pair(CoverageInlineBoolFlag,
+ "-fsanitize-coverage-inline-bool-flag"),
+ std::make_pair(CoveragePCTable, "-fsanitize-coverage-pc-table"),
+ std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune"),
+ std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth")};
for (auto F : CoverageFlags) {
if (CoverageFeatures & F.first)
CmdArgs.push_back(F.second);
}
+ addSpecialCaseListOpt(
+ Args, CmdArgs, "-fsanitize-coverage-allowlist=", CoverageAllowlistFiles);
+ addSpecialCaseListOpt(
+ Args, CmdArgs, "-fsanitize-coverage-blocklist=", CoverageBlocklistFiles);
if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
// Instruct the code generator to embed linker directives in the object file
// that cause the required runtime libraries to be linked.
- CmdArgs.push_back(Args.MakeArgString(
- "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone")));
+ CmdArgs.push_back(
+ Args.MakeArgString("--dependent-lib=" +
+ TC.getCompilerRTBasename(Args, "ubsan_standalone")));
if (types::isCXX(InputType))
CmdArgs.push_back(Args.MakeArgString(
- "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone_cxx")));
+ "--dependent-lib=" +
+ TC.getCompilerRTBasename(Args, "ubsan_standalone_cxx")));
}
if (TC.getTriple().isOSWindows() && needsStatsRt()) {
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "stats_client")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "stats_client")));
// The main executable must export the stats runtime.
// FIXME: Only exporting from the main executable (e.g. based on whether the
// translation unit defines main()) would save a little space, but having
// multiple copies of the runtime shouldn't hurt.
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "stats")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "stats")));
addIncludeLinkerOption(TC, Args, CmdArgs, "__sanitizer_stats_register");
}
@@ -957,16 +996,10 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString("-fsanitize-trap=" + toString(TrapSanitizers)));
- for (const auto &BLPath : UserBlacklistFiles) {
- SmallString<64> BlacklistOpt("-fsanitize-blacklist=");
- BlacklistOpt += BLPath;
- CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
- }
- for (const auto &BLPath : SystemBlacklistFiles) {
- SmallString<64> BlacklistOpt("-fsanitize-system-blacklist=");
- BlacklistOpt += BLPath;
- CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
- }
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fsanitize-blacklist=", UserBlacklistFiles);
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fsanitize-system-blacklist=", SystemBlacklistFiles);
if (MsanTrackOrigins)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
@@ -1037,7 +1070,7 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
}
- if (Sanitizers.has(SanitizerKind::HWAddress)) {
+ if (Sanitizers.has(SanitizerKind::HWAddress) && TC.getTriple().isAArch64()) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+tagged-globals");
}
@@ -1101,22 +1134,23 @@ int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
for (int i = 0, n = A->getNumValues(); i != n; ++i) {
const char *Value = A->getValue(i);
int F = llvm::StringSwitch<int>(Value)
- .Case("func", CoverageFunc)
- .Case("bb", CoverageBB)
- .Case("edge", CoverageEdge)
- .Case("indirect-calls", CoverageIndirCall)
- .Case("trace-bb", CoverageTraceBB)
- .Case("trace-cmp", CoverageTraceCmp)
- .Case("trace-div", CoverageTraceDiv)
- .Case("trace-gep", CoverageTraceGep)
- .Case("8bit-counters", Coverage8bitCounters)
- .Case("trace-pc", CoverageTracePC)
- .Case("trace-pc-guard", CoverageTracePCGuard)
- .Case("no-prune", CoverageNoPrune)
- .Case("inline-8bit-counters", CoverageInline8bitCounters)
- .Case("pc-table", CoveragePCTable)
- .Case("stack-depth", CoverageStackDepth)
- .Default(0);
+ .Case("func", CoverageFunc)
+ .Case("bb", CoverageBB)
+ .Case("edge", CoverageEdge)
+ .Case("indirect-calls", CoverageIndirCall)
+ .Case("trace-bb", CoverageTraceBB)
+ .Case("trace-cmp", CoverageTraceCmp)
+ .Case("trace-div", CoverageTraceDiv)
+ .Case("trace-gep", CoverageTraceGep)
+ .Case("8bit-counters", Coverage8bitCounters)
+ .Case("trace-pc", CoverageTracePC)
+ .Case("trace-pc-guard", CoverageTracePCGuard)
+ .Case("no-prune", CoverageNoPrune)
+ .Case("inline-8bit-counters", CoverageInline8bitCounters)
+ .Case("inline-bool-flag", CoverageInlineBoolFlag)
+ .Case("pc-table", CoveragePCTable)
+ .Case("stack-depth", CoverageStackDepth)
+ .Default(0);
if (F == 0)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
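The CoverageFlags table earlier in this file and parseCoverageFeatures above are two halves of one mapping: the parser folds the comma-separated values of -fsanitize-coverage= into a bitmask, and SanitizerArgs::addArgs expands that mask back into the individual -fsanitize-coverage-* cc1 flags. A minimal standalone sketch of that round trip, using hypothetical bit values rather than clang's real constants:

#include <cstdio>
#include <map>
#include <sstream>
#include <string>

int main() {
  // Hypothetical feature bits; clang keeps these in an internal enum.
  const std::map<std::string, int> Features = {
      {"trace-pc-guard", 1 << 0}, {"pc-table", 1 << 1}, {"trace-cmp", 1 << 2}};
  const std::map<int, std::string> CC1Flags = {
      {1 << 0, "-fsanitize-coverage-trace-pc-guard"},
      {1 << 1, "-fsanitize-coverage-pc-table"},
      {1 << 2, "-fsanitize-coverage-trace-cmp"}};

  int Mask = 0;
  std::istringstream Values("trace-pc-guard,pc-table");
  for (std::string V; std::getline(Values, V, ',');) {
    auto It = Features.find(V);
    if (It == Features.end())
      std::fprintf(stderr, "unsupported argument '%s'\n", V.c_str()); // diag
    else
      Mask |= It->second;
  }
  for (const auto &F : CC1Flags)
    if (Mask & F.first)
      std::puts(F.second.c_str()); // prints the two selected cc1 flags
  return 0;
}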
diff --git a/contrib/llvm-project/clang/lib/Driver/Tool.cpp b/contrib/llvm-project/clang/lib/Driver/Tool.cpp
index 9ff6e863a124..449f69cfcb35 100644
--- a/contrib/llvm-project/clang/lib/Driver/Tool.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Tool.cpp
@@ -11,13 +11,8 @@
using namespace clang::driver;
-Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC,
- ResponseFileSupport _ResponseSupport,
- llvm::sys::WindowsEncodingMethod _ResponseEncoding,
- const char *_ResponseFlag)
- : Name(_Name), ShortName(_ShortName), TheToolChain(TC),
- ResponseSupport(_ResponseSupport), ResponseEncoding(_ResponseEncoding),
- ResponseFlag(_ResponseFlag) {}
+Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC)
+ : Name(_Name), ShortName(_ShortName), TheToolChain(TC) {}
Tool::~Tool() {
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index 18400d9def54..b8c12fc9241a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -168,7 +168,7 @@ static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
/// Normalize the program name from argv[0] by stripping the file extension if
/// present and lower-casing the string on Windows.
static std::string normalizeProgramName(llvm::StringRef Argv0) {
- std::string ProgName = llvm::sys::path::stem(Argv0);
+ std::string ProgName = std::string(llvm::sys::path::stem(Argv0));
#ifdef _WIN32
// Transform to lowercase for case insensitive file systems.
std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
@@ -221,16 +221,21 @@ ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
StringRef Prefix(ProgName);
Prefix = Prefix.slice(0, LastComponent);
std::string IgnoredError;
- bool IsRegistered = llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError);
- return ParsedClangName{Prefix, ModeSuffix, DS->ModeFlag, IsRegistered};
+ bool IsRegistered =
+ llvm::TargetRegistry::lookupTarget(std::string(Prefix), IgnoredError);
+ return ParsedClangName{std::string(Prefix), ModeSuffix, DS->ModeFlag,
+ IsRegistered};
}
StringRef ToolChain::getDefaultUniversalArchName() const {
// In universal driver terms, the arch name accepted by -arch isn't exactly
// the same as the ones that appear in the triple. Roughly speaking, this is
- // an inverse of the darwin::getArchTypeForDarwinArchName() function, but the
- // only interesting special case is powerpc.
+ // an inverse of the darwin::getArchTypeForDarwinArchName() function.
switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ return "arm64";
+ case llvm::Triple::aarch64_32:
+ return "arm64_32";
case llvm::Triple::ppc:
return "ppc";
case llvm::Triple::ppc64:
@@ -270,6 +275,10 @@ Tool *ToolChain::buildLinker() const {
llvm_unreachable("Linking is not supported by this toolchain");
}
+Tool *ToolChain::buildStaticLibTool() const {
+ llvm_unreachable("Creating static lib is not supported by this toolchain");
+}
+
Tool *ToolChain::getAssemble() const {
if (!Assemble)
Assemble.reset(buildAssembler());
@@ -288,6 +297,12 @@ Tool *ToolChain::getLink() const {
return Link.get();
}
+Tool *ToolChain::getStaticLibTool() const {
+ if (!StaticLibTool)
+ StaticLibTool.reset(buildStaticLibTool());
+ return StaticLibTool.get();
+}
+
Tool *ToolChain::getIfsMerge() const {
if (!IfsMerge)
IfsMerge.reset(new tools::ifstool::Merger(*this));
@@ -317,6 +332,9 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::LinkJobClass:
return getLink();
+ case Action::StaticLibJobClass:
+ return getStaticLibTool();
+
case Action::InputClass:
case Action::BindArchClass:
case Action::OffloadClass:
@@ -385,11 +403,12 @@ std::string ToolChain::getCompilerRTPath() const {
} else {
llvm::sys::path::append(Path, "lib", getOSLibName());
}
- return Path.str();
+ return std::string(Path.str());
}
-std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
- FileType Type) const {
+std::string ToolChain::getCompilerRTBasename(const ArgList &Args,
+ StringRef Component, FileType Type,
+ bool AddArch) const {
const llvm::Triple &TT = getTriple();
bool IsITANMSVCWindows =
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
@@ -411,19 +430,33 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
break;
}
+ std::string ArchAndEnv;
+ if (AddArch) {
+ StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
+ const char *Env = TT.isAndroid() ? "-android" : "";
+ ArchAndEnv = ("-" + Arch + Env).str();
+ }
+ return (Prefix + Twine("clang_rt.") + Component + ArchAndEnv + Suffix).str();
+}
+
+std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
+ FileType Type) const {
+ // Check for runtime files in the new layout without the architecture first.
+ std::string CRTBasename =
+ getCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
- llvm::sys::path::append(P, Prefix + Twine("clang_rt.") + Component + Suffix);
+ llvm::sys::path::append(P, CRTBasename);
if (getVFS().exists(P))
- return P.str();
+ return std::string(P.str());
}
- StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
- const char *Env = TT.isAndroid() ? "-android" : "";
+ // Fall back to the old expected compiler-rt name if the new one does not
+ // exist.
+ CRTBasename = getCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
SmallString<128> Path(getCompilerRTPath());
- llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
- Arch + Env + Suffix);
- return Path.str();
+ llvm::sys::path::append(Path, CRTBasename);
+ return std::string(Path.str());
}
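For illustration, on an assumed x86_64 Linux host the two probes above for the ubsan_standalone runtime would resolve to paths along these lines (directories are hypothetical):

// New per-target layout, basename built with AddArch=false:
//   <resource-dir>/lib/x86_64-unknown-linux-gnu/libclang_rt.ubsan_standalone.a
// Old layout fallback, basename built with AddArch=true:
//   <resource-dir>/lib/linux/libclang_rt.ubsan_standalone-x86_64.a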
const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
@@ -440,13 +473,13 @@ Optional<std::string> ToolChain::getRuntimePath() const {
P.assign(D.ResourceDir);
llvm::sys::path::append(P, "lib", D.getTargetTriple());
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
// Second try the normalized triple.
P.assign(D.ResourceDir);
llvm::sys::path::append(P, "lib", Triple.str());
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
return None;
}
@@ -458,13 +491,13 @@ Optional<std::string> ToolChain::getCXXStdlibPath() const {
P.assign(D.Dir);
llvm::sys::path::append(P, "..", "lib", D.getTargetTriple(), "c++");
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
// Second try the normalized triple.
P.assign(D.Dir);
llvm::sys::path::append(P, "..", "lib", Triple.str(), "c++");
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
return None;
}
@@ -473,31 +506,27 @@ std::string ToolChain::getArchSpecificLibPath() const {
SmallString<128> Path(getDriver().ResourceDir);
llvm::sys::path::append(Path, "lib", getOSLibName(),
llvm::Triple::getArchTypeName(getArch()));
- return Path.str();
+ return std::string(Path.str());
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
if (Args.hasArg(options::OPT_noprofilelib))
return false;
- if (needsGCovInstrumentation(Args) ||
- Args.hasArg(options::OPT_fprofile_generate) ||
- Args.hasArg(options::OPT_fprofile_generate_EQ) ||
- Args.hasArg(options::OPT_fcs_profile_generate) ||
- Args.hasArg(options::OPT_fcs_profile_generate_EQ) ||
- Args.hasArg(options::OPT_fprofile_instr_generate) ||
- Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_forder_file_instrumentation))
- return true;
-
- return false;
+ return Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fprofile_generate_EQ) ||
+ Args.hasArg(options::OPT_fcs_profile_generate) ||
+ Args.hasArg(options::OPT_fcs_profile_generate_EQ) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_forder_file_instrumentation);
}
bool ToolChain::needsGCovInstrumentation(const llvm::opt::ArgList &Args) {
- return Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
- Args.hasArg(options::OPT_coverage);
+ return Args.hasArg(options::OPT_coverage) ||
+ Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false);
}
Tool *ToolChain::SelectTool(const JobAction &JA) const {
@@ -525,7 +554,7 @@ std::string ToolChain::GetLinkerPath() const {
// If we're passed what looks like an absolute path, don't attempt to
// second-guess that.
if (llvm::sys::fs::can_execute(UseLinker))
- return UseLinker;
+ return std::string(UseLinker);
} else if (UseLinker.empty() || UseLinker == "ld") {
// If we're passed -fuse-ld= with no argument, or with the argument ld,
// then use whatever the default system linker is.
@@ -549,6 +578,11 @@ std::string ToolChain::GetLinkerPath() const {
return GetProgramPath(getDefaultLinker());
}
+std::string ToolChain::GetStaticLibToolPath() const {
+ // TODO: Add support for static lib archiving on Windows
+ return GetProgramPath("llvm-ar");
+}
+
types::ID ToolChain::LookupTypeForExtension(StringRef Ext) const {
types::ID id = types::lookupTypeForExtension(Ext);
@@ -734,6 +768,10 @@ std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
return ComputeLLVMTriple(Args, InputType);
}
+std::string ToolChain::computeSysRoot() const {
+ return D.SysRoot;
+}
+
void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Each toolchain should provide the appropriate include flags.
@@ -747,7 +785,8 @@ void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
+ if (!needsProfileRT(Args) && !needsGCovInstrumentation(Args))
+ return;
CmdArgs.push_back(getCompilerRTArgString(Args, "profile"));
}
@@ -915,28 +954,35 @@ void ToolChain::AddCCKextLibArgs(const ArgList &Args,
CmdArgs.push_back("-lcc_kext");
}
-bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+bool ToolChain::isFastMathRuntimeAvailable(const ArgList &Args,
+ std::string &Path) const {
// Do not check for -fno-fast-math or -fno-unsafe-math when -Ofast passed
// (to keep the linker options consistent with gcc and clang itself).
if (!isOptimizationLevelFast(Args)) {
// Check if -ffast-math or -funsafe-math.
Arg *A =
- Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
- options::OPT_funsafe_math_optimizations,
- options::OPT_fno_unsafe_math_optimizations);
+ Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations);
if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
return false;
}
// If crtfastmath.o exists add it to the arguments.
- std::string Path = GetFilePath("crtfastmath.o");
- if (Path == "crtfastmath.o") // Not found.
- return false;
+ Path = GetFilePath("crtfastmath.o");
+ return (Path != "crtfastmath.o"); // Not found.
+}
- CmdArgs.push_back(Args.MakeArgString(Path));
- return true;
+bool ToolChain::addFastMathRuntimeIfAvailable(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ std::string Path;
+ if (isFastMathRuntimeAvailable(Args, Path)) {
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ return true;
+ }
+
+ return false;
}
SanitizerMask ToolChain::getSupportedSanitizers() const {
@@ -967,6 +1013,9 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
+void ToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {}
+
void ToolChain::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
@@ -1090,3 +1139,86 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
delete DAL;
return nullptr;
}
+
+// TODO: Argument values separated by a space, e.g. -Xclang -mframe-pointer=no,
+// currently cannot be passed through -Xarch_. This should be fixed.

+void ToolChain::TranslateXarchArgs(
+ const llvm::opt::DerivedArgList &Args, llvm::opt::Arg *&A,
+ llvm::opt::DerivedArgList *DAL,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs) const {
+ const OptTable &Opts = getDriver().getOpts();
+ unsigned ValuePos = 1;
+ if (A->getOption().matches(options::OPT_Xarch_device) ||
+ A->getOption().matches(options::OPT_Xarch_host))
+ ValuePos = 0;
+
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(ValuePos));
+ unsigned Prev = Index;
+ std::unique_ptr<llvm::opt::Arg> XarchArg(Opts.ParseOneArg(Args, Index));
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ return;
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ return;
+ }
+ XarchArg->setBaseArg(A);
+ A = XarchArg.release();
+ if (!AllocatedArgs)
+ DAL->AddSynthesizedArg(A);
+ else
+ AllocatedArgs->push_back(A);
+}
+
+llvm::opt::DerivedArgList *ToolChain::TranslateXarchArgs(
+ const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind OFK,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ bool Modified = false;
+
+ bool IsGPU = OFK == Action::OFK_Cuda || OFK == Action::OFK_HIP;
+ for (Arg *A : Args) {
+ bool NeedTrans = false;
+ bool Skip = false;
+ if (A->getOption().matches(options::OPT_Xarch_device)) {
+ NeedTrans = IsGPU;
+ Skip = !IsGPU;
+ } else if (A->getOption().matches(options::OPT_Xarch_host)) {
+ NeedTrans = !IsGPU;
+ Skip = IsGPU;
+ } else if (A->getOption().matches(options::OPT_Xarch__) && IsGPU) {
+      // Do not translate -Xarch_ options for non-CUDA/HIP toolchains here,
+      // since they may need special translation elsewhere.
+      // Skip this argument unless the architecture matches BoundArch.
+ if (BoundArch.empty() || A->getValue(0) != BoundArch)
+ Skip = true;
+ else
+ NeedTrans = true;
+ }
+ if (NeedTrans || Skip)
+ Modified = true;
+ if (NeedTrans)
+ TranslateXarchArgs(Args, A, DAL, AllocatedArgs);
+ if (!Skip)
+ DAL->append(A);
+ }
+
+ if (Modified)
+ return DAL;
+
+ delete DAL;
+ return nullptr;
+}
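A minimal standalone sketch (not the driver's API) of the filtering rule TranslateXarchArgs implements above; here IsGPU stands for "the offload kind is CUDA or HIP", BoundArch for the GPU arch currently being compiled for (empty on the host side):

#include <string>

struct XarchDecision {
  bool Translate; // unwrap the wrapped option and pass it to this compilation
  bool Skip;      // drop the wrapped option for this compilation
};

static XarchDecision classifyXarch(const std::string &Spelling, bool IsGPU,
                                   const std::string &BoundArch) {
  if (Spelling == "-Xarch_device")
    return {IsGPU, !IsGPU};
  if (Spelling == "-Xarch_host")
    return {!IsGPU, IsGPU};
  if (Spelling.rfind("-Xarch_", 0) == 0 && IsGPU) { // plain -Xarch_<arch>
    const std::string XarchArch = Spelling.substr(std::string("-Xarch_").size());
    bool Matches = !BoundArch.empty() && XarchArch == BoundArch;
    return {Matches, !Matches};
  }
  return {false, false}; // every other argument is appended unchanged
}

Under this rule, -Xarch_device -g reaches only the CUDA/HIP device-side compilations, -Xarch_host options reach only the host side, and -Xarch_gfx906 (for example) applies only to the device compilation whose bound arch is gfx906.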
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
index 6fbff61f7656..ac5544eedb00 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
@@ -13,12 +13,15 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
using AIX = clang::driver::toolchains::AIX;
using namespace clang::driver;
using namespace clang::driver::tools;
+using namespace clang::driver::toolchains;
using namespace llvm::opt;
+using namespace llvm::sys;
void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -73,7 +76,8 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -81,6 +85,7 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const ArgList &Args,
const char *LinkingOutput) const {
const AIX &ToolChain = static_cast<const AIX &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
const bool IsArch32Bit = ToolChain.getTriple().isArch32Bit();
@@ -129,6 +134,12 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(ToolChain.GetFilePath(getCrt0Basename())));
}
+ // Collect all static constructor and destructor functions in CXX mode. This
+ // has to come before AddLinkerInputs as the implied option needs to precede
+ // any other '-bcdtors' settings or '-bnocdtors' that '-Wl' might forward.
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-bcdtors:all:0:s");
+
// Specify linker input file(s).
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -145,7 +156,8 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
@@ -154,6 +166,43 @@ AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
+// Returns the effective header sysroot path to use.
+// This comes from either -isysroot or --sysroot.
+llvm::StringRef
+AIX::GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const {
+ if (DriverArgs.hasArg(options::OPT_isysroot))
+ return DriverArgs.getLastArgValue(options::OPT_isysroot);
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+ return "/";
+}
+
+void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Return if -nostdinc is specified as a driver option.
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+ const Driver &D = getDriver();
+
+ // Add the Clang builtin headers (<resource>/include).
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ path::append(P, "/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ // Return if -nostdlibinc is specified as a driver option.
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Add <sysroot>/usr/include.
+ SmallString<128> UP(Sysroot);
+ path::append(UP, "/usr/include");
+ addSystemInclude(DriverArgs, CC1Args, UP.str());
+}
+
auto AIX::buildAssembler() const -> Tool * { return new aix::Assembler(*this); }
auto AIX::buildLinker() const -> Tool * { return new aix::Linker(*this); }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
index 69b948bc0ea8..942bb3cceb8a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
@@ -63,9 +63,16 @@ public:
bool isPIEDefault() const override { return false; }
bool isPICDefaultForced() const override { return true; }
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+
+private:
+ llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 71a2c68b4197..bc6d1fcd4a00 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -12,6 +12,8 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -19,6 +21,327 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
+ assert(!Path.empty());
+
+ const StringRef Suffix(".bc");
+ const StringRef Suffix2(".amdgcn.bc");
+
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Path, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef FilePath = LI->path();
+ StringRef FileName = llvm::sys::path::filename(FilePath);
+ if (!FileName.endswith(Suffix))
+ continue;
+
+ StringRef BaseName;
+ if (FileName.endswith(Suffix2))
+ BaseName = FileName.drop_back(Suffix2.size());
+ else if (FileName.endswith(Suffix))
+ BaseName = FileName.drop_back(Suffix.size());
+
+ if (BaseName == "ocml") {
+ OCML = FilePath;
+ } else if (BaseName == "ockl") {
+ OCKL = FilePath;
+ } else if (BaseName == "opencl") {
+ OpenCL = FilePath;
+ } else if (BaseName == "hip") {
+ HIP = FilePath;
+ } else if (BaseName == "oclc_finite_only_off") {
+ FiniteOnly.Off = FilePath;
+ } else if (BaseName == "oclc_finite_only_on") {
+ FiniteOnly.On = FilePath;
+ } else if (BaseName == "oclc_daz_opt_on") {
+ DenormalsAreZero.On = FilePath;
+ } else if (BaseName == "oclc_daz_opt_off") {
+ DenormalsAreZero.Off = FilePath;
+ } else if (BaseName == "oclc_correctly_rounded_sqrt_on") {
+ CorrectlyRoundedSqrt.On = FilePath;
+ } else if (BaseName == "oclc_correctly_rounded_sqrt_off") {
+ CorrectlyRoundedSqrt.Off = FilePath;
+ } else if (BaseName == "oclc_unsafe_math_on") {
+ UnsafeMath.On = FilePath;
+ } else if (BaseName == "oclc_unsafe_math_off") {
+ UnsafeMath.Off = FilePath;
+ } else if (BaseName == "oclc_wavefrontsize64_on") {
+ WavefrontSize64.On = FilePath;
+ } else if (BaseName == "oclc_wavefrontsize64_off") {
+ WavefrontSize64.Off = FilePath;
+ } else {
+      // Process all bitcode filenames that look like
+      // oclc_isa_version_XXX.amdgcn.bc
+ const StringRef DeviceLibPrefix = "oclc_isa_version_";
+ if (!BaseName.startswith(DeviceLibPrefix))
+ continue;
+
+ StringRef IsaVersionNumber =
+ BaseName.drop_front(DeviceLibPrefix.size());
+
+ llvm::Twine GfxName = Twine("gfx") + IsaVersionNumber;
+ SmallString<8> Tmp;
+ LibDeviceMap.insert(
+ std::make_pair(GfxName.toStringRef(Tmp), FilePath.str()));
+ }
+ }
+}
+
+void RocmInstallationDetector::ParseHIPVersionFile(llvm::StringRef V) {
+ SmallVector<StringRef, 4> VersionParts;
+ V.split(VersionParts, '\n');
+  unsigned Major = 0;
+  unsigned Minor = 0;
+ for (auto Part : VersionParts) {
+ auto Splits = Part.split('=');
+ if (Splits.first == "HIP_VERSION_MAJOR")
+ Splits.second.getAsInteger(0, Major);
+ else if (Splits.first == "HIP_VERSION_MINOR")
+ Splits.second.getAsInteger(0, Minor);
+ else if (Splits.first == "HIP_VERSION_PATCH")
+ VersionPatch = Splits.second.str();
+ }
+ VersionMajorMinor = llvm::VersionTuple(Major, Minor);
+ DetectedVersion =
+ (Twine(Major) + "." + Twine(Minor) + "." + VersionPatch).str();
+}
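The .hipVersion file read by detectHIPRuntime further down is a small key=value list. A minimal standalone sketch of the parsing above, assuming a file body such as (hypothetical values) "HIP_VERSION_MAJOR=3", "HIP_VERSION_MINOR=5", "HIP_VERSION_PATCH=20392" on separate lines, which would be reported as version "3.5.20392":

#include <sstream>
#include <string>

static std::string parseHipVersion(const std::string &Body) {
  std::string Major = "0", Minor = "0", Patch = "0";
  std::istringstream Lines(Body);
  for (std::string Line; std::getline(Lines, Line);) {
    auto Eq = Line.find('=');
    if (Eq == std::string::npos)
      continue;
    const std::string Key = Line.substr(0, Eq), Value = Line.substr(Eq + 1);
    if (Key == "HIP_VERSION_MAJOR")
      Major = Value;
    else if (Key == "HIP_VERSION_MINOR")
      Minor = Value;
    else if (Key == "HIP_VERSION_PATCH")
      Patch = Value;
  }
  return Major + "." + Minor + "." + Patch;
}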
+
+// For candidate specified by --rocm-path we do not do strict check.
+SmallVector<RocmInstallationDetector::Candidate, 4>
+RocmInstallationDetector::getInstallationPathCandidates() {
+ SmallVector<Candidate, 4> Candidates;
+ if (!RocmPathArg.empty()) {
+ Candidates.emplace_back(RocmPathArg.str());
+ return Candidates;
+ }
+
+ // Try to find relative to the compiler binary.
+ const char *InstallDir = D.getInstalledDir();
+
+  // Check both the normal Unix prefix location of the clang binary and the
+  // Windows-esque layout ROCm packages use, where the host architecture is a
+  // subdirectory of bin.
+
+ // Strip off directory (usually bin)
+ StringRef ParentDir = llvm::sys::path::parent_path(InstallDir);
+ StringRef ParentName = llvm::sys::path::filename(ParentDir);
+
+ // Some builds use bin/{host arch}, so go up again.
+ if (ParentName == "bin") {
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+ ParentName = llvm::sys::path::filename(ParentDir);
+ }
+
+ // Some versions of the rocm llvm package install to /opt/rocm/llvm/bin
+ if (ParentName == "llvm")
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+
+ Candidates.emplace_back(ParentDir.str(), /*StrictChecking=*/true);
+
+ // Device library may be installed in clang resource directory.
+ Candidates.emplace_back(D.ResourceDir, /*StrictChecking=*/true);
+
+ Candidates.emplace_back(D.SysRoot + "/opt/rocm", /*StrictChecking=*/true);
+ return Candidates;
+}
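As a concrete (assumed) example of the walk above: with clang installed at /opt/rocm/llvm/bin/clang, stripping bin and then llvm yields /opt/rocm, so the probe order becomes

  // clang binary:          /opt/rocm/llvm/bin/clang
  // candidate 1 (strict):  /opt/rocm
  // candidate 2 (strict):  <clang resource dir>
  // candidate 3 (strict):  <sysroot>/opt/rocm

while an explicit --rocm-path= short-circuits the list to that single, non-strict candidate.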
+
+RocmInstallationDetector::RocmInstallationDetector(
+ const Driver &D, const llvm::Triple &HostTriple,
+ const llvm::opt::ArgList &Args, bool DetectHIPRuntime, bool DetectDeviceLib)
+ : D(D) {
+ RocmPathArg = Args.getLastArgValue(clang::driver::options::OPT_rocm_path_EQ);
+ RocmDeviceLibPathArg =
+ Args.getAllArgValues(clang::driver::options::OPT_rocm_device_lib_path_EQ);
+ if (auto *A = Args.getLastArg(clang::driver::options::OPT_hip_version_EQ)) {
+ HIPVersionArg = A->getValue();
+ unsigned Major = 0;
+ unsigned Minor = 0;
+ SmallVector<StringRef, 3> Parts;
+ HIPVersionArg.split(Parts, '.');
+ if (Parts.size())
+ Parts[0].getAsInteger(0, Major);
+ if (Parts.size() > 1)
+ Parts[1].getAsInteger(0, Minor);
+ if (Parts.size() > 2)
+ VersionPatch = Parts[2].str();
+ if (VersionPatch.empty())
+ VersionPatch = "0";
+ if (Major == 0 || Minor == 0)
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << HIPVersionArg;
+
+ VersionMajorMinor = llvm::VersionTuple(Major, Minor);
+ DetectedVersion =
+ (Twine(Major) + "." + Twine(Minor) + "." + VersionPatch).str();
+ } else {
+ VersionPatch = DefaultVersionPatch;
+ VersionMajorMinor =
+ llvm::VersionTuple(DefaultVersionMajor, DefaultVersionMinor);
+ DetectedVersion = (Twine(DefaultVersionMajor) + "." +
+ Twine(DefaultVersionMinor) + "." + VersionPatch)
+ .str();
+ }
+
+ if (DetectHIPRuntime)
+ detectHIPRuntime();
+ if (DetectDeviceLib)
+ detectDeviceLibrary();
+}
+
+void RocmInstallationDetector::detectDeviceLibrary() {
+ assert(LibDevicePath.empty());
+
+ if (!RocmDeviceLibPathArg.empty())
+ LibDevicePath = RocmDeviceLibPathArg[RocmDeviceLibPathArg.size() - 1];
+ else if (const char *LibPathEnv = ::getenv("HIP_DEVICE_LIB_PATH"))
+ LibDevicePath = LibPathEnv;
+
+ auto &FS = D.getVFS();
+ if (!LibDevicePath.empty()) {
+    // Maintain compatibility with the HIP flag/envvar pointing directly at the
+    // bitcode library directory. This points at the library path itself
+    // instead of the ROCm root installation.
+ if (!FS.exists(LibDevicePath))
+ return;
+
+ scanLibDevicePath(LibDevicePath);
+ HasDeviceLibrary = allGenericLibsValid() && !LibDeviceMap.empty();
+ return;
+ }
+
+  // The install path situation in old versions of ROCm is a real mess and
+  // uses a different install layout. Multiple copies of the device libraries
+  // exist for each frontend project and differ depending on which build
+  // system produced the packages. Standalone OpenCL builds also have a
+  // different directory structure from the ROCm OpenCL package.
+ auto Candidates = getInstallationPathCandidates();
+ for (const auto &Candidate : Candidates) {
+ auto CandidatePath = Candidate.Path;
+
+ // Check device library exists at the given path.
+ auto CheckDeviceLib = [&](StringRef Path) {
+ bool CheckLibDevice = (!NoBuiltinLibs || Candidate.StrictChecking);
+ if (CheckLibDevice && !FS.exists(Path))
+ return false;
+
+ scanLibDevicePath(Path);
+
+ if (!NoBuiltinLibs) {
+ // Check that the required non-target libraries are all available.
+ if (!allGenericLibsValid())
+ return false;
+
+ // Check that we have found at least one libdevice that we can link in
+ // if -nobuiltinlib hasn't been specified.
+ if (LibDeviceMap.empty())
+ return false;
+ }
+ return true;
+ };
+
+ // The possible structures are:
+ // - ${ROCM_ROOT}/amdgcn/bitcode/*
+ // - ${ROCM_ROOT}/lib/*
+ // - ${ROCM_ROOT}/lib/bitcode/*
+ // so try to detect these layouts.
+ static llvm::SmallVector<const char *, 2> SubDirsList[] = {
+ {"amdgcn", "bitcode"},
+ {"lib"},
+ {"lib", "bitcode"},
+ };
+
+    // Make a path by appending sub-directories to the candidate path.
+ auto MakePath = [&](const llvm::ArrayRef<const char *> &SubDirs) {
+ auto Path = CandidatePath;
+ for (auto SubDir : SubDirs)
+ llvm::sys::path::append(Path, SubDir);
+ return Path;
+ };
+
+ for (auto SubDirs : SubDirsList) {
+ LibDevicePath = MakePath(SubDirs);
+ HasDeviceLibrary = CheckDeviceLib(LibDevicePath);
+ if (HasDeviceLibrary)
+ return;
+ }
+ }
+}
+
+void RocmInstallationDetector::detectHIPRuntime() {
+ auto Candidates = getInstallationPathCandidates();
+ auto &FS = D.getVFS();
+
+ for (const auto &Candidate : Candidates) {
+ InstallPath = Candidate.Path;
+ if (InstallPath.empty() || !FS.exists(InstallPath))
+ continue;
+
+ BinPath = InstallPath;
+ llvm::sys::path::append(BinPath, "bin");
+ IncludePath = InstallPath;
+ llvm::sys::path::append(IncludePath, "include");
+ LibPath = InstallPath;
+ llvm::sys::path::append(LibPath, "lib");
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
+ FS.getBufferForFile(BinPath + "/.hipVersion");
+ if (!VersionFile && Candidate.StrictChecking)
+ continue;
+
+ if (HIPVersionArg.empty() && VersionFile)
+ ParseHIPVersionFile((*VersionFile)->getBuffer());
+
+ HasHIPRuntime = true;
+ return;
+ }
+ HasHIPRuntime = false;
+}
+
+void RocmInstallationDetector::print(raw_ostream &OS) const {
+ if (hasHIPRuntime())
+ OS << "Found HIP installation: " << InstallPath << ", version "
+ << DetectedVersion << '\n';
+}
+
+void RocmInstallationDetector::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5);
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+    // HIP headers include standard library wrapper headers under the clang
+    // cuda_wrappers directory. These wrapper headers include_next the standard
+    // C++ headers, and the libc++ headers in turn include_next other clang
+    // headers, so the include paths have to follow this order:
+ // - wrapper include path
+ // - standard C++ include path
+ // - other clang include path
+ // Since standard C++ and other clang include paths are added in other
+ // places after this function, here we only need to make sure wrapper
+ // include path is added.
+ //
+ // ROCm 3.5 does not fully support the wrapper headers. Therefore it needs
+ // a workaround.
+ SmallString<128> P(D.ResourceDir);
+ if (UsesRuntimeWrapper)
+ llvm::sys::path::append(P, "include", "cuda_wrappers");
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(P));
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
+ return;
+
+ if (!hasHIPRuntime()) {
+ D.Diag(diag::err_drv_no_hip_runtime);
+ return;
+ }
+
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(getIncludePath()));
+ if (UsesRuntimeWrapper)
+ CC1Args.append({"-include", "__clang_hip_runtime_wrapper.h"});
+}
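Assuming a detected install under /opt/rocm and a ROCm newer than 3.5 (so UsesRuntimeWrapper is true), and with neither -nobuiltininc nor -nogpuinc given, the cc1 arguments appended above come out roughly as:

  //   -internal-isystem <clang resource dir>/include/cuda_wrappers
  //   -internal-isystem /opt/rocm/include
  //   -include __clang_hip_runtime_wrapper.h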
+
void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -31,8 +354,9 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-shared");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
@@ -102,6 +426,73 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
return DAL;
}
+bool AMDGPUToolChain::getDefaultDenormsAreZeroForTarget(
+ llvm::AMDGPU::GPUKind Kind) {
+
+ // Assume nothing without a specific target.
+ if (Kind == llvm::AMDGPU::GK_NONE)
+ return false;
+
+ const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
+
+  // Enable f32 denormals by default on subtargets where fma is fast with
+  // denormals.
+ const bool BothDenormAndFMAFast =
+ (ArchAttr & llvm::AMDGPU::FEATURE_FAST_FMA_F32) &&
+ (ArchAttr & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
+ return !BothDenormAndFMAFast;
+}
+
+llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ // Denormals should always be enabled for f16 and f64.
+ if (!FPType || FPType != &llvm::APFloat::IEEEsingle())
+ return llvm::DenormalMode::getIEEE();
+
+ if (JA.getOffloadingDeviceKind() == Action::OFK_HIP ||
+ JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(JA.getOffloadingArch());
+ if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
+ DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ getDefaultDenormsAreZeroForTarget(Kind)))
+ return llvm::DenormalMode::getPreserveSign();
+
+ return llvm::DenormalMode::getIEEE();
+ }
+
+ const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) ||
+ getDefaultDenormsAreZeroForTarget(Kind);
+
+  // Outputs are flushed to zero (FTZ), preserving sign. Denormal inputs are
+  // also implicitly treated as zero (DAZ).
+ return DAZ ? llvm::DenormalMode::getPreserveSign() :
+ llvm::DenormalMode::getIEEE();
+}
+
+bool AMDGPUToolChain::isWave64(const llvm::opt::ArgList &DriverArgs,
+ llvm::AMDGPU::GPUKind Kind) {
+ const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
+  const bool HasWave32 = (ArchAttr & llvm::AMDGPU::FEATURE_WAVE32);
+
+ return !HasWave32 || DriverArgs.hasFlag(
+ options::OPT_mwavefrontsize64, options::OPT_mno_wavefrontsize64, false);
+}
+
+/// ROCM Toolchain
+ROCMToolChain::ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : AMDGPUToolChain(D, Triple, Args) {
+ RocmInstallation.detectDeviceLibrary();
+}
+
void AMDGPUToolChain::addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
@@ -115,3 +506,91 @@ void AMDGPUToolChain::addClangTargetOptions(
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
}
+
+void ROCMToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ AMDGPUToolChain::addClangTargetOptions(DriverArgs, CC1Args,
+ DeviceOffloadingKind);
+
+ // For the OpenCL case where there is no offload target, accept -nostdlib to
+ // disable bitcode linking.
+ if (DeviceOffloadingKind == Action::OFK_None &&
+ DriverArgs.hasArg(options::OPT_nostdlib))
+ return;
+
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return;
+
+ if (!RocmInstallation.hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return;
+ }
+
+ // Get the device name and canonicalize it
+ const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+ const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
+ std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ return;
+ }
+
+ bool Wave64 = isWave64(DriverArgs, Kind);
+
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) ||
+ getDefaultDenormsAreZeroForTarget(Kind);
+ bool FiniteOnly = DriverArgs.hasArg(options::OPT_cl_finite_math_only);
+
+ bool UnsafeMathOpt =
+ DriverArgs.hasArg(options::OPT_cl_unsafe_math_optimizations);
+ bool FastRelaxedMath = DriverArgs.hasArg(options::OPT_cl_fast_relaxed_math);
+ bool CorrectSqrt =
+ DriverArgs.hasArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt);
+
+ // Add the OpenCL specific bitcode library.
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getOpenCLPath()));
+
+ // Add the generic set of libraries.
+ RocmInstallation.addCommonBitcodeLibCC1Args(
+ DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
+ UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
+}
+
+void RocmInstallationDetector::addCommonBitcodeLibCC1Args(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ StringRef LibDeviceFile, bool Wave64, bool DAZ, bool FiniteOnly,
+ bool UnsafeMathOpt, bool FastRelaxedMath, bool CorrectSqrt) const {
+ static const char LinkBitcodeFlag[] = "-mlink-builtin-bitcode";
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getOCMLPath()));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getOCKLPath()));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getDenormalsAreZeroPath(DAZ)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ getFiniteOnlyPath(FiniteOnly || FastRelaxedMath)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(
+ DriverArgs.MakeArgString(getCorrectlyRoundedSqrtPath(CorrectSqrt)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getWavefrontSize64Path(Wave64)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+}
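As an illustration (paths and the .amdgcn.bc naming assumed from the scanLibDevicePath logic earlier in this file), a gfx906 compile with default math options would end up with a bitcode link list along these lines, with the _on/_off variants picked by the Wave64/DAZ/FiniteOnly/UnsafeMath/CorrectSqrt flags computed by the callers above:

  //   -mlink-builtin-bitcode <libdir>/opencl.amdgcn.bc   (added by ROCMToolChain)
  //   -mlink-builtin-bitcode <libdir>/ocml.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/ockl.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_daz_opt_<on|off>.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_unsafe_math_<on|off>.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_finite_only_<on|off>.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_correctly_rounded_sqrt_<on|off>.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_wavefrontsize64_<on|off>.amdgcn.bc
  //   -mlink-builtin-bitcode <libdir>/oclc_isa_version_906.amdgcn.bc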
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
index f4c78bea5cc9..5d44faf28b05 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -10,19 +10,24 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPU_H
#include "Gnu.h"
+#include "ROCm.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/TargetParser.h"
+
#include <map>
namespace clang {
namespace driver {
+
namespace tools {
namespace amdgpu {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("amdgpu::Linker", "ld.lld", TC) {}
+ Linker(const ToolChain &TC) : Tool("amdgpu::Linker", "ld.lld", TC) {}
bool isLinkJob() const override { return true; }
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
@@ -40,11 +45,9 @@ void getAMDGPUTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY AMDGPUToolChain : public Generic_ELF {
-
-private:
+protected:
const std::map<options::ID, const StringRef> OptionsDefault;
-protected:
Tool *buildLinker() const override;
const StringRef getOptionDefault(options::ID OptID) const {
auto opt = OptionsDefault.find(OptID);
@@ -66,6 +69,34 @@ public:
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+
+ /// Return whether denormals should be flushed, and treated as 0 by default
+ /// for the subtarget.
+ static bool getDefaultDenormsAreZeroForTarget(llvm::AMDGPU::GPUKind GPUKind);
+
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
+ static bool isWave64(const llvm::opt::ArgList &DriverArgs,
+ llvm::AMDGPU::GPUKind Kind);
+  /// Needed for using LTO.
+ bool HasNativeLLVMSupport() const override {
+ return true;
+ }
+
+ /// Needed for translating LTO options.
+ const char *getDefaultLinker() const override { return "ld.lld"; }
+};
+
+class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
+public:
+ ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
index e8a3a7b38c31..092bade53c63 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
@@ -74,13 +74,11 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
// No avr-libc found and so no runtime linked.
D.Diag(diag::warn_drv_avr_libc_not_found);
} else { // We have enough information to link stdlibs
- std::string GCCRoot = GCCInstallation.getInstallPath();
+ std::string GCCRoot = std::string(GCCInstallation.getInstallPath());
std::string LibcRoot = AVRLibcRoot.getValue();
getFilePaths().push_back(LibcRoot + std::string("/lib/") +
std::string(*FamilyName));
- getFilePaths().push_back(LibcRoot + std::string("/lib/") +
- std::string(*FamilyName));
getFilePaths().push_back(GCCRoot + std::string("/") +
std::string(*FamilyName));
@@ -144,8 +142,9 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
}
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
index d244fc4f90e9..a3198b249580 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
@@ -40,10 +40,10 @@ private:
namespace tools {
namespace AVR {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const llvm::Triple &Triple, const ToolChain &TC, bool LinkStdlib)
- : GnuTool("AVR::Linker", "avr-ld", TC), Triple(Triple),
+ : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple),
LinkStdlib(LinkStdlib) {}
bool hasIntegratedCPP() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
index 2f11c9739a0e..a4141a57accc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
@@ -39,7 +39,8 @@ void ananas::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -103,7 +104,7 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -123,7 +124,8 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// Ananas - Ananas tool chain which can call as(1) and ld(1) directly.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h
index 5e45b47fc108..72ad3edcf056 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h
@@ -19,10 +19,9 @@ namespace tools {
/// ananas -- Directly call GNU Binutils assembler and linker
namespace ananas {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("ananas::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("ananas::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("ananas::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("ananas::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 9c27504dccf5..487c50dfc466 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -39,7 +39,7 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
  // Handle the CPU name being 'native'.
if (CPU == "native")
- return llvm::sys::getHostCPUName();
+ return std::string(llvm::sys::getHostCPUName());
else if (CPU.size())
return CPU;
@@ -54,7 +54,8 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
static bool DecodeAArch64Features(const Driver &D, StringRef text,
- std::vector<StringRef> &Features) {
+ std::vector<StringRef> &Features,
+ llvm::AArch64::ArchKind ArchKind) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
@@ -66,6 +67,11 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
D.Diag(clang::diag::err_drv_no_neon_modifier);
else
return false;
+
+    // +sve implies +f32mm if the base architecture is v8.6A.
+    // It isn't the case in general that sve implies both f64mm and f32mm.
+ if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A) && Feature == "sve")
+ Features.push_back("+f32mm");
}
return true;
}
@@ -76,6 +82,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::vector<StringRef> &Features) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
+ llvm::AArch64::ArchKind ArchKind = llvm::AArch64::ArchKind::ARMV8A;
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
@@ -83,7 +90,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (CPU == "generic") {
Features.push_back("+neon");
} else {
- llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseCPUArch(CPU);
+ ArchKind = llvm::AArch64::parseCPUArch(CPU);
if (!llvm::AArch64::getArchFeatures(ArchKind, Features))
return false;
@@ -92,10 +99,11 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
return false;
}
- if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
- return false;
+ if (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind))
+ return false;
- return true;
+ return true;
}
static bool
@@ -108,7 +116,8 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
if (ArchKind == llvm::AArch64::ArchKind::INVALID ||
!llvm::AArch64::getArchFeatures(ArchKind, Features) ||
- (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)))
+ (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind)))
return false;
return true;
@@ -139,8 +148,9 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
// Handle CPU name is 'native'.
if (MtuneLowerCase == "native")
- MtuneLowerCase = llvm::sys::getHostCPUName();
- if (MtuneLowerCase == "cyclone" || MtuneLowerCase.find("apple") == 0) {
+ MtuneLowerCase = std::string(llvm::sys::getHostCPUName());
+ if (MtuneLowerCase == "cyclone" ||
+ StringRef(MtuneLowerCase).startswith("apple")) {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
@@ -208,6 +218,39 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
}
+ // Enable/disable straight line speculation hardening.
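+  // For example, -mharden-sls=retbr,blr is equivalent to -mharden-sls=all,
+  // and -mharden-sls=none disables both mitigations.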
+ if (Arg *A = Args.getLastArg(options::OPT_mharden_sls_EQ)) {
+ StringRef Scope = A->getValue();
+ bool EnableRetBr = false;
+ bool EnableBlr = false;
+ if (Scope != "none" && Scope != "all") {
+ SmallVector<StringRef, 4> Opts;
+ Scope.split(Opts, ",");
+ for (auto Opt : Opts) {
+ Opt = Opt.trim();
+ if (Opt == "retbr") {
+ EnableRetBr = true;
+ continue;
+ }
+ if (Opt == "blr") {
+ EnableBlr = true;
+ continue;
+ }
+ D.Diag(diag::err_invalid_sls_hardening)
+ << Scope << A->getAsString(Args);
+ break;
+ }
+ } else if (Scope == "all") {
+ EnableRetBr = true;
+ EnableBlr = true;
+ }
+
+ if (EnableRetBr)
+ Features.push_back("+harden-sls-retbr");
+ if (EnableBlr)
+ Features.push_back("+harden-sls-blr");
+ }
+
// En/disable crc
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
if (A->getOption().matches(options::OPT_mcrc))
@@ -322,6 +365,10 @@ fp16_fml_fallthrough:
}
}
+ auto V8_6Pos = llvm::find(Features, "+v8.6a");
+ if (V8_6Pos != std::end(Features))
+ V8_6Pos = Features.insert(std::next(V8_6Pos), {"+i8mm", "+bf16"});
+
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access))
if (A->getOption().matches(options::OPT_mno_unaligned_access))
@@ -399,6 +446,9 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_ffixed_x28))
Features.push_back("+reserve-x28");
+ if (Args.hasArg(options::OPT_ffixed_x30))
+ Features.push_back("+reserve-x30");
+
if (Args.hasArg(options::OPT_fcall_saved_x8))
Features.push_back("+call-saved-x8");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index a1923e731489..afe896b4a65b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -57,7 +57,7 @@ void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef HWDiv,
std::vector<StringRef> &Features) {
- unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
+ uint64_t HWDivID = llvm::ARM::parseHWDiv(HWDiv);
if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -91,7 +91,7 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
CPU = CPU.split("+").first;
if (CPU != "generic") {
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseCPUArch(CPU);
- unsigned Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
+ uint64_t Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
llvm::ARM::getExtensionFeatures(Extension, Features);
}
}
@@ -137,9 +137,8 @@ bool arm::useAAPCSForMachO(const llvm::Triple &T) {
}
// Select mode for reading thread pointer (-mtp=soft/cp15).
-arm::ReadTPMode arm::getReadTPMode(const ToolChain &TC, const ArgList &Args) {
+arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
- const Driver &D = TC.getDriver();
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
.Case("cp15", ReadTPMode::Cp15)
@@ -156,11 +155,14 @@ arm::ReadTPMode arm::getReadTPMode(const ToolChain &TC, const ArgList &Args) {
return ReadTPMode::Soft;
}
+arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
+ return arm::getARMFloatABI(TC.getDriver(), TC.getEffectiveTriple(), Args);
+}
+
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
-arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
- const Driver &D = TC.getDriver();
- const llvm::Triple &Triple = TC.getEffectiveTriple();
+arm::FloatABI arm::getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
auto SubArch = getARMSubArchVersionNumber(Triple);
arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
@@ -276,18 +278,20 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
return ABI;
}
-void arm::getARMTargetFeatures(const ToolChain &TC,
- const llvm::Triple &Triple,
- const ArgList &Args,
- ArgStringList &CmdArgs,
- std::vector<StringRef> &Features,
- bool ForAS) {
- const Driver &D = TC.getDriver();
+static bool hasIntegerMVE(const std::vector<StringRef> &F) {
+ auto MVE = llvm::find(llvm::reverse(F), "+mve");
+ auto NoMVE = llvm::find(llvm::reverse(F), "-mve");
+ return MVE != F.rend() &&
+ (NoMVE == F.rend() || std::distance(MVE, NoMVE) > 0);
+}
+void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ std::vector<StringRef> &Features, bool ForAS) {
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
- arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
- arm::ReadTPMode ThreadPointer = arm::getReadTPMode(TC, Args);
+ arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
+ arm::ReadTPMode ThreadPointer = arm::getReadTPMode(D, Args);
const Arg *WaCPU = nullptr, *WaFPU = nullptr;
const Arg *WaHDiv = nullptr, *WaArch = nullptr;
@@ -459,18 +463,13 @@ fp16_fml_fallthrough:
// Disable all features relating to hardware FP, not already disabled by the
// above call.
- Features.insert(Features.end(), {"-neon", "-crypto", "-dotprod", "-fp16fml",
- "-mve", "-mve.fp", "-fpregs"});
+ Features.insert(Features.end(),
+ {"-dotprod", "-fp16fml", "-mve", "-mve.fp", "-fpregs"});
} else if (FPUID == llvm::ARM::FK_NONE) {
// -mfpu=none is *very* similar to -mfloat-abi=soft, only that it should not
// disable MVE-I.
- Features.insert(Features.end(),
- {"-neon", "-crypto", "-dotprod", "-fp16fml", "-mve.fp"});
- // Even though we remove MVE-FP, we still need to check if it was originally
- // present among the requested extensions, because it implies MVE-I, which
- // should not be disabled by -mfpu-none.
- if (!llvm::is_contained(Features, "+mve") &&
- !llvm::is_contained(Features, "+mve.fp"))
+ Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-mve.fp"});
+ if (!hasIntegerMVE(Features))
Features.emplace_back("-fpregs");
}
@@ -612,14 +611,14 @@ fp16_fml_fallthrough:
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
if (!Arch.empty())
- MArch = Arch;
+ MArch = std::string(Arch);
else
- MArch = Triple.getArchName();
+ MArch = std::string(Triple.getArchName());
MArch = StringRef(MArch).split("+").first.lower();
// Handle -march=native.
if (MArch == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (CPU != "generic") {
// Translate the native cpu into the architecture suffix for that CPU.
StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
@@ -657,12 +656,12 @@ std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
std::string MCPU = StringRef(CPU).split("+").first.lower();
// Handle -mcpu=native.
if (MCPU == "native")
- return llvm::sys::getHostCPUName();
+ return std::string(llvm::sys::getHostCPUName());
else
return MCPU;
}
- return getARMCPUForMArch(Arch, Triple);
+ return std::string(getARMCPUForMArch(Arch, Triple));
}
/// getLLVMArchSuffixForARM - Get the LLVM ArchKind value to use for a
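
For the ARM.cpp hunks above: the float-ABI and thread-pointer helpers now take the Driver and Triple directly, and the new hasIntegerMVE() helper decides whether -mfpu=none must also drop -fpregs, based on whether the last +mve/-mve entry in the feature list leaves integer MVE enabled. A small self-contained check of that last-wins behaviour (sketch only; the helper is copied verbatim from the hunk):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/StringRef.h"
    #include <cassert>
    #include <vector>

    // Copied from the hunk above: true iff the last "+mve"/"-mve" entry leaves
    // integer MVE enabled.
    static bool hasIntegerMVE(const std::vector<llvm::StringRef> &F) {
      auto MVE = llvm::find(llvm::reverse(F), "+mve");
      auto NoMVE = llvm::find(llvm::reverse(F), "-mve");
      return MVE != F.rend() &&
             (NoMVE == F.rend() || std::distance(MVE, NoMVE) > 0);
    }

    int main() {
      // "-mve" last: integer MVE is off, so the driver would append "-fpregs".
      assert(!hasIntegerMVE({"+mve", "-mve"}));
      // "+mve" re-enabled last: integer MVE stays on and "-fpregs" is not added.
      assert(hasIntegerMVE({"-mve", "+mve"}));
      return 0;
    }
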
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index 5640f8371262..0ba1a59852aa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -48,13 +48,15 @@ enum class FloatABI {
};
FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
-ReadTPMode getReadTPMode(const ToolChain &TC, const llvm::opt::ArgList &Args);
+FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args);
bool useAAPCSForMachO(const llvm::Triple &T);
void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
llvm::StringRef &Arch, llvm::StringRef &CPU,
bool FromAs = false);
-void getARMTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
+void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
std::vector<llvm::StringRef> &Features, bool ForAS);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index f1baadaebf41..144e276a6bd8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -26,7 +26,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
StringRef CPUName = A->getValue();
if (CPUName == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return CPU;
else
@@ -70,6 +70,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
.Case("power7", "pwr7")
.Case("power8", "pwr8")
.Case("power9", "pwr9")
+ .Case("power10", "pwr10")
.Case("future", "future")
.Case("pwr3", "pwr3")
.Case("pwr4", "pwr4")
@@ -80,6 +81,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
.Case("pwr7", "pwr7")
.Case("pwr8", "pwr8")
.Case("pwr9", "pwr9")
+ .Case("pwr10", "pwr10")
.Case("powerpc", "ppc")
.Case("powerpc64", "ppc64")
.Case("powerpc64le", "ppc64le")
@@ -91,14 +93,16 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
const char *ppc::getPPCAsmModeForCPU(StringRef Name) {
return llvm::StringSwitch<const char *>(Name)
- .Case("pwr7", "-mpower7")
- .Case("power7", "-mpower7")
- .Case("pwr8", "-mpower8")
- .Case("power8", "-mpower8")
- .Case("ppc64le", "-mpower8")
- .Case("pwr9", "-mpower9")
- .Case("power9", "-mpower9")
- .Default("-many");
+ .Case("pwr7", "-mpower7")
+ .Case("power7", "-mpower7")
+ .Case("pwr8", "-mpower8")
+ .Case("power8", "-mpower8")
+ .Case("ppc64le", "-mpower8")
+ .Case("pwr9", "-mpower9")
+ .Case("power9", "-mpower9")
+ .Case("pwr10", "-mpower10")
+ .Case("power10", "-mpower10")
+ .Default("-many");
}
void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index d0c082bfc53b..80d12e5aa8da 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -22,6 +22,14 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+namespace {
+// Represents the major and minor version number components of a RISC-V extension
+struct RISCVExtensionVersion {
+ StringRef Major;
+ StringRef Minor;
+};
+} // end anonymous namespace
+
static StringRef getExtensionTypeDesc(StringRef Ext) {
if (Ext.startswith("sx"))
return "non-standard supervisor-level extension";
@@ -29,6 +37,8 @@ static StringRef getExtensionTypeDesc(StringRef Ext) {
return "standard supervisor-level extension";
if (Ext.startswith("x"))
return "non-standard user-level extension";
+ if (Ext.startswith("z"))
+ return "standard user-level extension";
return StringRef();
}
@@ -39,10 +49,29 @@ static StringRef getExtensionType(StringRef Ext) {
return "s";
if (Ext.startswith("x"))
return "x";
+ if (Ext.startswith("z"))
+ return "z";
return StringRef();
}
+// If the extension is supported as experimental, return the version of that
+// extension that the compiler currently supports.
+static Optional<RISCVExtensionVersion>
+isExperimentalExtension(StringRef Ext) {
+ if (Ext == "b" || Ext == "zbb" || Ext == "zbc" || Ext == "zbe" ||
+ Ext == "zbf" || Ext == "zbm" || Ext == "zbp" || Ext == "zbr" ||
+ Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
+ return RISCVExtensionVersion{"0", "92"};
+ if (Ext == "v")
+ return RISCVExtensionVersion{"0", "8"};
+ return None;
+}
+
static bool isSupportedExtension(StringRef Ext) {
+ // LLVM supports "z" extensions which are marked as experimental.
+ if (isExperimentalExtension(Ext))
+ return true;
+
// LLVM does not support "sx", "s" nor "x" extensions.
return false;
}
@@ -52,17 +81,15 @@ static bool isSupportedExtension(StringRef Ext) {
// Version number is divided into major and minor version numbers,
// separated by a 'p'. If the minor version is 0 then 'p0' can be
// omitted from the version string. E.g., rv32i2p0, rv32i2, rv32i2p1.
-static bool getExtensionVersion(const Driver &D, StringRef MArch,
- StringRef Ext, StringRef In,
+static bool getExtensionVersion(const Driver &D, const ArgList &Args,
+ StringRef MArch, StringRef Ext, StringRef In,
std::string &Major, std::string &Minor) {
- Major = In.take_while(isDigit);
+ Major = std::string(In.take_while(isDigit));
In = In.substr(Major.size());
- if (Major.empty())
- return true;
- if (In.consume_front("p")) {
- Minor = In.take_while(isDigit);
- In = In.substr(Major.size());
+ if (Major.size() && In.consume_front("p")) {
+ Minor = std::string(In.take_while(isDigit));
+ In = In.substr(Major.size() + 1);
// Expected 'p' to be followed by minor version number.
if (Minor.empty()) {
@@ -74,7 +101,53 @@ static bool getExtensionVersion(const Driver &D, StringRef MArch,
}
}
- // TODO: Handle extensions with version number.
+ // A multi-character extension that carries a version number must have no
+ // subsequent characters (i.e. it must either end the string or be followed
+ // by an underscore).
+ if (Ext.size() > 1 && In.size()) {
+ std::string Error =
+ "multi-character extensions must be separated by underscores";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name) << MArch << Error << In;
+ return false;
+ }
+
+ // If the extension is experimental, require use of the current version number
+ if (auto ExperimentalExtension = isExperimentalExtension(Ext)) {
+ if (!Args.hasArg(options::OPT_menable_experimental_extensions)) {
+ std::string Error =
+ "requires '-menable-experimental-extensions' for experimental extension";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ } else if (Major.empty() && Minor.empty()) {
+ std::string Error =
+ "experimental extension requires explicit version number";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ }
+ auto SupportedVers = *ExperimentalExtension;
+ if (Major != SupportedVers.Major || Minor != SupportedVers.Minor) {
+ std::string Error =
+ "unsupported version number " + Major;
+ if (!Minor.empty())
+ Error += "." + Minor;
+ Error += " for experimental extension (this compiler supports "
+ + SupportedVers.Major.str() + "."
+ + SupportedVers.Minor.str() + ")";
+
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ }
+ return true;
+ }
+
+ // Allow extensions to declare no version number
+ if (Major.empty() && Minor.empty())
+ return true;
+
+ // TODO: Handle supported extensions with version number.
std::string Error = "unsupported version number " + Major;
if (!Minor.empty())
Error += "." + Minor;
@@ -89,7 +162,7 @@ static bool getExtensionVersion(const Driver &D, StringRef MArch,
// Parse the ISA string containing non-standard user-level
// extensions, standard supervisor-level extensions and
// non-standard supervisor-level extensions.
-// These extensions start with 'x', 's', 'sx' prefixes, follow a
+// These extensions start with 'z', 'x', 's', 'sx' prefixes, follow a
// canonical order, might have a version number (major, minor)
// and are separated by a single underscore '_'.
// Set the hardware features for the extensions that are supported.
@@ -105,7 +178,7 @@ static void getExtensionFeatures(const Driver &D,
SmallVector<StringRef, 8> Split;
Exts.split(Split, StringRef("_"));
- SmallVector<StringRef, 3> Prefix{"x", "s", "sx"};
+ SmallVector<StringRef, 4> Prefix{"z", "x", "s", "sx"};
auto I = Prefix.begin();
auto E = Prefix.end();
@@ -119,8 +192,10 @@ static void getExtensionFeatures(const Driver &D,
}
StringRef Type = getExtensionType(Ext);
- StringRef Name(Ext.substr(Type.size()));
StringRef Desc = getExtensionTypeDesc(Ext);
+ auto Pos = Ext.find_if(isDigit);
+ StringRef Name(Ext.substr(0, Pos));
+ StringRef Vers(Ext.substr(Pos));
if (Type.empty()) {
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
@@ -133,7 +208,7 @@ static void getExtensionFeatures(const Driver &D,
++I;
if (I == E) {
- std::string Error = Desc;
+ std::string Error = std::string(Desc);
Error += " not given in canonical order";
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
<< MArch << Error << Ext;
@@ -143,35 +218,30 @@ static void getExtensionFeatures(const Driver &D,
// The order is OK, do not advance I to the next prefix
// to allow repeated extension type, e.g.: rv32ixabc_xdef.
- if (Name.empty()) {
- std::string Error = Desc;
+ if (Name.size() == Type.size()) {
+ std::string Error = std::string(Desc);
Error += " name missing after";
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
+ << MArch << Error << Type;
return;
}
std::string Major, Minor;
- auto Pos = Name.find_if(isDigit);
- if (Pos != StringRef::npos) {
- auto Next = Name.substr(Pos);
- Name = Name.substr(0, Pos);
- if (!getExtensionVersion(D, MArch, Ext, Next, Major, Minor))
- return;
- }
+ if (!getExtensionVersion(D, Args, MArch, Name, Vers, Major, Minor))
+ return;
// Check if duplicated extension.
- if (llvm::is_contained(AllExts, Ext)) {
+ if (llvm::is_contained(AllExts, Name)) {
std::string Error = "duplicated ";
Error += Desc;
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
+ << MArch << Error << Name;
return;
}
// Extension format is correct, keep parsing the extensions.
// TODO: Save Type, Name, Major, Minor to avoid parsing them later.
- AllExts.push_back(Ext);
+ AllExts.push_back(Name);
}
// Set target features.
@@ -186,7 +256,10 @@ static void getExtensionFeatures(const Driver &D,
<< MArch << Error << Ext;
return;
}
- Features.push_back(Args.MakeArgString("+" + Ext));
+ if (isExperimentalExtension(Ext))
+ Features.push_back(Args.MakeArgString("+experimental-" + Ext));
+ else
+ Features.push_back(Args.MakeArgString("+" + Ext));
}
}
@@ -251,28 +324,35 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
// Skip rvxxx
StringRef Exts = MArch.substr(5);
- // Remove non-standard extensions and supervisor-level extensions.
- // They have 'x', 's', 'sx' prefixes. Parse them at the end.
- // Find the very first occurrence of 's' or 'x'.
+ // Remove multi-letter standard extensions, non-standard extensions and
+ // supervisor-level extensions. They have 'z', 'x', 's', 'sx' prefixes.
+ // Parse them at the end.
+ // Find the very first occurrence of 's', 'x' or 'z'.
StringRef OtherExts;
- size_t Pos = Exts.find_first_of("sx");
+ size_t Pos = Exts.find_first_of("zsx");
if (Pos != StringRef::npos) {
OtherExts = Exts.substr(Pos);
Exts = Exts.substr(0, Pos);
}
std::string Major, Minor;
- if (!getExtensionVersion(D, MArch, std::string(1, Baseline), Exts, Major,
- Minor))
+ if (!getExtensionVersion(D, Args, MArch, std::string(1, Baseline), Exts,
+ Major, Minor))
return false;
+ // Consume the base ISA version number and any '_' between rvxxx and the
+ // first extension
+ Exts = Exts.drop_front(Major.size());
+ if (!Minor.empty())
+ Exts = Exts.drop_front(Minor.size() + 1 /*'p'*/);
+ Exts.consume_front("_");
+
// TODO: Use version number when setting target features
- // and consume the underscore '_' that might follow.
auto StdExtsItr = StdExts.begin();
auto StdExtsEnd = StdExts.end();
- for (auto I = Exts.begin(), E = Exts.end(); I != E; ++I) {
+ for (auto I = Exts.begin(), E = Exts.end(); I != E; ) {
char c = *I;
// Check ISA extensions are specified in the canonical order.
@@ -295,18 +375,15 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
// Move to next char to prevent repeated letter.
++StdExtsItr;
- if (std::next(I) != E) {
- // Skip c.
- std::string Next = std::string(std::next(I), E);
- std::string Major, Minor;
- if (!getExtensionVersion(D, MArch, std::string(1, c), Next, Major, Minor))
- return false;
-
- // TODO: Use version number when setting target features
- // and consume the underscore '_' that might follow.
- }
+ std::string Next, Major, Minor;
+ if (std::next(I) != E)
+ Next = std::string(std::next(I), E);
+ if (!getExtensionVersion(D, Args, MArch, std::string(1, c), Next, Major,
+ Minor))
+ return false;
// The order is OK, then push it into features.
+ // TODO: Use version number when setting target features
switch (c) {
default:
// Currently LLVM supports only "mafdc".
@@ -331,7 +408,22 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
case 'c':
Features.push_back("+c");
break;
+ case 'b':
+ Features.push_back("+experimental-b");
+ break;
+ case 'v':
+ Features.push_back("+experimental-v");
+ break;
}
+
+ // Consume full extension name and version, including any optional '_'
+ // between this extension and the next
+ ++I;
+ I += Major.size();
+ if (Minor.size())
+ I += Minor.size() + 1 /*'p'*/;
+ if (*I == '_')
+ ++I;
}
// Dependency check.
@@ -426,20 +518,18 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Args.hasArg(options::OPT_ffixed_x31))
Features.push_back("+reserve-x31");
- // FreeBSD local, because ld.lld doesn't support relaxations
- // -mno-relax is default, unless -mrelax is specified.
- if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, false))
+ // -mrelax is default, unless -mno-relax is specified.
+ if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
Features.push_back("+relax");
else
Features.push_back("-relax");
// GCC Compatibility: -mno-save-restore is default, unless -msave-restore is
- // specified...
- if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false)) {
- // ... but we don't support -msave-restore, so issue a warning.
- D.Diag(diag::warn_drv_clang_unsupported)
- << Args.getLastArg(options::OPT_msave_restore)->getAsString(Args);
- }
+ // specified.
+ if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false))
+ Features.push_back("+save-restore");
+ else
+ Features.push_back("-save-restore");
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
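
In the hunk just above, two defaults flip: linker relaxation is now on unless -mno-relax is given (the FreeBSD-local opt-out is dropped), and -msave-restore now toggles a real +save-restore/-save-restore feature instead of being warned away. Both rely on the driver's last-flag-wins hasFlag() convention; a toy equivalent over a plain argument list (this hasFlag is a stand-in, not the llvm::opt API):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/StringRef.h"

    // Toy equivalent of Args.hasFlag(Pos, Neg, Default): the last of the two
    // flags on the command line wins; Default applies when neither appears.
    static bool hasFlag(llvm::ArrayRef<llvm::StringRef> Argv, llvm::StringRef Pos,
                        llvm::StringRef Neg, bool Default) {
      bool Value = Default;
      for (llvm::StringRef A : Argv) {
        if (A == Pos)
          Value = true;
        else if (A == Neg)
          Value = false;
      }
      return Value;
    }

    int main() {
      llvm::StringRef Argv[] = {"-mno-relax", "-mrelax"};
      bool Relax = hasFlag(Argv, "-mrelax", "-mno-relax", /*Default=*/true);
      bool SaveRestore =
          hasFlag(Argv, "-msave-restore", "-mno-save-restore", /*Default=*/false);
      return (Relax && !SaveRestore) ? 0 : 1; // "+relax", "-save-restore"
    }
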
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
index 2b77d59fdc66..f81bf68172de 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "SystemZ.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Host.h"
@@ -16,24 +18,40 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+systemz::FloatABI systemz::getSystemZFloatABI(const Driver &D,
+ const ArgList &Args) {
+ // Hard float is the default.
+ systemz::FloatABI ABI = systemz::FloatABI::Hard;
+ if (Args.hasArg(options::OPT_mfloat_abi_EQ))
+ D.Diag(diag::err_drv_unsupported_opt)
+ << Args.getLastArg(options::OPT_mfloat_abi_EQ)->getAsString(Args);
+
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_msoft_float,
+ options::OPT_mhard_float))
+ if (A->getOption().matches(clang::driver::options::OPT_msoft_float))
+ ABI = systemz::FloatABI::Soft;
+
+ return ABI;
+}
+
std::string systemz::getSystemZTargetCPU(const ArgList &Args) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
llvm::StringRef CPUName = A->getValue();
if (CPUName == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return CPU;
else
return "";
}
- return CPUName;
+ return std::string(CPUName);
}
- return "z10";
+ return CLANG_SYSTEMZ_DEFAULT_ARCH;
}
-void systemz::getSystemZTargetFeatures(const ArgList &Args,
+void systemz::getSystemZTargetFeatures(const Driver &D, const ArgList &Args,
std::vector<llvm::StringRef> &Features) {
// -m(no-)htm overrides use of the transactional-execution facility.
if (Arg *A = Args.getLastArg(options::OPT_mhtm, options::OPT_mno_htm)) {
@@ -49,4 +67,8 @@ void systemz::getSystemZTargetFeatures(const ArgList &Args,
else
Features.push_back("-vector");
}
+
+ systemz::FloatABI FloatABI = systemz::getSystemZFloatABI(D, Args);
+ if (FloatABI == systemz::FloatABI::Soft)
+ Features.push_back("+soft-float");
}
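
For the SystemZ hunks above: the default CPU now comes from CLANG_SYSTEMZ_DEFAULT_ARCH instead of a hard-coded "z10", and a new float-ABI helper feeds +soft-float into the feature list; hard float is the default, -mfloat-abi= is rejected, and the last of -msoft-float/-mhard-float wins. A standalone sketch of that ABI selection (SystemZFloatABI and pickFloatABI are illustrative names):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/StringRef.h"

    enum class SystemZFloatABI { Soft, Hard };

    // Hard float is the default; the last -msoft-float / -mhard-float wins.
    // The real helper additionally diagnoses any -mfloat-abi= as unsupported.
    static SystemZFloatABI pickFloatABI(llvm::ArrayRef<llvm::StringRef> Argv) {
      SystemZFloatABI ABI = SystemZFloatABI::Hard;
      for (llvm::StringRef A : Argv) {
        if (A == "-msoft-float")
          ABI = SystemZFloatABI::Soft;
        else if (A == "-mhard-float")
          ABI = SystemZFloatABI::Hard;
      }
      return ABI;
    }

    int main() {
      llvm::StringRef Argv[] = {"-mhard-float", "-msoft-float"};
      // Soft wins here, so the driver would append "+soft-float".
      return pickFloatABI(Argv) == SystemZFloatABI::Soft ? 0 : 1;
    }
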
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.h
index 77dcbc47be5c..1e42b68a8f3c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_SYSTEMZ_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_SYSTEMZ_H
+#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Option.h"
#include <string>
@@ -19,9 +20,16 @@ namespace driver {
namespace tools {
namespace systemz {
+enum class FloatABI {
+ Soft,
+ Hard,
+};
+
+FloatABI getSystemZFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
std::string getSystemZTargetCPU(const llvm::opt::ArgList &Args);
-void getSystemZTargetFeatures(const llvm::opt::ArgList &Args,
+void getSystemZTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
} // end namespace systemz
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp
new file mode 100644
index 000000000000..fa10e4810f1c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp
@@ -0,0 +1,26 @@
+//===--- VE.cpp - Tools Implementations -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+const char *ve::getVEAsmModeForCPU(StringRef Name, const llvm::Triple &Triple) {
+ return "";
+}
+
+void ve::getVETargetFeatures(const Driver &D, const ArgList &Args,
+ std::vector<StringRef> &Features) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
new file mode 100644
index 000000000000..713e3e7d042f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -0,0 +1,33 @@
+//===--- VE.h - VE-specific Tool Helpers ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace ve {
+
+void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+const char *getVEAsmModeForCPU(llvm::StringRef Name,
+ const llvm::Triple &Triple);
+
+} // end namespace ve
+} // namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 32a5c0051e93..2cc44c09917f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -31,7 +31,7 @@ const char *x86::getX86TargetCPU(const ArgList &Args,
//
// FIXME: We should also incorporate the detected target features for use
// with -native.
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return Args.MakeArgString(CPU);
}
@@ -185,6 +185,24 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
LVIOpt = options::OPT_mlvi_cfi;
}
+ if (Args.hasFlag(options::OPT_m_seses, options::OPT_mno_seses, false)) {
+ if (LVIOpt == options::OPT_mlvi_hardening)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << D.getOpts().getOptionName(options::OPT_mlvi_hardening)
+ << D.getOpts().getOptionName(options::OPT_m_seses);
+
+ if (SpectreOpt != clang::driver::options::ID::OPT_INVALID)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << D.getOpts().getOptionName(SpectreOpt)
+ << D.getOpts().getOptionName(options::OPT_m_seses);
+
+ Features.push_back("+seses");
+ if (!Args.hasArg(options::OPT_mno_lvi_cfi)) {
+ Features.push_back("+lvi-cfi");
+ LVIOpt = options::OPT_mlvi_cfi;
+ }
+ }
+
if (SpectreOpt != clang::driver::options::ID::OPT_INVALID &&
LVIOpt != clang::driver::options::ID::OPT_INVALID) {
D.Diag(diag::err_drv_argument_not_allowed_with)
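
Regarding the X86 hunk above: -mseses adds the +seses feature, is rejected together with -mlvi-hardening or any Spectre-mitigation option, and implicitly enables LVI-CFI unless -mno-lvi-cfi is given. A toy sketch of the conflict check only (checkSesesConflicts is an illustrative name):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include <string>

    // Returns an empty string when the combination is accepted, otherwise a
    // description of the offending pair (the real driver emits
    // err_drv_argument_not_allowed_with instead).
    static std::string checkSesesConflicts(bool Seses, bool LviHardening,
                                           llvm::StringRef SpectreOpt) {
      if (!Seses)
        return "";
      if (LviHardening)
        return "-mlvi-hardening cannot be combined with -mseses";
      if (!SpectreOpt.empty())
        return (llvm::Twine(SpectreOpt) + " cannot be combined with -mseses").str();
      return "";
    }

    int main() {
      std::string OK = checkSesesConflicts(true, false, "");
      std::string Bad =
          checkSesesConflicts(true, false, "-mspeculative-load-hardening");
      return (OK.empty() && !Bad.empty()) ? 0 : 1;
    }
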
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
index dff0e04183ef..97cfa7d0e156 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -67,7 +67,7 @@ Tool *BareMetal::buildLinker() const {
std::string BareMetal::getRuntimesDir() const {
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(Dir, "lib", "baremetal");
- return Dir.str();
+ return std::string(Dir.str());
}
void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -157,7 +157,7 @@ void BareMetal::AddCXXStdlibLibArgs(const ArgList &Args,
void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
ArgStringList &CmdArgs) const {
CmdArgs.push_back(Args.MakeArgString("-lclang_rt.builtins-" +
- getTriple().getArchName() + ".a"));
+ getTriple().getArchName()));
}
void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -191,7 +191,7 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this,
- Args.MakeArgString(TC.GetLinkerPath()),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(TC.GetLinkerPath()),
+ CmdArgs, Inputs));
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index 8b49b42598a8..9d6333bb5f1d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Clang.h"
+#include "AMDGPU.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
@@ -14,12 +15,12 @@
#include "Arch/RISCV.h"
#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
+#include "Arch/VE.h"
#include "Arch/X86.h"
-#include "AMDGPU.h"
#include "CommonArgs.h"
#include "Hexagon.h"
-#include "MSP430.h"
#include "InputInfo.h"
+#include "MSP430.h"
#include "PS4CPU.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -35,6 +36,7 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -307,10 +309,9 @@ static void getWebAssemblyTargetFeatures(const ArgList &Args,
handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
}
-static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
+static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args, ArgStringList &CmdArgs,
- bool ForAS) {
- const Driver &D = TC.getDriver();
+ bool ForAS, bool IsAux = false) {
std::vector<StringRef> Features;
switch (Triple.getArch()) {
default:
@@ -326,7 +327,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- arm::getARMTargetFeatures(TC, Triple, Args, CmdArgs, Features, ForAS);
+ arm::getARMTargetFeatures(D, Triple, Args, CmdArgs, Features, ForAS);
break;
case llvm::Triple::ppc:
@@ -339,7 +340,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
riscv::getRISCVTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::systemz:
- systemz::getSystemZTargetFeatures(Args, Features);
+ systemz::getSystemZTargetFeatures(D, Args, Features);
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
@@ -368,27 +369,14 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
break;
case llvm::Triple::msp430:
msp430::getMSP430TargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::ve:
+ ve::getVETargetFeatures(D, Args, Features);
}
- // Find the last of each feature.
- llvm::StringMap<unsigned> LastOpt;
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- StringRef Name = Features[I];
- assert(Name[0] == '-' || Name[0] == '+');
- LastOpt[Name.drop_front(1)] = I;
- }
-
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- // If this feature was overridden, ignore it.
- StringRef Name = Features[I];
- llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
- assert(LastI != LastOpt.end());
- unsigned Last = LastI->second;
- if (Last != I)
- continue;
-
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back(Name.data());
+ for (auto Feature : unifyTargetFeatures(Features)) {
+ CmdArgs.push_back(IsAux ? "-aux-target-feature" : "-target-feature");
+ CmdArgs.push_back(Feature.data());
}
}
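
The removed loop above is the "last occurrence of each feature wins" de-duplication; the new code delegates it to unifyTargetFeatures() and, when called for an aux target (IsAux), emits -aux-target-feature instead of -target-feature. A standalone sketch of the same last-wins pass, kept away from the driver types:

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"
    #include <vector>

    // Keep only the last "+feat"/"-feat" entry per feature name, preserving the
    // relative order of the survivors (what ends up as -target-feature args).
    static std::vector<llvm::StringRef>
    unifyLastWins(const std::vector<llvm::StringRef> &Features) {
      llvm::StringMap<size_t> LastIdx;
      for (size_t I = 0; I < Features.size(); ++I)
        LastIdx[Features[I].drop_front(1)] = I; // strip the leading '+'/'-'
      std::vector<llvm::StringRef> Out;
      for (size_t I = 0; I < Features.size(); ++I)
        if (LastIdx.lookup(Features[I].drop_front(1)) == I)
          Out.push_back(Features[I]);
      return Out;
    }

    int main() {
      std::vector<llvm::StringRef> F = {"+neon", "-neon", "+crc", "+neon"};
      auto U = unifyLastWins(F); // {"+crc", "+neon"}
      return (U.size() == 2 && U[0] == "+crc" && U[1] == "+neon") ? 0 : 1;
    }
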
@@ -464,6 +452,11 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
}
}
+ // OPT_fignore_exceptions means exceptions could still be thrown,
+ // but no cleanup or catch would happen in the current module.
+ // So we do not set EH to false.
+ Args.AddLastArg(CmdArgs, options::OPT_fignore_exceptions);
+
if (EH)
CmdArgs.push_back("-fexceptions");
}
@@ -505,7 +498,7 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::DebugLineTablesOnly;
if (A.getOption().matches(options::OPT_gline_directives_only))
return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::LimitedDebugInfo;
+ return codegenoptions::DebugInfoConstructor;
}
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
@@ -522,7 +515,7 @@ static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
static bool useFramePointerForTargetByDefault(const ArgList &Args,
const llvm::Triple &Triple) {
- if (Args.hasArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_pg) && !Args.hasArg(options::OPT_mfentry))
return true;
switch (Triple.getArch()) {
@@ -553,6 +546,13 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
Triple.isOSHurd()) {
switch (Triple.getArch()) {
// Don't use a frame pointer on linux if optimizing for certain targets.
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (Triple.isAndroid())
+ return true;
+ LLVM_FALLTHROUGH;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::mips:
@@ -721,38 +721,6 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
}
}
-static void appendUserToPath(SmallVectorImpl<char> &Result) {
-#ifdef LLVM_ON_UNIX
- const char *Username = getenv("LOGNAME");
-#else
- const char *Username = getenv("USERNAME");
-#endif
- if (Username) {
- // Validate that LoginName can be used in a path, and get its length.
- size_t Len = 0;
- for (const char *P = Username; *P; ++P, ++Len) {
- if (!clang::isAlphanumeric(*P) && *P != '_') {
- Username = nullptr;
- break;
- }
- }
-
- if (Username && Len > 0) {
- Result.append(Username, Username + Len);
- return;
- }
- }
-
-// Fallback to user id.
-#ifdef LLVM_ON_UNIX
- std::string UID = llvm::utostr(getuid());
-#else
- // FIXME: Windows seems to have an 'SID' that might work.
- std::string UID = "9999";
-#endif
- Result.append(UID.begin(), UID.end());
-}
-
static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
const Driver &D, const InputInfo &Output,
const ArgList &Args,
@@ -808,8 +776,8 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fprofile-instrument=clang");
if (TC.getTriple().isWindowsMSVCEnvironment()) {
// Add dependent lib for clang_rt.profile
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "profile")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
}
}
@@ -826,8 +794,9 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
if (PGOGenArg) {
if (TC.getTriple().isWindowsMSVCEnvironment()) {
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "profile")));
+ // Add dependent lib for clang_rt.profile
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
}
if (PGOGenArg->getOption().matches(
PGOGenerateArg ? options::OPT_fprofile_generate_EQ
@@ -856,11 +825,10 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
}
- bool EmitCovNotes = Args.hasArg(options::OPT_ftest_coverage) ||
+ bool EmitCovNotes = Args.hasFlag(options::OPT_ftest_coverage,
+ options::OPT_fno_test_coverage, false) ||
Args.hasArg(options::OPT_coverage);
- bool EmitCovData = Args.hasFlag(options::OPT_fprofile_arcs,
- options::OPT_fno_profile_arcs, false) ||
- Args.hasArg(options::OPT_coverage);
+ bool EmitCovData = TC.needsGCovInstrumentation(Args);
if (EmitCovNotes)
CmdArgs.push_back("-femit-coverage-notes");
if (EmitCovData)
@@ -1190,12 +1158,14 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_MP);
Args.AddLastArg(CmdArgs, options::OPT_MV);
- // Add offload include arguments specific for CUDA. This must happen before
- // we -I or -include anything else, because we must pick up the CUDA headers
- // from the particular CUDA installation, rather than from e.g.
- // /usr/local/include.
+ // Add offload include arguments specific for CUDA/HIP. This must happen
+ // before we -I or -include anything else, because we must pick up the
+ // CUDA/HIP headers from the particular CUDA/ROCm installation, rather than
+ // from e.g. /usr/local/include.
if (JA.isOffloading(Action::OFK_Cuda))
getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
+ if (JA.isOffloading(Action::OFK_HIP))
+ getToolChain().AddHIPIncludeArgs(Args, CmdArgs);
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
@@ -1212,7 +1182,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-include");
- CmdArgs.push_back("__clang_openmp_math_declares.h");
+ CmdArgs.push_back("__clang_openmp_device_functions.h");
}
// Add -i* options, and automatically translate to
@@ -1227,6 +1197,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (YcArg && JA.getKind() >= Action::PrecompileJobClass &&
JA.getKind() <= Action::AssembleJobClass) {
CmdArgs.push_back(Args.MakeArgString("-building-pch-with-obj"));
+ CmdArgs.push_back(Args.MakeArgString("-fpch-instantiate-templates"));
}
if (YcArg || YuArg) {
StringRef ThroughHeader = YcArg ? YcArg->getValue() : YuArg->getValue();
@@ -1404,20 +1375,6 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
}
}
-static bool isNoCommonDefault(const llvm::Triple &Triple) {
- switch (Triple.getArch()) {
- default:
- if (Triple.isOSFuchsia())
- return true;
- return false;
-
- case llvm::Triple::xcore:
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- return true;
- }
-}
-
static bool hasMultipleInvocations(const llvm::Triple &Triple,
const ArgList &Args) {
// Supported only on Darwin where we invoke the compiler multiple times
@@ -1594,7 +1551,7 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
const ToolChain &TC = getToolChain();
// Add the target features
- getTargetFeatures(TC, EffectiveTriple, Args, CmdArgs, false);
+ getTargetFeatures(TC.getDriver(), EffectiveTriple, Args, CmdArgs, false);
// Add target specific flags.
switch (TC.getArch()) {
@@ -1662,6 +1619,10 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
case llvm::Triple::wasm64:
AddWebAssemblyTargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::ve:
+ AddVETargetArgs(Args, CmdArgs);
+ break;
}
}
@@ -1970,6 +1931,36 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
}
+static void SetRISCVSmallDataLimit(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+ const llvm::Triple &Triple = TC.getTriple();
+ // The default small data limit is eight.
+ const char *SmallDataLimit = "8";
+ // Get the small data limit.
+ if (Args.getLastArg(options::OPT_shared, options::OPT_fpic,
+ options::OPT_fPIC)) {
+ // Linker relaxation is not supported for PIC.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
+ } else if (Args.getLastArgValue(options::OPT_mcmodel_EQ)
+ .equals_lower("large") &&
+ (Triple.getArch() == llvm::Triple::riscv64)) {
+ // Linker relaxation is not supported for RV64 with the large code model.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
+ } else if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ SmallDataLimit = A->getValue();
+ }
+ // Forward the -msmall-data-limit= option.
+ CmdArgs.push_back("-msmall-data-limit");
+ CmdArgs.push_back(SmallDataLimit);
+}
+
void Clang::AddRISCVTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const llvm::Triple &Triple = getToolChain().getTriple();
@@ -1977,6 +1968,8 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName.data());
+
+ SetRISCVSmallDataLimit(getToolChain(), Args, CmdArgs);
}
void Clang::AddSparcTargetArgs(const ArgList &Args,
@@ -2003,70 +1996,30 @@ void Clang::AddSystemZTargetArgs(const ArgList &Args,
options::OPT_mno_backchain, false);
bool HasPackedStack = Args.hasFlag(options::OPT_mpacked_stack,
options::OPT_mno_packed_stack, false);
- if (HasBackchain && HasPackedStack) {
+ systemz::FloatABI FloatABI =
+ systemz::getSystemZFloatABI(getToolChain().getDriver(), Args);
+ bool HasSoftFloat = (FloatABI == systemz::FloatABI::Soft);
+ if (HasBackchain && HasPackedStack && !HasSoftFloat) {
const Driver &D = getToolChain().getDriver();
D.Diag(diag::err_drv_unsupported_opt)
- << Args.getLastArg(options::OPT_mpacked_stack)->getAsString(Args) +
- " " + Args.getLastArg(options::OPT_mbackchain)->getAsString(Args);
+ << "-mpacked-stack -mbackchain -mhard-float";
}
if (HasBackchain)
CmdArgs.push_back("-mbackchain");
if (HasPackedStack)
CmdArgs.push_back("-mpacked-stack");
-}
-
-static void addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (Args.hasArg(options::OPT_mbranches_within_32B_boundaries)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-x86-branches-within-32B-boundaries");
- }
- if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_boundary_EQ)) {
- StringRef Value = A->getValue();
- unsigned Boundary;
- if (Value.getAsInteger(10, Boundary) || Boundary < 16 ||
- !llvm::isPowerOf2_64(Boundary)) {
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Value << A->getOption().getName();
- } else {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(
- Args.MakeArgString("-x86-align-branch-boundary=" + Twine(Boundary)));
- }
- }
- if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_EQ)) {
- std::string AlignBranch;
- for (StringRef T : A->getValues()) {
- if (T != "fused" && T != "jcc" && T != "jmp" && T != "call" &&
- T != "ret" && T != "indirect")
- D.Diag(diag::err_drv_invalid_malign_branch_EQ)
- << T << "fused, jcc, jmp, call, ret, indirect";
- if (!AlignBranch.empty())
- AlignBranch += '+';
- AlignBranch += T;
- }
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-x86-align-branch=" + AlignBranch));
- }
- if (const Arg *A =
- Args.getLastArg(options::OPT_malign_branch_prefix_size_EQ)) {
- StringRef Value = A->getValue();
- unsigned PrefixSize;
- if (Value.getAsInteger(10, PrefixSize) || PrefixSize > 5) {
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Value << A->getOption().getName();
- } else {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-x86-align-branch-prefix-size=" +
- Twine(PrefixSize)));
- }
+ if (HasSoftFloat) {
+ // Floating point operations and argument passing are soft.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
}
}
void Clang::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const Driver &D = getToolChain().getDriver();
- addX86AlignBranchArgs(D, Args, CmdArgs);
+ addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/false);
if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
Args.hasArg(options::OPT_mkernel) ||
@@ -2167,6 +2120,12 @@ void Clang::AddWebAssemblyTargetArgs(const ArgList &Args,
}
}
+void Clang::AddVETargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
+ // Floating point operations and argument passing are hard.
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+}
+
void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
StringRef Target, const InputInfo &Output,
const InputInfo &Input, const ArgList &Args) const {
@@ -2421,7 +2380,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::LimitedDebugInfo,
+ codegenoptions::DebugInfoConstructor,
DwarfVersion, llvm::DebuggerKind::Default);
}
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
@@ -2480,7 +2439,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
bool OFastEnabled, const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs,
+ const JobAction &JA) {
// Handle various floating point optimization flags, mapping them to the
// appropriate LLVM code generation flags. This is complicated by several
// "umbrella" flags, so we do this by stepping through the flags incrementally
@@ -2502,10 +2462,17 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef FPModel = "";
// -ffp-exception-behavior options: strict, maytrap, ignore
StringRef FPExceptionBehavior = "";
- StringRef DenormalFPMath = "";
+ const llvm::DenormalMode DefaultDenormalFPMath =
+ TC.getDefaultDenormalModeForType(Args, JA);
+ const llvm::DenormalMode DefaultDenormalFP32Math =
+ TC.getDefaultDenormalModeForType(Args, JA, &llvm::APFloat::IEEEsingle());
+
+ llvm::DenormalMode DenormalFPMath = DefaultDenormalFPMath;
+ llvm::DenormalMode DenormalFP32Math = DefaultDenormalFP32Math;
StringRef FPContract = "";
bool StrictFPModel = false;
+
if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
CmdArgs.push_back(A->getValue());
@@ -2527,8 +2494,13 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
ReciprocalMath = false;
SignedZeros = true;
// -fno_fast_math restores default denormal and fpcontract handling
- DenormalFPMath = "";
FPContract = "";
+ DenormalFPMath = llvm::DenormalMode::getIEEE();
+
+ // FIXME: The target may have picked a non-IEEE default mode here based on
+ // -cl-denorms-are-zero. Should the target consider -fp-model interaction?
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
+
StringRef Val = A->getValue();
if (OFastEnabled && !Val.equals("fast")) {
// Only -ffp-model=fast is compatible with OFast, ignore.
@@ -2562,6 +2534,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
optID = options::OPT_frounding_math;
FPExceptionBehavior = "strict";
FPModel = Val;
+ FPContract = "off";
TrappingMath = true;
} else
D.Diag(diag::err_drv_unsupported_option_argument)
@@ -2621,7 +2594,19 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
break;
case options::OPT_fdenormal_fp_math_EQ:
- DenormalFPMath = A->getValue();
+ DenormalFPMath = llvm::parseDenormalFPAttribute(A->getValue());
+ if (!DenormalFPMath.isValid()) {
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ break;
+
+ case options::OPT_fdenormal_fp_math_f32_EQ:
+ DenormalFP32Math = llvm::parseDenormalFPAttribute(A->getValue());
+ if (!DenormalFP32Math.isValid()) {
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
break;
// Validate and pass through -ffp-contract option.
@@ -2690,8 +2675,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
SignedZeros = true;
TrappingMath = true;
FPExceptionBehavior = "strict";
- // -fno_unsafe_math_optimizations restores default denormal handling
- DenormalFPMath = "";
+
+ // The target may have opted to flush by default, so force IEEE.
+ DenormalFPMath = llvm::DenormalMode::getIEEE();
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
break;
case options::OPT_Ofast:
@@ -2724,7 +2711,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
TrappingMath = false;
RoundingFPMath = false;
// -fno_fast_math restores default denormal and fpcontract handling
- DenormalFPMath = "";
+ DenormalFPMath = DefaultDenormalFPMath;
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
FPContract = "";
break;
}
@@ -2734,7 +2722,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (HonorINFs && HonorNaNs &&
!AssociativeMath && !ReciprocalMath &&
SignedZeros && TrappingMath && RoundingFPMath &&
- DenormalFPMath.empty() && FPContract.empty())
+ (FPContract.equals("off") || FPContract.empty()) &&
+ DenormalFPMath == llvm::DenormalMode::getIEEE() &&
+ DenormalFP32Math == llvm::DenormalMode::getIEEE())
// OK: Current Arg doesn't conflict with -ffp-model=strict
;
else {
@@ -2780,9 +2770,21 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
} else if (TrappingMathPresent)
CmdArgs.push_back("-fno-trapping-math");
- if (!DenormalFPMath.empty())
- CmdArgs.push_back(
- Args.MakeArgString("-fdenormal-fp-math=" + DenormalFPMath));
+ // The default is IEEE.
+ if (DenormalFPMath != llvm::DenormalMode::getIEEE()) {
+ llvm::SmallString<64> DenormFlag;
+ llvm::raw_svector_ostream ArgStr(DenormFlag);
+ ArgStr << "-fdenormal-fp-math=" << DenormalFPMath;
+ CmdArgs.push_back(Args.MakeArgString(ArgStr.str()));
+ }
+
+ // Add f32 specific denormal mode flag if it's different.
+ if (DenormalFP32Math != DenormalFPMath) {
+ llvm::SmallString<64> DenormFlag;
+ llvm::raw_svector_ostream ArgStr(DenormFlag);
+ ArgStr << "-fdenormal-fp-math-f32=" << DenormalFP32Math;
+ CmdArgs.push_back(Args.MakeArgString(ArgStr.str()));
+ }
if (!FPContract.empty())
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + FPContract));
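
In the floating-point hunks above, denormal handling now flows through llvm::DenormalMode values (with target-provided defaults) instead of a raw string: -fdenormal-fp-math= is emitted only when the mode is not IEEE, and a separate -fdenormal-fp-math-f32= is added only when the f32 mode differs from the general one. A minimal round trip through the same LLVM API (sketch; assumes the llvm/ADT/FloatingPointMode.h interface used in the hunk):

    #include "llvm/ADT/FloatingPointMode.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // "preserve-sign" is one of the accepted -fdenormal-fp-math= values.
      llvm::DenormalMode Mode = llvm::parseDenormalFPAttribute("preserve-sign");
      if (!Mode.isValid() || Mode == llvm::DenormalMode::getIEEE())
        return 0; // IEEE is the default; no flag would be emitted.
      llvm::SmallString<64> Flag;
      llvm::raw_svector_ostream OS(Flag);
      OS << "-fdenormal-fp-math=" << Mode; // "output,input", e.g. preserve-sign,preserve-sign
      llvm::outs() << Flag << "\n";
      return 0;
    }
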
@@ -2957,6 +2959,22 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
}
}
+static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const llvm::Triple &EffectiveTriple = TC.getEffectiveTriple();
+
+ if (!EffectiveTriple.isOSLinux())
+ return;
+
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isSystemZ() &&
+ !EffectiveTriple.isPPC64())
+ return;
+
+ if (Args.hasFlag(options::OPT_fstack_clash_protection,
+ options::OPT_fnostack_clash_protection, false))
+ CmdArgs.push_back("-fstack-clash-protection");
+}
+
static void RenderTrivialAutoVarInitOptions(const Driver &D,
const ToolChain &TC,
const ArgList &Args,
@@ -2999,9 +3017,26 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
CmdArgs.push_back(
Args.MakeArgString("-ftrivial-auto-var-init=" + TrivialAutoVarInit));
}
+
+ if (Arg *A =
+ Args.getLastArg(options::OPT_ftrivial_auto_var_init_stop_after)) {
+ if (!Args.hasArg(options::OPT_ftrivial_auto_var_init) ||
+ StringRef(
+ Args.getLastArg(options::OPT_ftrivial_auto_var_init)->getValue()) ==
+ "uninitialized")
+ D.Diag(diag::err_drv_trivial_auto_var_init_stop_after_missing_dependency);
+ A->claim();
+ StringRef Val = A->getValue();
+ if (std::stoi(Val.str()) <= 0)
+ D.Diag(diag::err_drv_trivial_auto_var_init_stop_after_invalid_value);
+ CmdArgs.push_back(
+ Args.MakeArgString("-ftrivial-auto-var-init-stop-after=" + Val));
+ }
}
static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
+ // cl-denorms-are-zero is not forwarded. It is translated into a generic flag
+ // for denormal flushing handling based on the target.
const unsigned ForwardedArguments[] = {
options::OPT_cl_opt_disable,
options::OPT_cl_strict_aliasing,
@@ -3012,7 +3047,6 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
options::OPT_cl_fast_relaxed_math,
options::OPT_cl_mad_enable,
options::OPT_cl_no_signed_zeros,
- options::OPT_cl_denorms_are_zero,
options::OPT_cl_fp32_correctly_rounded_divide_sqrt,
options::OPT_cl_uniform_work_group_size
};
@@ -3136,11 +3170,13 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
CmdArgs.push_back("-fno-math-builtin");
}
-void Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
- llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Result);
- llvm::sys::path::append(Result, "org.llvm.clang.");
- appendUserToPath(Result);
- llvm::sys::path::append(Result, "ModuleCache");
+bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
+ if (llvm::sys::path::cache_directory(Result)) {
+ llvm::sys::path::append(Result, "clang");
+ llvm::sys::path::append(Result, "ModuleCache");
+ return true;
+ }
+ return false;
}
static void RenderModulesOptions(Compilation &C, const Driver &D,
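
The getDefaultModuleCachePath() change above moves the default module cache from a per-user directory under the system temp path to the platform cache directory (e.g. ~/.cache/clang/ModuleCache on Linux), and the helper can now fail; the RenderModulesOptions hunk just below simply skips -fmodules-cache-path= in that case. A small sketch of the underlying llvm::sys::path::cache_directory call:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::SmallString<128> Path;
      // Returns false when no cache directory is known for the host.
      if (llvm::sys::path::cache_directory(Path)) {
        llvm::sys::path::append(Path, "clang", "ModuleCache");
        llvm::outs() << Path << "\n"; // e.g. /home/user/.cache/clang/ModuleCache
      }
      return 0;
    }
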
@@ -3197,6 +3233,7 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
if (Arg *A = Args.getLastArg(options::OPT_fmodules_cache_path))
Path = A->getValue();
+ bool HasPath = true;
if (C.isForDiagnostics()) {
// When generating crash reports, we want to emit the modules along with
// the reproduction sources, so we ignore any provided module path.
@@ -3205,12 +3242,16 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
llvm::sys::path::append(Path, "modules");
} else if (Path.empty()) {
// No module path was provided: use the default.
- Driver::getDefaultModuleCachePath(Path);
+ HasPath = Driver::getDefaultModuleCachePath(Path);
}
- const char Arg[] = "-fmodules-cache-path=";
- Path.insert(Path.begin(), Arg, Arg + strlen(Arg));
- CmdArgs.push_back(Args.MakeArgString(Path));
+ // `HasPath` will only be false if getDefaultModuleCachePath() fails.
+ // That said, such a failure is unlikely, and skipping the cache is harmless.
+ if (HasPath) {
+ const char Arg[] = "-fmodules-cache-path=";
+ Path.insert(Path.begin(), Arg, Arg + strlen(Arg));
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ }
}
if (HaveModules) {
@@ -3469,9 +3510,9 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back("-fno-diagnostics-fixit-info");
// Enable -fdiagnostics-show-option by default.
- if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
- options::OPT_fno_diagnostics_show_option))
- CmdArgs.push_back("-fdiagnostics-show-option");
+ if (!Args.hasFlag(options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option, true))
+ CmdArgs.push_back("-fno-diagnostics-show-option");
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
@@ -3574,8 +3615,7 @@ static DwarfFissionKind getDebugFissionKind(const Driver &D,
static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
- bool EmitCodeView, bool IsWindowsMSVC,
- ArgStringList &CmdArgs,
+ bool EmitCodeView, ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
@@ -3613,7 +3653,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (const Arg *A =
Args.getLastArg(options::OPT_g_Group, options::OPT_gsplit_dwarf,
options::OPT_gsplit_dwarf_EQ)) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
@@ -3708,10 +3748,9 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// not to include any column info.
if (const Arg *A = Args.getLastArg(options::OPT_gcolumn_info))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- /*Default=*/!EmitCodeView &&
- DebuggerTuning != llvm::DebuggerKind::SCE))
- CmdArgs.push_back("-dwarf-column-info");
+ if (!Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
+ !EmitCodeView && DebuggerTuning != llvm::DebuggerKind::SCE))
+ CmdArgs.push_back("-gno-column-info");
// FIXME: Move backend command line options to the module.
// If -gline-tables-only or -gline-directives-only is the last option it wins.
@@ -3719,7 +3758,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC)) {
if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
@@ -3739,7 +3778,9 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
TC.GetDefaultStandaloneDebug());
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (DebugInfoKind == codegenoptions::LimitedDebugInfo && NeedFullDebug)
+ if ((DebugInfoKind == codegenoptions::LimitedDebugInfo ||
+ DebugInfoKind == codegenoptions::DebugInfoConstructor) &&
+ NeedFullDebug)
DebugInfoKind = codegenoptions::FullDebugInfo;
if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
@@ -3912,7 +3953,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- const llvm::Triple *AuxTriple = IsCuda ? TC.getAuxTriple() : nullptr;
+ const llvm::Triple *AuxTriple =
+ (IsCuda || IsHIP) ? TC.getAuxTriple() : nullptr;
bool IsWindowsMSVC = RawTriple.isWindowsMSVCEnvironment();
bool IsIAMCU = RawTriple.isOSIAMCU();
@@ -3977,6 +4019,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
}
+ if (Args.hasFlag(options::OPT_fsycl, options::OPT_fno_sycl, false)) {
+ CmdArgs.push_back("-fsycl");
+ CmdArgs.push_back("-fsycl-is-device");
+
+ if (Arg *A = Args.getLastArg(options::OPT_sycl_std_EQ)) {
+ A->render(Args, CmdArgs);
+ } else {
+ // Ensure the default version in SYCL mode is 1.2.1 (aka 2017)
+ CmdArgs.push_back("-sycl-std=2017");
+ }
+ }
+
if (IsOpenMPDevice) {
// We have to pass the triple of the host if compiling for an OpenMP device.
std::string NormalizedTriple =
@@ -3990,9 +4044,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Triple.isOSWindows() && (Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::thumb)) {
unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
- unsigned Version;
- Triple.getArchName().substr(Offset).getAsInteger(10, Version);
- if (Version < 7)
+ unsigned Version = 0;
+ bool Failure =
+ Triple.getArchName().substr(Offset).consumeInteger(10, Version);
+ if (Failure || Version < 7)
D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
<< TripleStr;
}
@@ -4083,7 +4138,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef ArgStr =
Args.hasArg(options::OPT_interface_stub_version_EQ)
? Args.getLastArgValue(options::OPT_interface_stub_version_EQ)
- : "experimental-ifs-v1";
+ : "experimental-ifs-v2";
CmdArgs.push_back("-emit-interface-stubs");
CmdArgs.push_back(
Args.MakeArgString(Twine("-interface-stub-version=") + ArgStr.str()));
@@ -4153,8 +4208,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_function_sections,
options::OPT_fdata_sections,
options::OPT_fno_data_sections,
+ options::OPT_fbasic_block_sections_EQ,
+ options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names,
options::OPT_funique_section_names,
options::OPT_fno_unique_section_names,
+ options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names,
options::OPT_mrestrict_it,
options::OPT_mno_restrict_it,
options::OPT_mstackrealign,
@@ -4195,7 +4255,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mdisable-tail-calls");
RenderFloatingPointOptions(TC, D, isOptimizationLevelFast(Args), Args,
- CmdArgs);
+ CmdArgs, JA);
// Render ABI arguments
switch (TC.getArch()) {
@@ -4240,8 +4300,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
II.getInputArg().renderAsInput(Args, CmdArgs);
}
- C.addCommand(std::make_unique<Command>(JA, *this, D.getClangProgramPath(),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF8(),
+ D.getClangProgramPath(), CmdArgs, Inputs));
return;
}
@@ -4329,14 +4390,24 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(TC, Args);
- const char *RMName = RelocationModelName(RelocationModel);
+ bool IsROPI = RelocationModel == llvm::Reloc::ROPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI;
+ bool IsRWPI = RelocationModel == llvm::Reloc::RWPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI;
+
+ if (Args.hasArg(options::OPT_mcmse) &&
+ !Args.hasArg(options::OPT_fallow_unsupported)) {
+ if (IsROPI)
+ D.Diag(diag::err_cmse_pi_are_incompatible) << IsROPI;
+ if (IsRWPI)
+ D.Diag(diag::err_cmse_pi_are_incompatible) << !IsRWPI;
+ }
- if ((RelocationModel == llvm::Reloc::ROPI ||
- RelocationModel == llvm::Reloc::ROPI_RWPI) &&
- types::isCXX(Input.getType()) &&
+ if (IsROPI && types::isCXX(Input.getType()) &&
!Args.hasArg(options::OPT_fallow_unsupported))
D.Diag(diag::err_drv_ropi_incompatible_with_cxx);
+ const char *RMName = RelocationModelName(RelocationModel);
if (RMName) {
CmdArgs.push_back("-mrelocation-model");
CmdArgs.push_back(RMName);
@@ -4360,15 +4431,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
- CmdArgs.push_back("-mthread-model");
- if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) {
- if (!TC.isThreadModelSupported(A->getValue()))
- D.Diag(diag::err_drv_invalid_thread_model_for_target)
- << A->getValue() << A->getAsString(Args);
- CmdArgs.push_back(A->getValue());
+ // The default is -fno-semantic-interposition. We render it just because we
+ // require explicit -fno-semantic-interposition to infer dso_local.
+ if (Arg *A = Args.getLastArg(options::OPT_fsemantic_interposition,
+ options::OPT_fno_semantic_interposition))
+ if (RelocationModel != llvm::Reloc::Static && !IsPIE)
+ A->render(Args, CmdArgs);
+
+ {
+ std::string Model;
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) {
+ if (!TC.isThreadModelSupported(A->getValue()))
+ D.Diag(diag::err_drv_invalid_thread_model_for_target)
+ << A->getValue() << A->getAsString(Args);
+ Model = A->getValue();
+ } else
+ Model = TC.getThreadModel();
+ if (Model != "posix") {
+ CmdArgs.push_back("-mthread-model");
+ CmdArgs.push_back(Args.MakeArgString(Model));
+ }
}
- else
- CmdArgs.push_back(Args.MakeArgString(TC.getThreadModel()));
Args.AddLastArg(CmdArgs, options::OPT_fveclib);
@@ -4450,6 +4533,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
CmdArgs.push_back("-fdefault-calling-conv=stdcall");
+ if (Args.hasArg(options::OPT_fenable_matrix)) {
+ // enable-matrix is needed by both the LangOpts and by LLVM.
+ CmdArgs.push_back("-fenable-matrix");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-matrix");
+ }
+
CodeGenOptions::FramePointerKind FPKeepKind =
getFramePointerKind(Args, RawTriple);
const char *FPKeepKindStr = nullptr;
@@ -4468,8 +4558,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(FPKeepKindStr);
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
- options::OPT_fno_zero_initialized_in_bss))
- CmdArgs.push_back("-mno-zero-initialized-in-bss");
+ options::OPT_fno_zero_initialized_in_bss, true))
+ CmdArgs.push_back("-fno-zero-initialized-in-bss");
bool OFastEnabled = isOptimizationLevelFast(Args);
// If -Ofast is the optimization level, then -fstrict-aliasing should be
@@ -4516,7 +4606,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("-split-stacks");
- RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs);
+ RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs, JA);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mdouble_EQ)) {
+ if (TC.getArch() == llvm::Triple::avr)
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
if (Arg *A = Args.getLastArg(options::OPT_LongDouble_Group)) {
if (TC.getTriple().isX86())
@@ -4532,9 +4630,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
bool IsIntegratedAssemblerDefault = TC.IsIntegratedAssemblerDefault();
- if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
- IsIntegratedAssemblerDefault))
- CmdArgs.push_back("-masm-verbose");
+ if (!Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ IsIntegratedAssemblerDefault))
+ CmdArgs.push_back("-fno-verbose-asm");
if (!TC.useIntegratedAs())
CmdArgs.push_back("-no-integrated-as");
@@ -4550,8 +4648,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Enable -mconstructor-aliases except on darwin, where we have to work around
// a linker bug (see <rdar://problem/7651567>), and CUDA device code, where
- // aliases aren't supported.
- if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX())
+ // aliases aren't supported. Similarly, aliases aren't yet supported for AIX.
+ if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX() && !RawTriple.isOSAIX())
CmdArgs.push_back("-mconstructor-aliases");
// Darwin's kernel doesn't support guard variables; just die if we
@@ -4560,7 +4658,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforbid-guard-variables");
if (Args.hasFlag(options::OPT_mms_bitfields, options::OPT_mno_ms_bitfields,
- false)) {
+ Triple.isWindowsGNUEnvironment())) {
CmdArgs.push_back("-mms-bitfields");
}
@@ -4596,14 +4694,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AsynchronousUnwindTables))
CmdArgs.push_back("-munwind-tables");
+ // Prepare `-aux-target-cpu` and `-aux-target-feature` unless
+ // `--gpu-use-aux-triple-only` is specified.
+ if (!Args.getLastArg(options::OPT_gpu_use_aux_triple_only) &&
+ ((IsCuda && JA.isDeviceOffloading(Action::OFK_Cuda)) ||
+ (IsHIP && JA.isDeviceOffloading(Action::OFK_HIP)))) {
+ const ArgList &HostArgs =
+ C.getArgsForToolChain(nullptr, StringRef(), Action::OFK_None);
+ std::string HostCPU =
+ getCPUName(HostArgs, *TC.getAuxTriple(), /*FromAs*/ false);
+ if (!HostCPU.empty()) {
+ CmdArgs.push_back("-aux-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(HostCPU));
+ }
+ getTargetFeatures(D, *TC.getAuxTriple(), HostArgs, CmdArgs,
+ /*ForAS*/ false, /*IsAux*/ true);
+ }
+
TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
// FIXME: Handle -mtune=.
(void)Args.hasArg(options::OPT_mtune_EQ);
if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- CmdArgs.push_back("-mcode-model");
- CmdArgs.push_back(A->getValue());
+ StringRef CM = A->getValue();
+ if (CM == "small" || CM == "kernel" || CM == "medium" || CM == "large" ||
+ CM == "tiny")
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << CM << A->getOption().getName();
}
if (Arg *A = Args.getLastArg(options::OPT_mtls_size_EQ)) {
@@ -4638,8 +4758,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
DwarfFissionKind DwarfFission;
- RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, IsWindowsMSVC,
- CmdArgs, DebugInfoKind, DwarfFission);
+ RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, CmdArgs,
+ DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
@@ -4685,11 +4805,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_v);
- Args.AddLastArg(CmdArgs, options::OPT_H);
+
+ if (Args.getLastArg(options::OPT_H)) {
+ CmdArgs.push_back("-H");
+ CmdArgs.push_back("-sys-header-deps");
+ }
+
if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
CmdArgs.push_back("-header-include-file");
CmdArgs.push_back(D.CCPrintHeadersFilename ? D.CCPrintHeadersFilename
: "-");
+ CmdArgs.push_back("-sys-header-deps");
}
Args.AddLastArg(CmdArgs, options::OPT_P);
Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
@@ -4712,6 +4838,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-ffunction-sections");
}
+ if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_sections_EQ)) {
+ StringRef Val = A->getValue();
+ if (Val != "all" && Val != "labels" && Val != "none" &&
+ !(Val.startswith("list=") && llvm::sys::fs::exists(Val.substr(5))))
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
+ }
+
if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
UseSeparateSections)) {
CmdArgs.push_back("-fdata-sections");
@@ -4721,14 +4857,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_unique_section_names, true))
CmdArgs.push_back("-fno-unique-section-names");
+ if (Args.hasFlag(options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names, false))
+ CmdArgs.push_back("-funique-internal-linkage-names");
+
+ if (Args.hasFlag(options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names, false))
+ CmdArgs.push_back("-funique-basic-block-section-names");
+
Args.AddLastArg(CmdArgs, options::OPT_finstrument_functions,
options::OPT_finstrument_functions_after_inlining,
options::OPT_finstrument_function_entry_bare);
- // NVPTX doesn't support PGO or coverage. There's no runtime support for
- // sampling, overhead of call arc collection is way too high and there's no
- // way to collect the output.
- if (!Triple.isNVPTX())
+ // NVPTX/AMDGCN doesn't support PGO or coverage. There's no runtime support
+ // for sampling, overhead of call arc collection is way too high and there's
+ // no way to collect the output.
+ if (!Triple.isNVPTX() && !Triple.isAMDGCN())
addPGOAndCoverageFlags(TC, C, D, Output, Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
@@ -4847,6 +4991,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
options::OPT_fno_trigraphs);
+
+ // HIP headers have minimum C++ standard requirements. Therefore set the
+ // default language standard.
+ if (IsHIP)
+ CmdArgs.push_back(IsWindowsMSVC ? "-std=c++14" : "-std=c++11");
}
// GCC's behavior for -Wwrite-strings is a bit strange:
@@ -4990,15 +5139,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Pass -fmessage-length=.
- CmdArgs.push_back("-fmessage-length");
+ unsigned MessageLength = 0;
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
- CmdArgs.push_back(A->getValue());
+ StringRef V(A->getValue());
+ if (V.getAsInteger(0, MessageLength))
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << V << A->getOption().getName();
} else {
// If -fmessage-length=N was not specified, determine whether this is a
// terminal and, if so, implicitly define -fmessage-length appropriately.
- unsigned N = llvm::sys::Process::StandardErrColumns();
- CmdArgs.push_back(Args.MakeArgString(Twine(N)));
+ MessageLength = llvm::sys::Process::StandardErrColumns();
}
+ if (MessageLength != 0)
+ CmdArgs.push_back(
+ Args.MakeArgString("-fmessage-length=" + Twine(MessageLength)));
// -fvisibility= and -fvisibility-ms-compat are of a piece.
if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
@@ -5027,7 +5181,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
options::OPT_fno_emulated_tls);
- Args.AddLastArg(CmdArgs, options::OPT_fkeep_static_consts);
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
@@ -5073,6 +5226,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
CmdArgs.push_back("-fopenmp-cuda-mode");
+ // When in OpenMP offloading mode with an NVPTX target, forward the
+ // cuda-parallel-target-regions flag.
+ if (Args.hasFlag(options::OPT_fopenmp_cuda_parallel_target_regions,
+ options::OPT_fno_openmp_cuda_parallel_target_regions,
+ /*Default=*/true))
+ CmdArgs.push_back("-fopenmp-cuda-parallel-target-regions");
+
// When in OpenMP offloading mode with NVPTX target, check if full runtime
// is required.
if (Args.hasFlag(options::OPT_fopenmp_cuda_force_full_runtime,
@@ -5193,11 +5353,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pthread);
- if (Args.hasFlag(options::OPT_mspeculative_load_hardening, options::OPT_mno_speculative_load_hardening,
- false))
+ if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening, false))
CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
+ RenderSCPOptions(TC, Args, CmdArgs);
RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
// Translate -mstackrealign
@@ -5243,8 +5404,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs);
- if (Args.hasFlag(options::OPT_fhip_new_launch_api,
- options::OPT_fno_hip_new_launch_api, false))
+ if (IsHIP && Args.hasFlag(options::OPT_fhip_new_launch_api,
+ options::OPT_fno_hip_new_launch_api, true))
CmdArgs.push_back("-fhip-new-launch-api");
if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
@@ -5328,7 +5489,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fuse-cxa-atexit is default.
if (!Args.hasFlag(
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
- !RawTriple.isOSWindows() &&
+ !RawTriple.isOSAIX() && !RawTriple.isOSWindows() &&
TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
@@ -5340,16 +5501,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RawTriple.isOSDarwin() && !KernelOrKext))
CmdArgs.push_back("-fregister-global-dtors-with-atexit");
- // -fms-extensions=0 is default.
- if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
- IsWindowsMSVC))
- CmdArgs.push_back("-fms-extensions");
-
// -fno-use-line-directives is default.
if (Args.hasFlag(options::OPT_fuse_line_directives,
options::OPT_fno_use_line_directives, false))
CmdArgs.push_back("-fuse-line-directives");
+ // -fms-extensions=0 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ IsWindowsMSVC))
+ CmdArgs.push_back("-fms-extensions");
+
// -fms-compatibility=0 is default.
bool IsMSVCCompat = Args.hasFlag(
options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility,
@@ -5463,11 +5624,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fpch_validate_input_files_content,
options::OPT_fno_pch_validate_input_files_content, false))
CmdArgs.push_back("-fvalidate-ast-input-files-content");
+ if (Args.hasFlag(options::OPT_fpch_instantiate_templates,
+ options::OPT_fno_pch_instantiate_templates, false))
+ CmdArgs.push_back("-fpch-instantiate-templates");
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_new_pass_manager,
options::OPT_fno_experimental_new_pass_manager);
- ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
+ ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, Inputs, CmdArgs, rewriteKind);
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
Input, CmdArgs);
@@ -5587,11 +5751,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_Qy, options::OPT_Qn, true))
CmdArgs.push_back("-Qn");
- // -fcommon is the default unless compiling kernel code or the target says so
- bool NoCommonDefault = KernelOrKext || isNoCommonDefault(RawTriple);
- if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common,
- !NoCommonDefault))
- CmdArgs.push_back("-fno-common");
+ // -fno-common is the default; set -fcommon only when that flag is given.
+ if (Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common, false))
+ CmdArgs.push_back("-fcommon");
// -fsigned-bitfields is default, and clang doesn't yet support
// -funsigned-bitfields.
@@ -5709,6 +5871,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_dM);
Args.AddLastArg(CmdArgs, options::OPT_dD);
+ Args.AddLastArg(CmdArgs, options::OPT_fmax_tokens_EQ);
+
// Handle serialized diagnostics.
if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
CmdArgs.push_back("-serialize-diagnostic-file");
@@ -5778,7 +5942,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: -fembed-bitcode -save-temps will save optimized bitcode instead of
// pristine IR generated by the frontend. Ideally, a new compile action should
// be added so both IR can be captured.
- if (C.getDriver().isSaveTempsEnabled() &&
+ if ((C.getDriver().isSaveTempsEnabled() ||
+ JA.isHostOffloading(Action::OFK_OpenMP)) &&
!(C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO()) &&
isa<CompileJobAction>(JA))
CmdArgs.push_back("-disable-llvm-passes");
@@ -5808,7 +5973,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Arg->render(Args, OriginalArgs);
SmallString<256> Flags;
- Flags += Exec;
+ EscapeSpacesAndBackslashes(Exec, Flags);
for (const char *OriginalArg : OriginalArgs) {
SmallString<128> EscapedArg;
EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
@@ -5920,10 +6085,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (SplitLTOUnit)
CmdArgs.push_back("-fsplit-lto-unit");
- if (Arg *A = Args.getLastArg(options::OPT_fexperimental_isel,
- options::OPT_fno_experimental_isel)) {
+ if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
+ options::OPT_fno_global_isel)) {
CmdArgs.push_back("-mllvm");
- if (A->getOption().matches(options::OPT_fexperimental_isel)) {
+ if (A->getOption().matches(options::OPT_fglobal_isel)) {
CmdArgs.push_back("-global-isel=1");
// GISel is on by default on AArch64 -O0, so don't bother adding
@@ -5942,9 +6107,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-global-isel-abort=2");
if (!IsArchSupported)
- D.Diag(diag::warn_drv_experimental_isel_incomplete) << Triple.getArchName();
+ D.Diag(diag::warn_drv_global_isel_incomplete) << Triple.getArchName();
else
- D.Diag(diag::warn_drv_experimental_isel_incomplete_opt);
+ D.Diag(diag::warn_drv_global_isel_incomplete_opt);
}
} else {
CmdArgs.push_back("-global-isel=0");
@@ -5968,6 +6133,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforce-enable-int128");
}
+ if (Args.hasFlag(options::OPT_fkeep_static_consts,
+ options::OPT_fno_keep_static_consts, false))
+ CmdArgs.push_back("-fkeep-static-consts");
+
if (Args.hasFlag(options::OPT_fcomplete_member_pointers,
options::OPT_fno_complete_member_pointers, false))
CmdArgs.push_back("-fcomplete-member-pointers");
@@ -5979,11 +6148,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_moutline,
options::OPT_mno_outline)) {
if (A->getOption().matches(options::OPT_moutline)) {
- // We only support -moutline in AArch64 right now. If we're not compiling
- // for AArch64, emit a warning and ignore the flag. Otherwise, add the
- // proper mllvm flags.
- if (Triple.getArch() != llvm::Triple::aarch64 &&
- Triple.getArch() != llvm::Triple::aarch64_32) {
+ // We only support -moutline in AArch64 and ARM targets right now. If
+ // we're not compiling for these, emit a warning and ignore the flag.
+ // Otherwise, add the proper mllvm flags.
+ if (!(Triple.isARM() || Triple.isThumb() ||
+ Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_32)) {
D.Diag(diag::warn_drv_moutline_unsupported_opt) << Triple.getArchName();
} else {
CmdArgs.push_back("-mllvm");
@@ -6055,19 +6225,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
auto CLCommand =
getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
C.addCommand(std::make_unique<FallbackCommand>(
- JA, *this, Exec, CmdArgs, Inputs, std::move(CLCommand)));
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ std::move(CLCommand)));
} else if (Args.hasArg(options::OPT__SLASH_fallback) &&
isa<PrecompileJobAction>(JA)) {
// In /fallback builds, run the main compilation even if the pch generation
// fails, so that the main compilation's fallback to cl.exe runs.
- C.addCommand(std::make_unique<ForceSuccessCommand>(JA, *this, Exec,
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<ForceSuccessCommand>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
} else if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
- C.addCommand(
- std::make_unique<CC1Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Make the compile command echo its inputs for /showFilenames.
@@ -6078,7 +6250,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_pg))
- if (FPKeepKind == CodeGenOptions::FramePointerKind::None)
+ if (FPKeepKind == CodeGenOptions::FramePointerKind::None &&
+ !Args.hasArg(options::OPT_mfentry))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
<< A->getAsString(Args);
@@ -6101,7 +6274,7 @@ Clang::Clang(const ToolChain &TC)
// CAUTION! The first constructor argument ("clang") is not arbitrary,
// as it is for other tools. Some operations on a Tool actually test
// whether that tool is Clang based on the Tool's Name as a string.
- : Tool("clang", "clang frontend", TC, RF_Full) {}
+ : Tool("clang", "clang frontend", TC) {}
Clang::~Clang() {}
@@ -6109,6 +6282,7 @@ Clang::~Clang() {}
///
/// Returns true if the runtime is non-fragile.
ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
+ const InputInfoList &inputs,
ArgStringList &cmdArgs,
RewriteKind rewriteKind) const {
// Look for the controlling runtime option.
@@ -6232,8 +6406,11 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
}
- cmdArgs.push_back(
- args.MakeArgString("-fobjc-runtime=" + runtime.getAsString()));
+ if (llvm::any_of(inputs, [](const InputInfo &input) {
+ return types::isObjC(input.getType());
+ }))
+ cmdArgs.push_back(
+ args.MakeArgString("-fobjc-runtime=" + runtime.getAsString()));
return runtime;
}
@@ -6303,6 +6480,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
codegenoptions::DebugInfoKind *DebugInfoKind,
bool *EmitCodeView) const {
unsigned RTOptionID = options::OPT__SLASH_MT;
+ bool isNVPTX = getToolChain().getTriple().isNVPTX();
if (Args.hasArg(options::OPT__SLASH_LDd))
// The /LDd option implies /MTd. The dependent lib part can be overridden,
@@ -6355,7 +6533,13 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("--dependent-lib=oldnames");
}
- Args.AddLastArg(CmdArgs, options::OPT_show_includes);
+ if (Arg *ShowIncludes =
+ Args.getLastArg(options::OPT__SLASH_showIncludes,
+ options::OPT__SLASH_showIncludes_user)) {
+ CmdArgs.push_back("--show-includes");
+ if (ShowIncludes->getOption().matches(options::OPT__SLASH_showIncludes))
+ CmdArgs.push_back("-sys-header-deps");
+ }
// This controls whether or not we emit RTTI data for polymorphic types.
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
@@ -6364,8 +6548,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
// This controls whether or not we emit stack-protector instrumentation.
// In MSVC, Buffer Security Check (/GS) is on by default.
- if (Args.hasFlag(options::OPT__SLASH_GS, options::OPT__SLASH_GS_,
- /*Default=*/true)) {
+ if (!isNVPTX && Args.hasFlag(options::OPT__SLASH_GS, options::OPT__SLASH_GS_,
+ /*Default=*/true)) {
CmdArgs.push_back("-stack-protector");
CmdArgs.push_back(Args.MakeArgString(Twine(LangOptions::SSPStrong)));
}
@@ -6376,7 +6560,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT_gline_tables_only)) {
*EmitCodeView = true;
if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ *DebugInfoKind = codegenoptions::DebugInfoConstructor;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
} else {
@@ -6385,7 +6569,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
const Driver &D = getToolChain().getDriver();
EHFlags EH = parseClangCLEHFlags(D, Args);
- if (EH.Synch || EH.Asynch) {
+ if (!isNVPTX && (EH.Synch || EH.Asynch)) {
if (types::isCXX(InputType))
CmdArgs.push_back("-fcxx-exceptions");
CmdArgs.push_back("-fexceptions");
@@ -6454,7 +6638,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT__SLASH_Gregcall)) {
unsigned DCCOptId = CCArg->getOption().getID();
const char *DCCFlag = nullptr;
- bool ArchSupported = true;
+ bool ArchSupported = !isNVPTX;
llvm::Triple::ArchType Arch = getToolChain().getArch();
switch (DCCOptId) {
case options::OPT__SLASH_Gd:
@@ -6560,7 +6744,8 @@ void ClangAs::AddMIPSTargetArgs(const ArgList &Args,
void ClangAs::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- addX86AlignBranchArgs(getToolChain().getDriver(), Args, CmdArgs);
+ addX86AlignBranchArgs(getToolChain().getDriver(), Args, CmdArgs,
+ /*IsLTO=*/false);
if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
StringRef Value = A->getValue();
@@ -6630,7 +6815,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the target features
- getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, true);
+ getTargetFeatures(D, Triple, Args, CmdArgs, true);
// Ignore explicit -force_cpusubtype_ALL option.
(void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
@@ -6672,7 +6857,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::LimitedDebugInfo
+ DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
: codegenoptions::NoDebugInfo);
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
@@ -6715,7 +6900,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
SmallString<256> Flags;
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- Flags += Exec;
+ EscapeSpacesAndBackslashes(Exec, Flags);
for (const char *OriginalArg : OriginalArgs) {
SmallString<128> EscapedArg;
EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
@@ -6792,7 +6977,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Begin OffloadBundler
@@ -6876,7 +7062,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, None));
}
@@ -6942,7 +7128,7 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, None));
}
@@ -6972,7 +7158,7 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
}
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, Inputs));
}
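As a side note on the -fmessage-length= hunk above: the driver now validates the user-supplied value and omits the option entirely when the effective width is zero. The standalone sketch below is not part of the patch; plain C++ with std::stoul stands in for StringRef::getAsInteger, and the TerminalColumns parameter stands in for llvm::sys::Process::StandardErrColumns().

    #include <optional>
    #include <string>

    // Returns the cc1 flag to forward, or an empty string when none is needed.
    // Invalid is set when the user value does not parse as an integer, where
    // the real driver emits err_drv_invalid_argument_to_option.
    std::string renderMessageLength(const std::optional<std::string> &UserValue,
                                    unsigned TerminalColumns, bool &Invalid) {
      Invalid = false;
      unsigned MessageLength = 0;
      if (UserValue) {
        try {
          MessageLength = static_cast<unsigned>(std::stoul(*UserValue));
        } catch (...) {
          Invalid = true;
          return {};
        }
      } else {
        // No explicit option: fall back to the terminal width, if any.
        MessageLength = TerminalColumns;
      }
      // Only forward the option when the value is non-zero.
      return MessageLength != 0
                 ? "-fmessage-length=" + std::to_string(MessageLength)
                 : std::string();
    }

With no explicit -fmessage-length= and stderr redirected to a file, TerminalColumns is 0 and no flag is forwarded, which is the behavioral change the hunk introduces.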
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index b345c02489d4..a607e3c27de9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -73,10 +73,13 @@ private:
llvm::opt::ArgStringList &CmdArgs) const;
void AddWebAssemblyTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddVETargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
ObjCRuntime AddObjCRuntimeArgs(const llvm::opt::ArgList &args,
+ const InputInfoList &inputs,
llvm::opt::ArgStringList &cmdArgs,
RewriteKind rewrite) const;
@@ -118,7 +121,7 @@ public:
class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC)
- : Tool("clang::as", "clang integrated assembler", TC, RF_Full) {}
+ : Tool("clang::as", "clang integrated assembler", TC) {}
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const llvm::opt::ArgList &Args,
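The extra InputInfoList parameter added to AddObjCRuntimeArgs here pairs with the Clang.cpp hunk further up that emits -fobjc-runtime= only when some input is an Objective-C source. A minimal sketch of that gating, with an illustrative InputKind enum in place of types::ID and a plain vector in place of the driver's argument list:

    #include <algorithm>
    #include <string>
    #include <vector>

    enum class InputKind { C, CXX, ObjC, ObjCXX };

    // Stand-in for clang::driver::types::isObjC().
    static bool isObjC(InputKind K) {
      return K == InputKind::ObjC || K == InputKind::ObjCXX;
    }

    // Only emit -fobjc-runtime=<runtime> when an Objective-C input is present.
    void addObjCRuntimeFlag(const std::vector<InputKind> &Inputs,
                            const std::string &Runtime,
                            std::vector<std::string> &CmdArgs) {
      if (std::any_of(Inputs.begin(), Inputs.end(), isObjC))
        CmdArgs.push_back("-fobjc-runtime=" + Runtime);
    }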
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
index cf1d0d551e57..8dcfd4951bbf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
@@ -75,7 +75,7 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -92,7 +92,8 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// CloudABI - CloudABI tool chain which can call ld(1) directly.
@@ -102,7 +103,7 @@ CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
: Generic_ELF(D, Triple, Args) {
SmallString<128> P(getDriver().Dir);
llvm::sys::path::append(P, "..", getTriple().str(), "lib");
- getFilePaths().push_back(P.str());
+ getFilePaths().push_back(std::string(P.str()));
}
void CloudABI::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h
index cc381c2b1e1f..98bf23127706 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h
@@ -19,9 +19,9 @@ namespace tools {
/// cloudabi -- Directly call GNU Binutils linker
namespace cloudabi {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("cloudabi::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("cloudabi::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 37ec73468570..1cac5a0822a4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -12,6 +12,7 @@
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
+#include "Arch/VE.h"
#include "Arch/X86.h"
#include "HIP.h"
#include "Hexagon.h"
@@ -50,6 +51,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
@@ -82,6 +84,31 @@ void tools::handleTargetFeaturesGroup(const ArgList &Args,
}
}
+std::vector<StringRef>
+tools::unifyTargetFeatures(const std::vector<StringRef> &Features) {
+ std::vector<StringRef> UnifiedFeatures;
+ // Find the last of each feature.
+ llvm::StringMap<unsigned> LastOpt;
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ StringRef Name = Features[I];
+ assert(Name[0] == '-' || Name[0] == '+');
+ LastOpt[Name.drop_front(1)] = I;
+ }
+
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ // If this feature was overridden, ignore it.
+ StringRef Name = Features[I];
+ llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
+ assert(LastI != LastOpt.end());
+ unsigned Last = LastI->second;
+ if (Last != I)
+ continue;
+
+ UnifiedFeatures.push_back(Name);
+ }
+ return UnifiedFeatures;
+}
+
void tools::addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
const char *ArgName, const char *EnvVar) {
const char *DirList = ::getenv(EnvVar);
@@ -91,7 +118,7 @@ void tools::addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
return; // Nothing to do.
StringRef Name(ArgName);
- if (Name.equals("-I") || Name.equals("-L"))
+ if (Name.equals("-I") || Name.equals("-L") || Name.empty())
CombinedArg = true;
StringRef Dirs(DirList);
@@ -151,14 +178,12 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
for (const auto &II : Inputs) {
- // If the current tool chain refers to an OpenMP or HIP offloading host, we
- // should ignore inputs that refer to OpenMP or HIP offloading devices -
+ // If the current tool chain refers to an OpenMP offloading host, we
+ // should ignore inputs that refer to OpenMP offloading devices -
// they will be embedded according to a proper linker script.
if (auto *IA = II.getAction())
if ((JA.isHostOffloading(Action::OFK_OpenMP) &&
- IA->isDeviceOffloading(Action::OFK_OpenMP)) ||
- (JA.isHostOffloading(Action::OFK_HIP) &&
- IA->isDeviceOffloading(Action::OFK_HIP)))
+ IA->isDeviceOffloading(Action::OFK_OpenMP)))
continue;
if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
@@ -278,7 +303,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
StringRef CPUName;
StringRef ABIName;
mips::getMipsCPUAndABI(Args, T, CPUName, ABIName);
- return CPUName;
+ return std::string(CPUName);
}
case llvm::Triple::nvptx:
@@ -293,15 +318,19 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
std::string TargetCPUName = ppc::getPPCTargetCPU(Args);
// LLVM may default to generating code for the native CPU,
// but, like gcc, we default to a more generic option for
- // each architecture. (except on Darwin)
- if (TargetCPUName.empty() && !T.isOSDarwin()) {
- if (T.getArch() == llvm::Triple::ppc64)
- TargetCPUName = "ppc64";
- else if (T.getArch() == llvm::Triple::ppc64le)
- TargetCPUName = "ppc64le";
- else
- TargetCPUName = "ppc";
- }
+ // each architecture. (except on AIX)
+ if (!TargetCPUName.empty())
+ return TargetCPUName;
+
+ if (T.isOSAIX())
+ TargetCPUName = "pwr4";
+ else if (T.getArch() == llvm::Triple::ppc64le)
+ TargetCPUName = "ppc64le";
+ else if (T.getArch() == llvm::Triple::ppc64)
+ TargetCPUName = "ppc64";
+ else
+ TargetCPUName = "ppc";
+
return TargetCPUName;
}
@@ -334,18 +363,18 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
- return getWebAssemblyTargetCPU(Args);
+ return std::string(getWebAssemblyTargetCPU(Args));
}
}
-unsigned tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
- unsigned Parallelism = 0;
+llvm::StringRef tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
Arg *LtoJobsArg = Args.getLastArg(options::OPT_flto_jobs_EQ);
- if (LtoJobsArg &&
- StringRef(LtoJobsArg->getValue()).getAsInteger(10, Parallelism))
- D.Diag(diag::err_drv_invalid_int_value) << LtoJobsArg->getAsString(Args)
- << LtoJobsArg->getValue();
- return Parallelism;
+ if (!LtoJobsArg)
+ return {};
+ if (!llvm::get_threadpool_strategy(LtoJobsArg->getValue()))
+ D.Diag(diag::err_drv_invalid_int_value)
+ << LtoJobsArg->getAsString(Args) << LtoJobsArg->getValue();
+ return LtoJobsArg->getValue();
}
// CloudABI uses -ffunction-sections and -fdata-sections by default.
@@ -353,28 +382,32 @@ bool tools::isUseSeparateSections(const llvm::Triple &Triple) {
return Triple.getOS() == llvm::Triple::CloudABI;
}
-void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
+void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
- // Tell the linker to load the plugin. This has to come before AddLinkerInputs
- // as gold requires -plugin to come before any -plugin-opt that -Wl might
- // forward.
- CmdArgs.push_back("-plugin");
+ const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
+ const Driver &D = ToolChain.getDriver();
+ if (llvm::sys::path::filename(Linker) != "ld.lld" &&
+ llvm::sys::path::stem(Linker) != "ld.lld") {
+ // Tell the linker to load the plugin. This has to come before
+ // AddLinkerInputs as gold requires -plugin to come before any -plugin-opt
+ // that -Wl might forward.
+ CmdArgs.push_back("-plugin");
#if defined(_WIN32)
- const char *Suffix = ".dll";
+ const char *Suffix = ".dll";
#elif defined(__APPLE__)
- const char *Suffix = ".dylib";
+ const char *Suffix = ".dylib";
#else
- const char *Suffix = ".so";
+ const char *Suffix = ".so";
#endif
- SmallString<1024> Plugin;
- llvm::sys::path::native(Twine(ToolChain.getDriver().Dir) +
- "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold" +
- Suffix,
- Plugin);
- CmdArgs.push_back(Args.MakeArgString(Plugin));
+ SmallString<1024> Plugin;
+ llvm::sys::path::native(
+ Twine(D.Dir) + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold" + Suffix,
+ Plugin);
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+ }
// Try to pass driver level flags relevant to LTO code generation down to
// the plugin.
@@ -385,13 +418,19 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ // The optimization level matches
+ // CompilerInvocation.cpp:getOptimizationLevel().
StringRef OOpt;
if (A->getOption().matches(options::OPT_O4) ||
A->getOption().matches(options::OPT_Ofast))
OOpt = "3";
- else if (A->getOption().matches(options::OPT_O))
+ else if (A->getOption().matches(options::OPT_O)) {
OOpt = A->getValue();
- else if (A->getOption().matches(options::OPT_O0))
+ if (OOpt == "g")
+ OOpt = "1";
+ else if (OOpt == "s" || OOpt == "z")
+ OOpt = "2";
+ } else if (A->getOption().matches(options::OPT_O0))
OOpt = "0";
if (!OOpt.empty())
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
@@ -406,7 +445,8 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (IsThinLTO)
CmdArgs.push_back("-plugin-opt=thinlto");
- if (unsigned Parallelism = getLTOParallelism(Args, ToolChain.getDriver()))
+ StringRef Parallelism = getLTOParallelism(Args, D);
+ if (!Parallelism.empty())
CmdArgs.push_back(
Args.MakeArgString("-plugin-opt=jobs=" + Twine(Parallelism)));
@@ -437,7 +477,7 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (Arg *A = getLastProfileSampleUseArg(Args)) {
StringRef FName = A->getValue();
if (!llvm::sys::fs::exists(FName))
- ToolChain.getDriver().Diag(diag::err_drv_no_such_file) << FName;
+ D.Diag(diag::err_drv_no_such_file) << FName;
else
CmdArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
@@ -480,17 +520,21 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
}
// Setup statistics file output.
- SmallString<128> StatsFile =
- getStatsFileName(Args, Output, Input, ToolChain.getDriver());
+ SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
CmdArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=stats-file=") + StatsFile));
+
+ addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/true);
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
+ // Enable -frtlib-add-rpath by default for the case of VE.
+ const bool IsVE = TC.getTriple().isVE();
+ bool DefaultValue = IsVE;
if (!Args.hasFlag(options::OPT_frtlib_add_rpath,
- options::OPT_fno_rtlib_add_rpath, false))
+ options::OPT_fno_rtlib_add_rpath, DefaultValue))
return;
std::string CandidateRPath = TC.getArchSpecificLibPath();
@@ -583,6 +627,11 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
+ // Fuchsia never needs these. Any sanitizer runtimes with system
+ // dependencies use the `.deplibs` feature instead.
+ if (TC.getTriple().isOSFuchsia())
+ return;
+
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
CmdArgs.push_back("--no-as-needed");
@@ -642,17 +691,21 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("stats_client");
// Collect static runtimes.
- if (Args.hasArg(options::OPT_shared) || SanArgs.needsSharedRt()) {
- // Don't link static runtimes into DSOs or if -shared-libasan.
+ if (Args.hasArg(options::OPT_shared)) {
+ // Don't link static runtimes into DSOs.
return;
}
- if (SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
+
+ // Each static runtime that has a DSO counterpart above is excluded below,
+ // but runtimes that exist only as static are not affected by needsSharedRt.
+
+ if (!SanArgs.needsSharedRt() && SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("asan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("asan_cxx");
}
- if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("hwasan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("hwasan_cxx");
@@ -671,7 +724,7 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("tsan_cxx");
}
- if (SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("ubsan_minimal");
} else {
@@ -684,18 +737,20 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
NonWholeStaticRuntimes.push_back("safestack");
RequiredSymbols.push_back("__safestack_init");
}
- if (SanArgs.needsCfiRt() && SanArgs.linkRuntimes())
- StaticRuntimes.push_back("cfi");
- if (SanArgs.needsCfiDiagRt() && SanArgs.linkRuntimes()) {
- StaticRuntimes.push_back("cfi_diag");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("ubsan_standalone_cxx");
+ if (!(SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes())) {
+ if (SanArgs.needsCfiRt() && SanArgs.linkRuntimes())
+ StaticRuntimes.push_back("cfi");
+ if (SanArgs.needsCfiDiagRt() && SanArgs.linkRuntimes()) {
+ StaticRuntimes.push_back("cfi_diag");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("ubsan_standalone_cxx");
+ }
}
if (SanArgs.needsStatsRt() && SanArgs.linkRuntimes()) {
NonWholeStaticRuntimes.push_back("stats");
RequiredSymbols.push_back("__sanitizer_stats_register");
}
- if (SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("scudo_minimal");
if (SanArgs.linkCXXRuntimes())
@@ -751,7 +806,7 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back("--export-dynamic");
if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
- CmdArgs.push_back("-export-dynamic-symbol=__cfi_check");
+ CmdArgs.push_back("--export-dynamic-symbol=__cfi_check");
return !StaticRuntimes.empty() || !NonWholeStaticRuntimes.empty();
}
@@ -834,10 +889,12 @@ void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
InputInfo II(types::TY_Object, Output.getFilename(), Output.getFilename());
// First extract the dwo sections.
- C.addCommand(std::make_unique<Command>(JA, T, Exec, ExtractArgs, II));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::AtFileCurCP(), Exec, ExtractArgs, II));
// Then remove them from the original .o file.
- C.addCommand(std::make_unique<Command>(JA, T, Exec, StripArgs, II));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::AtFileCurCP(), Exec, StripArgs, II));
}
// Claim options we don't want to warn if they are unused. We do this for
@@ -1211,7 +1268,14 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
case ToolChain::UNW_CompilerRT:
if (LGT == LibGccType::StaticLibGcc)
CmdArgs.push_back("-l:libunwind.a");
- else
+ else if (TC.getTriple().isOSCygMing()) {
+ if (LGT == LibGccType::SharedLibGcc)
+ CmdArgs.push_back("-l:libunwind.dll.a");
+ else
+ // Let the linker choose between libunwind.dll.a and libunwind.a
+ // depending on what's available, and depending on the -static flag
+ CmdArgs.push_back("-lunwind");
+ } else
CmdArgs.push_back("-l:libunwind.so");
break;
}
@@ -1263,114 +1327,6 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
}
}
-/// Add HIP linker script arguments at the end of the argument list so that
-/// the fat binary is built by embedding the device images into the host. The
-/// linker script also defines a symbol required by the code generation so that
-/// the image can be retrieved at runtime. This should be used only in tool
-/// chains that support linker scripts.
-void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
- const InputInfo &Output,
- const InputInfoList &Inputs, const ArgList &Args,
- ArgStringList &CmdArgs, const JobAction &JA,
- const Tool &T) {
-
- // If this is not a HIP host toolchain, we don't need to do anything.
- if (!JA.isHostOffloading(Action::OFK_HIP))
- return;
-
- InputInfoList DeviceInputs;
- for (const auto &II : Inputs) {
- const Action *A = II.getAction();
- // Is this a device linking action?
- if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
- DeviceInputs.push_back(II);
- }
- }
-
- if (DeviceInputs.empty())
- return;
-
- // Create temporary linker script. Keep it if save-temps is enabled.
- const char *LKS;
- std::string Name = llvm::sys::path::filename(Output.getFilename());
- if (C.getDriver().isSaveTempsEnabled()) {
- LKS = C.getArgs().MakeArgString(Name + ".lk");
- } else {
- auto TmpName = C.getDriver().GetTemporaryPath(Name, "lk");
- LKS = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
-
- // Add linker script option to the command.
- CmdArgs.push_back("-T");
- CmdArgs.push_back(LKS);
-
- // Create a buffer to write the contents of the linker script.
- std::string LksBuffer;
- llvm::raw_string_ostream LksStream(LksBuffer);
-
- // Get the HIP offload tool chain.
- auto *HIPTC = static_cast<const toolchains::CudaToolChain *>(
- C.getSingleOffloadToolChain<Action::OFK_HIP>());
- assert(HIPTC->getTriple().getArch() == llvm::Triple::amdgcn &&
- "Wrong platform");
- (void)HIPTC;
-
- const char *BundleFile;
- if (C.getDriver().isSaveTempsEnabled()) {
- BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
- } else {
- auto TmpName = C.getDriver().GetTemporaryPath(Name, "hipfb");
- BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
- AMDGCN::constructHIPFatbinCommand(C, JA, BundleFile, DeviceInputs, Args, T);
-
- // Add commands to embed target binaries. We ensure that each section and
- // image is 16-byte aligned. This is not mandatory, but increases the
- // likelihood of data to be aligned with a cache block in several main host
- // machines.
- LksStream << "/*\n";
- LksStream << " HIP Offload Linker Script\n";
- LksStream << " *** Automatically generated by Clang ***\n";
- LksStream << "*/\n";
- LksStream << "TARGET(binary)\n";
- LksStream << "INPUT(" << BundleFile << ")\n";
- LksStream << "SECTIONS\n";
- LksStream << "{\n";
- LksStream << " .hip_fatbin :\n";
- LksStream << " ALIGN(0x10)\n";
- LksStream << " {\n";
- LksStream << " PROVIDE_HIDDEN(__hip_fatbin = .);\n";
- LksStream << " " << BundleFile << "\n";
- LksStream << " }\n";
- LksStream << " /DISCARD/ :\n";
- LksStream << " {\n";
- LksStream << " * ( __CLANG_OFFLOAD_BUNDLE__* )\n";
- LksStream << " }\n";
- LksStream << "}\n";
- LksStream << "INSERT BEFORE .data\n";
- LksStream.flush();
-
- // Dump the contents of the linker script if the user requested that. We
- // support this option to enable testing of behavior with -###.
- if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
- llvm::errs() << LksBuffer;
-
- // If this is a dry run, do not create the linker script file.
- if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH))
- return;
-
- // Open script file and write the contents.
- std::error_code EC;
- llvm::raw_fd_ostream Lksf(LKS, EC, llvm::sys::fs::OF_None);
-
- if (EC) {
- C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
- return;
- }
-
- Lksf << LksBuffer;
-}
-
SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Output,
const InputInfo &Input,
@@ -1399,3 +1355,53 @@ void tools::addMultilibFlag(bool Enabled, const char *const Flag,
Multilib::flags_list &Flags) {
Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
}
+
+void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs, bool IsLTO) {
+ auto addArg = [&, IsLTO](const Twine &Arg) {
+ if (IsLTO) {
+ CmdArgs.push_back(Args.MakeArgString("-plugin-opt=" + Arg));
+ } else {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(Arg));
+ }
+ };
+
+ if (Args.hasArg(options::OPT_mbranches_within_32B_boundaries)) {
+ addArg(Twine("-x86-branches-within-32B-boundaries"));
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_boundary_EQ)) {
+ StringRef Value = A->getValue();
+ unsigned Boundary;
+ if (Value.getAsInteger(10, Boundary) || Boundary < 16 ||
+ !llvm::isPowerOf2_64(Boundary)) {
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
+ } else {
+ addArg("-x86-align-branch-boundary=" + Twine(Boundary));
+ }
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_EQ)) {
+ std::string AlignBranch;
+ for (StringRef T : A->getValues()) {
+ if (T != "fused" && T != "jcc" && T != "jmp" && T != "call" &&
+ T != "ret" && T != "indirect")
+ D.Diag(diag::err_drv_invalid_malign_branch_EQ)
+ << T << "fused, jcc, jmp, call, ret, indirect";
+ if (!AlignBranch.empty())
+ AlignBranch += '+';
+ AlignBranch += T;
+ }
+ addArg("-x86-align-branch=" + Twine(AlignBranch));
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_mpad_max_prefix_size_EQ)) {
+ StringRef Value = A->getValue();
+ unsigned PrefixSize;
+ if (Value.getAsInteger(10, PrefixSize)) {
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
+ } else {
+ addArg("-x86-pad-max-prefix-size=" + Twine(PrefixSize));
+ }
+ }
+}
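addX86AlignBranchArgs, added at the end of CommonArgs.cpp above, lowers the -malign-branch* options either to -mllvm flags or, when IsLTO is true, to -plugin-opt= flags. The boundary validation it performs (decimal integer, at least 16, power of two) can be restated as the following standalone sketch; the helper name and the use of std::stoul are illustrative, not taken from the patch.

    #include <optional>
    #include <string>

    // Mirrors the -malign-branch-boundary=<N> check: N must parse fully as a
    // decimal integer, be at least 16, and be a power of two. std::nullopt
    // corresponds to the driver emitting err_drv_invalid_argument_to_option.
    std::optional<std::string>
    buildAlignBranchBoundaryFlag(const std::string &Value) {
      unsigned long Boundary = 0;
      std::size_t Consumed = 0;
      try {
        Boundary = std::stoul(Value, &Consumed, 10);
      } catch (...) {
        return std::nullopt;
      }
      if (Consumed != Value.size() || Boundary < 16 ||
          (Boundary & (Boundary - 1)) != 0)
        return std::nullopt;
      return "-x86-align-branch-boundary=" + std::to_string(Boundary);
    }

So a valid -malign-branch-boundary=32 ends up as -mllvm -x86-align-branch-boundary=32 in a regular compile, or as -plugin-opt=-x86-align-branch-boundary=32 on the LTO path, per the addArg lambda above.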
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
index 84b9d2cf59b4..29dedec9b09c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -45,12 +45,6 @@ void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm::opt::ArgStringList &CmdArgs,
const llvm::opt::ArgList &Args);
-void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs, const JobAction &JA,
- const Tool &T);
-
const char *SplitDebugName(const llvm::opt::ArgList &Args,
const InputInfo &Input, const InputInfo &Output);
@@ -58,7 +52,7 @@ void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const JobAction &JA, const llvm::opt::ArgList &Args,
const InputInfo &Output, const char *OutFile);
-void AddGoldPlugin(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
+void addLTOOptions(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO);
@@ -88,12 +82,18 @@ llvm::opt::Arg *getLastProfileSampleUseArg(const llvm::opt::ArgList &Args);
bool isObjCAutoRefCount(const llvm::opt::ArgList &Args);
-unsigned getLTOParallelism(const llvm::opt::ArgList &Args, const Driver &D);
+llvm::StringRef getLTOParallelism(const llvm::opt::ArgList &Args,
+ const Driver &D);
bool areOptimizationsEnabled(const llvm::opt::ArgList &Args);
bool isUseSeparateSections(const llvm::Triple &Triple);
+/// \p EnvVar is split by system delimiter for environment variables.
+/// If \p ArgName is "-I", "-L", or an empty string, each entry from \p EnvVar
+/// is prefixed by \p ArgName then added to \p Args. Otherwise, for each
+/// entry of \p EnvVar, \p ArgName is added to \p Args first, then the entry
+/// itself is added.
void addDirectoryList(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const char *ArgName,
const char *EnvVar);
@@ -106,10 +106,20 @@ void AddTargetFeature(const llvm::opt::ArgList &Args,
std::string getCPUName(const llvm::opt::ArgList &Args, const llvm::Triple &T,
bool FromAs = false);
+/// Iterate \p Args and convert -mxxx to +xxx and -mno-xxx to -xxx, and
+/// append them to \p Features.
+///
+/// Note: Since \p Features may already contain default values before this
+/// function is called, and may later have entries appended to override
+/// arguments, entries in \p Features are not unique.
void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features,
llvm::opt::OptSpecifier Group);
+/// If there are multiple +xxx or -xxx features, keep the last one.
+std::vector<StringRef>
+unifyTargetFeatures(const std::vector<StringRef> &Features);
+
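
[editor note] Read together with handleTargetFeaturesGroup above, the contract is: -mfoo maps to "+foo", -mno-foo to "-foo", duplicates are allowed, and unifyTargetFeatures keeps only the last entry per feature. A last-wins sketch under that reading (not the driver's exact implementation):

  #include "llvm/ADT/StringMap.h"
  #include "llvm/ADT/StringRef.h"
  #include <vector>

  // Keep only the last "+xxx"/"-xxx" entry for each feature name xxx.
  static std::vector<llvm::StringRef>
  unifyTargetFeaturesSketch(const std::vector<llvm::StringRef> &Features) {
    llvm::StringMap<unsigned> LastIndex; // feature name -> last position seen
    for (unsigned I = 0, E = Features.size(); I != E; ++I)
      LastIndex[Features[I].drop_front()] = I; // strip the leading '+' or '-'
    std::vector<llvm::StringRef> Unified;
    for (unsigned I = 0, E = Features.size(); I != E; ++I)
      if (LastIndex[Features[I].drop_front()] == I)
        Unified.push_back(Features[I]);
    return Unified;
  }

  // {"+sse4.2", "+avx", "-sse4.2"} unifies to {"+avx", "-sse4.2"}.
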
/// Handles the -save-stats option and returns the filename to save statistics
/// to.
SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
@@ -121,6 +131,8 @@ SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
void addMultilibFlag(bool Enabled, const char *const Flag,
Multilib::flags_list &Flags);
+void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, bool IsLTO);
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
index dbf6114eb2ec..127a8a5f24cc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -57,7 +57,8 @@ void tools::CrossWindows::Assembler::ConstructJob(
const std::string Assembler = TC.GetProgramPath("as");
Exec = Args.MakeArgString(Assembler);
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void tools::CrossWindows::Linker::ConstructJob(
@@ -202,7 +203,8 @@ void tools::CrossWindows::Linker::ConstructJob(
Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
index 7267a35d48b9..df9a7f71bf9f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
@@ -33,8 +33,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("CrossWindows::Linker", "ld", TC, RF_Full) {}
+ Linker(const ToolChain &TC) : Tool("CrossWindows::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 50d22a2a8ea7..110a0bca9bc1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -18,9 +18,11 @@
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
@@ -43,17 +45,22 @@ void CudaInstallationDetector::ParseCudaVersionFile(llvm::StringRef V) {
return;
DetectedVersion = join_items(".", VersionParts[0], VersionParts[1]);
Version = CudaStringToVersion(DetectedVersion);
- if (Version != CudaVersion::UNKNOWN)
+ if (Version != CudaVersion::UNKNOWN) {
+ // TODO(tra): remove the warning once we have all features of 10.2 and 11.0
+ // implemented.
+ DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
return;
+ }
- Version = CudaVersion::LATEST;
+ Version = CudaVersion::LATEST_SUPPORTED;
DetectedVersionIsNotSupported = true;
}
void CudaInstallationDetector::WarnIfUnsupportedVersion() {
if (DetectedVersionIsNotSupported)
D.Diag(diag::warn_drv_unknown_cuda_version)
- << DetectedVersion << CudaVersionToString(Version);
+ << DetectedVersion
+ << CudaVersionToString(CudaVersion::LATEST_SUPPORTED);
}
CudaInstallationDetector::CudaInstallationDetector(
@@ -71,6 +78,7 @@ CudaInstallationDetector::CudaInstallationDetector(
// In decreasing order so we prefer newer versions to older versions.
std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
+ auto &FS = D.getVFS();
if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
Candidates.emplace_back(
@@ -97,8 +105,9 @@ CudaInstallationDetector::CudaInstallationDetector(
StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
if (llvm::sys::path::filename(ptxasDir) == "bin")
- Candidates.emplace_back(llvm::sys::path::parent_path(ptxasDir),
- /*StrictChecking=*/true);
+ Candidates.emplace_back(
+ std::string(llvm::sys::path::parent_path(ptxasDir)),
+ /*StrictChecking=*/true);
}
}
@@ -106,7 +115,7 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const char *Ver : Versions)
Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);
- Distro Dist(D.getVFS(), llvm::Triple(llvm::sys::getProcessTriple()));
+ Distro Dist(FS, llvm::Triple(llvm::sys::getProcessTriple()));
if (Dist.IsDebian() || Dist.IsUbuntu())
// Special case for Debian to have nvidia-cuda-toolkit work
// out of the box. More info on http://bugs.debian.org/882505
@@ -117,14 +126,13 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const auto &Candidate : Candidates) {
InstallPath = Candidate.Path;
- if (InstallPath.empty() || !D.getVFS().exists(InstallPath))
+ if (InstallPath.empty() || !FS.exists(InstallPath))
continue;
BinPath = InstallPath + "/bin";
IncludePath = InstallPath + "/include";
LibDevicePath = InstallPath + "/nvvm/libdevice";
- auto &FS = D.getVFS();
if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
continue;
bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
@@ -158,18 +166,19 @@ CudaInstallationDetector::CudaInstallationDetector(
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
if (FS.exists(FilePath)) {
- for (const char *GpuArchName :
- {"sm_30", "sm_32", "sm_35", "sm_37", "sm_50", "sm_52", "sm_53",
- "sm_60", "sm_61", "sm_62", "sm_70", "sm_72", "sm_75"}) {
- const CudaArch GpuArch = StringToCudaArch(GpuArchName);
- if (Version >= MinVersionForCudaArch(GpuArch) &&
- Version <= MaxVersionForCudaArch(GpuArch))
- LibDeviceMap[GpuArchName] = FilePath;
+ for (int Arch = (int)CudaArch::SM_30, E = (int)CudaArch::LAST; Arch < E;
+ ++Arch) {
+ CudaArch GpuArch = static_cast<CudaArch>(Arch);
+ if (!IsNVIDIAGpuArch(GpuArch))
+ continue;
+ std::string GpuArchName(CudaArchToString(GpuArch));
+ LibDeviceMap[GpuArchName] = FilePath;
}
}
} else {
std::error_code EC;
- for (llvm::sys::fs::directory_iterator LI(LibDevicePath, EC), LE;
+ for (llvm::vfs::directory_iterator LI = FS.dir_begin(LibDevicePath, EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef FilePath = LI->path();
StringRef FileName = llvm::sys::path::filename(FilePath);
@@ -185,27 +194,27 @@ CudaInstallationDetector::CudaInstallationDetector(
// capability. NVCC's choice of the libdevice library version is
// rather peculiar and depends on the CUDA version.
if (GpuArch == "compute_20") {
- LibDeviceMap["sm_20"] = FilePath;
- LibDeviceMap["sm_21"] = FilePath;
- LibDeviceMap["sm_32"] = FilePath;
+ LibDeviceMap["sm_20"] = std::string(FilePath);
+ LibDeviceMap["sm_21"] = std::string(FilePath);
+ LibDeviceMap["sm_32"] = std::string(FilePath);
} else if (GpuArch == "compute_30") {
- LibDeviceMap["sm_30"] = FilePath;
+ LibDeviceMap["sm_30"] = std::string(FilePath);
if (Version < CudaVersion::CUDA_80) {
- LibDeviceMap["sm_50"] = FilePath;
- LibDeviceMap["sm_52"] = FilePath;
- LibDeviceMap["sm_53"] = FilePath;
+ LibDeviceMap["sm_50"] = std::string(FilePath);
+ LibDeviceMap["sm_52"] = std::string(FilePath);
+ LibDeviceMap["sm_53"] = std::string(FilePath);
}
- LibDeviceMap["sm_60"] = FilePath;
- LibDeviceMap["sm_61"] = FilePath;
- LibDeviceMap["sm_62"] = FilePath;
+ LibDeviceMap["sm_60"] = std::string(FilePath);
+ LibDeviceMap["sm_61"] = std::string(FilePath);
+ LibDeviceMap["sm_62"] = std::string(FilePath);
} else if (GpuArch == "compute_35") {
- LibDeviceMap["sm_35"] = FilePath;
- LibDeviceMap["sm_37"] = FilePath;
+ LibDeviceMap["sm_35"] = std::string(FilePath);
+ LibDeviceMap["sm_37"] = std::string(FilePath);
} else if (GpuArch == "compute_50") {
if (Version >= CudaVersion::CUDA_80) {
- LibDeviceMap["sm_50"] = FilePath;
- LibDeviceMap["sm_52"] = FilePath;
- LibDeviceMap["sm_53"] = FilePath;
+ LibDeviceMap["sm_50"] = std::string(FilePath);
+ LibDeviceMap["sm_52"] = std::string(FilePath);
+ LibDeviceMap["sm_53"] = std::string(FilePath);
}
}
}
@@ -233,7 +242,7 @@ void CudaInstallationDetector::AddCudaIncludeArgs(
CC1Args.push_back(DriverArgs.MakeArgString(P));
}
- if (DriverArgs.hasArg(options::OPT_nocudainc))
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
return;
if (!isValid()) {
@@ -414,7 +423,11 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Exec = A->getValue();
else
Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
@@ -468,10 +481,9 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
continue;
// We need to pass an Arch of the form "sm_XX" for cubin files and
// "compute_XX" for ptx.
- const char *Arch =
- (II.getType() == types::TY_PP_Asm)
- ? CudaVirtualArchToString(VirtualArchForCudaArch(gpu_arch))
- : gpu_arch_str;
+ const char *Arch = (II.getType() == types::TY_PP_Asm)
+ ? CudaArchToVirtualArchString(gpu_arch)
+ : gpu_arch_str;
CmdArgs.push_back(Args.MakeArgString(llvm::Twine("--image=profile=") +
Arch + ",file=" + II.getFilename()));
}
@@ -480,7 +492,11 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(A));
const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -557,7 +573,11 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("nvlink"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
@@ -571,7 +591,7 @@ CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
CudaInstallation(D, HostTC.getTriple(), Args), OK(OK) {
if (CudaInstallation.isValid()) {
CudaInstallation.WarnIfUnsupportedVersion();
- getProgramPaths().push_back(CudaInstallation.getBinPath());
+ getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
}
// Look up binaries in the driver directory; this is used to
// discover the clang-offload-bundler executable.
@@ -589,7 +609,7 @@ std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
// these particular file names.
SmallString<256> Filename(ToolChain::getInputFilename(Input));
llvm::sys::path::replace_extension(Filename, "cubin");
- return Filename.str();
+ return std::string(Filename.str());
}
void CudaToolChain::addClangTargetOptions(
@@ -607,10 +627,6 @@ void CudaToolChain::addClangTargetOptions(
if (DeviceOffloadingKind == Action::OFK_Cuda) {
CC1Args.push_back("-fcuda-is-device");
- if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero, false))
- CC1Args.push_back("-fcuda-flush-denormals-to-zero");
-
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
@@ -641,24 +657,30 @@ void CudaToolChain::addClangTargetOptions(
// by new PTX version, so we need to raise PTX level to enable them in NVPTX
// back-end.
const char *PtxFeature = nullptr;
- switch(CudaInstallation.version()) {
- case CudaVersion::CUDA_101:
- PtxFeature = "+ptx64";
- break;
- case CudaVersion::CUDA_100:
- PtxFeature = "+ptx63";
- break;
- case CudaVersion::CUDA_92:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_91:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_90:
- PtxFeature = "+ptx60";
- break;
- default:
- PtxFeature = "+ptx42";
+ switch (CudaInstallation.version()) {
+ case CudaVersion::CUDA_110:
+ PtxFeature = "+ptx70";
+ break;
+ case CudaVersion::CUDA_102:
+ PtxFeature = "+ptx65";
+ break;
+ case CudaVersion::CUDA_101:
+ PtxFeature = "+ptx64";
+ break;
+ case CudaVersion::CUDA_100:
+ PtxFeature = "+ptx63";
+ break;
+ case CudaVersion::CUDA_92:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_91:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_90:
+ PtxFeature = "+ptx60";
+ break;
+ default:
+ PtxFeature = "+ptx42";
}
CC1Args.append({"-target-feature", PtxFeature});
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
@@ -711,6 +733,21 @@ void CudaToolChain::addClangTargetOptions(
}
}
+llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
+ if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
+ DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ false))
+ return llvm::DenormalMode::getPreserveSign();
+ }
+
+ assert(JA.getOffloadingDeviceKind() != Action::OFK_Host);
+ return llvm::DenormalMode::getIEEE();
+}
+
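
[editor note] Worth noting about the override above: only device-side CUDA jobs are affected, and only IEEE single precision is flushed (with sign preserved) when -fcuda-flush-denormals-to-zero is given; every other type keeps IEEE denormal handling. A simplified stand-in showing just that mapping (helper name and signature are ours):

  #include "llvm/ADT/APFloat.h"
  #include "llvm/ADT/FloatingPointMode.h"

  // FlushRequested corresponds to -fcuda-flush-denormals-to-zero on a
  // device-side CUDA compilation.
  static llvm::DenormalMode cudaDenormalModeFor(const llvm::fltSemantics *FPType,
                                                bool FlushRequested) {
    if (FPType == &llvm::APFloat::IEEEsingle() && FlushRequested)
      return llvm::DenormalMode::getPreserveSign(); // float denormals flushed
    return llvm::DenormalMode::getIEEE();           // doubles etc. stay IEEE
  }
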
bool CudaToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
const Option &O = A->getOption();
return (O.matches(options::OPT_gN_Group) &&
@@ -741,7 +778,7 @@ void CudaToolChain::adjustDebugInfoKind(
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Check our CUDA version if we're going to include the CUDA headers.
- if (!DriverArgs.hasArg(options::OPT_nocudainc) &&
+ if (!DriverArgs.hasArg(options::OPT_nogpuinc) &&
!DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
assert(!Arch.empty() && "Must have an explicit GPU arch.");
@@ -786,36 +823,6 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
}
for (Arg *A : Args) {
- if (A->getOption().matches(options::OPT_Xarch__)) {
- // Skip this argument unless the architecture matches BoundArch
- if (BoundArch.empty() || A->getValue(0) != BoundArch)
- continue;
-
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
- XarchArg->setBaseArg(A);
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
- }
DAL->append(A);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index d1e066f93dfb..873eb7338a30 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -89,9 +89,7 @@ namespace NVPTX {
// Run ptxas, the NVPTX assembler.
class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : Tool("NVPTX::Assembler", "ptxas", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ Assembler(const ToolChain &TC) : Tool("NVPTX::Assembler", "ptxas", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -105,9 +103,7 @@ class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
// assembly into a single output file.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("NVPTX::Linker", "fatbinary", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ Linker(const ToolChain &TC) : Tool("NVPTX::Linker", "fatbinary", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -120,8 +116,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
public:
OpenMPLinker(const ToolChain &TC)
- : Tool("NVPTX::OpenMPLinker", "nvlink", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ : Tool("NVPTX::OpenMPLinker", "nvlink", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -155,6 +150,10 @@ public:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index 46265c1b9f1a..7b879f8cb652 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <cstdlib> // ::getenv
@@ -147,7 +148,8 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// asm_final spec is empty.
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::MachOTool::anchor() {}
@@ -201,16 +203,11 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
- const InputInfoList &Inputs) const {
+ const InputInfoList &Inputs,
+ unsigned Version[5]) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
- unsigned Version[5] = {0, 0, 0, 0, 0};
- if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
- if (!Driver::GetReleaseVersion(A->getValue(), Version))
- D.Diag(diag::err_drv_invalid_version_number) << A->getAsString(Args);
- }
-
// Newer linkers support -demangle. Pass it if supported and not disabled by
// the user.
if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
@@ -429,6 +426,75 @@ static bool isObjCRuntimeLinked(const ArgList &Args) {
return Args.hasArg(options::OPT_fobjc_link_runtime);
}
+static bool checkRemarksOptions(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ // When enabling remarks, we need to error if:
+ // * The remark file is specified but we're targeting multiple architectures,
+ // which means more than one remark file is being generated.
+ bool hasMultipleInvocations =
+ Args.getAllArgValues(options::OPT_arch).size() > 1;
+ bool hasExplicitOutputFile =
+ Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (hasMultipleInvocations && hasExplicitOutputFile) {
+ D.Diag(diag::err_drv_invalid_output_with_multiple_archs)
+ << "-foptimization-record-file";
+ return false;
+ }
+ return true;
+}
+
+static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple,
+ const InputInfo &Output, const JobAction &JA) {
+ StringRef Format = "yaml";
+ if (const Arg *A = Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ Format = A->getValue();
+
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-output");
+ CmdArgs.push_back("-mllvm");
+
+ const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (A) {
+ CmdArgs.push_back(A->getValue());
+ } else {
+ assert(Output.isFilename() && "Unexpected ld output.");
+ SmallString<128> F;
+ F = Output.getFilename();
+ F += ".opt.";
+ F += Format;
+
+ CmdArgs.push_back(Args.MakeArgString(F));
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Passes =
+ std::string("-lto-pass-remarks-filter=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Passes));
+ }
+
+ if (!Format.empty()) {
+ CmdArgs.push_back("-mllvm");
+ Twine FormatArg = Twine("-lto-pass-remarks-format=") + Format;
+ CmdArgs.push_back(Args.MakeArgString(FormatArg));
+ }
+
+ if (getLastProfileUseArg(Args)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-with-hotness");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Opt =
+ std::string("-lto-pass-remarks-hotness-threshold=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+ }
+}
+
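
[editor note] Net effect of the two helpers above: with remarks enabled, the darwin link job gains -mllvm -lto-pass-remarks-output plus a file name, and when no explicit file is given the name is derived from the link output. A sketch of just that derivation (function name is ours):

  #include <string>

  // Mirrors the fallback in renderRemarksOptions: <output>.opt.<format>, where
  // Format defaults to "yaml" unless -fsave-optimization-record=<fmt> overrides
  // it and -foptimization-record-file= is not given.
  static std::string remarksFileFor(const std::string &OutputFilename,
                                    const std::string &Format = "yaml") {
    return OutputFilename + ".opt." + Format;
  }

  // remarksFileFor("a.out") == "a.out.opt.yaml".
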
void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -455,63 +521,26 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, None));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, None));
return;
}
+ unsigned Version[5] = {0, 0, 0, 0, 0};
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ if (!Driver::GetReleaseVersion(A->getValue(), Version))
+ getToolChain().getDriver().Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+ }
+
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suit for ease of comparison.
- AddLinkArgs(C, Args, CmdArgs, Inputs);
+ AddLinkArgs(C, Args, CmdArgs, Inputs, Version);
- // For LTO, pass the name of the optimization record file and other
- // opt-remarks flags.
- if (Args.hasFlag(options::OPT_fsave_optimization_record,
- options::OPT_fsave_optimization_record_EQ,
- options::OPT_fno_save_optimization_record, false)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-lto-pass-remarks-output");
- CmdArgs.push_back("-mllvm");
-
- SmallString<128> F;
- F = Output.getFilename();
- F += ".opt.";
- if (const Arg *A =
- Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
- F += A->getValue();
- else
- F += "yaml";
-
- CmdArgs.push_back(Args.MakeArgString(F));
-
- if (getLastProfileUseArg(Args)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-lto-pass-remarks-with-hotness");
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Opt =
- std::string("-lto-pass-remarks-hotness-threshold=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Opt));
- }
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Passes =
- std::string("-lto-pass-remarks-filter=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Passes));
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fsave_optimization_record_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Format =
- std::string("-lto-pass-remarks-format=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Format));
- }
- }
+ if (willEmitRemarks(Args) &&
+ checkRemarksOptions(getToolChain().getDriver(), Args,
+ getToolChain().getTriple()))
+ renderRemarksOptions(Args, CmdArgs, getToolChain().getTriple(), Output, JA);
// Propagate the -moutline flag to the linker in LTO.
if (Arg *A =
@@ -605,10 +634,12 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getMachOToolChain().addProfileRTLibs(Args, CmdArgs);
- if (unsigned Parallelism =
- getLTOParallelism(Args, getToolChain().getDriver())) {
+ StringRef Parallelism = getLTOParallelism(Args, getToolChain().getDriver());
+ if (!Parallelism.empty()) {
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-threads=" + Twine(Parallelism)));
+ unsigned NumThreads =
+ llvm::get_threadpool_strategy(Parallelism)->compute_thread_count();
+ CmdArgs.push_back(Args.MakeArgString("-threads=" + Twine(NumThreads)));
}
if (getToolChain().ShouldLinkCXXStdlib(Args))
@@ -655,9 +686,16 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ ResponseFileSupport ResponseSupport = ResponseFileSupport::AtFileUTF8();
+ if (Version[0] < 607) {
+ // For older versions of the linker, use the legacy filelist method instead.
+ ResponseSupport = {ResponseFileSupport::RF_FileList, llvm::sys::WEM_UTF8,
+ "-filelist"};
+ }
+
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- std::unique_ptr<Command> Cmd =
- std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs);
+ std::unique_ptr<Command> Cmd = std::make_unique<Command>(
+ JA, *this, ResponseSupport, Exec, CmdArgs, Inputs);
Cmd->setInputFileList(std::move(InputFileList));
C.addCommand(std::move(Cmd));
}
@@ -681,7 +719,8 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
@@ -701,7 +740,8 @@ void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
@@ -724,7 +764,8 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -738,7 +779,7 @@ MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
/// Darwin - Darwin tool chain for i386 and x86_64.
Darwin::Darwin(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: MachO(D, Triple, Args), TargetInitialized(false),
- CudaInstallation(D, Triple, Args) {}
+ CudaInstallation(D, Triple, Args), RocmInstallation(D, Triple, Args) {}
types::ID MachO::LookupTypeForExtension(StringRef Ext) const {
types::ID Ty = ToolChain::LookupTypeForExtension(Ext);
@@ -790,6 +831,11 @@ void Darwin::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void Darwin::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
// This is just a MachO name translation routine and there's no
// way to join this into ARMTargetParser without breaking all
// other assumptions. Maybe MachO should consider standardising
@@ -913,6 +959,10 @@ DarwinClang::DarwinClang(const Driver &D, const llvm::Triple &Triple,
: Darwin(D, Triple, Args) {}
void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
+ // Always error about undefined 'TARGET_OS_*' macros.
+ CC1Args.push_back("-Wundef-prefix=TARGET_OS_");
+ CC1Args.push_back("-Werror=undef-prefix");
+
// For modern targets, promote certain warnings to errors.
if (isTargetWatchOSBased() || getTriple().isArch64Bit()) {
// Always enable -Wdeprecated-objc-isa-usage and promote it
@@ -944,6 +994,8 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
// Avoid linking compatibility stubs on i386 mac.
if (isTargetMacOS() && getArch() == llvm::Triple::x86)
return;
+ if (isTargetAppleSiliconMac())
+ return;
ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
@@ -1068,8 +1120,8 @@ StringRef Darwin::getPlatformFamily() const {
StringRef Darwin::getSDKName(StringRef isysroot) {
// Assume SDK has path: SOME_PATH/SDKs/PlatformXX.YY.sdk
- auto BeginSDK = llvm::sys::path::begin(isysroot);
- auto EndSDK = llvm::sys::path::end(isysroot);
+ auto BeginSDK = llvm::sys::path::rbegin(isysroot);
+ auto EndSDK = llvm::sys::path::rend(isysroot);
for (auto IT = BeginSDK; IT != EndSDK; ++IT) {
StringRef SDK = *IT;
if (SDK.endswith(".sdk"))
@@ -1131,7 +1183,8 @@ static void addSectalignToPage(const ArgList &Args, ArgStringList &CmdArgs,
void Darwin::addProfileRTLibs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
+ if (!needsProfileRT(Args) && !needsGCovInstrumentation(Args))
+ return;
AddLinkRuntimeLib(Args, CmdArgs, "profile",
RuntimeLinkOptions(RLO_AlwaysLink | RLO_FirstLink));
@@ -1271,17 +1324,17 @@ static std::string getSystemOrSDKMacOSVersion(StringRef MacOSSDKVersion) {
unsigned Major, Minor, Micro;
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
if (!SystemTriple.isMacOSX())
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
SystemTriple.getMacOSXVersion(Major, Minor, Micro);
VersionTuple SystemVersion(Major, Minor, Micro);
bool HadExtra;
if (!Driver::GetReleaseVersion(MacOSSDKVersion, Major, Minor, Micro,
HadExtra))
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
VersionTuple SDKVersion(Major, Minor, Micro);
if (SDKVersion > SystemVersion)
return SystemVersion.getAsString();
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
}
namespace {
@@ -1321,7 +1374,7 @@ struct DarwinPlatform {
void setOSVersion(StringRef S) {
assert(Kind == TargetArg && "Unexpected kind!");
- OSVersion = S;
+ OSVersion = std::string(S);
}
bool hasOSVersion() const { return HasOSVersion; }
@@ -1578,7 +1631,7 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
size_t StartVer = SDK.find_first_of("0123456789");
size_t EndVer = SDK.find_last_of("0123456789");
if (StartVer != StringRef::npos && EndVer > StartVer)
- Version = SDK.slice(StartVer, EndVer + 1);
+ Version = std::string(SDK.slice(StartVer, EndVer + 1));
}
if (Version.empty())
return None;
@@ -1644,8 +1697,16 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
- MachOArchName == "arm64")
+ if (MachOArchName == "arm64") {
+#if __arm64__
+ // A clang running on an Apple Silicon mac defaults
+ // to building for mac when building for arm64 rather than
+ // defaulting to iOS.
+ OSTy = llvm::Triple::MacOSX;
+#else
+ OSTy = llvm::Triple::IOS;
+#endif
+ } else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
OSTy = llvm::Triple::IOS;
else if (MachOArchName == "armv7k" || MachOArchName == "arm64_32")
OSTy = llvm::Triple::WatchOS;
@@ -1794,7 +1855,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
if (Platform == MacOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major != 10 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major < 10 || Major >= 100 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
} else if (Platform == IPhoneOS) {
@@ -1871,7 +1932,10 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
bool NoStdInc = DriverArgs.hasArg(options::OPT_nostdinc);
bool NoStdlibInc = DriverArgs.hasArg(options::OPT_nostdlibinc);
- bool NoBuiltinInc = DriverArgs.hasArg(options::OPT_nobuiltininc);
+ bool NoBuiltinInc = DriverArgs.hasFlag(
+ options::OPT_nobuiltininc, options::OPT_ibuiltininc, /*Default=*/false);
+ bool ForceBuiltinInc = DriverArgs.hasFlag(
+ options::OPT_ibuiltininc, options::OPT_nobuiltininc, /*Default=*/false);
// Add <sysroot>/usr/local/include
if (!NoStdInc && !NoStdlibInc) {
@@ -1881,7 +1945,7 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
}
// Add the Clang builtin headers (<resource>/include)
- if (!NoStdInc && !NoBuiltinInc) {
+ if (!(NoStdInc && !ForceBuiltinInc) && !NoBuiltinInc) {
SmallString<128> P(D.ResourceDir);
llvm::sys::path::append(P, "include");
addSystemInclude(DriverArgs, CC1Args, P);
@@ -1897,7 +1961,7 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
CIncludeDirs.split(dirs, ":");
for (llvm::StringRef dir : dirs) {
llvm::StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? llvm::StringRef(Sysroot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : llvm::StringRef(Sysroot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
} else {
@@ -2130,32 +2194,7 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
continue;
Arg *OriginalArg = A;
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
-
- XarchArg->setBaseArg(A);
-
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
+ TranslateXarchArgs(Args, A, DAL);
// Linker input arguments require custom handling. The problem is that we
// have already constructed the phase actions, so we can not treat them as
@@ -2370,6 +2409,10 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
OS << "-target-sdk-version=" << SDKInfo->getVersion();
CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
}
+
+ // Enable compatibility mode for NSItemProviderCompletionHandler in
+ // Foundation/NSItemProvider.h.
+ CC1Args.push_back("-fcompatibility-qualified-id-block-type-checking");
}
DerivedArgList *
@@ -2512,6 +2555,9 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
CmdArgs.push_back("-macosx_version_min");
}
+ VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
}
@@ -2544,6 +2590,9 @@ void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
PlatformName += "-simulator";
CmdArgs.push_back(Args.MakeArgString(PlatformName));
VersionTuple TargetVersion = getTargetVersion().withoutBuild();
+ VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
if (SDKInfo) {
VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
@@ -2554,98 +2603,102 @@ void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
}
}
-void Darwin::addStartObjectFileArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- // Derived from startfile spec.
- if (Args.hasArg(options::OPT_dynamiclib)) {
- // Derived from darwin_dylib1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need dylib1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need dylib1.o.
- } else if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-ldylib1.o");
+// Add additional link args for the -dynamiclib option.
+static void addDynamicLibLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Derived from darwin_dylib1 spec.
+ if (D.isTargetIPhoneOS()) {
+ if (D.isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-ldylib1.o");
+ return;
+ }
+
+ if (!D.isTargetMacOS())
+ return;
+ if (D.isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-ldylib1.o");
+ else if (D.isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-ldylib1.10.5.o");
+}
+
+// Add additional link args for the -bundle option.
+static void addBundleLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_static))
+ return;
+ // Derived from darwin_bundle1 spec.
+ if ((D.isTargetIPhoneOS() && D.isIPhoneOSVersionLT(3, 1)) ||
+ (D.isTargetMacOS() && D.isMacosxVersionLT(10, 6)))
+ CmdArgs.push_back("-lbundle1.o");
+}
+
+// Add additional link args for the -pg option.
+static void addPgProfilingLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (D.isTargetMacOS() && D.isMacosxVersionLT(10, 9)) {
+ if (Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
} else {
- if (isMacosxVersionLT(10, 5))
- CmdArgs.push_back("-ldylib1.o");
- else if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-ldylib1.10.5.o");
+ CmdArgs.push_back("-lgcrt1.o");
+
+ // darwin_crt2 spec is empty.
}
+ // By default on OS X 10.8 and later, we don't link with a crt1.o
+ // file and the linker knows to use _main as the entry point. But,
+ // when compiling with -pg, we need to link with the gcrt1.o file,
+ // so pass the -no_new_main option to tell the linker to use the
+ // "start" symbol as the entry point.
+ if (!D.isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-no_new_main");
} else {
- if (Args.hasArg(options::OPT_bundle)) {
- if (!Args.hasArg(options::OPT_static)) {
- // Derived from darwin_bundle1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need bundle1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need bundle1.o.
- } else if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-lbundle1.o");
- } else {
- if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-lbundle1.o");
- }
- }
- } else {
- if (Args.hasArg(options::OPT_pg) && SupportsProfiling()) {
- if (isTargetMacOS() && isMacosxVersionLT(10, 9)) {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_object) ||
- Args.hasArg(options::OPT_preload)) {
- CmdArgs.push_back("-lgcrt0.o");
- } else {
- CmdArgs.push_back("-lgcrt1.o");
-
- // darwin_crt2 spec is empty.
- }
- // By default on OS X 10.8 and later, we don't link with a crt1.o
- // file and the linker knows to use _main as the entry point. But,
- // when compiling with -pg, we need to link with the gcrt1.o file,
- // so pass the -no_new_main option to tell the linker to use the
- // "start" symbol as the entry point.
- if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
- CmdArgs.push_back("-no_new_main");
- } else {
- getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
- << isTargetMacOS();
- }
- } else {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_object) ||
- Args.hasArg(options::OPT_preload)) {
- CmdArgs.push_back("-lcrt0.o");
- } else {
- // Derived from darwin_crt1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need crt1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need crt1.o.
- } else if (isTargetIPhoneOS()) {
- if (getArch() == llvm::Triple::aarch64)
- ; // iOS does not need any crt1 files for arm64
- else if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-lcrt1.o");
- else if (isIPhoneOSVersionLT(6, 0))
- CmdArgs.push_back("-lcrt1.3.1.o");
- } else {
- if (isMacosxVersionLT(10, 5))
- CmdArgs.push_back("-lcrt1.o");
- else if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-lcrt1.10.5.o");
- else if (isMacosxVersionLT(10, 8))
- CmdArgs.push_back("-lcrt1.10.6.o");
-
- // darwin_crt2 spec is empty.
- }
- }
- }
- }
+ D.getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
+ << D.isTargetMacOS();
}
+}
+
+static void addDefaultCRTLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Derived from darwin_crt1 spec.
+ if (D.isTargetIPhoneOS()) {
+ if (D.getArch() == llvm::Triple::aarch64)
+ ; // iOS does not need any crt1 files for arm64
+ else if (D.isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (D.isIPhoneOSVersionLT(6, 0))
+ CmdArgs.push_back("-lcrt1.3.1.o");
+ return;
+ }
+
+ if (!D.isTargetMacOS())
+ return;
+ if (D.isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (D.isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lcrt1.10.5.o");
+ else if (D.isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-lcrt1.10.6.o");
+ // darwin_crt2 spec is empty.
+}
+
+void Darwin::addStartObjectFileArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from startfile spec.
+ if (Args.hasArg(options::OPT_dynamiclib))
+ addDynamicLibLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_bundle))
+ addBundleLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_pg) && SupportsProfiling())
+ addPgProfilingLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload))
+ CmdArgs.push_back("-lcrt0.o");
+ else
+ addDefaultCRTLinkArgs(*this, Args, CmdArgs);
- if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
- !isTargetWatchOS() && isMacosxVersionLT(10, 5)) {
+ if (isTargetMacOS() && Args.hasArg(options::OPT_shared_libgcc) &&
+ isMacosxVersionLT(10, 5)) {
const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
CmdArgs.push_back(Str);
}
@@ -2668,6 +2721,7 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::Function;
+ Res |= SanitizerKind::ObjCCast;
// Prior to 10.9, macOS shipped a version of the C++ standard library without
// C++11 support. The same is true of iOS prior to version 5. These OS'es are
@@ -2688,4 +2742,5 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
void Darwin::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index 1b193a4c4eb9..64c252efea7d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
#include "Cuda.h"
+#include "ROCm.h"
#include "clang/Driver/DarwinSDKInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -40,13 +41,8 @@ protected:
}
public:
- MachOTool(
- const char *Name, const char *ShortName, const ToolChain &TC,
- ResponseFileSupport ResponseSupport = RF_None,
- llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
- const char *ResponseFlag = "@")
- : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
- ResponseFlag) {}
+ MachOTool(const char *Name, const char *ShortName, const ToolChain &TC)
+ : Tool(Name, ShortName, TC) {}
};
class LLVM_LIBRARY_VISIBILITY Assembler : public MachOTool {
@@ -66,12 +62,10 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs) const;
+ const InputInfoList &Inputs, unsigned Version[5]) const;
public:
- Linker(const ToolChain &TC)
- : MachOTool("darwin::Linker", "linker", TC, RF_FileList,
- llvm::sys::WEM_UTF8, "-filelist") {}
+ Linker(const ToolChain &TC) : MachOTool("darwin::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -300,6 +294,7 @@ public:
mutable Optional<DarwinSDKInfo> SDKInfo;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
private:
void AddDeploymentTarget(llvm::opt::DerivedArgList &Args) const;
@@ -357,6 +352,7 @@ protected:
const_cast<Darwin *>(this)->setTripleEnvironment(llvm::Triple::Simulator);
}
+public:
bool isTargetIPhoneOS() const {
assert(TargetInitialized && "Target not initialized!");
return (TargetPlatform == IPhoneOS || TargetPlatform == TvOS) &&
@@ -409,6 +405,17 @@ protected:
return TargetPlatform == MacOS;
}
+ bool isTargetMacOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ // FIXME (Alex L): Add remaining MacCatalyst support.
+ return TargetPlatform == MacOS;
+ }
+
+ bool isTargetAppleSiliconMac() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return isTargetMacOSBased() && getArch() == llvm::Triple::aarch64;
+ }
+
bool isTargetInitialized() const { return TargetInitialized; }
VersionTuple getTargetVersion() const {
@@ -422,11 +429,20 @@ protected:
return TargetVersion < VersionTuple(V0, V1, V2);
}
+ /// Returns true if the minimum supported macOS version for the slice that's
+ /// being built is less than the specified version. If there's no minimum
+ /// supported macOS version, the deployment target version is compared to the
+ /// specified version instead.
bool isMacosxVersionLT(unsigned V0, unsigned V1 = 0, unsigned V2 = 0) const {
- assert(isTargetMacOS() && "Unexpected call for non OS X target!");
- return TargetVersion < VersionTuple(V0, V1, V2);
+ assert(isTargetMacOS() && getTriple().isMacOSX() &&
+ "Unexpected call for non OS X target!");
+ VersionTuple MinVers = getTriple().getMinimumSupportedOSVersion();
+ return (!MinVers.empty() && MinVers > TargetVersion
+ ? MinVers
+ : TargetVersion) < VersionTuple(V0, V1, V2);
}
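
[editor note] As a concrete reading of the comparison above: the version tested is effectively max(MinVers, TargetVersion). A small sketch, with an illustrative example in the trailing comment:

  #include "llvm/Support/VersionTuple.h"

  // Sketch of isMacosxVersionLT's rule: compare against whichever of the
  // slice's minimum supported version and the deployment target is larger.
  static bool isVersionLTSketch(llvm::VersionTuple MinVers,
                                llvm::VersionTuple TargetVersion,
                                llvm::VersionTuple Requested) {
    llvm::VersionTuple Effective =
        (!MinVers.empty() && MinVers > TargetVersion) ? MinVers : TargetVersion;
    return Effective < Requested;
  }

  // e.g. with MinVers 11.0 and a 10.15 deployment target, "less than 10.16"
  // comes out false, because the effective version is 11.0.
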
+protected:
/// Return true if c++17 aligned allocation/deallocation functions are not
/// implemented in the c++ standard library of the deployment target we are
/// targeting.
@@ -461,6 +477,8 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
bool UseObjCMixedDispatch() const override {
// This is only used with the non-fragile ABI and non-legacy dispatch.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
index 424331fbc6fe..88dd0c899d8a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
@@ -45,7 +45,8 @@ void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -169,7 +170,8 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
index 7e76904f1055..3ed5acefaefb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
@@ -18,10 +18,10 @@ namespace driver {
namespace tools {
/// dragonfly -- Directly call GNU Binutils assembler and linker
namespace dragonfly {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("dragonfly::Assembler", "assembler", TC) {}
+ : Tool("dragonfly::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -31,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("dragonfly::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("dragonfly::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
index 9b9eb81fa111..80f6db7ea642 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
@@ -70,10 +70,10 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const auto& D = C.getDriver();
const char* Exec = Args.MakeArgString(D.GetProgramPath("flang", TC));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
-Flang::Flang(const ToolChain &TC)
- : Tool("flang", "flang frontend", TC, RF_Full) {}
+Flang::Flang(const ToolChain &TC) : Tool("flang", "flang frontend", TC) {}
Flang::~Flang() {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 6fb4ddd7f501..909ac5e99212 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -128,7 +128,8 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -276,7 +277,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -358,7 +359,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
@@ -423,6 +425,11 @@ void FreeBSD::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void FreeBSD::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
Tool *FreeBSD::buildAssembler() const {
return new tools::freebsd::Assembler(*this);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
index 7e13f48b7167..abc0876cef26 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
@@ -19,10 +19,10 @@ namespace tools {
/// freebsd -- Directly call GNU Binutils assembler and linker
namespace freebsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("freebsd::Assembler", "assembler", TC) {}
+ : Tool("freebsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +32,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("freebsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("freebsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -68,6 +68,8 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
llvm::ExceptionHandling
GetExceptionModel(const llvm::opt::ArgList &Args) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 808d0408d0d4..94e025e3055a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -15,6 +15,7 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
@@ -47,6 +48,9 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_w);
CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=4096");
+
+ CmdArgs.push_back("-z");
CmdArgs.push_back("now");
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
@@ -56,6 +60,7 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("rodynamic");
CmdArgs.push_back("-z");
CmdArgs.push_back("separate-loadable-segments");
+ CmdArgs.push_back("--pack-dyn-relocs=relr");
}
if (!D.SysRoot.empty())
@@ -111,7 +116,7 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -159,7 +164,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// Fuchsia - Fuchsia tool chain which can call as(1) and ld(1) directly.
@@ -174,7 +180,7 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (!D.SysRoot.empty()) {
SmallString<128> P(D.SysRoot);
llvm::sys::path::append(P, "lib");
- getFilePaths().push_back(P.str());
+ getFilePaths().push_back(std::string(P.str()));
}
auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
@@ -183,7 +189,7 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (auto CXXStdlibPath = getCXXStdlibPath()) {
SmallString<128> P(*CXXStdlibPath);
llvm::sys::path::append(P, M.gccSuffix());
- FP.push_back(P.str());
+ FP.push_back(std::string(P.str()));
}
}
return FP;
@@ -289,7 +295,7 @@ void Fuchsia::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
@@ -340,6 +346,7 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
return Res;
@@ -360,3 +367,13 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const {
}
return Res;
}
+
+void Fuchsia::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ // Add the linker option -u__llvm_profile_runtime to cause the runtime
+ // initialization module to be linked in.
+ if (needsProfileRT(Args))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
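
The new addProfileRTLibs override uses the standard profiling link trick: the instrumentation runtime defines a hook variable, and forcing that symbol undefined with -u makes the linker pull the runtime's initialization object out of the static profile archive even though nothing else references it. A small sketch of the flag this produces, assuming the usual hook name exported by llvm/ProfileData/InstrProf.h:

    // Sketch: the extra linker flag appended above when -fprofile-* style
    // instrumentation makes needsProfileRT() return true.
    #include "llvm/ADT/Twine.h"
    #include "llvm/ProfileData/InstrProf.h"
    #include <string>

    const std::string ProfileHookFlag =
        llvm::Twine("-u", llvm::getInstrProfRuntimeHookVarName()).str();
    // Expected value: "-u__llvm_profile_runtime"
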
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
index fee0e018f3ce..3159a54bda06 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -69,6 +69,9 @@ public:
SanitizerMask getSupportedSanitizers() const override;
SanitizerMask getDefaultSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
RuntimeLibType
GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
CXXStdlibType
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index e8ef881e89ac..c8a7fce07ef1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -35,8 +35,7 @@ using namespace clang;
using namespace llvm::opt;
using tools::addMultilibFlag;
-
-void tools::GnuTool::anchor() {}
+using tools::addPathIfExists;
static bool forwardToGCC(const Option &O) {
// Don't forward inputs from the original command line. They are added from
@@ -189,7 +188,8 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
GCCName = "gcc";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::gcc::Preprocessor::RenderExtraToolArgs(
@@ -304,6 +304,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
if (T.getEnvironment() == llvm::Triple::GNUX32)
return "elf32_x86_64";
return "elf_x86_64";
+ case llvm::Triple::ve:
+ return "elf64ve";
default:
return nullptr;
}
@@ -340,6 +342,44 @@ static bool getStatic(const ArgList &Args) {
!Args.hasArg(options::OPT_static_pie);
}
+void tools::gnutools::StaticLibTool::ConstructJob(
+ Compilation &C, const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // GNU ar tool command "ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ // Delete the old output archive file if it already exists before generating
+ // a new one.
+ auto OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+}
+
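
The new StaticLibTool lets the GNU toolchains build static libraries by invoking the configured archiver directly. With the flags assembled above, a job with hypothetical inputs reduces to a command of the form `ar rcsD libfoo.a foo.o bar.o`: r inserts the members, c creates the archive, s writes the symbol index, and D asks for deterministic (reproducible) output. The existing archive is removed first, presumably because `ar r` updates an archive in place rather than replacing it. A sketch with assumed file names:

    // Sketch only; the file names are hypothetical, the flag string matches
    // the "rcsD" used above.
    #include <string>
    #include <vector>

    const std::vector<std::string> ArCmd = {
        "ar", "rcsD", "libfoo.a", /* inputs: */ "foo.o", "bar.o"};
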
void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -358,6 +398,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
+ const bool IsVE = ToolChain.getTriple().isVE();
const bool IsPIE = getPIE(Args, ToolChain);
const bool IsStaticPIE = getStaticPIE(Args, ToolChain);
const bool IsStatic = getStatic(Args);
@@ -448,10 +489,9 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-export-dynamic");
if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE) {
- const std::string Loader =
- D.DyldPrefix + ToolChain.getDynamicLinker(Args);
CmdArgs.push_back("-dynamic-linker");
- CmdArgs.push_back(Args.MakeArgString(Loader));
+ CmdArgs.push_back(Args.MakeArgString(Twine(D.DyldPrefix) +
+ ToolChain.getDynamicLinker(Args)));
}
}
@@ -477,6 +517,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
}
+ if (IsVE) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=0x4000000");
+ }
+
if (IsIAMCU)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
else if (HasCRTBeginEndFiles) {
@@ -504,7 +549,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add crtfastmath.o if available and fast math is enabled.
- ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+ ToolChain.addFastMathRuntimeIfAvailable(Args, CmdArgs);
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
@@ -514,7 +559,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -625,12 +670,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // Add HIP offloading linker script args if required.
- AddHIPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA,
- *this);
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::gnutools::Assembler::ConstructJob(Compilation &C,
@@ -648,6 +692,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
+ const char *DefaultAssembler = "as";
std::tie(RelocationModel, PICLevel, IsPIE) =
ParsePICArgs(getToolChain(), Args);
@@ -868,6 +913,8 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
break;
}
+ case llvm::Triple::ve:
+ DefaultAssembler = "nas";
}
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
@@ -892,8 +939,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(DefaultAssembler));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -1529,7 +1578,7 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
};
// Currently we only support the same set of multilibs as riscv-gnu-toolchain does.
// TODO: support MULTILIB_REUSE
- SmallVector<RiscvMultilib, 8> RISCVMultilibSet = {
+ constexpr RiscvMultilib RISCVMultilibSet[] = {
{"rv32i", "ilp32"}, {"rv32im", "ilp32"}, {"rv32iac", "ilp32"},
{"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"},
{"rv64imafdc", "lp64d"}};
@@ -1769,7 +1818,7 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
StringRef MinorStr = Second.first;
if (Second.second.empty()) {
if (size_t EndNumber = MinorStr.find_first_not_of("0123456789")) {
- GoodVersion.PatchSuffix = MinorStr.substr(EndNumber);
+ GoodVersion.PatchSuffix = std::string(MinorStr.substr(EndNumber));
MinorStr = MinorStr.slice(0, EndNumber);
}
}
@@ -1795,7 +1844,7 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
GoodVersion.Patch < 0)
return BadVersion;
- GoodVersion.PatchSuffix = PatchText.substr(EndNumber);
+ GoodVersion.PatchSuffix = std::string(PatchText.substr(EndNumber));
}
}
@@ -1850,7 +1899,7 @@ void Generic_GCC::GCCInstallationDetector::init(
if (GCCToolchainDir.back() == '/')
GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
- Prefixes.push_back(GCCToolchainDir);
+ Prefixes.push_back(std::string(GCCToolchainDir));
} else {
// If we have a SysRoot, try that first.
if (!D.SysRoot.empty()) {
@@ -1977,6 +2026,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// Non-Solaris is much simpler - most systems just go with "/usr".
if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
// Yet, still look for RHEL devtoolsets.
+ Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
@@ -2092,6 +2142,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
"riscv64-linux-gnu",
"riscv64-unknown-elf",
+ "riscv64-redhat-linux",
"riscv64-suse-linux"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
@@ -2462,7 +2513,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
StringRef VersionText = llvm::sys::path::filename(LI->path());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
if (CandidateVersion.Major != -1) // Filter obviously bad entries.
- if (!CandidateGCCInstallPaths.insert(LI->path()).second)
+ if (!CandidateGCCInstallPaths.insert(std::string(LI->path())).second)
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
@@ -2574,7 +2625,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), GCCInstallation(D),
- CudaInstallation(D, Triple, Args) {
+ CudaInstallation(D, Triple, Args), RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -2607,6 +2658,7 @@ void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
bool Generic_GCC::IsUnwindTablesDefault(const ArgList &Args) const {
@@ -2668,6 +2720,140 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
}
}
+static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
+ const Multilib &Multilib,
+ StringRef InstallPath,
+ ToolChain::path_list &Paths) {
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(Multilib))
+ addPathIfExists(D, InstallPath + Path, Paths);
+}
+
+void Generic_GCC::PushPPaths(ToolChain::path_list &PPaths) {
+ // Cross-compiling binutils and GCC installations (vanilla and openSUSE at
+ // least) put various tools in a triple-prefixed directory off of the parent
+ // of the GCC installation. We use the GCC triple here to ensure that we end
+ // up with tools that support the same amount of cross compiling as the
+ // detected GCC installation. For example, if we find a GCC installation
+ // targeting x86_64, but it is a bi-arch GCC installation, it can also be
+ // used to target i386.
+ if (GCCInstallation.isValid()) {
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+ }
+}
+
+void Generic_GCC::AddMultilibPaths(const Driver &D,
+ const std::string &SysRoot,
+ const std::string &OSLibDir,
+ const std::string &MultiarchTriple,
+ path_list &Paths) {
+ // Add the multilib suffixed paths where they are available.
+ if (GCCInstallation.isValid()) {
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const std::string &LibPath =
+ std::string(GCCInstallation.getParentLibPath());
+
+ // Add toolchain / multilib specific file paths.
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ GCCInstallation.getInstallPath(), Paths);
+
+ // Sourcery CodeBench MIPS toolchain holds some libraries under
+ // a biarch-like suffix of the GCC installation.
+ addPathIfExists(
+ D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
+ Paths);
+
+ // GCC cross compiling toolchains will install target libraries which ship
+ // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
+ // any part of the GCC installation in
+ // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
+ // debatable, but is the reality today. We need to search this tree even
+ // when we have a sysroot somewhere else. It is the responsibility of
+ // whoever is doing the cross build targeting a sysroot using a GCC
+ // installation that is *not* within the system root to ensure two things:
+ //
+ // 1) Any DSOs that are linked in from this tree or from the install path
+ // above must be present on the system root and found via an
+ // appropriate rpath.
+ // 2) There must not be libraries installed into
+ // <prefix>/<triple>/<libdir> unless they should be preferred over
+ // those within the system root.
+ //
+ // Note that this matches the GCC behavior. See the below comment for where
+ // Clang diverges from GCC's behavior.
+ addPathIfExists(D,
+ LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
+ SelectedMultilib.osSuffix(),
+ Paths);
+
+ // If the GCC installation we found is inside of the sysroot, we want to
+ // prefer libraries installed in the parent prefix of the GCC installation.
+ // It is important to *not* use these paths when the GCC installation is
+ // outside of the system root as that can pick up unintended libraries.
+ // This usually happens when there is an external cross compiler on the
+ // host system, and a more minimal sysroot available that is the target of
+ // the cross. Note that GCC does include some of these directories in some
+ // configurations but this seems somewhere between questionable and simply
+ // a bug.
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
+ }
+ }
+}
+
+void Generic_GCC::AddMultiarchPaths(const Driver &D,
+ const std::string &SysRoot,
+ const std::string &OSLibDir,
+ path_list &Paths) {
+ // Try walking via the GCC triple path in case of biarch or multiarch GCC
+ // installations with strange symlinks.
+ if (GCCInstallation.isValid()) {
+ addPathIfExists(D,
+ SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
+ "/../../" + OSLibDir,
+ Paths);
+
+ // Add the 'other' biarch variant path
+ Multilib BiarchSibling;
+ if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
+ addPathIfExists(
+ D, GCCInstallation.getInstallPath() + BiarchSibling.gccSuffix(),
+ Paths);
+ }
+
+ // See comments above on the multilib variant for details of why this is
+ // included even from outside the sysroot.
+ const std::string &LibPath =
+ std::string(GCCInstallation.getParentLibPath());
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ addPathIfExists(
+ D, LibPath + "/../" + GCCTriple.str() + "/lib" + Multilib.osSuffix(),
+ Paths);
+
+ // See comments above on the multilib variant for details of why this is
+ // only included from within the sysroot.
+ if (StringRef(LibPath).startswith(SysRoot))
+ addPathIfExists(D, LibPath, Paths);
+ }
+}
+
+void Generic_GCC::AddMultilibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Add include directories specific to the selected multilib set and multilib.
+ if (GCCInstallation.isValid()) {
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ for (const auto &Path : Callback(GCCInstallation.getMultilib()))
+ addExternCSystemIncludeIfExists(
+ DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
+ }
+ }
+}
+
void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
@@ -2698,7 +2884,7 @@ static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
!VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
if (Version > MaxVersion) {
MaxVersion = Version;
- MaxVersionString = VersionText;
+ MaxVersionString = std::string(VersionText);
}
}
}
@@ -2708,7 +2894,6 @@ static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
void
Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- const std::string& SysRoot = getDriver().SysRoot;
auto AddIncludePath = [&](std::string Path) {
std::string IncludePath = DetectLibcxxIncludePath(getVFS(), Path);
if (IncludePath.empty() || !getVFS().exists(IncludePath))
@@ -2724,6 +2909,7 @@ Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// If this is a development, non-installed, clang, libcxx will
// not be found at ../include/c++, but it is likely to be found at
// one of the following two locations:
+ std::string SysRoot = computeSysRoot();
if (AddIncludePath(SysRoot + "/usr/local/include/c++"))
return;
if (AddIncludePath(SysRoot + "/usr/include/c++"))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
index fa50b56bf954..52690ab4b83c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
#include "Cuda.h"
+#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include <set>
@@ -35,23 +36,26 @@ bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
namespace tools {
-/// Base class for all GNU tools that provide the same behavior when
-/// it comes to response files support
-class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
- virtual void anchor();
-
+/// Directly call GNU Binutils' assembler and linker.
+namespace gnutools {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
- : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
+ Assembler(const ToolChain &TC) : Tool("GNU::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
-/// Directly call GNU Binutils' assembler and linker.
-namespace gnutools {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Assembler(const ToolChain &TC) : GnuTool("GNU::Assembler", "assembler", TC) {}
+ Linker(const ToolChain &TC) : Tool("GNU::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
@@ -59,9 +63,10 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("GNU::Linker", "linker", TC) {}
+ StaticLibTool(const ToolChain &TC)
+ : Tool("GNU::StaticLibTool", "static-lib-linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -75,10 +80,10 @@ public:
/// gcc - Generic GCC tool implementations.
namespace gcc {
-class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Common : public Tool {
public:
Common(const char *Name, const char *ShortName, const ToolChain &TC)
- : GnuTool(Name, ShortName, TC) {}
+ : Tool(Name, ShortName, TC) {}
// A gcc tool has an "integrated" assembler that it will call to produce an
// object. Let it use that assembler so that we don't have to deal with
@@ -278,6 +283,7 @@ public:
protected:
GCCInstallationDetector GCCInstallation;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
public:
Generic_GCC(const Driver &D, const llvm::Triple &Triple,
@@ -314,6 +320,16 @@ protected:
/// Check whether the target triple's architecture is 32-bits.
bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+ void PushPPaths(ToolChain::path_list &PPaths);
+ void AddMultilibPaths(const Driver &D, const std::string &SysRoot,
+ const std::string &OSLibDir,
+ const std::string &MultiarchTriple,
+ path_list &Paths);
+ void AddMultiarchPaths(const Driver &D, const std::string &SysRoot,
+ const std::string &OSLibDir, path_list &Paths);
+ void AddMultilibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
// FIXME: This should be final, but the CrossWindows toolchain does weird
// things that can't be easily generalized.
void AddClangCXXStdlibIncludeArgs(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
index da7004cf283f..7d17f809690e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "HIP.h"
+#include "AMDGPU.h"
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
@@ -16,6 +17,7 @@
#include "clang/Driver/Options.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TargetParser.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -47,153 +49,51 @@ static void addBCLib(const Driver &D, const ArgList &Args,
}
D.Diag(diag::err_drv_no_such_file) << BCName;
}
-
-static const char *getOutputFileName(Compilation &C, StringRef Base,
- const char *Postfix,
- const char *Extension) {
- const char *OutputFileName;
- if (C.getDriver().isSaveTempsEnabled()) {
- OutputFileName =
- C.getArgs().MakeArgString(Base.str() + Postfix + "." + Extension);
- } else {
- std::string TmpName =
- C.getDriver().GetTemporaryPath(Base.str() + Postfix, Extension);
- OutputFileName = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
- return OutputFileName;
-}
-
-static void addOptLevelArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- bool IsLlc = false) {
- if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
- StringRef OOpt = "3";
- if (A->getOption().matches(options::OPT_O4) ||
- A->getOption().matches(options::OPT_Ofast))
- OOpt = "3";
- else if (A->getOption().matches(options::OPT_O0))
- OOpt = "0";
- else if (A->getOption().matches(options::OPT_O)) {
- // Clang and opt support -Os/-Oz; llc only supports -O0, -O1, -O2 and -O3
- // so we map -Os/-Oz to -O2.
- // Only clang supports -Og, and maps it to -O1.
- // We map anything else to -O2.
- OOpt = llvm::StringSwitch<const char *>(A->getValue())
- .Case("1", "1")
- .Case("2", "2")
- .Case("3", "3")
- .Case("s", IsLlc ? "2" : "s")
- .Case("z", IsLlc ? "2" : "z")
- .Case("g", "1")
- .Default("2");
- }
- CmdArgs.push_back(Args.MakeArgString("-O" + OOpt));
- }
-}
} // namespace
-const char *AMDGCN::Linker::constructLLVMLinkCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const ArgList &Args, StringRef SubArchName,
- StringRef OutputFilePrefix) const {
- ArgStringList CmdArgs;
- // Add the input bc's created by compile step.
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
-
- // Add an intermediate output file.
- CmdArgs.push_back("-o");
- auto OutputFileName = getOutputFileName(C, OutputFilePrefix, "-linked", "bc");
- CmdArgs.push_back(OutputFileName);
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
- return OutputFileName;
-}
-
-const char *AMDGCN::Linker::constructOptCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
- // Construct opt command.
- ArgStringList OptArgs;
- // The input to opt is the output from llvm-link.
- OptArgs.push_back(InputFileName);
- // Pass optimization arg to opt.
- addOptLevelArgs(Args, OptArgs);
- OptArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
- OptArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
-
- for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- OptArgs.push_back(A->getValue(0));
- }
-
- OptArgs.push_back("-o");
- auto OutputFileName =
- getOutputFileName(C, OutputFilePrefix, "-optimized", "bc");
- OptArgs.push_back(OutputFileName);
- const char *OptExec =
- Args.MakeArgString(getToolChain().GetProgramPath("opt"));
- C.addCommand(std::make_unique<Command>(JA, *this, OptExec, OptArgs, Inputs));
- return OutputFileName;
-}
+void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
+ // Construct lld command.
+ // The output from ld.lld is an HSA code object file.
+ ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined", "-shared",
+ "-plugin-opt=-amdgpu-internalize-symbols"};
-const char *AMDGCN::Linker::constructLlcCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix, const char *InputFileName,
- bool OutputIsAsm) const {
- // Construct llc command.
- ArgStringList LlcArgs;
- // The input to llc is the output from opt.
- LlcArgs.push_back(InputFileName);
- // Pass optimization arg to llc.
- addOptLevelArgs(Args, LlcArgs, /*IsLlc=*/true);
- LlcArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
- LlcArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
- LlcArgs.push_back(
- Args.MakeArgString(Twine("-filetype=") + (OutputIsAsm ? "asm" : "obj")));
+ auto &TC = getToolChain();
+ auto &D = TC.getDriver();
+ assert(!Inputs.empty() && "Must have at least one input.");
+ addLTOOptions(TC, Args, LldArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
// Extract all the -m options
std::vector<llvm::StringRef> Features;
- handleTargetFeaturesGroup(
- Args, Features, options::OPT_m_amdgpu_Features_Group);
+ amdgpu::getAMDGPUTargetFeatures(D, Args, Features);
- // Add features to mattr such as xnack
- std::string MAttrString = "-mattr=";
- for(auto OneFeature : Features) {
+ // Add features to mattr such as cumode
+ std::string MAttrString = "-plugin-opt=-mattr=";
+ for (auto OneFeature : unifyTargetFeatures(Features)) {
MAttrString.append(Args.MakeArgString(OneFeature));
if (OneFeature != Features.back())
MAttrString.append(",");
}
- if(!Features.empty())
- LlcArgs.push_back(Args.MakeArgString(MAttrString));
+ if (!Features.empty())
+ LldArgs.push_back(Args.MakeArgString(MAttrString));
for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- LlcArgs.push_back(A->getValue(0));
+ LldArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
}
- // Add output filename
- LlcArgs.push_back("-o");
- auto LlcOutputFile =
- getOutputFileName(C, OutputFilePrefix, "", OutputIsAsm ? "s" : "o");
- LlcArgs.push_back(LlcOutputFile);
- const char *Llc = Args.MakeArgString(getToolChain().GetProgramPath("llc"));
- C.addCommand(std::make_unique<Command>(JA, *this, Llc, LlcArgs, Inputs));
- return LlcOutputFile;
-}
+ if (C.getDriver().isSaveTempsEnabled())
+ LldArgs.push_back("-save-temps");
-void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const InputInfo &Output,
- const llvm::opt::ArgList &Args,
- const char *InputFileName) const {
- // Construct lld command.
- // The output from ld.lld is an HSA code object file.
- ArgStringList LldArgs{
- "-flavor", "gnu", "-shared", "-o", Output.getFilename(), InputFileName};
+ LldArgs.append({"-o", Output.getFilename()});
+ for (auto Input : Inputs)
+ LldArgs.push_back(Input.getFilename());
const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
- C.addCommand(std::make_unique<Command>(JA, *this, Lld, LldArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Lld, LldArgs, Inputs));
}
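
With this rewrite the device link no longer runs llvm-link, opt and llc as separate steps: a single lld invocation performs the (Thin)LTO internally, and codegen options are forwarded to it as -plugin-opt values. The target features are comma-joined behind one -mattr plugin option, roughly as in this sketch with assumed feature names:

    // Sketch of the feature string built above; the real entries come from
    // amdgpu::getAMDGPUTargetFeatures(), the two used here are assumptions.
    #include <string>
    #include <vector>

    std::string buildMAttr() {
      const std::vector<std::string> Features = {"+cumode", "-wavefrontsize64"};
      std::string MAttr = "-plugin-opt=-mattr=";
      for (const auto &F : Features) {
        MAttr += F;
        if (F != Features.back())
          MAttr += ",";
      }
      return MAttr; // "-plugin-opt=-mattr=+cumode,-wavefrontsize64"
    }
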
// Construct a clang-offload-bundler command to bundle code objects for
@@ -220,13 +120,84 @@ void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(OutputFileName));
+ auto BundlerOutputArg = Args.MakeArgString(
+ std::string("-outputs=").append(std::string(OutputFileName)));
BundlerArgs.push_back(BundlerOutputArg);
const char *Bundler = Args.MakeArgString(
T.getToolChain().GetProgramPath("clang-offload-bundler"));
- C.addCommand(std::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(),
+ Bundler, BundlerArgs, Inputs));
+}
+
+/// Add the generated HIP object file, which has the device images embedded,
+/// to the argument list for the host link. Using MC directives, embed the
+/// device code and define the symbols required by the code generation so that
+/// the image can be retrieved at runtime.
+void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const JobAction &JA) const {
+ const ToolChain &TC = getToolChain();
+ std::string Name =
+ std::string(llvm::sys::path::stem(Output.getFilename()));
+
+ // Create Temp Object File Generator,
+ // Offload Bundled file and Bundled Object file.
+ // Keep them if save-temps is enabled.
+ const char *McinFile;
+ const char *BundleFile;
+ if (C.getDriver().isSaveTempsEnabled()) {
+ McinFile = C.getArgs().MakeArgString(Name + ".mcin");
+ BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
+ } else {
+ auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
+ McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
+ auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
+ BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
+ }
+ constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, *this);
+
+ // Create a buffer to write the contents of the temp obj generator.
+ std::string ObjBuffer;
+ llvm::raw_string_ostream ObjStream(ObjBuffer);
+
+ // Add MC directives to embed target binaries. We ensure that each
+ // section and image is 16-byte aligned. This is not mandatory, but
+ // increases the likelihood that the data is aligned with a cache block
+ // on common host machines.
+ ObjStream << "# HIP Object Generator\n";
+ ObjStream << "# *** Automatically generated by Clang ***\n";
+ ObjStream << " .type __hip_fatbin,@object\n";
+ ObjStream << " .section .hip_fatbin,\"aMS\",@progbits,1\n";
+ ObjStream << " .data\n";
+ ObjStream << " .globl __hip_fatbin\n";
+ ObjStream << " .p2align 3\n";
+ ObjStream << "__hip_fatbin:\n";
+ ObjStream << " .incbin \"" << BundleFile << "\"\n";
+ ObjStream.flush();
+
+ // Dump the contents of the temp object file generator if the user requested it.
+ // We support this option to enable testing of behavior with -###.
+ if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
+ llvm::errs() << ObjBuffer;
+
+ // Open the generator input file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Objf << ObjBuffer;
+
+ ArgStringList McArgs{"-o", Output.getFilename(),
+ McinFile, "--filetype=obj"};
+ const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Mc, McArgs, Inputs));
}
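
In place of the old HIP offloading linker script, the driver now writes a small assembler "generator" file and runs llvm-mc on it, so the bundled fat binary is pulled into a host object through .incbin. Reconstructed from the ObjStream writes above, the generated file looks roughly like the string below; the bundle name is a hypothetical stand-in for BundleFile:

    // Sketch: approximate contents of the <name>.mcin input handed to llvm-mc.
    static const char *HipObjGenerator =
        "# HIP Object Generator\n"
        "# *** Automatically generated by Clang ***\n"
        " .type __hip_fatbin,@object\n"
        " .section .hip_fatbin,\"aMS\",@progbits,1\n"
        " .data\n"
        " .globl __hip_fatbin\n"
        " .p2align 3\n"
        "__hip_fatbin:\n"
        " .incbin \"offload_bundle.hipfb\"\n";
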
// For amdgcn the inputs of the linker job are device bitcode and output is
@@ -236,37 +207,20 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ if (Inputs.size() > 0 &&
+ Inputs[0].getType() == types::TY_Image &&
+ JA.getType() == types::TY_Object)
+ return constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs, Args, JA);
if (JA.getType() == types::TY_HIP_FATBIN)
return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
- assert(getToolChain().getTriple().getArch() == llvm::Triple::amdgcn &&
- "Unsupported target");
-
- std::string SubArchName = JA.getOffloadingArch();
- assert(StringRef(SubArchName).startswith("gfx") && "Unsupported sub arch");
-
- // Prefix for temporary file name.
- std::string Prefix = llvm::sys::path::stem(Inputs[0].getFilename()).str();
- if (!C.getDriver().isSaveTempsEnabled())
- Prefix += "-" + SubArchName;
-
- // Each command outputs different files.
- const char *LLVMLinkCommand =
- constructLLVMLinkCommand(C, JA, Inputs, Args, SubArchName, Prefix);
- const char *OptCommand = constructOptCommand(C, JA, Inputs, Args, SubArchName,
- Prefix, LLVMLinkCommand);
- if (C.getDriver().isSaveTempsEnabled())
- constructLlcCommand(C, JA, Inputs, Args, SubArchName, Prefix, OptCommand,
- /*OutputIsAsm=*/true);
- const char *LlcCommand =
- constructLlcCommand(C, JA, Inputs, Args, SubArchName, Prefix, OptCommand);
- constructLldCommand(C, JA, Inputs, Output, Args, LlcCommand);
+ return constructLldCommand(C, JA, Inputs, Output, Args);
}
HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
const ToolChain &HostTC, const ArgList &Args)
- : ToolChain(D, Triple, Args), HostTC(HostTC) {
+ : ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
// Lookup binaries into the driver directory, this is used to
// discover the clang-offload-bundler executable.
getProgramPaths().push_back(getDriver().Dir);
@@ -278,20 +232,16 @@ void HIPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
+ StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
(void) GpuArch;
assert(DeviceOffloadingKind == Action::OFK_HIP &&
"Only HIP offloading kinds are supported for GPUs.");
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+ const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- CC1Args.push_back("-target-cpu");
- CC1Args.push_back(DriverArgs.MakeArgStringRef(GpuArch));
CC1Args.push_back("-fcuda-is-device");
- if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero, false))
- CC1Args.push_back("-fcuda-flush-denormals-to-zero");
-
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
@@ -299,6 +249,8 @@ void HIPToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
CC1Args.push_back("-fgpu-rdc");
+ else
+ CC1Args.append({"-mllvm", "-amdgpu-internalize-symbols"});
StringRef MaxThreadsPerBlock =
DriverArgs.getLastArgValue(options::OPT_gpu_max_threads_per_block_EQ);
@@ -327,46 +279,50 @@ void HIPToolChain::addClangTargetOptions(
ArgStringList LibraryPaths;
// Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (auto Path :
- DriverArgs.getAllArgValues(options::OPT_hip_device_lib_path_EQ))
+ for (auto Path : RocmInstallation.getRocmDeviceLibPathArg())
LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
- addDirectoryList(DriverArgs, LibraryPaths, "-L", "HIP_DEVICE_LIB_PATH");
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
- llvm::SmallVector<std::string, 10> BCLibs;
+ // Maintain compatibility with --hip-device-lib.
+ auto BCLibs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibs.empty()) {
+ for (auto Lib : BCLibs)
+ addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
+ } else {
+ if (!RocmInstallation.hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return;
+ }
- // Add bitcode library in --hip-device-lib.
- for (auto Lib : DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ)) {
- BCLibs.push_back(DriverArgs.MakeArgString(Lib));
- }
+ std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ return;
+ }
- // If --hip-device-lib is not set, add the default bitcode libraries.
- if (BCLibs.empty()) {
- // Get the bc lib file name for ISA version. For example,
- // gfx803 => oclc_isa_version_803.amdgcn.bc.
- std::string GFXVersion = GpuArch.drop_front(3).str();
- std::string ISAVerBC = "oclc_isa_version_" + GFXVersion + ".amdgcn.bc";
-
- llvm::StringRef FlushDenormalControlBC;
- if (DriverArgs.hasArg(options::OPT_fcuda_flush_denormals_to_zero))
- FlushDenormalControlBC = "oclc_daz_opt_on.amdgcn.bc";
- else
- FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
-
- llvm::StringRef WaveFrontSizeBC;
- if (stoi(GFXVersion) < 1000)
- WaveFrontSizeBC = "oclc_wavefrontsize64_on.amdgcn.bc";
- else
- WaveFrontSizeBC = "oclc_wavefrontsize64_off.amdgcn.bc";
-
- BCLibs.append({"hip.amdgcn.bc", "ocml.amdgcn.bc", "ockl.amdgcn.bc",
- "oclc_finite_only_off.amdgcn.bc", FlushDenormalControlBC,
- "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
- "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC,
- WaveFrontSizeBC});
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ getDefaultDenormsAreZeroForTarget(Kind));
+ // TODO: Check standard C++ flags?
+ bool FiniteOnly = false;
+ bool UnsafeMathOpt = false;
+ bool FastRelaxedMath = false;
+ bool CorrectSqrt = true;
+ bool Wave64 = isWave64(DriverArgs, Kind);
+
+ // Add the HIP specific bitcode library.
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getHIPPath()));
+
+ // Add the generic set of libraries.
+ RocmInstallation.addCommonBitcodeLibCC1Args(
+ DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
+ UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
}
- for (auto Lib : BCLibs)
- addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
}
llvm::opt::DerivedArgList *
@@ -381,42 +337,12 @@ HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
const OptTable &Opts = getDriver().getOpts();
for (Arg *A : Args) {
- if (A->getOption().matches(options::OPT_Xarch__)) {
- // Skip this argument unless the architecture matches BoundArch.
- if (BoundArch.empty() || A->getValue(0) != BoundArch)
- continue;
-
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
- XarchArg->setBaseArg(A);
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
- }
DAL->append(A);
}
if (!BoundArch.empty()) {
- DAL->eraseArg(options::OPT_march_EQ);
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
+ DAL->eraseArg(options::OPT_mcpu_EQ);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ), BoundArch);
}
return DAL;
@@ -451,6 +377,11 @@ void HIPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}
+void HIPToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
SanitizerMask HIPToolChain::getSupportedSanitizers() const {
// The HIPToolChain only supports sanitizers in the sense that it allows
// sanitizer arguments on the command line if they are supported by the host
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h
index c4f944e458bf..5e2be7138579 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h
@@ -11,6 +11,7 @@
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Tool.h"
+#include "AMDGPU.h"
namespace clang {
namespace driver {
@@ -37,34 +38,17 @@ public:
const char *LinkingOutput) const override;
private:
- /// \return llvm-link output file name.
- const char *constructLLVMLinkCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix) const;
-
- /// \return opt output file name.
- const char *constructOptCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix,
- const char *InputFileName) const;
-
- /// \return llc output file name.
- const char *constructLlcCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix,
- const char *InputFileName,
- bool OutputIsAsm = false) const;
void constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const InputInfo &Output,
- const llvm::opt::ArgList &Args,
- const char *InputFileName) const;
+ const llvm::opt::ArgList &Args) const;
+
+ // Construct command for creating Object from HIP fatbin.
+ void constructGenerateObjFileFromHIPFatBinary(Compilation &C,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const JobAction &JA) const;
};
} // end namespace AMDGCN
@@ -72,7 +56,7 @@ private:
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY HIPToolChain : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY HIPToolChain final : public ROCMToolChain {
public:
HIPToolChain(const Driver &D, const llvm::Triple &Triple,
const ToolChain &HostTC, const llvm::opt::ArgList &Args);
@@ -106,6 +90,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
SanitizerMask getSupportedSanitizers() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index e4d9ea8a70f9..775f6e1094fa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -31,7 +31,6 @@ static StringRef getDefaultHvxLength(StringRef Cpu) {
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
- .Case("v66", "128b")
.Default("128b");
}
@@ -48,13 +47,12 @@ static void handleHVXWarnings(const Driver &D, const ArgList &Args) {
// Handle hvx target features explicitly.
static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
std::vector<StringRef> &Features,
- bool &HasHVX) {
+ StringRef Cpu, bool &HasHVX) {
// Handle HVX warnings.
handleHVXWarnings(D, Args);
// Add the +hvx* features based on commandline flags.
StringRef HVXFeature, HVXLength;
- StringRef Cpu(toolchains::HexagonToolChain::GetTargetCPUVersion(Args));
// Handle -mhvx, -mhvx=, -mno-hvx.
if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx,
@@ -108,7 +106,15 @@ void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back(UseLongCalls ? "+long-calls" : "-long-calls");
bool HasHVX = false;
- handleHVXTargetFeatures(D, Args, Features, HasHVX);
+ StringRef Cpu(toolchains::HexagonToolChain::GetTargetCPUVersion(Args));
+ // 't' in Cpu denotes tiny-core micro-architecture. For now, the co-processors
+ // have no dependency on micro-architecture.
+ const bool TinyCore = Cpu.contains('t');
+
+ if (TinyCore)
+ Cpu = Cpu.take_front(Cpu.size() - 1);
+
+ handleHVXTargetFeatures(D, Args, Features, Cpu, HasHVX);
if (HexagonToolChain::isAutoHVXEnabled(Args) && !HasHVX)
D.Diag(diag::warn_drv_vectorize_needs_hvx);
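
The trailing 't' marks a Hexagon tiny-core variant; it is stripped before the HVX feature handling because, as the comment above notes, the co-processor features depend only on the base architecture. A tiny sketch with an assumed -mcpu value:

    // Sketch: an assumed tiny-core CPU name "v67t" is reduced to "v67" before
    // handleHVXTargetFeatures() sees it, mirroring the hunk above.
    #include "llvm/ADT/StringRef.h"

    llvm::StringRef baseHexagonCpu() {
      llvm::StringRef Cpu = "v67t";            // hypothetical tiny-core CPU
      if (Cpu.contains('t'))                   // tiny-core marker
        Cpu = Cpu.take_front(Cpu.size() - 1);  // -> "v67"
      return Cpu;
    }
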
@@ -183,7 +189,8 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
auto *Exec = Args.MakeArgString(HTC.GetProgramPath(AsName));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA,
@@ -258,18 +265,43 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
UseG0 = G.getValue() == 0;
}
- //----------------------------------------------------------------------------
- //
- //----------------------------------------------------------------------------
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
+ if (HTC.getTriple().isMusl()) {
+ if (!Args.hasArg(options::OPT_shared, options::OPT_static))
+ CmdArgs.push_back("-dynamic-linker=/lib/ld-musl-hexagon.so.1");
+
+ if (!Args.hasArg(options::OPT_shared, options::OPT_nostartfiles,
+ options::OPT_nostdlib))
+ CmdArgs.push_back(Args.MakeArgString(D.SysRoot + "/usr/lib/crt1.o"));
+ else if (Args.hasArg(options::OPT_shared) &&
+ !Args.hasArg(options::OPT_nostartfiles, options::OPT_nostdlib))
+ CmdArgs.push_back(Args.MakeArgString(D.SysRoot + "/usr/lib/crti.o"));
+
+ CmdArgs.push_back(
+ Args.MakeArgString(StringRef("-L") + D.SysRoot + "/usr/lib"));
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
+ AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ CmdArgs.push_back("-lclang_rt.builtins-hexagon");
+ CmdArgs.push_back("-lc");
+ }
+ if (D.CCCIsCXX()) {
+ if (HTC.ShouldLinkCXXStdlib(Args))
+ HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+ return;
+ }
+
//----------------------------------------------------------------------------
// moslib
//----------------------------------------------------------------------------
std::vector<std::string> OsLibs;
bool HasStandalone = false;
-
for (const Arg *A : Args.filtered(options::OPT_moslib_EQ)) {
A->claim();
OsLibs.emplace_back(A->getValue());
@@ -375,7 +407,8 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
LinkingOutput);
const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// Hexagon tools end.
@@ -481,6 +514,22 @@ HexagonToolChain::HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
HexagonToolChain::~HexagonToolChain() {}
+void HexagonToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
Tool *HexagonToolChain::buildAssembler() const {
return new tools::hexagon::Assembler(*this);
}
@@ -517,6 +566,14 @@ unsigned HexagonToolChain::getOptimizationLevel(
void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
+
+ bool UseInitArrayDefault = getTriple().isMusl();
+
+ if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array,
+ UseInitArrayDefault))
+ CC1Args.push_back("-fno-use-init-array");
+
if (DriverArgs.hasArg(options::OPT_ffixed_r19)) {
CC1Args.push_back("-target-feature");
CC1Args.push_back("+reserved-r19");
@@ -534,12 +591,37 @@ void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
const Driver &D = getDriver();
+ if (!D.SysRoot.empty()) {
+ SmallString<128> P(D.SysRoot);
+ if (getTriple().isMusl())
+ llvm::sys::path::append(P, "usr/include");
+ else
+ llvm::sys::path::append(P, "include");
+ addExternCSystemInclude(DriverArgs, CC1Args, P.str());
+ return;
+ }
+
std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
D.PrefixDirs);
addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
}
-
+void HexagonToolChain::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ if (!D.SysRoot.empty() && getTriple().isMusl())
+ addLibStdCXXIncludePaths(D.SysRoot + "/usr/include/c++/v1", "", "", "", "",
+ "", DriverArgs, CC1Args);
+ else if (getTriple().isMusl())
+ addLibStdCXXIncludePaths("/usr/include/c++/v1", "", "", "", "", "",
+ DriverArgs, CC1Args);
+ else {
+ std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ addLibStdCXXIncludePaths(TargetDir, "/hexagon/include/c++/v1", "", "", "",
+ "", DriverArgs, CC1Args);
+ }
+}
void HexagonToolChain::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
@@ -552,14 +634,22 @@ void HexagonToolChain::addLibStdCxxIncludePaths(
ToolChain::CXXStdlibType
HexagonToolChain::GetCXXStdlibType(const ArgList &Args) const {
Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
- if (!A)
- return ToolChain::CST_Libstdcxx;
-
+ if (!A) {
+ if (getTriple().isMusl())
+ return ToolChain::CST_Libcxx;
+ else
+ return ToolChain::CST_Libstdcxx;
+ }
StringRef Value = A->getValue();
- if (Value != "libstdc++")
+ if (Value != "libstdc++" && Value != "libc++")
getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
- return ToolChain::CST_Libstdcxx;
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ else if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ else
+ return ToolChain::CST_Libstdcxx;
}
bool HexagonToolChain::isAutoHVXEnabled(const llvm::opt::ArgList &Args) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
index d7b4a13d3a4f..c32cb7f09591 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
@@ -20,10 +20,10 @@ namespace hexagon {
// For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile
// and Compile.
// We simply use "clang -cc1" for those actions.
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("hexagon::Assembler", "hexagon-as", TC) {}
+ : Tool("hexagon::Assembler", "hexagon-as", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -35,9 +35,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("hexagon::Linker", "hexagon-ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("hexagon::Linker", "hexagon-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -81,10 +81,18 @@ public:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- const char *getDefaultLinker() const override { return "hexagon-link"; }
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ const char *getDefaultLinker() const override {
+ return getTriple().isMusl() ? "ld.lld" : "hexagon-link";
+ }
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
bool IsIntegratedAssemblerDefault() const override {
return true;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
index ee91f7d73b9c..a700d7b9064c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -63,18 +63,33 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+ Multilibs = GCCInstallation.getMultilibs();
+ SelectedMultilib = GCCInstallation.getMultilib();
std::string SysRoot = computeSysRoot();
+ ToolChain::path_list &PPaths = getProgramPaths();
+
+ Generic_GCC::PushPPaths(PPaths);
+
+ // The selection of paths to try here is designed to match the patterns which
+ // the GCC driver itself uses, as this is part of the GCC-compatible driver.
+ // This was determined by running GCC in a fake filesystem, creating all
+ // possible permutations of these directories, and seeing which ones it added
+ // to the link paths.
path_list &Paths = getFilePaths();
- const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
- // If we are currently running Clang inside of the requested system root, add
- // its parent library paths to those searched.
+ Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
+
+ // Similar to the logic for GCC above, if we are currently running Clang
+ // inside of the requested system root, add its parent library paths to
+ // those searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot)) {
@@ -88,8 +103,11 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
- // If we are currently running Clang inside of the requested system root, add
- // its parent library path to those searched.
+ Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
+
+ // Similar to the logic for GCC above, if we are currently running Clang
+ // inside of the requested system root, add its parent library path to those
+ // searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot))
@@ -107,13 +125,6 @@ Tool *Hurd::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
-std::string Hurd::computeSysRoot() const {
- if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot;
-
- return std::string();
-}
-
std::string Hurd::getDynamicLinker(const ArgList &Args) const {
if (getArch() == llvm::Triple::x86)
return "/lib/ld.so";
@@ -148,7 +159,7 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(Dirs, ":");
for (StringRef Dir : Dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(Dir) ? StringRef(SysRoot) : "";
+ llvm::sys::path::is_absolute(Dir) ? "" : StringRef(SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + Dir);
}
return;
@@ -156,6 +167,9 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Lacking those, try to detect the correct set of system includes for the
// target triple.
+
+ AddMultilibIncludeArgs(DriverArgs, CC1Args);
+
if (getTriple().getArch() == llvm::Triple::x86) {
std::string Path = SysRoot + "/usr/include/i386-gnu";
if (D.getVFS().exists(Path))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.h
index 8f88d7e8e58e..0612a55280a8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.h
@@ -27,8 +27,6 @@ public:
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- std::string computeSysRoot() const;
-
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/InterfaceStubs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
index 8f947e79bd1f..f7c11421e809 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
@@ -54,8 +54,9 @@ void Merger::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(InputFilename.c_str()));
}
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Merger),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Merger), CmdArgs,
+ Inputs));
}
} // namespace ifstool
} // namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index 6532c899492a..180350476c38 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -208,15 +208,6 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
return Triple.isArch32Bit() ? "lib" : "lib64";
}
-static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
- const Multilib &Multilib,
- StringRef InstallPath,
- ToolChain::path_list &Paths) {
- if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(Multilib))
- addPathIfExists(D, InstallPath + Path, Paths);
-}
-
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
@@ -224,21 +215,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
SelectedMultilib = GCCInstallation.getMultilib();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
-
- // Cross-compiling binutils and GCC installations (vanilla and openSUSE at
- // least) put various tools in a triple-prefixed directory off of the parent
- // of the GCC installation. We use the GCC triple here to ensure that we end
- // up with tools that support the same amount of cross compiling as the
- // detected GCC installation. For example, if we find a GCC installation
- // targeting x86_64, but it is a bi-arch GCC installation, it can also be
- // used to target i386.
- // FIXME: This seems unlikely to be Linux-specific.
ToolChain::path_list &PPaths = getProgramPaths();
- if (GCCInstallation.isValid()) {
- PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
- GCCInstallation.getTriple().str() + "/bin")
- .str());
- }
+
+ Generic_GCC::PushPPaths(PPaths);
Distro Distro(D.getVFS(), Triple);
@@ -253,10 +232,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
- // The lld default page size is too large for Aarch64, which produces much
- // larger .so files and images for arm64 device targets. Use 4KB page size
- // for Android arm64 targets instead.
- if (Triple.isAArch64() && Triple.isAndroid()) {
+ // Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
+ // from 11 onwards defaults max-page-size to 65536 for both ARM and AArch64.
+ if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("max-page-size=4096");
}
@@ -314,60 +292,10 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// to the link paths.
path_list &Paths = getFilePaths();
- const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
- // Add the multilib suffixed paths where they are available.
- if (GCCInstallation.isValid()) {
- const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
- const std::string &LibPath = GCCInstallation.getParentLibPath();
-
- // Add toolchain / multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
- GCCInstallation.getInstallPath(), Paths);
-
- // Sourcery CodeBench MIPS toolchain holds some libraries under
- // a biarch-like suffix of the GCC installation.
- addPathIfExists(
- D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
- Paths);
-
- // GCC cross compiling toolchains will install target libraries which ship
- // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
- // any part of the GCC installation in
- // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
- // debatable, but is the reality today. We need to search this tree even
- // when we have a sysroot somewhere else. It is the responsibility of
- // whomever is doing the cross build targeting a sysroot using a GCC
- // installation that is *not* within the system root to ensure two things:
- //
- // 1) Any DSOs that are linked in from this tree or from the install path
- // above must be present on the system root and found via an
- // appropriate rpath.
- // 2) There must not be libraries installed into
- // <prefix>/<triple>/<libdir> unless they should be preferred over
- // those within the system root.
- //
- // Note that this matches the GCC behavior. See the below comment for where
- // Clang diverges from GCC's behavior.
- addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
- OSLibDir + SelectedMultilib.osSuffix(),
- Paths);
-
- // If the GCC installation we found is inside of the sysroot, we want to
- // prefer libraries installed in the parent prefix of the GCC installation.
- // It is important to *not* use these paths when the GCC installation is
- // outside of the system root as that can pick up unintended libraries.
- // This usually happens when there is an external cross compiler on the
- // host system, and a more minimal sysroot available that is the target of
- // the cross. Note that GCC does include some of these directories in some
- // configurations but this seems somewhere between questionable and simply
- // a bug.
- if (StringRef(LibPath).startswith(SysRoot)) {
- addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
- addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
- }
- }
+ Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
// Similar to the logic for GCC above, if we are currently running Clang inside
// of the requested system root, add its parent library paths to
@@ -411,36 +339,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
}
- // Try walking via the GCC triple path in case of biarch or multiarch GCC
- // installations with strange symlinks.
- if (GCCInstallation.isValid()) {
- addPathIfExists(D,
- SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
- "/../../" + OSLibDir,
- Paths);
-
- // Add the 'other' biarch variant path
- Multilib BiarchSibling;
- if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
- addPathIfExists(D, GCCInstallation.getInstallPath() +
- BiarchSibling.gccSuffix(),
- Paths);
- }
-
- // See comments above on the multilib variant for details of why this is
- // included even from outside the sysroot.
- const std::string &LibPath = GCCInstallation.getParentLibPath();
- const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
- const Multilib &Multilib = GCCInstallation.getMultilib();
- addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib" +
- Multilib.osSuffix(),
- Paths);
-
- // See comments above on the multilib variant for details of why this is
- // only included from within the sysroot.
- if (StringRef(LibPath).startswith(SysRoot))
- addPathIfExists(D, LibPath, Paths);
- }
+ Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
// Similar to the logic for GCC above, if we are currently running Clang
// inside of the requested system root, add its parent library path to those
@@ -464,6 +363,10 @@ bool Linux::HasNativeLLVMSupport() const { return true; }
Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
+Tool *Linux::buildStaticLibTool() const {
+ return new tools::gnutools::StaticLibTool(*this);
+}
+
Tool *Linux::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
@@ -638,6 +541,8 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = X32 ? "ld-linux-x32.so.2" : "ld-linux-x86-64.so.2";
break;
}
+ case llvm::Triple::ve:
+ return "/opt/nec/ve/lib/ld-linux-ve.so.1";
}
if (Distro == Distro::Exherbo &&
@@ -674,7 +579,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
@@ -683,15 +588,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Lacking those, try to detect the correct set of system includes for the
// target triple.
- // Add include directories specific to the selected multilib set and multilib.
- if (GCCInstallation.isValid()) {
- const auto &Callback = Multilibs.includeDirsCallback();
- if (Callback) {
- for (const auto &Path : Callback(GCCInstallation.getMultilib()))
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
- }
- }
+ AddMultilibIncludeArgs(DriverArgs, CC1Args);
// Implement generic Debian multiarch support.
const StringRef X86_64MultiarchIncludeDirs[] = {
@@ -906,6 +803,11 @@ void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void Linux::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (GCCInstallation.isValid()) {
@@ -944,6 +846,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::thumb ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumbeb;
+ const bool IsSystemZ = getTriple().getArch() == llvm::Triple::systemz;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -956,7 +859,8 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::SafeStack;
if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::DataFlow;
- if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64 ||
+ IsSystemZ)
Res |= SanitizerKind::Leak;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
@@ -976,17 +880,33 @@ SanitizerMask Linux::getSupportedSanitizers() const {
void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
-
- // Add linker option -u__llvm_runtime_variable to cause runtime
+ // Add linker option -u__llvm_profile_runtime to cause runtime
// initialization module to be linked in.
- if ((!Args.hasArg(options::OPT_coverage)) &&
- (!Args.hasArg(options::OPT_ftest_coverage)))
+ if (needsProfileRT(Args))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
+llvm::DenormalMode
+Linux::getDefaultDenormalModeForType(const llvm::opt::ArgList &DriverArgs,
+ const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ switch (getTriple().getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64: {
+ std::string Unused;
+ // DAZ and FTZ are turned on in crtfastmath.o
+ if (!DriverArgs.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
+ isFastMathRuntimeAvailable(DriverArgs, Unused))
+ return llvm::DenormalMode::getPreserveSign();
+ return llvm::DenormalMode::getIEEE();
+ }
+ default:
+ return llvm::DenormalMode::getIEEE();
+ }
+}
+
void Linux::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
index 923ebecbd215..6b16b0e64990 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
@@ -31,6 +31,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
@@ -40,7 +42,7 @@ public:
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- virtual std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
@@ -48,9 +50,14 @@ public:
std::vector<std::string> ExtraOpts;
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
index bc77f015915d..b0bc2e014b48 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -143,7 +143,7 @@ std::string MSP430ToolChain::computeSysRoot() const {
else
llvm::sys::path::append(Dir, getDriver().Dir, "..", getTriple().str());
- return Dir.str();
+ return std::string(Dir.str());
}
void MSP430ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -227,6 +227,7 @@ void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
index b5308a8dd687..58fd158cd12f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
@@ -44,7 +44,7 @@ protected:
Tool *buildLinker() const override;
private:
- std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
};
} // end namespace toolchains
@@ -52,10 +52,9 @@ private:
namespace tools {
namespace msp430 {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : GnuTool("MSP430::Linker", "msp430-elf-ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("MSP430::Linker", "msp430-elf-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 4e143f6a5d3f..6b3c00e2ab6d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -128,13 +128,13 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath);
llvm::StringRef ParentFilename = llvm::sys::path::filename(ParentPath);
if (ParentFilename == "VC") {
- Path = ParentPath;
+ Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
return true;
}
if (ParentFilename == "x86ret" || ParentFilename == "x86chk"
|| ParentFilename == "amd64ret" || ParentFilename == "amd64chk") {
- Path = ParentPath;
+ Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::DevDivInternal;
return true;
}
@@ -163,7 +163,7 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
for (int i = 0; i < 3; ++i)
ToolChainPath = llvm::sys::path::parent_path(ToolChainPath);
- Path = ToolChainPath;
+ Path = std::string(ToolChainPath);
VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
return true;
}
@@ -261,7 +261,7 @@ static bool findVCToolChainViaSetupConfig(std::string &Path,
if (!llvm::sys::fs::is_directory(ToolchainPath))
return false;
- Path = ToolchainPath.str();
+ Path = std::string(ToolchainPath.str());
VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
return true;
#endif
@@ -282,7 +282,7 @@ static bool findVCToolChainViaRegistry(std::string &Path,
VSInstallPath.c_str(), VSInstallPath.find(R"(\Common7\IDE)")));
llvm::sys::path::append(VCPath, "VC");
- Path = VCPath.str();
+ Path = std::string(VCPath.str());
VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
return true;
}
@@ -300,7 +300,8 @@ static std::string FindVisualStudioExecutable(const ToolChain &TC,
SmallString<128> FilePath(MSVC.getSubDirectoryPath(
toolchains::MSVCToolChain::SubDirectoryType::Bin));
llvm::sys::path::append(FilePath, Exe);
- return llvm::sys::fs::can_execute(FilePath) ? FilePath.str() : Exe;
+ return std::string(llvm::sys::fs::can_execute(FilePath) ? FilePath.str()
+ : Exe);
}
void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -349,6 +350,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
+ // Add the compiler-rt library directories to libpath if they exist to help
+ // the linker find the various sanitizer, builtin, and profiling runtimes.
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (TC.getVFS().exists(LibPath))
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
+ }
+ auto CRTPath = TC.getCompilerRTPath();
+ if (TC.getVFS().exists(CRTPath))
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + CRTPath));
+
if (!C.getDriver().IsCLMode() && Args.hasArg(options::OPT_L))
for (const auto &LibPath : Args.getAllArgValues(options::OPT_L))
CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
@@ -581,8 +592,9 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkPath = TC.GetProgramPath(Linker.str().c_str());
}
- auto LinkCmd = std::make_unique<Command>(
- JA, *this, Args.MakeArgString(linkPath), CmdArgs, Inputs);
+ auto LinkCmd =
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF16(),
+ Args.MakeArgString(linkPath), CmdArgs, Inputs);
if (!Environment.empty())
LinkCmd->setEnvironment(Environment);
C.addCommand(std::move(LinkCmd));
@@ -722,13 +734,15 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
CmdArgs.push_back(Fo);
std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe");
- return std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs);
+ return std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF16(),
+ Args.MakeArgString(Exec), CmdArgs, Inputs);
}
MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
+ : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
+ RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -786,8 +800,14 @@ void MSVCToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void MSVCToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void MSVCToolChain::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
// Windows SDKs and VC Toolchains group their contents into subdirectories based
@@ -892,7 +912,7 @@ MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
llvm::sys::path::append(Path, "lib", SubdirName);
break;
}
- return Path.str();
+ return std::string(Path.str());
}
#ifdef _WIN32
@@ -1046,7 +1066,7 @@ static bool getWindows10SDKVersionFromPath(const std::string &SDKPath,
if (!CandidateName.startswith("10."))
continue;
if (CandidateName > SDKVersion)
- SDKVersion = CandidateName;
+ SDKVersion = std::string(CandidateName);
}
return !SDKVersion.empty();
@@ -1129,7 +1149,7 @@ bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
}
}
- path = libPath.str();
+ path = std::string(libPath.str());
return true;
}
@@ -1168,7 +1188,7 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(std::string &Path) const {
llvm::SmallString<128> LibPath(UniversalCRTSdkPath);
llvm::sys::path::append(LibPath, "Lib", UCRTVersion, "ucrt", ArchName);
- Path = LibPath.str();
+ Path = std::string(LibPath.str());
return true;
}
@@ -1475,14 +1495,15 @@ static void TranslateDArg(Arg *A, llvm::opt::DerivedArgList &DAL,
return;
}
- std::string NewVal = Val;
+ std::string NewVal = std::string(Val);
NewVal[Hash] = '=';
DAL.AddJoinedArg(A, Opts.getOption(options::OPT_D), NewVal);
}
llvm::opt::DerivedArgList *
MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- StringRef BoundArch, Action::OffloadKind) const {
+ StringRef BoundArch,
+ Action::OffloadKind OFK) const {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
@@ -1521,7 +1542,8 @@ MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
} else if (A->getOption().matches(options::OPT_D)) {
// Translate -Dfoo#bar into -Dfoo=bar.
TranslateDArg(A, *DAL, Opts);
- } else {
+ } else if (OFK != Action::OFK_HIP) {
+ // HIP Toolchain translates input args by itself.
DAL->append(A);
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index 41a69a82fecf..dba99ed77246 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
+#include "AMDGPU.h"
#include "Cuda.h"
#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Compilation.h"
@@ -23,9 +24,7 @@ namespace tools {
namespace visualstudio {
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("visualstudio::Linker", "linker", TC, RF_Full,
- llvm::sys::WEM_UTF16) {}
+ Linker(const ToolChain &TC) : Tool("visualstudio::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -39,8 +38,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
public:
Compiler(const ToolChain &TC)
- : Tool("visualstudio::Compiler", "compiler", TC, RF_Full,
- llvm::sys::WEM_UTF16) {}
+ : Tool("visualstudio::Compiler", "compiler", TC) {}
bool hasIntegratedAssembler() const override { return true; }
bool hasIntegratedCPP() const override { return true; }
@@ -125,6 +123,9 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
bool getWindowsSDKLibraryPath(std::string &path) const;
/// Check if Universal CRT should be used if available
bool getUniversalCRTLibraryPath(std::string &path) const;
@@ -155,6 +156,7 @@ private:
std::string VCToolChainPath;
ToolsetLayout VSLayout = ToolsetLayout::OlderVS;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index 8f24384e688b..a1a1b413fb6c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -18,6 +18,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::diag;
@@ -49,7 +50,8 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
@@ -198,6 +200,17 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
TC.AddFilePathLibArgs(Args, CmdArgs);
+
+ // Add the compiler-rt library directories if they exist to help
+ // the linker find the various sanitizer, builtin, and profiling runtimes.
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (TC.getVFS().exists(LibPath))
+ CmdArgs.push_back(Args.MakeArgString("-L" + LibPath));
+ }
+ auto CRTPath = TC.getCompilerRTPath();
+ if (TC.getVFS().exists(CRTPath))
+ CmdArgs.push_back(Args.MakeArgString("-L" + CRTPath));
+
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
// TODO: Add profile stuff here
@@ -292,21 +305,25 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lkernel32");
}
- if (Args.hasArg(options::OPT_static))
+ if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("--end-group");
- else
+ } else {
AddLibGCC(Args, CmdArgs);
+ if (!HasWindowsApp)
+ CmdArgs.push_back("-lkernel32");
+ }
}
if (!Args.hasArg(options::OPT_nostartfiles)) {
// Add crtfastmath.o if available and fast math is enabled.
- TC.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+ TC.addFastMathRuntimeIfAvailable(Args, CmdArgs);
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
}
}
const char *Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
@@ -323,7 +340,7 @@ static bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
continue;
if (CandidateVersion <= Version)
continue;
- Ver = VersionText;
+ Ver = std::string(VersionText);
GccLibDir = LI->path();
}
return Ver.size();
@@ -335,7 +352,7 @@ void toolchains::MinGW::findGccLibDir() {
Archs[0] += "-w64-mingw32";
Archs.emplace_back("mingw32");
if (Arch.empty())
- Arch = Archs[0].str();
+ Arch = std::string(Archs[0].str());
// lib: Arch Linux, Ubuntu, Windows
// lib64: openSUSE Linux
for (StringRef CandidateLib : {"lib", "lib64"}) {
@@ -343,7 +360,7 @@ void toolchains::MinGW::findGccLibDir() {
llvm::SmallString<1024> LibDir(Base);
llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateArch);
if (findGccVersion(LibDir, GccLibDir, Ver)) {
- Arch = CandidateArch;
+ Arch = std::string(CandidateArch);
return;
}
}
@@ -372,7 +389,7 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
StringRef Sep = llvm::sys::path::get_separator();
for (StringRef CandidateSubdir : Subdirs) {
if (llvm::sys::fs::is_directory(ClangRoot + Sep + CandidateSubdir)) {
- Arch = CandidateSubdir;
+ Arch = std::string(CandidateSubdir);
return (ClangRoot + Sep + CandidateSubdir).str();
}
}
@@ -381,7 +398,8 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
+ : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
+ RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().SysRoot.size())
@@ -389,12 +407,13 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
// Look for <clang-bin>/../<triplet>; if found, use <clang-bin>/.. as the
// base as it could still be a base for a gcc setup with libgcc.
else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot())
- Base = llvm::sys::path::parent_path(TargetSubdir.get());
+ Base = std::string(llvm::sys::path::parent_path(TargetSubdir.get()));
else if (llvm::ErrorOr<std::string> GPPName = findGcc())
- Base = llvm::sys::path::parent_path(
- llvm::sys::path::parent_path(GPPName.get()));
+ Base = std::string(llvm::sys::path::parent_path(
+ llvm::sys::path::parent_path(GPPName.get())));
else
- Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
+ Base = std::string(
+ llvm::sys::path::parent_path(getDriver().getInstalledDir()));
Base += llvm::sys::path::get_separator();
findGccLibDir();
@@ -482,8 +501,14 @@ void toolchains::MinGW::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void toolchains::MinGW::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void toolchains::MinGW::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
// Include directories for various hosts:
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
index 6752a405be87..2f1559fcf34c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
@@ -11,8 +11,10 @@
#include "Cuda.h"
#include "Gnu.h"
+#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/Support/ErrorOr.h"
namespace clang {
namespace driver {
@@ -34,8 +36,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("MinGW::Linker", "linker", TC, Tool::RF_Full) {}
+ Linker(const ToolChain &TC) : Tool("MinGW::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -81,6 +82,8 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void printVerboseInfo(raw_ostream &OS) const override;
@@ -91,6 +94,7 @@ protected:
private:
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
std::string Base;
std::string GccLibDir;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
index 6947049ea52e..d0314795620c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
@@ -36,7 +36,8 @@ void tools::minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -88,7 +89,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h
index 1ed6acebab9c..af8d59c5085a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h
@@ -18,10 +18,9 @@ namespace driver {
namespace tools {
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("minix::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("minix::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -31,9 +30,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("minix::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("minix::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
index cfda7f4bb4df..41b7b839f3b3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
@@ -136,5 +136,5 @@ std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
}
llvm::sys::path::append(
Path, Twine("libclang_rt." + Component + "-" + "mips" + Suffix));
- return Path.str();
+ return std::string(Path.str());
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
index 2ce0f13ce3d1..84fe4748b6fa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
@@ -77,8 +77,9 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Exec), CmdArgs,
+ Inputs));
}
void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -112,8 +113,9 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Exec), CmdArgs,
+ Inputs));
}
void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -198,8 +200,9 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(TC.GetProgramPath("sparc-myriad-rtems-ld"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Exec), CmdArgs, Inputs));
}
MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h
index 9f5225fbc62c..cae574bdcfea 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h
@@ -49,9 +49,9 @@ public:
/// whereas the linker, which accepts code for a mixture of Sparc and SHAVE,
/// is in the Myriad namespace.
namespace Myriad {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("shave::Linker", "ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("shave::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
index 97241c884027..15a773675299 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
@@ -193,7 +193,8 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// NaCl Toolchain
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
index ab243f8087bb..5e5fdb583bb6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
@@ -27,9 +27,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("NaCl::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("NaCl::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
index 0100a387d6c3..253ee6ce0f72 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -103,7 +103,8 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -337,7 +338,8 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
index 6d404263f625..8348554fd149 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
@@ -19,10 +19,9 @@ namespace tools {
/// netbsd -- Directly call GNU Binutils assembler and linker
namespace netbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("netbsd::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("netbsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("netbsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("netbsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 80343c0394cb..9c1a9c5f8228 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -89,7 +89,8 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -227,7 +228,8 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
SanitizerMask OpenBSD::getSupportedSanitizers() const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
index 9f1ee0f66402..897eee57ab68 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -19,10 +19,10 @@ namespace tools {
/// openbsd -- Directly call GNU Binutils assembler and linker
namespace openbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("openbsd::Assembler", "assembler", TC) {}
+ : Tool("openbsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +32,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("openbsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("openbsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 4e8840296205..6dc81899cbaa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -30,13 +30,17 @@ void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
false) ||
Args.hasFlag(options::OPT_fprofile_generate,
- options::OPT_fno_profile_instr_generate, false) ||
+ options::OPT_fno_profile_generate, false) ||
Args.hasFlag(options::OPT_fprofile_generate_EQ,
- options::OPT_fno_profile_instr_generate, false) ||
+ options::OPT_fno_profile_generate, false) ||
Args.hasFlag(options::OPT_fprofile_instr_generate,
options::OPT_fno_profile_instr_generate, false) ||
Args.hasFlag(options::OPT_fprofile_instr_generate_EQ,
options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate, false) ||
Args.hasArg(options::OPT_fcreate_profile) ||
Args.hasArg(options::OPT_coverage)))
CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
@@ -62,7 +66,8 @@ void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("orbis-as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
@@ -84,13 +89,13 @@ void tools::PS4cpu::addSanitizerArgs(const ToolChain &TC,
CmdArgs.push_back("--dependent-lib=libSceDbgAddressSanitizer_stub_weak.a");
}
-static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
- const JobAction &JA, const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) {
+void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ static_cast<const toolchains::FreeBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
@@ -139,216 +144,16 @@ static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
CmdArgs.push_back("-lpthread");
}
- const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
-
- C.addCommand(std::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
-}
-
-static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
- const JobAction &JA, const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(T.getToolChain());
- const Driver &D = ToolChain.getDriver();
- ArgStringList CmdArgs;
-
- // Silence warning for "clang -g foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_g_Group);
- // and "clang -emit-llvm foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_emit_llvm);
- // and for "clang -w foo.o -o foo". Other warning options are already
- // handled somewhere else.
- Args.ClaimAllArgs(options::OPT_w);
-
- if (!D.SysRoot.empty())
- CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
-
- if (Args.hasArg(options::OPT_pie))
- CmdArgs.push_back("-pie");
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-Bstatic");
- } else {
- if (Args.hasArg(options::OPT_rdynamic))
- CmdArgs.push_back("-export-dynamic");
- CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-Bshareable");
- } else {
- CmdArgs.push_back("-dynamic-linker");
- CmdArgs.push_back("/libexec/ld-elf.so.1");
- }
- CmdArgs.push_back("--enable-new-dtags");
- }
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- AddPS4SanitizerArgs(ToolChain, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- const char *crt1 = nullptr;
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
- crt1 = "gcrt1.o";
- else if (Args.hasArg(options::OPT_pie))
- crt1 = "Scrt1.o";
- else
- crt1 = "crt1.o";
- }
- if (crt1)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
-
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
-
- const char *crtbegin = nullptr;
- if (Args.hasArg(options::OPT_static))
- crtbegin = "crtbeginT.o";
- else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- crtbegin = "crtbeginS.o";
- else
- crtbegin = "crtbegin.o";
-
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
- }
-
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
-
- if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
- CmdArgs.push_back("--no-demangle");
-
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- // For PS4, we always want to pass libm, libstdc++ and libkernel
- // libraries for both C and C++ compilations.
- CmdArgs.push_back("-lkernel");
- if (D.CCCIsCXX()) {
- if (ToolChain.ShouldLinkCXXStdlib(Args))
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lm_p");
- else
- CmdArgs.push_back("-lm");
- }
- // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
- // the default system libraries. Just mimic this for now.
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lgcc_p");
- else
- CmdArgs.push_back("-lcompiler_rt");
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-lstdc++");
- } else if (Args.hasArg(options::OPT_pg)) {
- CmdArgs.push_back("-lgcc_eh_p");
- } else {
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lstdc++");
- CmdArgs.push_back("--no-as-needed");
- }
-
- if (Args.hasArg(options::OPT_pthread)) {
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lpthread_p");
- else
- CmdArgs.push_back("-lpthread");
- }
-
- if (Args.hasArg(options::OPT_pg)) {
- if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("-lc");
- else {
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back("-lc_p");
- CmdArgs.push_back("-lpthread_p");
- CmdArgs.push_back("--end-group");
- } else {
- CmdArgs.push_back("-lc_p");
- }
- }
- CmdArgs.push_back("-lgcc_p");
- } else {
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("--end-group");
- } else {
- CmdArgs.push_back("-lc");
- }
- CmdArgs.push_back("-lcompiler_rt");
- }
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-lstdc++");
- } else if (Args.hasArg(options::OPT_pg)) {
- CmdArgs.push_back("-lgcc_eh_p");
- } else {
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lstdc++");
- CmdArgs.push_back("--no-as-needed");
- }
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
- else
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ if (Args.hasArg(options::OPT_fuse_ld_EQ)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-fuse-ld" << getToolChain().getTriple().str();
}
const char *Exec =
-#ifdef _WIN32
- Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld.gold"));
-#else
Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
-#endif
-
- C.addCommand(std::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
-}
-
-void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(getToolChain());
- const Driver &D = ToolChain.getDriver();
- bool PS4Linker;
- StringRef LinkerOptName;
- if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
- LinkerOptName = A->getValue();
- if (LinkerOptName != "ps4" && LinkerOptName != "gold")
- D.Diag(diag::err_drv_unsupported_linker) << LinkerOptName;
- }
- if (LinkerOptName == "gold")
- PS4Linker = false;
- else if (LinkerOptName == "ps4")
- PS4Linker = true;
- else
- PS4Linker = !Args.hasArg(options::OPT_shared);
-
- if (PS4Linker)
- ConstructPS4LinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
- else
- ConstructGoldLinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
@@ -382,7 +187,7 @@ toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
if (!llvm::sys::fs::exists(PrefixDir))
getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
} else
- PrefixDir = PS4SDKDir.str();
+ PrefixDir = std::string(PS4SDKDir.str());
SmallString<512> PS4SDKIncludeDir(PrefixDir);
llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
@@ -407,7 +212,7 @@ toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
<< "PS4 system libraries" << PS4SDKLibDir;
return;
}
- getFilePaths().push_back(PS4SDKLibDir.str());
+ getFilePaths().push_back(std::string(PS4SDKLibDir.str()));
}
Tool *toolchains::PS4CPU::buildAssembler() const {
@@ -430,3 +235,17 @@ SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
Res |= SanitizerKind::Vptr;
return Res;
}
+
+void toolchains::PS4CPU::addClangTargetOptions(
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ // PS4 does not use init arrays.
+ if (DriverArgs.hasArg(options::OPT_fuse_init_array)) {
+ Arg *A = DriverArgs.getLastArg(options::OPT_fuse_init_array);
+ getDriver().Diag(clang::diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(DriverArgs) << getTriple().str();
+ }
+
+ CC1Args.push_back("-fno-use-init-array");
+}
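
The PS4 changes above reject an explicit -fuse-ld and make addClangTargetOptions refuse -fuse-init-array while forcing -fno-use-init-array onto cc1. As a rough, self-contained analogue of that target-option logic (plain C++ with standard containers standing in for the driver's ArgList and diagnostic machinery; not the clang sources themselves):

// --- Illustrative sketch, not part of the diff above ---
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Loosely mirrors PS4CPU::addClangTargetOptions: an explicit -fuse-init-array
// is rejected for the target, and -fno-use-init-array is always forced onto
// the cc1 command line.
void addPS4LikeTargetOptions(const std::vector<std::string> &DriverArgs,
                             std::vector<std::string> &CC1Args) {
  if (std::find(DriverArgs.begin(), DriverArgs.end(),
                std::string("-fuse-init-array")) != DriverArgs.end())
    std::cerr << "error: unsupported option '-fuse-init-array' for this target\n";
  CC1Args.push_back("-fno-use-init-array");
}

int main() {
  std::vector<std::string> CC1Args;
  addPS4LikeTargetOptions({"-O2", "-fuse-init-array"}, CC1Args);
  std::cout << CC1Args.back() << "\n"; // prints -fno-use-init-array
  return 0;
}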
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
index 18852b2808cb..968be015d411 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -26,8 +26,7 @@ void addSanitizerArgs(const ToolChain &TC, llvm::opt::ArgStringList &CmdArgs);
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC)
- : Tool("PS4cpu::Assemble", "assembler", TC, RF_Full) {}
+ Assemble(const ToolChain &TC) : Tool("PS4cpu::Assemble", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -40,7 +39,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC, RF_Full) {}
+ Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -88,6 +87,20 @@ public:
// capable of unit splitting.
bool canSplitThinLTOUnit() const override { return false; }
+ void addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
+
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const override {
+ // DAZ and FTZ are on by default.
+ return llvm::DenormalMode::getPreserveSign();
+ }
+
+ bool useRelaxRelocations() const override { return true; }
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index ddc329e3c722..cc912d94cb92 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -119,7 +119,7 @@ std::string RISCVToolChain::computeSysRoot() const {
if (!llvm::sys::fs::exists(SysRootDir))
return std::string();
- return SysRootDir.str();
+ return std::string(SysRootDir.str());
}
void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -142,7 +142,7 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf32lriscv");
}
- std::string Linker = getToolChain().GetProgramPath(getShortName());
+ std::string Linker = getToolChain().GetLinkerPath();
bool WantCRTs =
!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
@@ -191,7 +191,8 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
// RISCV tools end.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
index bb7f64849bcb..4734aee5f1ab 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -39,16 +39,16 @@ protected:
Tool *buildLinker() const override;
private:
- std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
};
} // end namespace toolchains
namespace tools {
namespace RISCV {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("RISCV::Linker", "ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("RISCV::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
new file mode 100644
index 000000000000..962c72fedfe0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
@@ -0,0 +1,228 @@
+//===--- ROCm.h - ROCm installation detector --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
+
+#include "clang/Basic/Cuda.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VersionTuple.h"
+
+namespace clang {
+namespace driver {
+
+/// A class to find a viable ROCM installation
+/// TODO: Generalize to handle libclc.
+class RocmInstallationDetector {
+private:
+ struct ConditionalLibrary {
+ SmallString<0> On;
+ SmallString<0> Off;
+
+ bool isValid() const { return !On.empty() && !Off.empty(); }
+
+ StringRef get(bool Enabled) const {
+ assert(isValid());
+ return Enabled ? On : Off;
+ }
+ };
+
+ // Installation path candidate.
+ struct Candidate {
+ llvm::SmallString<0> Path;
+ bool StrictChecking;
+
+ Candidate(std::string Path, bool StrictChecking = false)
+ : Path(Path), StrictChecking(StrictChecking) {}
+ };
+
+ const Driver &D;
+ bool HasHIPRuntime = false;
+ bool HasDeviceLibrary = false;
+
+ // Default version if not detected or specified.
+ const unsigned DefaultVersionMajor = 3;
+ const unsigned DefaultVersionMinor = 5;
+ const char *DefaultVersionPatch = "0";
+
+ // The version string in Major.Minor.Patch format.
+ std::string DetectedVersion;
+ // Version containing major and minor.
+ llvm::VersionTuple VersionMajorMinor;
+ // Version containing patch.
+ std::string VersionPatch;
+
+ // ROCm path specified by --rocm-path.
+ StringRef RocmPathArg;
+ // ROCm device library paths specified by --rocm-device-lib-path.
+ std::vector<std::string> RocmDeviceLibPathArg;
+ // HIP version specified by --hip-version.
+ StringRef HIPVersionArg;
+ // Whether -nogpulib is specified.
+ bool NoBuiltinLibs = false;
+
+ // Paths
+ SmallString<0> InstallPath;
+ SmallString<0> BinPath;
+ SmallString<0> LibPath;
+ SmallString<0> LibDevicePath;
+ SmallString<0> IncludePath;
+ llvm::StringMap<std::string> LibDeviceMap;
+
+ // Libraries that are always linked.
+ SmallString<0> OCML;
+ SmallString<0> OCKL;
+
+ // Libraries that are always linked depending on the language
+ SmallString<0> OpenCL;
+ SmallString<0> HIP;
+
+ // Libraries swapped based on compile flags.
+ ConditionalLibrary WavefrontSize64;
+ ConditionalLibrary FiniteOnly;
+ ConditionalLibrary UnsafeMath;
+ ConditionalLibrary DenormalsAreZero;
+ ConditionalLibrary CorrectlyRoundedSqrt;
+
+ bool allGenericLibsValid() const {
+ return !OCML.empty() && !OCKL.empty() && !OpenCL.empty() && !HIP.empty() &&
+ WavefrontSize64.isValid() && FiniteOnly.isValid() &&
+ UnsafeMath.isValid() && DenormalsAreZero.isValid() &&
+ CorrectlyRoundedSqrt.isValid();
+ }
+
+ // GPU architectures for which we have raised an error in
+ // CheckRocmVersionSupportsArch.
+ mutable llvm::SmallSet<CudaArch, 4> ArchsWithBadVersion;
+
+ void scanLibDevicePath(llvm::StringRef Path);
+ void ParseHIPVersionFile(llvm::StringRef V);
+ SmallVector<Candidate, 4> getInstallationPathCandidates();
+
+public:
+ RocmInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
+ const llvm::opt::ArgList &Args,
+ bool DetectHIPRuntime = true,
+ bool DetectDeviceLib = false);
+
+ /// Add arguments needed to link default bitcode libraries.
+ void addCommonBitcodeLibCC1Args(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef LibDeviceFile, bool Wave64,
+ bool DAZ, bool FiniteOnly, bool UnsafeMathOpt,
+ bool FastRelaxedMath, bool CorrectSqrt) const;
+
+ /// Emit an error if Version does not support the given Arch.
+ ///
+ /// If either Version or Arch is unknown, does not emit an error. Emits at
+ /// most one error per Arch.
+ void CheckRocmVersionSupportsArch(CudaArch Arch) const;
+
+ /// Check whether we detected a valid HIP runtime.
+ bool hasHIPRuntime() const { return HasHIPRuntime; }
+
+ /// Check whether we detected a valid ROCm device library.
+ bool hasDeviceLibrary() const { return HasDeviceLibrary; }
+
+ /// Print information about the detected ROCm installation.
+ void print(raw_ostream &OS) const;
+
+ /// Get the detected Rocm install's version.
+ // RocmVersion version() const { return Version; }
+
+ /// Get the detected Rocm installation path.
+ StringRef getInstallPath() const { return InstallPath; }
+
+ /// Get the detected path to Rocm's bin directory.
+ // StringRef getBinPath() const { return BinPath; }
+
+ /// Get the detected Rocm Include path.
+ StringRef getIncludePath() const { return IncludePath; }
+
+ /// Get the detected Rocm library path.
+ StringRef getLibPath() const { return LibPath; }
+
+ /// Get the detected Rocm device library path.
+ StringRef getLibDevicePath() const { return LibDevicePath; }
+
+ StringRef getOCMLPath() const {
+ assert(!OCML.empty());
+ return OCML;
+ }
+
+ StringRef getOCKLPath() const {
+ assert(!OCKL.empty());
+ return OCKL;
+ }
+
+ StringRef getOpenCLPath() const {
+ assert(!OpenCL.empty());
+ return OpenCL;
+ }
+
+ StringRef getHIPPath() const {
+ assert(!HIP.empty());
+ return HIP;
+ }
+
+ StringRef getWavefrontSize64Path(bool Enabled) const {
+ return WavefrontSize64.get(Enabled);
+ }
+
+ StringRef getFiniteOnlyPath(bool Enabled) const {
+ return FiniteOnly.get(Enabled);
+ }
+
+ StringRef getUnsafeMathPath(bool Enabled) const {
+ return UnsafeMath.get(Enabled);
+ }
+
+ StringRef getDenormalsAreZeroPath(bool Enabled) const {
+ return DenormalsAreZero.get(Enabled);
+ }
+
+ StringRef getCorrectlyRoundedSqrtPath(bool Enabled) const {
+ return CorrectlyRoundedSqrt.get(Enabled);
+ }
+
+ /// Get libdevice file for given architecture
+ std::string getLibDeviceFile(StringRef Gpu) const {
+ return LibDeviceMap.lookup(Gpu);
+ }
+
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
+ void detectDeviceLibrary();
+ void detectHIPRuntime();
+
+ /// Get the values for --rocm-device-lib-path arguments
+ std::vector<std::string> getRocmDeviceLibPathArg() const {
+ return RocmDeviceLibPathArg;
+ }
+
+ /// Get the value for --rocm-path argument
+ StringRef getRocmPathArg() const { return RocmPathArg; }
+
+ /// Get the value for --hip-version argument
+ StringRef getHIPVersionArg() const { return HIPVersionArg; }
+
+ std::string getHIPVersion() const { return DetectedVersion; }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
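
The new ROCm detector header above introduces a small ConditionalLibrary helper: two bitcode variants of a device library, one of which is linked depending on a compile flag. A minimal standalone analogue of that pattern (plain C++; the file names below are illustrative stand-ins, not necessarily the exact device-library names):

// --- Illustrative sketch, not part of the diff above ---
#include <cassert>
#include <string>

// Analogue of RocmInstallationDetector::ConditionalLibrary: two variants of a
// device library, selected by a boolean compile option.
struct ConditionalLibrary {
  std::string On;
  std::string Off;

  bool isValid() const { return !On.empty() && !Off.empty(); }

  const std::string &get(bool Enabled) const {
    assert(isValid());
    return Enabled ? On : Off;
  }
};

int main() {
  // Hypothetical wavefront-size libraries; the real names come from the
  // detected ROCm device-library directory.
  ConditionalLibrary WavefrontSize64{"oclc_wavefrontsize64_on.bc",
                                     "oclc_wavefrontsize64_off.bc"};
  bool Wave64 = false; // e.g. derived from a -mwavefrontsize64 style flag
  assert(WavefrontSize64.get(Wave64) == "oclc_wavefrontsize64_off.bc");
  return 0;
}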
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
index fc4e2cf151ef..b8fdc87478bc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -41,7 +41,8 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -150,7 +151,8 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
static StringRef getSolarisLibSuffix(const llvm::Triple &Triple) {
@@ -244,7 +246,7 @@ void Solaris::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
new file mode 100644
index 000000000000..6ea405c0269c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -0,0 +1,119 @@
+//===--- VEToolchain.cpp - VE ToolChain Implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VEToolchain.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include <cstdlib> // ::getenv
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+/// VE tool chain
+VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Linux(D, Triple, Args) {
+ getProgramPaths().push_back("/opt/nec/ve/bin");
+ // ProgramPaths are found via 'PATH' environment variable.
+
+ // default file paths are:
+ // ${RESOURCEDIR}/lib/linux/ve (== getArchSpecificLibPath)
+ // /lib/../lib64
+ // /usr/lib/../lib64
+ // ${BINPATH}/../lib
+ // /lib
+ // /usr/lib
+ //
+ // These are OK for the host, but not for VE, so define them all
+ // from scratch here.
+ getFilePaths().clear();
+ getFilePaths().push_back(getArchSpecificLibPath());
+ getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
+}
+
+Tool *VEToolChain::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+Tool *VEToolChain::buildLinker() const {
+ return new tools::gnutools::Linker(*this);
+}
+
+bool VEToolChain::isPICDefault() const { return false; }
+
+bool VEToolChain::isPIEDefault() const { return false; }
+
+bool VEToolChain::isPICDefaultForced() const { return false; }
+
+bool VEToolChain::SupportsProfiling() const { return false; }
+
+bool VEToolChain::hasBlocksRuntime() const { return false; }
+
+void VEToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (DriverArgs.hasArg(options::OPT_nobuiltininc) &&
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
+ if (const char *cl_include_dir = getenv("NCC_C_INCLUDE_PATH")) {
+ SmallVector<StringRef, 4> Dirs;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+ ArrayRef<StringRef> DirVec(Dirs);
+ addSystemIncludes(DriverArgs, CC1Args, DirVec);
+ } else {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/opt/nec/ve/include");
+ }
+ }
+}
+
+void VEToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+ bool UseInitArrayDefault = true;
+ if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, UseInitArrayDefault))
+ CC1Args.push_back("-fno-use-init-array");
+}
+
+void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // TODO upstream VE libc++ patches
+ llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+}
+
+void VEToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // TODO upstream VE libc++ patches
+ llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+}
+
+llvm::ExceptionHandling
+VEToolChain::GetExceptionModel(const ArgList &Args) const {
+ // VE uses SjLj exceptions.
+ return llvm::ExceptionHandling::SjLj;
+}
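
The VE include handling above prefers directories from the NCC_C_INCLUDE_PATH environment variable and otherwise falls back to <sysroot>/opt/nec/ve/include. A simplified standalone sketch of that lookup (plain C++, assuming a POSIX ':' separator rather than llvm::sys::EnvPathSeparator, and omitting the driver plumbing):

// --- Illustrative sketch, not part of the diff above ---
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Split a PATH-style environment variable into individual include
// directories, falling back to a sysroot default when it is unset.
std::vector<std::string> veIncludeDirs(const std::string &SysRoot) {
  std::vector<std::string> Dirs;
  if (const char *Env = std::getenv("NCC_C_INCLUDE_PATH")) {
    std::stringstream SS(Env);
    std::string Dir;
    while (std::getline(SS, Dir, ':'))
      if (!Dir.empty())
        Dirs.push_back(Dir);
  } else {
    Dirs.push_back(SysRoot + "/opt/nec/ve/include");
  }
  return Dirs;
}

int main() {
  for (const auto &D : veIncludeDirs(""))
    std::cout << D << "\n";
  return 0;
}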
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
new file mode 100644
index 000000000000..59069c0a7595
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
@@ -0,0 +1,66 @@
+//===--- VEToolchain.h - VE ToolChain Implementations -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
+
+#include "Linux.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY VEToolChain : public Linux {
+public:
+ VEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+
+public:
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool SupportsProfiling() const override;
+ bool hasBlocksRuntime() const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ llvm::ExceptionHandling
+ GetExceptionModel(const llvm::opt::ArgList &Args) const override;
+
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
+ return ToolChain::CST_Libcxx;
+ }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+
+ const char *getDefaultLinker() const override { return "nld"; }
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 907f86b8233c..10168736400f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -40,7 +40,7 @@ std::string wasm::Linker::getLinkerPath(const ArgList &Args) const {
if (!UseLinker.empty()) {
if (llvm::sys::path::is_absolute(UseLinker) &&
llvm::sys::fs::can_execute(UseLinker))
- return UseLinker;
+ return std::string(UseLinker);
// Accept 'lld', and 'ld' as aliases for the default linker
if (UseLinker != "lld" && UseLinker != "ld")
@@ -62,6 +62,12 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Linker = Args.MakeArgString(getLinkerPath(Args));
ArgStringList CmdArgs;
+ CmdArgs.push_back("-m");
+ if (getToolChain().getTriple().isArch64Bit())
+ CmdArgs.push_back("wasm64");
+ else
+ CmdArgs.push_back("wasm32");
+
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("--strip-all");
@@ -69,8 +75,26 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_u);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ const char *Crt1 = "crt1.o";
+ const char *Entry = NULL;
+ if (const Arg *A = Args.getLastArg(options::OPT_mexec_model_EQ)) {
+ StringRef CM = A->getValue();
+ if (CM == "command") {
+ // Use default values.
+ } else if (CM == "reactor") {
+ Crt1 = "crt1-reactor.o";
+ Entry = "_initialize";
+ } else {
+ ToolChain.getDriver().Diag(diag::err_drv_invalid_argument_to_option)
+ << CM << A->getOption().getName();
+ }
+ }
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(Crt1)));
+ if (Entry) {
+ CmdArgs.push_back(Args.MakeArgString("--entry"));
+ CmdArgs.push_back(Args.MakeArgString(Entry));
+ }
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -90,7 +114,8 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Linker, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Linker, CmdArgs, Inputs));
// When optimizing, if wasm-opt is available, run it.
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -112,7 +137,9 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, WasmOpt, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, CmdArgs,
+ Inputs));
}
}
}
@@ -283,7 +310,7 @@ void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
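
The wasm linker changes above add -m wasm32/wasm64 selection and an -mexec-model= switch that picks the startup object and entry symbol. A small standalone sketch of that crt1/entry selection (plain C++17; the driver diagnostics and ArgList handling are omitted, and only the string values visible in the diff are used):

// --- Illustrative sketch, not part of the diff above ---
#include <iostream>
#include <optional>
#include <string>

struct ExecModelChoice {
  std::string Crt1;                 // startup object to link
  std::optional<std::string> Entry; // explicit --entry symbol, if any
};

// "command" keeps the defaults, "reactor" switches to crt1-reactor.o and
// forces _initialize; anything else would be rejected with a diagnostic.
std::optional<ExecModelChoice> chooseExecModel(const std::string &Model) {
  if (Model == "command")
    return ExecModelChoice{"crt1.o", std::nullopt};
  if (Model == "reactor")
    return ExecModelChoice{"crt1-reactor.o", std::string("_initialize")};
  return std::nullopt; // invalid argument to -mexec-model=
}

int main() {
  auto Reactor = chooseExecModel("reactor");
  if (Reactor && Reactor->Entry)
    std::cout << Reactor->Crt1 << " --entry " << *Reactor->Entry << "\n";
  return 0;
}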
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
index 67d5fce84576..616bfb5d3d0c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -18,10 +18,9 @@ namespace driver {
namespace tools {
namespace wasm {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- explicit Linker(const ToolChain &TC)
- : GnuTool("wasm::Linker", "linker", TC) {}
+ explicit Linker(const ToolChain &TC) : Tool("wasm::Linker", "linker", TC) {}
bool isLinkJob() const override { return true; }
bool hasIntegratedCPP() const override { return false; }
std::string getLinkerPath(const llvm::opt::ArgList &Args) const;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
index ba3a6d44adda..5030c73c7d82 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
@@ -52,7 +52,8 @@ void tools::XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -80,7 +81,8 @@ void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// XCore tool chain
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index 7d83be2521e7..399e26d8d64a 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -24,10 +24,19 @@ struct TypeInfo {
const char *Name;
const char *TempSuffix;
ID PreprocessedType;
- const llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> Phases;
+ class PhasesBitSet {
+ unsigned Bits = 0;
+
+ public:
+ constexpr PhasesBitSet(std::initializer_list<phases::ID> Phases) {
+ for (auto Id : Phases)
+ Bits |= 1 << Id;
+ }
+ bool contains(phases::ID Id) const { return Bits & (1 << Id); }
+ } Phases;
};
-static const TypeInfo TypeInfos[] = {
+static constexpr TypeInfo TypeInfos[] = {
#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, ...) \
{ NAME, TEMP_SUFFIX, TY_##PP_TYPE, { __VA_ARGS__ }, },
#include "clang/Driver/Types.def"
@@ -46,18 +55,18 @@ const char *types::getTypeName(ID Id) {
types::ID types::getPreprocessedType(ID Id) {
ID PPT = getInfo(Id).PreprocessedType;
- assert((llvm::is_contained(getInfo(Id).Phases, phases::Preprocess) !=
+ assert((getInfo(Id).Phases.contains(phases::Preprocess) !=
(PPT == TY_INVALID)) &&
"Unexpected Preprocess Type.");
return PPT;
}
-static bool isPrepeocessedModuleType(ID Id) {
+static bool isPreprocessedModuleType(ID Id) {
return Id == TY_CXXModule || Id == TY_PP_CXXModule;
}
types::ID types::getPrecompiledType(ID Id) {
- if (isPrepeocessedModuleType(Id))
+ if (isPreprocessedModuleType(Id))
return TY_ModuleFile;
if (onlyPrecompileType(Id))
return TY_PCH;
@@ -81,15 +90,9 @@ const char *types::getTypeTempSuffix(ID Id, bool CLMode) {
return getInfo(Id).TempSuffix;
}
-bool types::onlyAssembleType(ID Id) {
- return llvm::is_contained(getInfo(Id).Phases, phases::Assemble) &&
- !llvm::is_contained(getInfo(Id).Phases, phases::Compile) &&
- !llvm::is_contained(getInfo(Id).Phases, phases::Backend);
-}
-
bool types::onlyPrecompileType(ID Id) {
- return llvm::is_contained(getInfo(Id).Phases, phases::Precompile) &&
- !isPrepeocessedModuleType(Id);
+ return getInfo(Id).Phases.contains(phases::Precompile) &&
+ !isPreprocessedModuleType(Id);
}
bool types::canTypeBeUserSpecified(ID Id) {
@@ -275,6 +278,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("gch", TY_PCH)
.Case("hip", TY_HIP)
.Case("hpp", TY_CXXHeader)
+ .Case("hxx", TY_CXXHeader)
.Case("iim", TY_PP_CXXModule)
.Case("lib", TY_Object)
.Case("mii", TY_PP_ObjCXX)
@@ -295,24 +299,28 @@ types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
strcmp(Name, getInfo(Id).Name) == 0)
return Id;
}
-
+ // Accept "cu" as an alias for "cuda" for NVCC compatibility
+ if (strcmp(Name, "cu") == 0) {
+ return types::TY_CUDA;
+ }
return TY_INVALID;
}
-// FIXME: Why don't we just put this list in the defs file, eh.
-// FIXME: The list is now in Types.def but for now this function will verify
-// the old behavior and a subsequent change will delete most of the body.
-void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
- P = getInfo(Id).Phases;
- assert(0 < P.size() && "Not enough phases in list");
+llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+types::getCompilationPhases(ID Id, phases::ID LastPhase) {
+ llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> P;
+ const auto &Info = getInfo(Id);
+ for (int I = 0; I <= LastPhase; ++I)
+ if (Info.Phases.contains(static_cast<phases::ID>(I)))
+ P.push_back(static_cast<phases::ID>(I));
assert(P.size() <= phases::MaxNumberOfPhases && "Too many phases in list");
+ return P;
}
-void types::getCompilationPhases(const clang::driver::Driver &Driver,
- llvm::opt::DerivedArgList &DAL, ID Id,
- llvm::SmallVectorImpl<phases::ID> &P) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhaseList;
- types::getCompilationPhases(Id, PhaseList);
+llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+types::getCompilationPhases(const clang::driver::Driver &Driver,
+ llvm::opt::DerivedArgList &DAL, ID Id) {
+ phases::ID LastPhase;
// Filter to compiler mode. When the compiler is run as a preprocessor then
// compilation is not an option.
@@ -321,14 +329,12 @@ void types::getCompilationPhases(const clang::driver::Driver &Driver,
DAL.getLastArg(options::OPT__SLASH_EP) ||
DAL.getLastArg(options::OPT_M, options::OPT_MM) ||
DAL.getLastArg(options::OPT__SLASH_P))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Preprocess; });
+ LastPhase = phases::Preprocess;
// --precompile only runs up to precompilation.
// This is a clang extension and is not compatible with GCC.
else if (DAL.getLastArg(options::OPT__precompile))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Precompile; });
+ LastPhase = phases::Precompile;
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
else if (DAL.getLastArg(options::OPT_fsyntax_only) ||
@@ -340,21 +346,20 @@ void types::getCompilationPhases(const clang::driver::Driver &Driver,
DAL.getLastArg(options::OPT__migrate) ||
DAL.getLastArg(options::OPT__analyze) ||
DAL.getLastArg(options::OPT_emit_ast))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Compile; });
+ LastPhase = phases::Compile;
else if (DAL.getLastArg(options::OPT_S) ||
DAL.getLastArg(options::OPT_emit_llvm))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Backend; });
+ LastPhase = phases::Backend;
else if (DAL.getLastArg(options::OPT_c))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Assemble; });
+ LastPhase = phases::Assemble;
// Generally means, do every phase until Link.
else
- P = PhaseList;
+ LastPhase = phases::LastPhase;
+
+ return types::getCompilationPhases(Id, LastPhase);
}
ID types::lookupCXXTypeForCType(ID Id) {
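
The Types.cpp rework above replaces each type's SmallVector of phases with a constexpr bit set and computes the phase list on demand, cut off at a last phase chosen from the driver flags. A minimal standalone sketch of the same idea (plain C++14; the phase enum and its values are illustrative, not the real clang definitions):

// --- Illustrative sketch, not part of the diff above ---
#include <cassert>
#include <initializer_list>
#include <vector>

namespace phases {
enum ID { Preprocess, Precompile, Compile, Backend, Assemble, Link };
} // namespace phases

class PhasesBitSet {
  unsigned Bits = 0;

public:
  constexpr PhasesBitSet(std::initializer_list<phases::ID> Phases) {
    for (auto Id : Phases)
      Bits |= 1u << Id; // one bit per phase
  }
  bool contains(phases::ID Id) const { return Bits & (1u << Id); }
};

// Walk the phases in order and keep those the type participates in, up to a
// cutoff, much like the new getCompilationPhases(Id, LastPhase).
std::vector<phases::ID> phasesUpTo(const PhasesBitSet &Set, phases::ID Last) {
  std::vector<phases::ID> P;
  for (int I = 0; I <= Last; ++I)
    if (Set.contains(static_cast<phases::ID>(I)))
      P.push_back(static_cast<phases::ID>(I));
  return P;
}

int main() {
  constexpr PhasesBitSet CSource{phases::Preprocess, phases::Compile,
                                 phases::Backend, phases::Assemble,
                                 phases::Link};
  // With -S the driver stops after the Backend phase.
  auto P = phasesUpTo(CSource, phases::Backend);
  assert(P.size() == 3 && P.back() == phases::Backend);
  return 0;
}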
diff --git a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
index a2dd63f9eb77..f00c3906df97 100644
--- a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
@@ -13,10 +13,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang;
using namespace clang::driver;
@@ -32,157 +32,163 @@ constexpr const char *const XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
const llvm::Triple &Triple = TC.getTriple();
- if (Args.hasFlag(options::OPT_fxray_instrument,
- options::OPT_fnoxray_instrument, false)) {
- if (Triple.getOS() == llvm::Triple::Linux) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::arm:
- case llvm::Triple::aarch64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.isOSFreeBSD() ||
- Triple.isOSOpenBSD() ||
- Triple.isOSNetBSD() ||
- Triple.isMacOSX()) {
- if (Triple.getArch() != llvm::Triple::x86_64) {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::aarch64:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else {
+ if (!Args.hasFlag(options::OPT_fxray_instrument,
+ options::OPT_fno_xray_instrument, false))
+ return;
+ if (Triple.getOS() == llvm::Triple::Linux) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ break;
+ default:
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
-
- // Both XRay and -fpatchable-function-entry use
- // TargetOpcode::PATCHABLE_FUNCTION_ENTER.
- if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fxray-instrument" << A->getSpelling();
-
- XRayInstrument = true;
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_instruction_threshold_,
- options::OPT_fxray_instruction_threshold_EQ)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ } else if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() ||
+ Triple.isOSNetBSD() || Triple.isMacOSX()) {
+ if (Triple.getArch() != llvm::Triple::x86_64) {
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+ } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::aarch64:
+ break;
+ default:
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
+ } else {
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+
+ // Both XRay and -fpatchable-function-entry use
+ // TargetOpcode::PATCHABLE_FUNCTION_ENTER.
+ if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fxray-instrument" << A->getSpelling();
+
+ XRayInstrument = true;
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_instruction_threshold_,
+ options::OPT_fxray_instruction_threshold_EQ)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+
+ // By default, the back-end will not emit the lowering for XRay customevent
+ // calls if the function is not instrumented. In the future we will change
+ // this default to be the reverse, but in the meantime we're going to
+ // introduce the new functionality behind a flag.
+ if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
+ options::OPT_fno_xray_always_emit_customevents, false))
+ XRayAlwaysEmitCustomEvents = true;
+
+ if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
+ options::OPT_fno_xray_always_emit_typedevents, false))
+ XRayAlwaysEmitTypedEvents = true;
+
+ if (!Args.hasFlag(options::OPT_fxray_link_deps,
+ options::OPT_fnoxray_link_deps, true))
+ XRayRT = false;
- // By default, the back-end will not emit the lowering for XRay customevent
- // calls if the function is not instrumented. In the future we will change
- // this default to be the reverse, but in the meantime we're going to
- // introduce the new functionality behind a flag.
- if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
- options::OPT_fnoxray_always_emit_customevents, false))
- XRayAlwaysEmitCustomEvents = true;
-
- if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
- options::OPT_fnoxray_always_emit_typedevents, false))
- XRayAlwaysEmitTypedEvents = true;
-
- if (!Args.hasFlag(options::OPT_fxray_link_deps,
- options::OPT_fnoxray_link_deps, true))
- XRayRT = false;
-
- auto Bundles =
- Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
- if (Bundles.empty())
- InstrumentationBundle.Mask = XRayInstrKind::All;
- else
- for (const auto &B : Bundles) {
- llvm::SmallVector<StringRef, 2> BundleParts;
- llvm::SplitString(B, BundleParts, ",");
- for (const auto &P : BundleParts) {
- // TODO: Automate the generation of the string case table.
- auto Valid = llvm::StringSwitch<bool>(P)
- .Cases("none", "all", "function", "custom", true)
- .Default(false);
-
- if (!Valid) {
- D.Diag(clang::diag::err_drv_invalid_value)
- << "-fxray-instrumentation-bundle=" << P;
- continue;
- }
-
- auto Mask = parseXRayInstrValue(P);
- if (Mask == XRayInstrKind::None) {
- InstrumentationBundle.clear();
- break;
- }
-
- InstrumentationBundle.Mask |= Mask;
+ if (Args.hasFlag(options::OPT_fxray_ignore_loops,
+ options::OPT_fno_xray_ignore_loops, false))
+ XRayIgnoreLoops = true;
+
+ XRayFunctionIndex = Args.hasFlag(options::OPT_fxray_function_index,
+ options::OPT_fno_xray_function_index, true);
+
+ auto Bundles =
+ Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
+ if (Bundles.empty())
+ InstrumentationBundle.Mask = XRayInstrKind::All;
+ else
+ for (const auto &B : Bundles) {
+ llvm::SmallVector<StringRef, 2> BundleParts;
+ llvm::SplitString(B, BundleParts, ",");
+ for (const auto &P : BundleParts) {
+ // TODO: Automate the generation of the string case table.
+ auto Valid = llvm::StringSwitch<bool>(P)
+ .Cases("none", "all", "function", "function-entry",
+ "function-exit", "custom", true)
+ .Default(false);
+
+ if (!Valid) {
+ D.Diag(clang::diag::err_drv_invalid_value)
+ << "-fxray-instrumentation-bundle=" << P;
+ continue;
}
- }
- // Validate the always/never attribute files. We also make sure that they
- // are treated as actual dependencies.
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_always_instrument)) {
- if (D.getVFS().exists(Filename)) {
- AlwaysInstrumentFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
- }
+ auto Mask = parseXRayInstrValue(P);
+ if (Mask == XRayInstrKind::None) {
+ InstrumentationBundle.clear();
+ break;
+ }
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_never_instrument)) {
- if (D.getVFS().exists(Filename)) {
- NeverInstrumentFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ InstrumentationBundle.Mask |= Mask;
+ }
}
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_attr_list)) {
- if (D.getVFS().exists(Filename)) {
- AttrListFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
- }
+ // Validate the always/never attribute files. We also make sure that they
+ // are treated as actual dependencies.
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_always_instrument)) {
+ if (D.getVFS().exists(Filename)) {
+ AlwaysInstrumentFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
- // Get the list of modes we want to support.
- auto SpecifiedModes = Args.getAllArgValues(options::OPT_fxray_modes);
- if (SpecifiedModes.empty())
- llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
- else
- for (const auto &Arg : SpecifiedModes) {
- // Parse CSV values for -fxray-modes=...
- llvm::SmallVector<StringRef, 2> ModeParts;
- llvm::SplitString(Arg, ModeParts, ",");
- for (const auto &M : ModeParts)
- if (M == "none")
- Modes.clear();
- else if (M == "all")
- llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
- else
- Modes.push_back(M);
- }
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_never_instrument)) {
+ if (D.getVFS().exists(Filename)) {
+ NeverInstrumentFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
- // Then we want to sort and unique the modes we've collected.
- llvm::sort(Modes);
- Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_attr_list)) {
+ if (D.getVFS().exists(Filename)) {
+ AttrListFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
}
+
+ // Get the list of modes we want to support.
+ auto SpecifiedModes = Args.getAllArgValues(options::OPT_fxray_modes);
+ if (SpecifiedModes.empty())
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ for (const auto &Arg : SpecifiedModes) {
+ // Parse CSV values for -fxray-modes=...
+ llvm::SmallVector<StringRef, 2> ModeParts;
+ llvm::SplitString(Arg, ModeParts, ",");
+ for (const auto &M : ModeParts)
+ if (M == "none")
+ Modes.clear();
+ else if (M == "all")
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ Modes.push_back(std::string(M));
+ }
+
+ // Then we want to sort and unique the modes we've collected.
+ llvm::sort(Modes);
+ Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
}
void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
@@ -198,6 +204,12 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
if (XRayAlwaysEmitTypedEvents)
CmdArgs.push_back("-fxray-always-emit-typedevents");
+ if (XRayIgnoreLoops)
+ CmdArgs.push_back("-fxray-ignore-loops");
+
+ if (!XRayFunctionIndex)
+ CmdArgs.push_back("-fno-xray-function-index");
+
CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
Twine(InstructionThreshold)));
@@ -237,8 +249,14 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
} else if (InstrumentationBundle.empty()) {
Bundle += "none";
} else {
- if (InstrumentationBundle.has(XRayInstrKind::Function))
+ if (InstrumentationBundle.has(XRayInstrKind::FunctionEntry) &&
+ InstrumentationBundle.has(XRayInstrKind::FunctionExit))
Bundle += "function";
+ else if (InstrumentationBundle.has(XRayInstrKind::FunctionEntry))
+ Bundle += "function-entry";
+ else if (InstrumentationBundle.has(XRayInstrKind::FunctionExit))
+ Bundle += "function-exit";
+
if (InstrumentationBundle.has(XRayInstrKind::Custom))
Bundle += "custom";
if (InstrumentationBundle.has(XRayInstrKind::Typed))
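
The XRay changes above teach the bundle parser about "function-entry" and "function-exit" and make addArgs emit the combined "function" spelling only when both halves are instrumented. A loose standalone sketch of that reverse mapping (plain C++; the bit values and the comma handling are illustrative, not the real XRayInstrKind interface):

// --- Illustrative sketch, not part of the diff above ---
#include <iostream>
#include <string>

// Illustrative stand-ins for XRayInstrKind bits.
enum XRayInstrBits : unsigned {
  FunctionEntry = 1u << 0,
  FunctionExit = 1u << 1,
  Custom = 1u << 2,
  Typed = 1u << 3,
};

// Emit "function" only when both entry and exit are instrumented, otherwise
// name just the half that is, then append the remaining kinds.
std::string bundleString(unsigned Mask) {
  std::string Bundle;
  if ((Mask & FunctionEntry) && (Mask & FunctionExit))
    Bundle += "function";
  else if (Mask & FunctionEntry)
    Bundle += "function-entry";
  else if (Mask & FunctionExit)
    Bundle += "function-exit";
  if (Mask & Custom)
    Bundle += Bundle.empty() ? "custom" : ",custom";
  if (Mask & Typed)
    Bundle += Bundle.empty() ? "typed" : ",typed";
  return Bundle.empty() ? "none" : Bundle;
}

int main() {
  std::cout << bundleString(FunctionEntry | Custom) << "\n"; // function-entry,custom
  return 0;
}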
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index cd0eb0b4324a..15fbe3b6515d 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -587,9 +587,8 @@ void BreakableBlockComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
Text.data() - tokenAt(LineIndex).TokenText.data() + Split.first;
unsigned CharsToRemove = Split.second;
assert(LocalIndentAtLineBreak >= Prefix.size());
- std::string PrefixWithTrailingIndent = Prefix;
- for (unsigned I = 0; I < ContentIndent; ++I)
- PrefixWithTrailingIndent += " ";
+ std::string PrefixWithTrailingIndent = std::string(Prefix);
+ PrefixWithTrailingIndent.append(ContentIndent, ' ');
Whitespaces.replaceWhitespaceInToken(
tokenAt(LineIndex), BreakOffsetInToken, CharsToRemove, "",
PrefixWithTrailingIndent, InPPDirective, /*Newlines=*/1,
@@ -864,7 +863,8 @@ void BreakableLineCommentSection::reflow(unsigned LineIndex,
// tokens by the empty string.
Whitespaces.replaceWhitespace(
*Tokens[LineIndex], /*Newlines=*/0, /*Spaces=*/0,
- /*StartOfTokenColumn=*/StartColumn, /*InPPDirective=*/false);
+ /*StartOfTokenColumn=*/StartColumn, /*IsAligned=*/true,
+ /*InPPDirective=*/false);
} else if (LineIndex > 0) {
// In case we're reflowing after the '\' in:
//
@@ -932,6 +932,7 @@ void BreakableLineCommentSection::adaptStartOfLine(
/*Newlines=*/1,
/*Spaces=*/LineColumn,
/*StartOfTokenColumn=*/LineColumn,
+ /*IsAligned=*/true,
/*InPPDirective=*/false);
}
if (OriginalPrefix[LineIndex] != Prefix[LineIndex]) {
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 2ff6e5ec2344..b1497651a8fe 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -329,6 +329,11 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
bool ContinuationIndenter::mustBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.CanBreakBefore &&
+ Current.is(TT_LambdaLBrace) && Previous.isNot(TT_LineComment)) {
+ auto LambdaBodyLength = getLengthToMatchingParen(Current, State.Stack);
+ return (LambdaBodyLength > getColumnLimit(State));
+ }
if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
return true;
if (State.Stack.back().BreakBeforeClosingBrace &&
@@ -337,10 +342,16 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
return true;
if (Style.Language == FormatStyle::LK_ObjC &&
+ Style.ObjCBreakBeforeNestedBlockParam &&
Current.ObjCSelectorNameParts > 1 &&
Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)) {
return true;
}
+ // Avoid producing inconsistent states by requiring breaks where they are not
+ // permitted for C# generic type constraints.
+ if (State.Stack.back().IsCSharpGenericTypeConstraint &&
+ Previous.isNot(TT_CSharpGenericTypeConstraintComma))
+ return false;
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
Style.isCpp() &&
@@ -356,6 +367,12 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
!Current.isOneOf(tok::r_paren, tok::r_brace))
return true;
+ if (State.Stack.back().IsChainedConditional &&
+ ((Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
+ Current.is(tok::colon)) ||
+ (!Style.BreakBeforeTernaryOperators && Previous.is(TT_ConditionalExpr) &&
+ Previous.is(tok::colon))))
+ return true;
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
(Previous.is(TT_ArrayInitializerLSquare) &&
Previous.ParameterCount > 1) ||
@@ -412,7 +429,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
State.Stack.back().BreakBeforeParameter && Current.CanBreakBefore)
return true;
- if (State.Column <= NewLineColumn)
+ if (!State.Line->First->is(tok::kw_enum) && State.Column <= NewLineColumn)
return false;
if (Style.AlwaysBreakBeforeMultilineStrings &&
@@ -629,9 +646,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
State.Stack.back().NoLineBreak = true;
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
+ !State.Stack.back().IsCSharpGenericTypeConstraint &&
Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
- (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
+ (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit)) {
State.Stack.back().Indent = State.Column + Spaces;
+ State.Stack.back().IsAligned = true;
+ }
if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
State.Stack.back().NoLineBreak = true;
if (startsSegmentOfBuilderTypeCall(Current) &&
@@ -673,7 +693,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// does not help.
bool HasTwoOperands =
P->OperatorIndex == 0 && !P->NextOperator && !P->is(TT_ConditionalExpr);
- if ((!BreakBeforeOperator && !(HasTwoOperands && Style.AlignOperands)) ||
+ if ((!BreakBeforeOperator &&
+ !(HasTwoOperands &&
+ Style.AlignOperands != FormatStyle::OAS_DontAlign)) ||
(!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
State.Stack.back().NoLineBreakInOperand = true;
}
@@ -710,6 +732,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
} else if (Previous.is(TT_InheritanceColon)) {
State.Stack.back().Indent = State.Column;
State.Stack.back().LastSpace = State.Column;
+ } else if (Current.is(TT_CSharpGenericTypeConstraintColon)) {
+ State.Stack.back().ColonPos = State.Column;
} else if (Previous.opensScope()) {
// If a function has a trailing call, indent all parameters from the
// opening parenthesis. This avoids confusing indents like:
@@ -844,6 +868,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool ContinuePPDirective =
State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
+ State.Stack.back().IsAligned,
ContinuePPDirective);
}
@@ -861,8 +886,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// Any break on this level means that the parent level has been broken
// and we need to avoid bin packing there.
bool NestedBlockSpecialCase =
- !Style.isCpp() && Current.is(tok::r_brace) && State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].NestedBlockInlined;
+ (!Style.isCpp() && Current.is(tok::r_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined) ||
+ (Style.Language == FormatStyle::LK_ObjC && Current.is(tok::r_brace) &&
+ State.Stack.size() > 1 && !Style.ObjCBreakBeforeNestedBlockParam);
if (!NestedBlockSpecialCase)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
@@ -917,7 +944,13 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (!State.NextToken || !State.NextToken->Previous)
return 0;
+
FormatToken &Current = *State.NextToken;
+
+ if (State.Stack.back().IsCSharpGenericTypeConstraint &&
+ Current.isNot(TT_CSharpGenericTypeConstraint))
+ return State.Stack.back().ColonPos + 2;
+
const FormatToken &Previous = *Current.Previous;
// If we are continuing an expression, we want to use the continuation indent.
unsigned ContinuationIndent =
@@ -997,8 +1030,28 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (State.Stack.back().QuestionColumn != 0 &&
((NextNonComment->is(tok::colon) &&
NextNonComment->is(TT_ConditionalExpr)) ||
- Previous.is(TT_ConditionalExpr)))
+ Previous.is(TT_ConditionalExpr))) {
+ if (((NextNonComment->is(tok::colon) && NextNonComment->Next &&
+ !NextNonComment->Next->FakeLParens.empty() &&
+ NextNonComment->Next->FakeLParens.back() == prec::Conditional) ||
+ (Previous.is(tok::colon) && !Current.FakeLParens.empty() &&
+ Current.FakeLParens.back() == prec::Conditional)) &&
+ !State.Stack.back().IsWrappedConditional) {
+ // NOTE: we may tweak this slightly:
+ // * not remove the 'lead' ContinuationIndentWidth
+ // * always un-indent by the operator when
+ // BreakBeforeTernaryOperators=true
+ unsigned Indent = State.Stack.back().Indent;
+ if (Style.AlignOperands != FormatStyle::OAS_DontAlign) {
+ Indent -= Style.ContinuationIndentWidth;
+ }
+ if (Style.BreakBeforeTernaryOperators &&
+ State.Stack.back().UnindentOperator)
+ Indent -= 2;
+ return Indent;
+ }
return State.Stack.back().QuestionColumn;
+ }
if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
return State.Stack.back().VariablePos;
if ((PreviousNonComment &&
@@ -1040,6 +1093,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
if (State.Stack.back().StartOfArraySubscripts != 0)
return State.Stack.back().StartOfArraySubscripts;
+ else if (Style.isCSharp()) // C# allows `["key"] = value` inside object
+ // initializers.
+ return State.Stack.back().Indent;
return ContinuationIndent;
}
@@ -1071,6 +1127,13 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return ContinuationIndent;
if (Current.is(TT_ProtoExtensionLSquare))
return State.Stack.back().Indent;
+ if (Current.isBinaryOperator() && State.Stack.back().UnindentOperator)
+ return State.Stack.back().Indent - Current.Tok.getLength() -
+ Current.SpacesRequiredBefore;
+ if (Current.isOneOf(tok::comment, TT_BlockComment, TT_LineComment) &&
+ NextNonComment->isBinaryOperator() && State.Stack.back().UnindentOperator)
+ return State.Stack.back().Indent - NextNonComment->Tok.getLength() -
+ NextNonComment->SpacesRequiredBefore;
if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma))
// Ensure that we fall back to the continuation indent width instead of
@@ -1079,14 +1142,28 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return State.Stack.back().Indent;
}
+static bool hasNestedBlockInlined(const FormatToken *Previous,
+ const FormatToken &Current,
+ const FormatStyle &Style) {
+ if (Previous->isNot(tok::l_paren))
+ return true;
+ if (Previous->ParameterCount > 1)
+ return true;
+
+ // Also a nested block if it contains a lambda inside a function with one parameter.
+ return (Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare));
+}
+
unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
bool DryRun, bool Newline) {
assert(State.Stack.size());
const FormatToken &Current = *State.NextToken;
+ if (Current.is(TT_CSharpGenericTypeConstraint))
+ State.Stack.back().IsCSharpGenericTypeConstraint = true;
if (Current.isOneOf(tok::comma, TT_BinaryOperator))
State.Stack.back().NoLineBreakInOperand = false;
- if (Current.is(TT_InheritanceColon))
+ if (Current.isOneOf(TT_InheritanceColon, TT_CSharpGenericTypeConstraintColon))
State.Stack.back().AvoidBinPacking = true;
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
if (State.Stack.back().FirstLessLess == 0)
@@ -1102,6 +1179,11 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.is(TT_ArraySubscriptLSquare) &&
State.Stack.back().StartOfArraySubscripts == 0)
State.Stack.back().StartOfArraySubscripts = State.Column;
+ if (Current.is(TT_ConditionalExpr) && Current.is(tok::question) &&
+ ((Current.MustBreakBefore) ||
+ (Current.getNextNonComment() &&
+ Current.getNextNonComment()->MustBreakBefore)))
+ State.Stack.back().IsWrappedConditional = true;
if (Style.BreakBeforeTernaryOperators && Current.is(tok::question))
State.Stack.back().QuestionColumn = State.Column;
if (!Style.BreakBeforeTernaryOperators && Current.isNot(tok::colon)) {
@@ -1181,8 +1263,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
!Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
State.Stack.back().NestedBlockInlined =
- !Newline &&
- (Previous->isNot(tok::l_paren) || Previous->ParameterCount > 1);
+ !Newline && hasNestedBlockInlined(Previous, Current, Style);
}
moveStatePastFakeLParens(State, Newline);
@@ -1233,7 +1314,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(Previous && (Previous->opensScope() ||
Previous->isOneOf(tok::semi, tok::kw_return) ||
(Previous->getPrecedence() == prec::Assignment &&
- Style.AlignOperands) ||
+ Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
Previous->is(TT_ObjCMethodExpr)));
for (SmallVectorImpl<prec::Level>::const_reverse_iterator
I = Current.FakeLParens.rbegin(),
@@ -1243,6 +1324,9 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
NewParenState.LastOperatorWrapped = true;
+ NewParenState.IsChainedConditional = false;
+ NewParenState.IsWrappedConditional = false;
+ NewParenState.UnindentOperator = false;
NewParenState.NoLineBreak =
NewParenState.NoLineBreak || State.Stack.back().NoLineBreakInOperand;
@@ -1254,14 +1338,27 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// a builder type call after 'return' or, if the alignment after opening
// brackets is disabled.
if (!Current.isTrailingComment() &&
- (Style.AlignOperands || *I < prec::Assignment) &&
+ (Style.AlignOperands != FormatStyle::OAS_DontAlign ||
+ *I < prec::Assignment) &&
(!Previous || Previous->isNot(tok::kw_return) ||
(Style.Language != FormatStyle::LK_Java && *I > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
- *I != prec::Comma || Current.NestingLevel == 0))
+ *I != prec::Comma || Current.NestingLevel == 0)) {
NewParenState.Indent =
std::max(std::max(State.Column, NewParenState.Indent),
State.Stack.back().LastSpace);
+ }
+
+ // If BreakBeforeBinaryOperators is set, un-indent a bit to account for
+ // the operator and keep the operands aligned
+ if (Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator &&
+ Previous &&
+ (Previous->getPrecedence() == prec::Assignment ||
+ Previous->is(tok::kw_return) ||
+ (*I == prec::Conditional && Previous->is(tok::question) &&
+ Previous->is(TT_ConditionalExpr))) &&
+ !Newline)
+ NewParenState.UnindentOperator = true;
// Do not indent relative to the fake parentheses inserted for "." or "->".
// This is a special case to make the following two statements consistent:
@@ -1275,14 +1372,21 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
NewParenState.StartOfFunctionCall = State.Column;
- // Always indent conditional expressions. Never indent expression where
- // the 'operator' is ',', ';' or an assignment (i.e. *I <=
- // prec::Assignment) as those have different indentation rules. Indent
- // other expression, unless the indentation needs to be skipped.
- if (*I == prec::Conditional ||
- (!SkipFirstExtraIndent && *I > prec::Assignment &&
- !Current.isTrailingComment()))
+ // Indent conditional expressions, unless they are chained "else-if"
+ // conditionals. Never indent expressions where the 'operator' is ',', ';' or
+ // an assignment (i.e. *I <= prec::Assignment), as those have different
+ // indentation rules. Indent other expressions, unless the indentation needs
+ // to be skipped.
+ if (*I == prec::Conditional && Previous && Previous->is(tok::colon) &&
+ Previous->is(TT_ConditionalExpr) && I == Current.FakeLParens.rbegin() &&
+ !State.Stack.back().IsWrappedConditional) {
+ NewParenState.IsChainedConditional = true;
+ NewParenState.UnindentOperator = State.Stack.back().UnindentOperator;
+ } else if (*I == prec::Conditional ||
+ (!SkipFirstExtraIndent && *I > prec::Assignment &&
+ !Current.isTrailingComment())) {
NewParenState.Indent += Style.ContinuationIndentWidth;
+ }
if ((Previous && !Previous->opensScope()) || *I != prec::Comma)
NewParenState.BreakBeforeParameter = false;
State.Stack.push_back(NewParenState);
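The IsChainedConditional branch above is what lets a chain of ternaries be laid out like an else-if ladder instead of indenting one more level per false branch. Roughly, for C++ input with BreakBeforeTernaryOperators (illustrative; exact columns depend on AlignOperands and the rest of the style):

    const char *Unit = IsMicro ? "micro"
                     : IsMilli ? "milli"
                     : IsKilo  ? "kilo"
                               : "mega";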
@@ -1308,6 +1412,11 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (!Current.opensScope())
return;
+ // Don't allow '<' or '(' in C# generic type constraints to start new scopes.
+ if (Current.isOneOf(tok::less, tok::l_paren) &&
+ State.Stack.back().IsCSharpGenericTypeConstraint)
+ return;
+
if (Current.MatchingParen && Current.BlockKind == BK_Block) {
moveStateToNewBlock(State);
return;
@@ -1372,6 +1481,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(State.Line->Type == LT_ObjCDecl && ObjCBinPackProtocolList);
AvoidBinPacking =
+ (State.Stack.back().IsCSharpGenericTypeConstraint) ||
(Style.Language == FormatStyle::LK_JavaScript && EndsInComma) ||
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
@@ -1380,7 +1490,8 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(!BinPackInconclusiveFunctions &&
Current.PackingKind == PPK_Inconclusive)));
- if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen) {
+ if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen &&
+ Style.ObjCBreakBeforeNestedBlockParam) {
if (Style.ColumnLimit) {
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
@@ -1418,7 +1529,22 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
ParenState(&Current, NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
- State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
+ State.Stack.back().HasMultipleNestedBlocks =
+ (Current.BlockParameterCount > 1);
+
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.Next != nullptr &&
+ Current.Tok.is(tok::l_paren)) {
+ // Search for any parameter that is a lambda
+ FormatToken const *next = Current.Next;
+ while (next != nullptr) {
+ if (next->is(TT_LambdaLSquare)) {
+ State.Stack.back().HasMultipleNestedBlocks = true;
+ break;
+ }
+ next = next->Next;
+ }
+ }
+
State.Stack.back().IsInsideObjCArrayLiteral =
Current.is(TT_ArrayInitializerLSquare) && Current.Previous &&
Current.Previous->is(tok::at);
@@ -1513,8 +1639,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned OldSuffixSize = 2 + OldDelimiter.size();
// We create a virtual text environment which expects a null-terminated
// string, so we cannot use StringRef.
- std::string RawText =
- Current.TokenText.substr(OldPrefixSize).drop_back(OldSuffixSize);
+ std::string RawText = std::string(
+ Current.TokenText.substr(OldPrefixSize).drop_back(OldSuffixSize));
if (NewDelimiter != OldDelimiter) {
// Don't update to the canonical delimiter 'deli' if ')deli"' occurs in the
// raw string.
@@ -1760,7 +1886,7 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
LineState &State, bool AllowBreak) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
if (Current.isStringLiteral()) {
- // FIXME: String literal breaking is currently disabled for C#,Java and
+ // FIXME: String literal breaking is currently disabled for C#, Java and
// JavaScript, as it requires strings to be merged using "+" which we
// don't support.
if (Style.Language == FormatStyle::LK_Java ||
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
index 11df619e0f40..b1b2611263a9 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
@@ -202,13 +202,16 @@ struct ParenState {
ParenState(const FormatToken *Tok, unsigned Indent, unsigned LastSpace,
bool AvoidBinPacking, bool NoLineBreak)
: Tok(Tok), Indent(Indent), LastSpace(LastSpace),
- NestedBlockIndent(Indent), BreakBeforeClosingBrace(false),
- AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
- NoLineBreak(NoLineBreak), NoLineBreakInOperand(false),
- LastOperatorWrapped(true), ContainsLineBreak(false),
- ContainsUnwrappedBuilder(false), AlignColons(true),
- ObjCSelectorNameFound(false), HasMultipleNestedBlocks(false),
- NestedBlockInlined(false), IsInsideObjCArrayLiteral(false) {}
+ NestedBlockIndent(Indent), IsAligned(false),
+ BreakBeforeClosingBrace(false), AvoidBinPacking(AvoidBinPacking),
+ BreakBeforeParameter(false), NoLineBreak(NoLineBreak),
+ NoLineBreakInOperand(false), LastOperatorWrapped(true),
+ ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
+ AlignColons(true), ObjCSelectorNameFound(false),
+ HasMultipleNestedBlocks(false), NestedBlockInlined(false),
+ IsInsideObjCArrayLiteral(false), IsCSharpGenericTypeConstraint(false),
+ IsChainedConditional(false), IsWrappedConditional(false),
+ UnindentOperator(false) {}
/// \brief The token opening this parenthesis level, or nullptr if this level
/// is opened by fake parenthesis.
@@ -264,6 +267,9 @@ struct ParenState {
/// Used to align further variables if necessary.
unsigned VariablePos = 0;
+ /// Whether this block's indentation is used for alignment.
+ bool IsAligned : 1;
+
/// Whether a newline needs to be inserted before the block's closing
/// brace.
///
@@ -329,6 +335,20 @@ struct ParenState {
/// array literal.
bool IsInsideObjCArrayLiteral : 1;
+ bool IsCSharpGenericTypeConstraint : 1;
+
+ /// \brief true if the current \c ParenState represents the false branch of
+ /// a chained conditional expression (e.g. else-if)
+ bool IsChainedConditional : 1;
+
+ /// \brief true if the conditional was wrapped on the first operator (the
+ /// question mark)
+ bool IsWrappedConditional : 1;
+
+ /// \brief Indicates the indent should be reduced by the length of the
+ /// operator.
+ bool UnindentOperator : 1;
+
bool operator<(const ParenState &Other) const {
if (Indent != Other.Indent)
return Indent < Other.Indent;
@@ -338,6 +358,8 @@ struct ParenState {
return NestedBlockIndent < Other.NestedBlockIndent;
if (FirstLessLess != Other.FirstLessLess)
return FirstLessLess < Other.FirstLessLess;
+ if (IsAligned != Other.IsAligned)
+ return IsAligned;
if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
return BreakBeforeClosingBrace;
if (QuestionColumn != Other.QuestionColumn)
@@ -366,6 +388,14 @@ struct ParenState {
return ContainsUnwrappedBuilder;
if (NestedBlockInlined != Other.NestedBlockInlined)
return NestedBlockInlined;
+ if (IsCSharpGenericTypeConstraint != Other.IsCSharpGenericTypeConstraint)
+ return IsCSharpGenericTypeConstraint;
+ if (IsChainedConditional != Other.IsChainedConditional)
+ return IsChainedConditional;
+ if (IsWrappedConditional != Other.IsWrappedConditional)
+ return IsWrappedConditional;
+ if (UnindentOperator != Other.UnindentOperator)
+ return UnindentOperator;
return false;
}
};
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index f12bca48c630..0d277a6464af 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -14,6 +14,7 @@
#include "clang/Format/Format.h"
#include "AffectedRangeManager.h"
+#include "BreakableToken.h"
#include "ContinuationIndenter.h"
#include "FormatInternal.h"
#include "FormatTokenLexer.h"
@@ -93,6 +94,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
IO.enumCase(Value, "ForContinuationAndIndentation",
FormatStyle::UT_ForContinuationAndIndentation);
+ IO.enumCase(Value, "AlignWithSpaces", FormatStyle::UT_AlignWithSpaces);
}
};
@@ -157,6 +159,13 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BinPackStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::TrailingCommaStyle> {
+ static void enumeration(IO &IO, FormatStyle::TrailingCommaStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::TCS_None);
+ IO.enumCase(Value, "Wrapped", FormatStyle::TCS_Wrapped);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
IO.enumCase(Value, "All", FormatStyle::BOS_All);
@@ -187,11 +196,13 @@ struct ScalarEnumerationTraits<
static void
enumeration(IO &IO,
FormatStyle::BraceWrappingAfterControlStatementStyle &Value) {
- IO.enumCase(Value, "false", FormatStyle::BWACS_Never);
- IO.enumCase(Value, "true", FormatStyle::BWACS_Always);
IO.enumCase(Value, "Never", FormatStyle::BWACS_Never);
IO.enumCase(Value, "MultiLine", FormatStyle::BWACS_MultiLine);
IO.enumCase(Value, "Always", FormatStyle::BWACS_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BWACS_Never);
+ IO.enumCase(Value, "true", FormatStyle::BWACS_Always);
}
};
@@ -225,6 +236,17 @@ struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::IndentExternBlockStyle> {
+ static void enumeration(IO &IO, FormatStyle::IndentExternBlockStyle &Value) {
+ IO.enumCase(Value, "AfterExternBlock", FormatStyle::IEBS_AfterExternBlock);
+ IO.enumCase(Value, "Indent", FormatStyle::IEBS_Indent);
+ IO.enumCase(Value, "NoIndent", FormatStyle::IEBS_NoIndent);
+ IO.enumCase(Value, "true", FormatStyle::IEBS_Indent);
+ IO.enumCase(Value, "false", FormatStyle::IEBS_NoIndent);
+ }
+};
+
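The new IndentExternBlock option registered above controls the body of language-linkage blocks separately from BraceWrapping.AfterExternBlock. A hedged sketch of the two explicit values:

    // IndentExternBlock: NoIndent
    extern "C" {
    void HandleRequest(int Fd);
    }

    // IndentExternBlock: Indent
    extern "C" {
      void HandleRequest(int Fd);
    }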
+template <>
struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::RTBS_None);
@@ -300,6 +322,19 @@ struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::OperandAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::OperandAlignmentStyle &Value) {
+ IO.enumCase(Value, "DontAlign", FormatStyle::OAS_DontAlign);
+ IO.enumCase(Value, "Align", FormatStyle::OAS_Align);
+ IO.enumCase(Value, "AlignAfterOperator",
+ FormatStyle::OAS_AlignAfterOperator);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::OAS_Align);
+ IO.enumCase(Value, "false", FormatStyle::OAS_DontAlign);
+ }
+};
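For reference, the practical difference between the two non-legacy values registered above shows up once binary operators are wrapped (BreakBeforeBinaryOperators set to NonAssignment or All). Illustrative output only; exact columns depend on the rest of the style:

    // AlignOperands: Align -- the wrapped operator sits at the column of the
    // first operand, so the operands that follow it are pushed right.
    int Sum = FirstLongOperandName
              + SecondLongOperandName
              + ThirdLongOperandName;

    // AlignOperands: AlignAfterOperator -- the operator is un-indented by its
    // own width so the operands themselves stay aligned.
    int Sum = FirstLongOperandName
            + SecondLongOperandName
            + ThirdLongOperandName;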
+
template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
@@ -319,6 +354,8 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
IO.enumCase(Value, "ControlStatements",
FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
+ FormatStyle::SBPO_ControlStatementsExceptForEachMacros);
IO.enumCase(Value, "NonEmptyParentheses",
FormatStyle::SBPO_NonEmptyParentheses);
IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
@@ -378,6 +415,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
+ IO.mapOptional("AlignConsecutiveBitFields",
+ Style.AlignConsecutiveBitFields);
IO.mapOptional("AlignConsecutiveDeclarations",
Style.AlignConsecutiveDeclarations);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
@@ -389,6 +428,8 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowAllConstructorInitializersOnNextLine);
IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
Style.AllowAllParametersOfDeclarationOnNextLine);
+ IO.mapOptional("AllowShortEnumsOnASingleLine",
+ Style.AllowShortEnumsOnASingleLine);
IO.mapOptional("AllowShortBlocksOnASingleLine",
Style.AllowShortBlocksOnASingleLine);
IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
@@ -480,11 +521,14 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IncludeIsMainSourceRegex",
Style.IncludeStyle.IncludeIsMainSourceRegex);
IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
+ IO.mapOptional("IndentCaseBlocks", Style.IndentCaseBlocks);
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
+ IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("InsertTrailingCommas", Style.InsertTrailingCommas);
IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
@@ -497,6 +541,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("NamespaceMacros", Style.NamespaceMacros);
IO.mapOptional("ObjCBinPackProtocolList", Style.ObjCBinPackProtocolList);
IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
+ IO.mapOptional("ObjCBreakBeforeNestedBlockParam",
+ Style.ObjCBreakBeforeNestedBlockParam);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
IO.mapOptional("ObjCSpaceBeforeProtocolList",
Style.ObjCSpaceBeforeProtocolList);
@@ -553,6 +599,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
IO.mapOptional("UseCRLF", Style.UseCRLF);
IO.mapOptional("UseTab", Style.UseTab);
+ IO.mapOptional("WhitespaceSensitiveMacros",
+ Style.WhitespaceSensitiveMacros);
}
};
@@ -570,6 +618,8 @@ template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
IO.mapOptional("AfterExternBlock", Wrapping.AfterExternBlock);
IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
+ IO.mapOptional("BeforeLambdaBody", Wrapping.BeforeLambdaBody);
+ IO.mapOptional("BeforeWhile", Wrapping.BeforeWhile);
IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
IO.mapOptional("SplitEmptyFunction", Wrapping.SplitEmptyFunction);
IO.mapOptional("SplitEmptyRecord", Wrapping.SplitEmptyRecord);
@@ -643,6 +693,8 @@ std::string ParseErrorCategory::message(int EV) const {
return "Invalid argument";
case ParseError::Unsuitable:
return "Unsuitable";
+ case ParseError::BinPackTrailingCommaConflict:
+ return "trailing comma insertion cannot be used with bin packing";
}
llvm_unreachable("unexpected parse error");
}
@@ -651,12 +703,24 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
return Style;
FormatStyle Expanded = Style;
- Expanded.BraceWrapping = {false, false, FormatStyle::BWACS_Never,
- false, false, false,
- false, false, false,
- false, false, false,
- false, true, true,
- true};
+ Expanded.BraceWrapping = {/*AfterCaseLabel=*/false,
+ /*AfterClass=*/false,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Never,
+ /*AfterEnum=*/false,
+ /*AfterFunction=*/false,
+ /*AfterNamespace=*/false,
+ /*AfterObjCDeclaration=*/false,
+ /*AfterStruct=*/false,
+ /*AfterUnion=*/false,
+ /*AfterExternBlock=*/false,
+ /*BeforeCatch=*/false,
+ /*BeforeElse=*/false,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/false,
+ /*IndentBraces=*/false,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
switch (Style.BreakBeforeBraces) {
case FormatStyle::BS_Linux:
Expanded.BraceWrapping.AfterClass = true;
@@ -670,6 +734,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.SplitEmptyFunction = true;
Expanded.BraceWrapping.SplitEmptyRecord = false;
break;
@@ -689,6 +754,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
break;
@@ -702,16 +768,32 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterObjCDeclaration = true;
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
+ Expanded.BraceWrapping.BeforeLambdaBody = true;
break;
case FormatStyle::BS_GNU:
- Expanded.BraceWrapping = {true, true, FormatStyle::BWACS_Always,
- true, true, true,
- true, true, true,
- true, true, true,
- true, true, true,
- true};
+ Expanded.BraceWrapping = {
+ /*AfterCaseLabel=*/true,
+ /*AfterClass=*/true,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Always,
+ /*AfterEnum=*/true,
+ /*AfterFunction=*/true,
+ /*AfterNamespace=*/true,
+ /*AfterObjCDeclaration=*/true,
+ /*AfterStruct=*/true,
+ /*AfterUnion=*/true,
+ /*AfterExternBlock=*/true,
+ /*BeforeCatch=*/true,
+ /*BeforeElse=*/true,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/true,
+ /*IndentBraces=*/true,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
break;
case FormatStyle::BS_WebKit:
Expanded.BraceWrapping.AfterFunction = true;
@@ -728,14 +810,16 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
- LLVMStyle.AlignOperands = true;
+ LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AlignConsecutiveAssignments = false;
+ LLVMStyle.AlignConsecutiveBitFields = false;
LLVMStyle.AlignConsecutiveDeclarations = false;
LLVMStyle.AlignConsecutiveMacros = false;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllConstructorInitializersOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+ LLVMStyle.AllowShortEnumsOnASingleLine = true;
LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Never;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
@@ -751,12 +835,25 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
- LLVMStyle.BraceWrapping = {false, false, FormatStyle::BWACS_Never,
- false, false, false,
- false, false, false,
- false, false, false,
- false, true, true,
- true};
+ LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
+ /*AfterClass=*/false,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Never,
+ /*AfterEnum=*/false,
+ /*AfterFunction=*/false,
+ /*AfterNamespace=*/false,
+ /*AfterObjCDeclaration=*/false,
+ /*AfterStruct=*/false,
+ /*AfterUnion=*/false,
+ /*AfterExternBlock=*/false,
+ /*BeforeCatch=*/false,
+ /*BeforeElse=*/false,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/false,
+ /*IndentBraces=*/false,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
+ LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
@@ -782,10 +879,12 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IndentCaseLabels = false;
+ LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
+ LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
LLVMStyle.TabWidth = 8;
@@ -794,6 +893,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
LLVMStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Auto;
LLVMStyle.ObjCBlockIndentWidth = 2;
+ LLVMStyle.ObjCBreakBeforeNestedBlockParam = true;
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
@@ -835,6 +935,9 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SortUsingDeclarations = true;
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("PP_STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
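The rationale for the WhitespaceSensitiveMacros defaults added just above: for stringizing macros, re-spacing the argument changes the produced string literal, so calls to these macros are tagged TT_UntouchableMacroFunc and left untouched. A minimal illustration (hypothetical user code, not part of the patch):

    #define STRINGIZE(x) #x
    // Reformatting the argument would change the resulting literal:
    const char *Spaced  = STRINGIZE(a + b); // "a + b"
    const char *Compact = STRINGIZE(a+b);   // "a+b"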
// Defaults that differ when not C++.
if (Language == FormatStyle::LK_TableGen) {
@@ -911,6 +1014,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
"PARSE_TEXT_PROTO",
"ParseTextOrDie",
"ParseTextProtoOrDie",
+ "ParseTestProto",
+ "ParsePartialTestProto",
},
/*CanonicalDelimiter=*/"",
/*BasedOnStyle=*/"google",
@@ -924,7 +1029,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
if (Language == FormatStyle::LK_Java) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignOperands = FormatStyle::OAS_DontAlign;
GoogleStyle.AlignTrailingComments = false;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
@@ -935,13 +1040,18 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.SpacesBeforeTrailingComments = 1;
} else if (Language == FormatStyle::LK_JavaScript) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignOperands = FormatStyle::OAS_DontAlign;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ // TODO: still under discussion whether to switch to SLS_All.
+ GoogleStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.BreakBeforeTernaryOperators = false;
- // taze:, triple slash directives (`/// <...`), @see, which is commonly
- // followed by overlong URLs.
- GoogleStyle.CommentPragmas = "(taze:|^/[ \t]*<|@see)";
+ // taze:, triple slash directives (`/// <...`), tslint:, and @see, which is
+ // commonly followed by overlong URLs.
+ GoogleStyle.CommentPragmas = "(taze:|^/[ \t]*<|tslint:|@see)";
+ // TODO: enable once decided, in particular re disabling bin packing.
+ // https://google.github.io/styleguide/jsguide.html#features-arrays-trailing-comma
+ // GoogleStyle.InsertTrailingCommas = FormatStyle::TCS_Wrapped;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
GoogleStyle.SpacesInContainerLiterals = false;
@@ -966,6 +1076,12 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// #imports, etc.)
GoogleStyle.IncludeStyle.IncludeBlocks =
tooling::IncludeStyle::IBS_Preserve;
+ } else if (Language == FormatStyle::LK_CSharp) {
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
+ GoogleStyle.BreakStringLiterals = false;
+ GoogleStyle.ColumnLimit = 100;
+ GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
}
return GoogleStyle;
@@ -1061,7 +1177,7 @@ FormatStyle getWebKitStyle() {
FormatStyle Style = getLLVMStyle();
Style.AccessModifierOffset = -4;
Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- Style.AlignOperands = false;
+ Style.AlignOperands = FormatStyle::OAS_DontAlign;
Style.AlignTrailingComments = false;
Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Empty;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
@@ -1110,9 +1226,12 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
Style.BraceWrapping.AfterObjCDeclaration = true;
Style.BraceWrapping.AfterStruct = true;
Style.BraceWrapping.AfterExternBlock = true;
+ Style.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Style.BraceWrapping.BeforeCatch = true;
Style.BraceWrapping.BeforeElse = true;
+ Style.BraceWrapping.BeforeWhile = false;
Style.PenaltyReturnTypeOnItsOwnLine = 1000;
+ Style.AllowShortEnumsOnASingleLine = false;
Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
Style.AllowShortCaseLabelsOnASingleLine = false;
Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
@@ -1207,6 +1326,11 @@ std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
StyleSet.Add(std::move(DefaultStyle));
}
*Style = *StyleSet.Get(Language);
+ if (Style->InsertTrailingCommas != FormatStyle::TCS_None &&
+ Style->BinPackArguments) {
+ // See comment on FormatStyle::TCS_Wrapped.
+ return make_error_code(ParseError::BinPackTrailingCommaConflict);
+ }
return make_error_code(ParseError::Success);
}
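A minimal sketch of how the new conflict surfaces to callers of parseConfiguration, assuming the usual clang/Format/Format.h entry points (the helper name is made up for illustration):

    #include "clang/Format/Format.h"
    #include "llvm/Support/raw_ostream.h"
    #include <system_error>

    // A style asking for wrapped trailing commas while bin packing is still
    // enabled is rejected with the new error code.
    static void checkTrailingCommaConfig() {
      clang::format::FormatStyle Style =
          clang::format::getLLVMStyle(clang::format::FormatStyle::LK_JavaScript);
      std::error_code EC = clang::format::parseConfiguration(
          "InsertTrailingCommas: Wrapped\nBinPackArguments: true", &Style);
      if (EC == clang::format::ParseError::BinPackTrailingCommaConflict)
        llvm::errs() << "config rejected: " << EC.message() << "\n";
    }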
@@ -1462,6 +1586,75 @@ private:
FormattingAttemptStatus *Status;
};
+/// TrailingCommaInserter inserts trailing commas into container literals.
+/// E.g.:
+/// const x = [
+/// 1,
+/// ];
+/// TrailingCommaInserter runs after formatting. To avoid causing a required
+/// reformatting (and thus reflow), it never inserts a comma that'd exceed the
+/// ColumnLimit.
+///
+/// Because trailing commas disable bin packing of arrays, TrailingCommaInserter
+/// is conceptually incompatible with bin packing.
+class TrailingCommaInserter : public TokenAnalyzer {
+public:
+ TrailingCommaInserter(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ insertTrailingCommas(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ /// Inserts trailing commas in [] and {} initializers if they wrap over
+ /// multiple lines.
+ void insertTrailingCommas(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ for (AnnotatedLine *Line : Lines) {
+ insertTrailingCommas(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (FormatToken *FormatTok = Line->First; FormatTok;
+ FormatTok = FormatTok->Next) {
+ if (FormatTok->NewlinesBefore == 0)
+ continue;
+ FormatToken *Matching = FormatTok->MatchingParen;
+ if (!Matching || !FormatTok->getPreviousNonComment())
+ continue;
+ if (!(FormatTok->is(tok::r_square) &&
+ Matching->is(TT_ArrayInitializerLSquare)) &&
+ !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral)))
+ continue;
+ FormatToken *Prev = FormatTok->getPreviousNonComment();
+ if (Prev->is(tok::comma) || Prev->is(tok::semi))
+ continue;
+ // getEndLoc is not reliably set during re-lexing, use text length
+ // instead.
+ SourceLocation Start =
+ Prev->Tok.getLocation().getLocWithOffset(Prev->TokenText.size());
+ // If inserting a comma would push the code over the column limit, skip
+ // this location - it'd introduce an unstable formatting due to the
+ // required reflow.
+ unsigned ColumnNumber =
+ Env.getSourceManager().getSpellingColumnNumber(Start);
+ if (ColumnNumber > Style.ColumnLimit)
+ continue;
+ // Comma insertions cannot conflict with each other, and this pass has a
+ // clean set of Replacements, so the operation below cannot fail.
+ cantFail(Result.add(
+ tooling::Replacement(Env.getSourceManager(), Start, 0, ",")));
+ }
+ }
+ }
+};
+
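A hedged usage sketch for the pass above: it only runs when reformat() is invoked with LK_JavaScript and InsertTrailingCommas set to TCS_Wrapped (the helper and file name below are illustrative):

    #include "clang/Format/Format.h"

    static clang::tooling::Replacements
    addTrailingCommas(llvm::StringRef JsCode) {
      using clang::format::FormatStyle;
      FormatStyle Style =
          clang::format::getGoogleStyle(FormatStyle::LK_JavaScript);
      Style.InsertTrailingCommas = FormatStyle::TCS_Wrapped;
      // Wrapped trailing commas conflict with bin packing (see the check in
      // parseConfiguration), so switch it off explicitly.
      Style.BinPackArguments = false;
      clang::tooling::Range WholeFile(0, JsCode.size());
      return clang::format::reformat(Style, JsCode, {WholeFile}, "example.js");
    }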
// This class cleans up the erroneous/redundant code around the given ranges
// in a file.
class Cleaner : public TokenAnalyzer {
@@ -1808,7 +2001,7 @@ private:
<< FormatTok->Tok.getLocation().printToString(
SourceManager)
<< " token: " << FormatTok->TokenText << " token type: "
- << getTokenTypeName(FormatTok->Type) << "\n");
+ << getTokenTypeName(FormatTok->getType()) << "\n");
return true;
}
if (guessIsObjC(SourceManager, Line->Children, Keywords))
@@ -1951,8 +2144,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// enough as additional newlines might be added or removed across #include
// blocks. We handle this below by generating the updated #include blocks and
// comparing them to the original.
- if (Indices.size() == Includes.size() &&
- std::is_sorted(Indices.begin(), Indices.end()) &&
+ if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
return;
@@ -1973,8 +2165,8 @@ static void sortCppIncludes(const FormatStyle &Style,
// If the #includes are out of order, we generate a single replacement fixing
// the entire range of blocks. Otherwise, no replacement is generated.
- if (replaceCRLF(result) ==
- replaceCRLF(Code.substr(IncludesBeginOffset, IncludesBlockSize)))
+ if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
+ IncludesBeginOffset, IncludesBlockSize))))
return;
auto Err = Replaces.add(tooling::Replacement(
@@ -2142,8 +2334,8 @@ static void sortJavaImports(const FormatStyle &Style,
// If the imports are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
- if (replaceCRLF(result) ==
- replaceCRLF(Code.substr(Imports.front().Offset, ImportsBlockSize)))
+ if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
+ Imports.front().Offset, ImportsBlockSize))))
return;
auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
@@ -2431,6 +2623,12 @@ reformat(const FormatStyle &Style, StringRef Code,
return Formatter(Env, Expanded, Status).process();
});
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped)
+ Passes.emplace_back([&](const Environment &Env) {
+ return TrailingCommaInserter(Env, Expanded).process();
+ });
+
auto Env =
std::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
@@ -2518,7 +2716,8 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.CPlusPlus11 = LexingStd >= FormatStyle::LS_Cpp11;
LangOpts.CPlusPlus14 = LexingStd >= FormatStyle::LS_Cpp14;
LangOpts.CPlusPlus17 = LexingStd >= FormatStyle::LS_Cpp17;
- LangOpts.CPlusPlus2a = LexingStd >= FormatStyle::LS_Cpp20;
+ LangOpts.CPlusPlus20 = LexingStd >= FormatStyle::LS_Cpp20;
+ LangOpts.Char8 = LexingStd >= FormatStyle::LS_Cpp20;
LangOpts.LineComment = 1;
bool AlternativeOperators = Style.isCpp();
@@ -2532,7 +2731,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
const char *StyleOptionHelpDescription =
"Coding style, currently supports:\n"
- " LLVM, Google, Chromium, Mozilla, WebKit.\n"
+ " LLVM, GNU, Google, Chromium, Microsoft, Mozilla, WebKit.\n"
"Use -style=file to load style configuration from\n"
".clang-format file located in one of the parent\n"
"directories of the source file (or current\n"
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index 90d09064bb43..7d792974cd57 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -50,6 +50,7 @@ bool FormatToken::isSimpleTypeSpecifier() const {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
@@ -84,8 +85,8 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- LBrace->BlockKind == BK_Block || LBrace->Type == TT_DictLiteral ||
- LBrace->Next->Type == TT_DesignatedInitializerPeriod)
+ LBrace->BlockKind == BK_Block || LBrace->getType() == TT_DictLiteral ||
+ LBrace->Next->getType() == TT_DesignatedInitializerPeriod)
return 0;
// Calculate the number of code points we have to format this list. As the
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index e9cd327754ef..d4287f53fde3 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -54,6 +54,7 @@ namespace format {
TYPE(InheritanceComma) \
TYPE(InlineASMBrace) \
TYPE(InlineASMColon) \
+ TYPE(InlineASMSymbolicNameLSquare) \
TYPE(JavaAnnotation) \
TYPE(JsComputedPropertyName) \
TYPE(JsExponentiation) \
@@ -101,10 +102,20 @@ namespace format {
TYPE(TrailingUnaryOperator) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
+ TYPE(UntouchableMacroFunc) \
TYPE(CSharpStringLiteral) \
+ TYPE(CSharpNamedArgumentColon) \
+ TYPE(CSharpNullable) \
TYPE(CSharpNullCoalescing) \
+ TYPE(CSharpNullConditional) \
+ TYPE(CSharpNullConditionalLSquare) \
+ TYPE(CSharpGenericTypeConstraint) \
+ TYPE(CSharpGenericTypeConstraintColon) \
+ TYPE(CSharpGenericTypeConstraintComma) \
TYPE(Unknown)
+/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
+/// template opener or binary operator.
enum TokenType {
#define TYPE(X) TT_##X,
LIST_TOKEN_TYPES
@@ -172,6 +183,12 @@ struct FormatToken {
/// before the token.
bool MustBreakBefore = false;
+ /// Whether to not align across this token
+ ///
+ /// This happens for example when a preprocessor directive ended directly
+ /// before the token, but very rarely otherwise.
+ bool MustBreakAlignBefore = false;
+
/// The raw text of the token.
///
/// Contains the raw token text without leading whitespace and without leading
@@ -184,7 +201,10 @@ struct FormatToken {
/// Contains the kind of block if this token is a brace.
BraceBlockKind BlockKind = BK_Unknown;
- TokenType Type = TT_Unknown;
+ /// Returns the token's type, e.g. whether "<" is a template opener or
+ /// binary operator.
+ TokenType getType() const { return Type; }
+ void setType(TokenType T) { Type = T; }
/// The number of spaces that should be inserted before this token.
unsigned SpacesRequiredBefore = 0;
@@ -504,6 +524,9 @@ struct FormatToken {
/// Returns \c true if this tokens starts a block-type list, i.e. a
/// list that should be indented with a block indent.
bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
+ // C# does not indent object initializers as continuations.
+ if (is(tok::l_brace) && BlockKind == BK_BracedInit && Style.isCSharp())
+ return true;
if (is(TT_TemplateString) && opensScope())
return true;
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
@@ -579,6 +602,8 @@ private:
return Previous->endsSequenceInternal(K1, Tokens...);
return is(K1) && Previous && Previous->endsSequenceInternal(Tokens...);
}
+
+ TokenType Type = TT_Unknown;
};
class ContinuationIndenter;
@@ -770,6 +795,8 @@ struct AdditionalKeywords {
kw_unchecked = &IdentTable.get("unchecked");
kw_unsafe = &IdentTable.get("unsafe");
kw_ushort = &IdentTable.get("ushort");
+ kw_when = &IdentTable.get("when");
+ kw_where = &IdentTable.get("where");
// Keep this at the end of the constructor to make sure everything here
// is
@@ -786,7 +813,8 @@ struct AdditionalKeywords {
kw_fixed, kw_foreach, kw_implicit, kw_in, kw_interface, kw_internal,
kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override, kw_params,
kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte, kw_sealed,
- kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort,
+ kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort, kw_when,
+ kw_where,
// Keywords from the JavaScript section.
kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
@@ -890,13 +918,77 @@ struct AdditionalKeywords {
IdentifierInfo *kw_unchecked;
IdentifierInfo *kw_unsafe;
IdentifierInfo *kw_ushort;
+ IdentifierInfo *kw_when;
+ IdentifierInfo *kw_where;
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
- bool IsJavaScriptIdentifier(const FormatToken &Tok) const {
- return Tok.is(tok::identifier) &&
- JsExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
- JsExtraKeywords.end();
+ /// If \c AcceptIdentifierName is true, returns true not only for plain
+ /// identifiers but also for IdentifierName tokens (aka pseudo-keywords),
+ /// such as ``yield``.
+ bool IsJavaScriptIdentifier(const FormatToken &Tok,
+ bool AcceptIdentifierName = true) const {
+ // Based on the list of JavaScript & TypeScript keywords here:
+ // https://github.com/microsoft/TypeScript/blob/master/src/compiler/scanner.ts#L74
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_class:
+ case tok::kw_continue:
+ case tok::kw_const:
+ case tok::kw_default:
+ case tok::kw_delete:
+ case tok::kw_do:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_export:
+ case tok::kw_false:
+ case tok::kw_for:
+ case tok::kw_if:
+ case tok::kw_import:
+ case tok::kw_module:
+ case tok::kw_new:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_return:
+ case tok::kw_static:
+ case tok::kw_switch:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typeof:
+ case tok::kw_void:
+ case tok::kw_while:
+ // These are JS keywords that are lexed by LLVM/clang as keywords.
+ return false;
+ case tok::identifier: {
+ // For identifiers, make sure they are true identifiers, excluding the
+ // JavaScript pseudo-keywords (not lexed by LLVM/clang as keywords).
+ bool IsPseudoKeyword =
+ JsExtraKeywords.find(Tok.Tok.getIdentifierInfo()) !=
+ JsExtraKeywords.end();
+ return AcceptIdentifierName || !IsPseudoKeyword;
+ }
+ default:
+ // Other keywords are handled in the switch below, to avoid problems due
+ // to duplicate case labels when using the #include trick.
+ break;
+ }
+
+ switch (Tok.Tok.getKind()) {
+ // Handle C++ keywords not included above: these are all JS identifiers.
+#define KEYWORD(X, Y) case tok::kw_##X:
+#include "clang/Basic/TokenKinds.def"
+ // #undef KEYWORD is not needed -- it's #undef-ed at the end of
+ // TokenKinds.def
+ return true;
+ default:
+ // All other tokens (punctuation etc) are not JS identifiers.
+ return false;
+ }
}
/// Returns \c true if \p Tok is a C# keyword, returns
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index ef20ba884fb3..1fd153d1112e 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -22,13 +22,15 @@
namespace clang {
namespace format {
-FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
- unsigned Column, const FormatStyle &Style,
- encoding::Encoding Encoding)
+FormatTokenLexer::FormatTokenLexer(
+ const SourceManager &SourceMgr, FileID ID, unsigned Column,
+ const FormatStyle &Style, encoding::Encoding Encoding,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable)
: FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
- Style(Style), IdentTable(getFormattingLangOpts(Style)),
- Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0),
+ Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
+ Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
@@ -43,6 +45,11 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
for (const std::string &NamespaceMacro : Style.NamespaceMacros)
Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
+ for (const std::string &WhitespaceSensitiveMacro :
+ Style.WhitespaceSensitiveMacros) {
+ Macros.insert(
+ {&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
+ }
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -57,6 +64,10 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
if (Style.Language == FormatStyle::LK_TextProto)
tryParsePythonComment();
tryMergePreviousTokens();
+ if (Style.isCSharp())
+ // This needs to come after tokens have been merged so that C#
+ // string literals are correctly identified.
+ handleCSharpVerbatimAndInterpolatedStrings();
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
} while (Tokens.back()->Tok.isNot(tok::eof));
@@ -70,15 +81,19 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeLessLess())
return;
+ if (tryMergeForEach())
+ return;
+ if (Style.isCpp() && tryTransformTryUsageForC())
+ return;
if (Style.isCSharp()) {
if (tryMergeCSharpKeywordVariables())
return;
- if (tryMergeCSharpVerbatimStringLiteral())
+ if (tryMergeCSharpStringLiteral())
return;
if (tryMergeCSharpDoubleQuestion())
return;
- if (tryMergeCSharpNullConditionals())
+ if (tryMergeCSharpNullConditional())
return;
if (tryTransformCSharpForEach())
return;
@@ -120,8 +135,11 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::starequal);
return;
}
- if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator))
+ if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator)) {
+ // Treat like the "||" operator (as opposed to the ternary ?).
+ Tokens.back()->Tok.setKind(tok::pipepipe);
return;
+ }
if (tryMergeTokens(JSNullPropagatingOperator,
TT_JsNullPropagatingOperator)) {
// Treat like a regular "." access.
@@ -151,7 +169,7 @@ bool FormatTokenLexer::tryMergeNSStringLiteral() {
At->TokenText = StringRef(At->TokenText.begin(),
String->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += String->ColumnWidth;
- At->Type = TT_ObjCStringLiteral;
+ At->setType(TT_ObjCStringLiteral);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -170,7 +188,7 @@ bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
StringRef(Hash->TokenText.begin(),
Identifier->TokenText.end() - Hash->TokenText.begin());
Hash->ColumnWidth += Identifier->ColumnWidth;
- Hash->Type = TT_JsPrivateIdentifier;
+ Hash->setType(TT_JsPrivateIdentifier);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -178,18 +196,71 @@ bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
// Search for verbatim or interpolated string literals @"ABC" or
// $"aaaaa{abc}aaaaa" i and mark the token as TT_CSharpStringLiteral, and to
// prevent splitting of @, $ and ".
-bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
+// Merging of multiline verbatim strings with embedded '"' is handled in
+// handleCSharpVerbatimAndInterpolatedStrings with lower-level lexing.
+bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
if (Tokens.size() < 2)
return false;
- auto &At = *(Tokens.end() - 2);
+
+ // Interpolated strings could contain { } with " characters inside.
+ // $"{x ?? "null"}"
+ // should not be split into $"{x ?? ", null, "}" but should be treated as a
+ // single string-literal.
+ //
+ // We opt not to try and format expressions inside {} within a C#
+ // interpolated string. Formatting expressions within an interpolated string
+ // would require similar work as that done for JavaScript template strings
+ // in `handleTemplateStrings()`.
+ auto &CSharpInterpolatedString = *(Tokens.end() - 2);
+ if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
+ (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
+ CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
+ int UnmatchedOpeningBraceCount = 0;
+
+ auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
+ for (size_t Index = 0; Index < TokenTextSize; ++Index) {
+ char C = CSharpInterpolatedString->TokenText[Index];
+ if (C == '{') {
+ // "{{" inside an interpolated string is an escaped '{' so skip it.
+ if (Index + 1 < TokenTextSize &&
+ CSharpInterpolatedString->TokenText[Index + 1] == '{') {
+ ++Index;
+ continue;
+ }
+ ++UnmatchedOpeningBraceCount;
+ } else if (C == '}') {
+ // "}}" inside an interpolated string is an escaped '}' so skip it.
+ if (Index + 1 < TokenTextSize &&
+ CSharpInterpolatedString->TokenText[Index + 1] == '}') {
+ ++Index;
+ continue;
+ }
+ --UnmatchedOpeningBraceCount;
+ }
+ }
+
+ if (UnmatchedOpeningBraceCount > 0) {
+ auto &NextToken = *(Tokens.end() - 1);
+ CSharpInterpolatedString->TokenText =
+ StringRef(CSharpInterpolatedString->TokenText.begin(),
+ NextToken->TokenText.end() -
+ CSharpInterpolatedString->TokenText.begin());
+ CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+ }
+ }
+
+ // Look for @"aaaaaa" or $"aaaaaa".
auto &String = *(Tokens.end() - 1);
+ if (!String->is(tok::string_literal))
+ return false;
- // Look for $"aaaaaa" @"aaaaaa".
- if (!(At->is(tok::at) || At->TokenText == "$") ||
- !String->is(tok::string_literal))
+ auto &At = *(Tokens.end() - 2);
+ if (!(At->is(tok::at) || At->TokenText == "$"))
return false;
- if (Tokens.size() >= 2 && At->is(tok::at)) {
+ if (Tokens.size() > 2 && At->is(tok::at)) {
auto &Dollar = *(Tokens.end() - 3);
if (Dollar->TokenText == "$") {
// This looks like $@"aaaaa" so we need to combine all 3 tokens.
@@ -198,7 +269,7 @@ bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
StringRef(Dollar->TokenText.begin(),
String->TokenText.end() - Dollar->TokenText.begin());
Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
- Dollar->Type = TT_CSharpStringLiteral;
+ Dollar->setType(TT_CSharpStringLiteral);
Tokens.erase(Tokens.end() - 2);
Tokens.erase(Tokens.end() - 1);
return true;
@@ -210,11 +281,18 @@ bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
At->TokenText = StringRef(At->TokenText.begin(),
String->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += String->ColumnWidth;
- At->Type = TT_CSharpStringLiteral;
+ At->setType(TT_CSharpStringLiteral);
Tokens.erase(Tokens.end() - 1);
return true;
}
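The interpolated-string merging above hinges on counting unmatched '{' while treating "{{" and "}}" as escapes. A standalone sketch of that counting rule (illustrative helper, not clang-format API):

    #include "llvm/ADT/StringRef.h"

    // Number of '{' without a matching '}', ignoring the escaped forms "{{"
    // and "}}" that C# interpolated strings use for literal braces.
    static int unmatchedOpeningBraces(llvm::StringRef Text) {
      int Unmatched = 0;
      for (size_t I = 0, E = Text.size(); I < E; ++I) {
        char C = Text[I];
        if (C != '{' && C != '}')
          continue;
        if (I + 1 < E && Text[I + 1] == C) {
          ++I; // A doubled brace is an escaped literal brace; skip the pair.
          continue;
        }
        Unmatched += (C == '{') ? 1 : -1;
      }
      return Unmatched;
    }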
+// Valid C# attribute targets:
+// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/concepts/attributes/#attribute-targets
+const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
+ "assembly", "module", "field", "event", "method",
+ "param", "property", "return", "type",
+};
+
bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
if (Tokens.size() < 2)
return false;
@@ -222,12 +300,38 @@ bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
auto &SecondQuestion = *(Tokens.end() - 1);
if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
return false;
- FirstQuestion->Tok.setKind(tok::question);
+ FirstQuestion->Tok.setKind(tok::question); // no '??' in clang tokens.
FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
SecondQuestion->TokenText.end() -
FirstQuestion->TokenText.begin());
FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
- FirstQuestion->Type = TT_CSharpNullCoalescing;
+ FirstQuestion->setType(TT_CSharpNullCoalescing);
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+// Merge '?[' and '?.' pairs into single tokens.
+bool FormatTokenLexer::tryMergeCSharpNullConditional() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &Question = *(Tokens.end() - 2);
+ auto &PeriodOrLSquare = *(Tokens.end() - 1);
+ if (!Question->is(tok::question) ||
+ !PeriodOrLSquare->isOneOf(tok::l_square, tok::period))
+ return false;
+ Question->TokenText =
+ StringRef(Question->TokenText.begin(),
+ PeriodOrLSquare->TokenText.end() - Question->TokenText.begin());
+ Question->ColumnWidth += PeriodOrLSquare->ColumnWidth;
+
+ if (PeriodOrLSquare->is(tok::l_square)) {
+ Question->Tok.setKind(tok::question); // no '?[' in clang tokens.
+ Question->setType(TT_CSharpNullConditionalLSquare);
+ } else {
+ Question->Tok.setKind(tok::question); // no '?.' in clang tokens.
+ Question->setType(TT_CSharpNullConditional);
+ }
+
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -246,24 +350,7 @@ bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
At->TokenText = StringRef(At->TokenText.begin(),
Keyword->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += Keyword->ColumnWidth;
- At->Type = Keyword->Type;
- Tokens.erase(Tokens.end() - 1);
- return true;
-}
-
-// In C# merge the Identifier and the ? together e.g. arg?.
-bool FormatTokenLexer::tryMergeCSharpNullConditionals() {
- if (Tokens.size() < 2)
- return false;
- auto &Identifier = *(Tokens.end() - 2);
- auto &Question = *(Tokens.end() - 1);
- if (!Identifier->isOneOf(tok::r_square, tok::identifier) ||
- !Question->is(tok::question))
- return false;
- Identifier->TokenText =
- StringRef(Identifier->TokenText.begin(),
- Question->TokenText.end() - Identifier->TokenText.begin());
- Identifier->ColumnWidth += Question->ColumnWidth;
+ At->setType(Keyword->getType());
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -278,11 +365,53 @@ bool FormatTokenLexer::tryTransformCSharpForEach() {
if (Identifier->TokenText != "foreach")
return false;
- Identifier->Type = TT_ForEachMacro;
+ Identifier->setType(TT_ForEachMacro);
Identifier->Tok.setKind(tok::kw_for);
return true;
}
+bool FormatTokenLexer::tryMergeForEach() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &For = *(Tokens.end() - 2);
+ auto &Each = *(Tokens.end() - 1);
+ if (!For->is(tok::kw_for))
+ return false;
+ if (!Each->is(tok::identifier))
+ return false;
+ if (Each->TokenText != "each")
+ return false;
+
+ For->setType(TT_ForEachMacro);
+ For->Tok.setKind(tok::kw_for);
+
+ For->TokenText = StringRef(For->TokenText.begin(),
+ Each->TokenText.end() - For->TokenText.begin());
+ For->ColumnWidth += Each->ColumnWidth;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+bool FormatTokenLexer::tryTransformTryUsageForC() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &Try = *(Tokens.end() - 2);
+ if (!Try->is(tok::kw_try))
+ return false;
+ auto &Next = *(Tokens.end() - 1);
+ if (Next->isOneOf(tok::l_brace, tok::colon))
+ return false;
+
+ if (Tokens.size() > 2) {
+ auto &At = *(Tokens.end() - 3);
+ if (At->is(tok::at))
+ return false;
+ }
+
+ Try->Tok.setKind(tok::identifier);
+ return true;
+}
+
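For reference, the kind of input tryMergeForEach above is meant for: the "for each (... in ...)" extension seen in MSVC/C++-CLI code is lexed as the two tokens 'for' and 'each', merged into a single TT_ForEachMacro token, and then formatted like an ordinary for loop. Hypothetical input:

    for each (auto &Item in Items) {
      Process(Item);
    }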
bool FormatTokenLexer::tryMergeLessLess() {
// Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
if (Tokens.size() < 3)
@@ -329,7 +458,7 @@ bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
First[0]->TokenText = StringRef(First[0]->TokenText.data(),
First[0]->TokenText.size() + AddLength);
First[0]->ColumnWidth += AddLength;
- First[0]->Type = NewType;
+ First[0]->setType(NewType);
return true;
}
@@ -418,7 +547,7 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
}
}
- RegexToken->Type = TT_RegexLiteral;
+ RegexToken->setType(TT_RegexLiteral);
// Treat regex literals like other string_literals.
RegexToken->Tok.setKind(tok::string_literal);
RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
@@ -427,6 +556,68 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
+void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
+ FormatToken *CSharpStringLiteral = Tokens.back();
+
+ if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
+ return;
+
+ // Deal with multiline strings.
+ if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
+ CSharpStringLiteral->TokenText.startswith(R"($@")")))
+ return;
+
+ const char *StrBegin =
+ Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
+ const char *Offset = StrBegin;
+ if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
+ Offset += 2;
+ else // CSharpStringLiteral->TokenText.startswith(R"($@")")
+ Offset += 3;
+
+ // Look for a terminating '"' in the current file buffer.
+ // Make no effort to format code within an interpolated or verbatim string.
+ for (; Offset != Lex->getBuffer().end(); ++Offset) {
+ if (Offset[0] == '"') {
+ // "" within a verbatim string is an escaped double quote: skip it.
+ if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
+ ++Offset;
+ else
+ break;
+ }
+ }
+
+ // Make no attempt to format code properly if a verbatim string is
+ // unterminated.
+ if (Offset == Lex->getBuffer().end())
+ return;
+
+ StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
+ CSharpStringLiteral->TokenText = LiteralText;
+
+ // Adjust width for potentially multiline string literals.
+ size_t FirstBreak = LiteralText.find('\n');
+ StringRef FirstLineText = FirstBreak == StringRef::npos
+ ? LiteralText
+ : LiteralText.substr(0, FirstBreak);
+ CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
+ FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
+ Encoding);
+ size_t LastBreak = LiteralText.rfind('\n');
+ if (LastBreak != StringRef::npos) {
+ CSharpStringLiteral->IsMultiline = true;
+ unsigned StartColumn = 0;
+ CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
+ LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
+ Style.TabWidth, Encoding);
+ }
+
+ SourceLocation loc = Offset < Lex->getBuffer().end()
+ ? Lex->getSourceLocation(Offset + 1)
+ : SourceMgr.getLocForEndOfFile(ID);
+ resetLexer(SourceMgr.getFileOffset(loc));
+}
+
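handleCSharpVerbatimAndInterpolatedStrings() has to re-scan the raw buffer because a verbatim string may span lines and escapes its delimiter by doubling it (""). A self-contained sketch of just that scanning rule, over a plain std::string_view rather than the lexer's buffer, could look like this:

#include <cstddef>
#include <string_view>

// Returns the index one past the closing '"' of a C# verbatim string body, or
// npos if the string is unterminated. `Pos` points just past the opening @"
// or $@". Inside a verbatim string, "" is an escaped quote, not a terminator.
static std::size_t findVerbatimStringEnd(std::string_view Buf,
                                         std::size_t Pos) {
  while (Pos < Buf.size()) {
    if (Buf[Pos] == '"') {
      if (Pos + 1 < Buf.size() && Buf[Pos + 1] == '"')
        Pos += 2; // skip the escaped ""
      else
        return Pos + 1; // closing quote found
    } else {
      ++Pos;
    }
  }
  return std::string_view::npos;
}

// For the input @"a ""quoted"" word" the scan skips both doubled quotes and
// stops only at the quote after `word`.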
void FormatTokenLexer::handleTemplateStrings() {
FormatToken *BacktickToken = Tokens.back();
@@ -468,7 +659,7 @@ void FormatTokenLexer::handleTemplateStrings() {
}
StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
- BacktickToken->Type = TT_TemplateString;
+ BacktickToken->setType(TT_TemplateString);
BacktickToken->Tok.setKind(tok::string_literal);
BacktickToken->TokenText = LiteralText;
@@ -506,7 +697,7 @@ void FormatTokenLexer::tryParsePythonComment() {
if (To == StringRef::npos)
To = Lex->getBuffer().size();
size_t Len = To - From;
- HashToken->Type = TT_LineComment;
+ HashToken->setType(TT_LineComment);
HashToken->Tok.setKind(tok::comment);
HashToken->TokenText = Lex->getBuffer().substr(From, Len);
SourceLocation Loc = To < Lex->getBuffer().size()
@@ -604,7 +795,7 @@ bool FormatTokenLexer::tryMergeConflictMarkers() {
// We do not need to build a complete token here, as we will skip it
// during parsing anyway (as we must not touch whitespace around conflict
// markers).
- Tokens.back()->Type = Type;
+ Tokens.back()->setType(Type);
Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);
Tokens.push_back(Next);
@@ -691,13 +882,13 @@ FormatToken *FormatTokenLexer::getNextToken() {
break;
case '\\':
if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
- FormatTok->Type = TT_ImplicitStringLiteral;
+ FormatTok->setType(TT_ImplicitStringLiteral);
break;
default:
- FormatTok->Type = TT_ImplicitStringLiteral;
+ FormatTok->setType(TT_ImplicitStringLiteral);
break;
}
- if (FormatTok->Type == TT_ImplicitStringLiteral)
+ if (FormatTok->getType() == TT_ImplicitStringLiteral)
break;
}
@@ -825,12 +1016,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
it != Macros.end()) {
- FormatTok->Type = it->second;
+ FormatTok->setType(it->second);
} else if (FormatTok->is(tok::identifier)) {
if (MacroBlockBeginRegex.match(Text)) {
- FormatTok->Type = TT_MacroBlockBegin;
+ FormatTok->setType(TT_MacroBlockBegin);
} else if (MacroBlockEndRegex.match(Text)) {
- FormatTok->Type = TT_MacroBlockEnd;
+ FormatTok->setType(TT_MacroBlockEnd);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
index 611211be055a..6b08677e3369 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
@@ -21,6 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Regex.h"
#include <stack>
@@ -37,7 +38,9 @@ enum LexerState {
class FormatTokenLexer {
public:
FormatTokenLexer(const SourceManager &SourceMgr, FileID ID, unsigned Column,
- const FormatStyle &Style, encoding::Encoding Encoding);
+ const FormatStyle &Style, encoding::Encoding Encoding,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable);
ArrayRef<FormatToken *> lex();
@@ -49,11 +52,13 @@ private:
bool tryMergeLessLess();
bool tryMergeNSStringLiteral();
bool tryMergeJSPrivateIdentifier();
- bool tryMergeCSharpVerbatimStringLiteral();
+ bool tryMergeCSharpStringLiteral();
bool tryMergeCSharpKeywordVariables();
- bool tryMergeCSharpNullConditionals();
bool tryMergeCSharpDoubleQuestion();
+ bool tryMergeCSharpNullConditional();
bool tryTransformCSharpForEach();
+ bool tryMergeForEach();
+ bool tryTransformTryUsageForC();
bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType);
@@ -79,6 +84,8 @@ private:
// nested template parts by balancing curly braces.
void handleTemplateStrings();
+ void handleCSharpVerbatimAndInterpolatedStrings();
+
void tryParsePythonComment();
bool tryMerge_TMacro();
@@ -98,10 +105,10 @@ private:
const SourceManager &SourceMgr;
FileID ID;
const FormatStyle &Style;
- IdentifierTable IdentTable;
+ IdentifierTable &IdentTable;
AdditionalKeywords Keywords;
encoding::Encoding Encoding;
- llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
// Index (in 'Tokens') of the last token that starts a new line.
unsigned FirstInLineIndex;
SmallVector<FormatToken *, 16> Tokens;
@@ -113,6 +120,9 @@ private:
llvm::Regex MacroBlockBeginRegex;
llvm::Regex MacroBlockEndRegex;
+ // Targets that may appear inside a C# attribute.
+ static const llvm::StringSet<> CSharpAttributeTargets;
+
void readRawToken(FormatToken &Tok);
void resetLexer(unsigned Offset);
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 20b424f86077..97de45bd1965 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -121,7 +121,25 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
// Named namespace comments must not mention anonymous namespace.
if (!NamespaceName.empty() && !AnonymousInComment.empty())
return false;
- return NamespaceNameInComment == NamespaceName;
+ if (NamespaceNameInComment == NamespaceName)
+ return true;
+
+ // Has the namespace comment flowed onto the next line?
+ // } // namespace
+ // // verylongnamespacenamethatdidnotfitonthepreviouscommentline
+ if (!(Comment->Next && Comment->Next->is(TT_LineComment)))
+ return false;
+
+ static const llvm::Regex CommentPattern = llvm::Regex(
+ "^/[/*] *( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$", llvm::Regex::IgnoreCase);
+
+ // Pull out just the comment text.
+ if (!CommentPattern.match(Comment->Next->TokenText, &Groups)) {
+ return false;
+ }
+ NamespaceNameInComment = Groups.size() > 2 ? Groups[2] : "";
+
+ return (NamespaceNameInComment == NamespaceName);
}
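The new fallback accepts an end comment whose namespace name wrapped onto a following line comment. A rough standalone illustration of the matching step, using std::regex instead of llvm::Regex and a slightly different capture-group layout (both assumptions made to keep the sketch self-contained), might be:

#include <regex>
#include <string>

// Extract a namespace name from a continuation comment such as
// "// verylongnamespacename"; returns "" if the comment does not match.
static std::string namespaceNameFromComment(const std::string &Comment) {
  // Roughly mirrors the pattern in validEndComment(): comment opener,
  // optional spaces, an identifier-ish name, optional '.', optional "*/".
  static const std::regex Pattern(
      R"re(^/[/*] *(?: +([A-Za-z0-9:_]+))?\.? *(?:\*/)?$)re",
      std::regex::icase);
  std::smatch M;
  if (!std::regex_match(Comment, M, Pattern))
    return "";
  return M[1].matched ? M[1].str() : "";
}

// namespaceNameFromComment("// verylongnamespacename") yields
// "verylongnamespacename", which is then compared against NamespaceName.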
void addEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
@@ -187,6 +205,23 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
const SourceManager &SourceMgr = Env.getSourceManager();
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
+
+ // Spin through the lines and ensure we have balanced braces.
+ int Braces = 0;
+ for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
+ FormatToken *Tok = AnnotatedLines[I]->First;
+ while (Tok) {
+ Braces += Tok->is(tok::l_brace) ? 1 : Tok->is(tok::r_brace) ? -1 : 0;
+ Tok = Tok->Next;
+ }
+ }
+ // Don't attempt to add comments when the braces are unbalanced, as this can
+ // lead to a comment being placed on a closing brace that is not the
+ // matching brace of the namespace (this occurs during incomplete editing).
+ if (Braces != 0) {
+ return {Fixes, 0};
+ }
+
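The guard above bails out of the whole fixer when the braces in the file do not balance, since pairing end comments against a half-edited buffer would attach them to the wrong closing brace. Reduced to plain data, the check is a running counter over the token stream; a sketch with a minimal token kind (not the real FormatToken chain) follows.

#include <vector>

enum class Kind { LBrace, RBrace, Other };

// True if the number of '{' tokens equals the number of '}' tokens. The real
// check walks AnnotatedLines and their FormatToken lists, but the arithmetic
// is the same: +1 per l_brace, -1 per r_brace, require a final sum of zero.
static bool bracesBalanced(const std::vector<Kind> &Toks) {
  int Depth = 0;
  for (Kind K : Toks) {
    if (K == Kind::LBrace)
      ++Depth;
    else if (K == Kind::RBrace)
      --Depth;
  }
  return Depth == 0;
}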
std::string AllNamespaceNames = "";
size_t StartLineIndex = SIZE_MAX;
StringRef NamespaceTokenText;
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index 5be243f4c07a..db2b65b08898 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -144,7 +144,7 @@ public:
llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
return References[LHSI] < References[RHSI];
});
- bool ReferencesInOrder = std::is_sorted(Indices.begin(), Indices.end());
+ bool ReferencesInOrder = llvm::is_sorted(Indices);
std::string ReferencesText;
bool SymbolsInOrder = true;
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index eb98a205d526..f1459a808ff8 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -64,11 +64,16 @@ TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
tooling::Replacements Result;
- FormatTokenLexer Tokens(Env.getSourceManager(), Env.getFileID(),
- Env.getFirstStartColumn(), Style, Encoding);
- UnwrappedLineParser Parser(Style, Tokens.getKeywords(),
- Env.getFirstStartColumn(), Tokens.lex(), *this);
+ llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
+ IdentifierTable IdentTable(getFormattingLangOpts(Style));
+ FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
+ Env.getFirstStartColumn(), Style, Encoding, Allocator,
+ IdentTable);
+ ArrayRef<FormatToken *> Toks(Lex.lex());
+ SmallVector<FormatToken *, 10> Tokens(Toks.begin(), Toks.end());
+ UnwrappedLineParser Parser(Style, Lex.getKeywords(),
+ Env.getFirstStartColumn(), Tokens, *this);
Parser.parse();
assert(UnwrappedLines.rbegin()->empty());
unsigned Penalty = 0;
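The point of this hunk is an ownership change: the token allocator and the IdentifierTable are now created by the caller and only borrowed by the lexer, so the lexed tokens outlive any single FormatTokenLexer. A tiny illustration of that pattern, with hypothetical TokenPool/BorrowingLexer types rather than the real clang-format classes:

#include <deque>
#include <string>

// The caller owns the storage; the worker borrows it by reference, mirroring
// how Allocator and IdentTable are created in TokenAnalyzer::process() and
// handed to FormatTokenLexer. Tokens made through the borrowed pool remain
// valid after the lexer itself is destroyed.
struct TokenPool {
  std::deque<std::string> Storage; // deque: stable addresses on push_back
};

class BorrowingLexer {
public:
  explicit BorrowingLexer(TokenPool &Pool) : Pool(Pool) {}
  std::string *makeToken(std::string Text) {
    Pool.Storage.push_back(std::move(Text));
    return &Pool.Storage.back(); // owned by the pool, not by this lexer
  }

private:
  TokenPool &Pool; // borrowed, not owned
};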
@@ -76,14 +81,14 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
- TokenAnnotator Annotator(Style, Tokens.getKeywords());
+ TokenAnnotator Annotator(Style, Lex.getKeywords());
for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
Annotator.annotate(*AnnotatedLines.back());
}
std::pair<tooling::Replacements, unsigned> RunResult =
- analyze(Annotator, AnnotatedLines, Tokens);
+ analyze(Annotator, AnnotatedLines, Lex);
LLVM_DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 8cb786a4d343..7f8e35126512 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -118,9 +118,9 @@ private:
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Left->Previous &&
Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral)))
- CurrentToken->Type = TT_DictLiteral;
+ CurrentToken->setType(TT_DictLiteral);
else
- CurrentToken->Type = TT_TemplateCloser;
+ CurrentToken->setType(TT_TemplateCloser);
next();
return true;
}
@@ -131,7 +131,7 @@ private:
}
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
(CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext &&
- Style.Language != FormatStyle::LK_Proto &&
+ !Style.isCSharp() && Style.Language != FormatStyle::LK_Proto &&
Style.Language != FormatStyle::LK_TextProto))
return false;
// If a && or || is found and interpreted as a binary operator, this set
@@ -151,7 +151,7 @@ private:
if (CurrentToken->is(tok::colon) ||
(CurrentToken->isOneOf(tok::l_brace, tok::less) &&
Previous->isNot(tok::colon)))
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
}
if (!consumeToken())
@@ -160,6 +160,27 @@ private:
return false;
}
+ bool parseUntouchableParens() {
+ while (CurrentToken) {
+ CurrentToken->Finalized = true;
+ switch (CurrentToken->Tok.getKind()) {
+ case tok::l_paren:
+ next();
+ if (!parseUntouchableParens())
+ return false;
+ continue;
+ case tok::r_paren:
+ next();
+ return true;
+ default:
+ // no-op
+ break;
+ }
+ next();
+ }
+ return false;
+ }
+
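parseUntouchableParens() marks every token up to the matching ')' as finalized so that later annotation passes leave the macro argument alone, recursing for nested parentheses. The same walk over a flat, index-based token array (a simplification; the real annotator advances a CurrentToken cursor) looks like this:

#include <cstddef>
#include <vector>

struct SimpleTok {
  char Ch;               // '(' , ')' or anything else
  bool Finalized = false;
};

// Starting just after an opening '(', mark tokens as finalized up to and
// including the matching ')'. Returns the index after that ')', or
// Toks.size() if the parenthesis is never closed.
static std::size_t finalizeUntouchable(std::vector<SimpleTok> &Toks,
                                       std::size_t I) {
  while (I < Toks.size()) {
    Toks[I].Finalized = true;
    if (Toks[I].Ch == '(') {
      I = finalizeUntouchable(Toks, I + 1); // recurse into nested parens
      continue;
    }
    if (Toks[I].Ch == ')')
      return I + 1;
    ++I;
  }
  return I;
}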
bool parseParens(bool LookForDecls = false) {
if (!CurrentToken)
return false;
@@ -171,6 +192,11 @@ private:
Contexts.back().ColonIsForRangeExpr =
Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
+ if (Left->Previous && Left->Previous->is(TT_UntouchableMacroFunc)) {
+ Left->Finalized = true;
+ return parseUntouchableParens();
+ }
+
bool StartsObjCMethodExpr = false;
if (FormatToken *MaybeSel = Left->Previous) {
// @selector( starts a selector.
@@ -217,7 +243,7 @@ private:
// This is the parameter list of an ObjC block.
Contexts.back().IsExpression = false;
} else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
- Left->Type = TT_AttributeParen;
+ Left->setType(TT_AttributeParen);
} else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
// The first argument to a foreach macro is a declaration.
Contexts.back().IsForEachMacro = true;
@@ -233,7 +259,7 @@ private:
if (StartsObjCMethodExpr) {
Contexts.back().ColonIsObjCMethodExpr = true;
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
}
// MightBeFunctionType and ProbablyFunctionType are used for
@@ -264,7 +290,7 @@ private:
if (PrevPrev && PrevPrev->is(tok::identifier) &&
Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
- Prev->Type = TT_BinaryOperator;
+ Prev->setType(TT_BinaryOperator);
LookForDecls = false;
}
}
@@ -282,8 +308,8 @@ private:
if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
(CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration)))
- Left->Type = Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
- : TT_FunctionTypeLParen;
+ Left->setType(Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
+ : TT_FunctionTypeLParen);
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -295,12 +321,12 @@ private:
for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) {
if (Tok->is(TT_BinaryOperator) &&
Tok->isOneOf(tok::star, tok::amp, tok::ampamp))
- Tok->Type = TT_PointerOrReference;
+ Tok->setType(TT_PointerOrReference);
}
}
if (StartsObjCMethodExpr) {
- CurrentToken->Type = TT_ObjCMethodExpr;
+ CurrentToken->setType(TT_ObjCMethodExpr);
if (Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
@@ -308,13 +334,13 @@ private:
}
if (Left->is(TT_AttributeParen))
- CurrentToken->Type = TT_AttributeParen;
+ CurrentToken->setType(TT_AttributeParen);
if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
- CurrentToken->Type = TT_JavaAnnotation;
+ CurrentToken->setType(TT_JavaAnnotation);
if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
- CurrentToken->Type = TT_LeadingJavaAnnotation;
+ CurrentToken->setType(TT_LeadingJavaAnnotation);
if (Left->Previous && Left->Previous->is(TT_AttributeSquare))
- CurrentToken->Type = TT_AttributeSquare;
+ CurrentToken->setType(TT_AttributeSquare);
if (!HasMultipleLines)
Left->PackingKind = PPK_Inconclusive;
@@ -330,7 +356,7 @@ private:
return false;
if (CurrentToken->is(tok::l_brace))
- Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen
+ Left->setType(TT_Unknown); // Not TT_ObjCBlockLParen
if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
!CurrentToken->Next->HasUnescapedNewline &&
!CurrentToken->Next->isTrailingComment())
@@ -342,13 +368,13 @@ private:
if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
if (PossibleObjCForInToken) {
- PossibleObjCForInToken->Type = TT_Unknown;
+ PossibleObjCForInToken->setType(TT_Unknown);
PossibleObjCForInToken = nullptr;
}
}
if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in)) {
PossibleObjCForInToken = CurrentToken;
- PossibleObjCForInToken->Type = TT_ObjCForIn;
+ PossibleObjCForInToken->setType(TT_ObjCForIn);
}
// When we discover a 'new', we set CanBeExpression to 'false' in order to
// parse the type correctly. Reset that after a comma.
@@ -369,6 +395,17 @@ private:
if (!Style.isCSharp())
return false;
+ // `identifier[i]` is not an attribute.
+ if (Tok.Previous && Tok.Previous->is(tok::identifier))
+ return false;
+
+ // Chains of [] in `identifier[i][j][k]` are not attributes.
+ if (Tok.Previous && Tok.Previous->is(tok::r_square)) {
+ auto *MatchingParen = Tok.Previous->MatchingParen;
+ if (!MatchingParen || MatchingParen->is(TT_ArraySubscriptLSquare))
+ return false;
+ }
+
const FormatToken *AttrTok = Tok.Next;
if (!AttrTok)
return false;
@@ -385,15 +422,15 @@ private:
if (!AttrTok)
return false;
- // Move past the end of ']'.
+ // Allow an attribute to be the only content of a file.
AttrTok = AttrTok->Next;
if (!AttrTok)
- return false;
+ return true;
// Limit this to being an access modifier that follows.
if (AttrTok->isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
- tok::kw_class, tok::kw_static, tok::l_square,
- Keywords.kw_internal)) {
+ tok::comment, tok::kw_class, tok::kw_static,
+ tok::l_square, Keywords.kw_internal)) {
return true;
}
@@ -460,7 +497,7 @@ private:
Contexts.back().InCpp11AttributeSpecifier;
// Treat C# Attributes [STAThread] much like C++ attributes [[...]].
- bool IsCSharp11AttributeSpecifier =
+ bool IsCSharpAttributeSpecifier =
isCSharpAttributeSpecifier(*Left) ||
Contexts.back().InCSharpAttributeSpecifier;
@@ -469,7 +506,8 @@ private:
bool StartsObjCMethodExpr =
!IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates &&
Style.isCpp() && !IsCpp11AttributeSpecifier &&
- Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
+ !IsCSharpAttributeSpecifier && Contexts.back().CanBeExpression &&
+ Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -483,24 +521,26 @@ private:
unsigned BindingIncrease = 1;
if (IsCppStructuredBinding) {
- Left->Type = TT_StructuredBindingLSquare;
+ Left->setType(TT_StructuredBindingLSquare);
} else if (Left->is(TT_Unknown)) {
if (StartsObjCMethodExpr) {
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
+ } else if (InsideInlineASM) {
+ Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
- Left->Type = TT_AttributeSquare;
+ Left->setType(TT_AttributeSquare);
} else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
- Left->Type = TT_JsComputedPropertyName;
+ Left->setType(TT_JsComputedPropertyName);
} else if (Style.isCpp() && Contexts.back().ContextKind == tok::l_brace &&
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
- Left->Type = TT_DesignatedInitializerLSquare;
- } else if (IsCSharp11AttributeSpecifier) {
- Left->Type = TT_AttributeSquare;
+ Left->setType(TT_DesignatedInitializerLSquare);
+ } else if (IsCSharpAttributeSpecifier) {
+ Left->setType(TT_AttributeSquare);
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
- Left->Type = TT_ArraySubscriptLSquare;
+ Left->setType(TT_ArraySubscriptLSquare);
} else if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
// Square braces in LK_Proto can either be message field attributes:
@@ -529,13 +569,13 @@ private:
//
// In the first and the third case we want to spread the contents inside
// the square braces; in the second we want to keep them inline.
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
if (!Left->endsSequence(tok::l_square, tok::numeric_constant,
tok::equal) &&
!Left->endsSequence(tok::l_square, tok::numeric_constant,
tok::identifier) &&
!Left->endsSequence(tok::l_square, tok::colon, TT_SelectorName)) {
- Left->Type = TT_ProtoExtensionLSquare;
+ Left->setType(TT_ProtoExtensionLSquare);
BindingIncrease = 10;
}
} else if (!CppArrayTemplates && Parent &&
@@ -544,10 +584,10 @@ private:
tok::question, tok::colon, tok::kw_return,
// Should only be relevant to JavaScript:
tok::kw_default)) {
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
} else {
BindingIncrease = 10;
- Left->Type = TT_ArraySubscriptLSquare;
+ Left->setType(TT_ArraySubscriptLSquare);
}
}
@@ -559,14 +599,14 @@ private:
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
Contexts.back().InCpp11AttributeSpecifier = IsCpp11AttributeSpecifier;
- Contexts.back().InCSharpAttributeSpecifier = IsCSharp11AttributeSpecifier;
+ Contexts.back().InCSharpAttributeSpecifier = IsCSharpAttributeSpecifier;
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
if (IsCpp11AttributeSpecifier)
- CurrentToken->Type = TT_AttributeSquare;
- if (IsCSharp11AttributeSpecifier)
- CurrentToken->Type = TT_AttributeSquare;
+ CurrentToken->setType(TT_AttributeSquare);
+ if (IsCSharpAttributeSpecifier)
+ CurrentToken->setType(TT_AttributeSquare);
else if (((CurrentToken->Next &&
CurrentToken->Next->is(tok::l_paren)) ||
(CurrentToken->Previous &&
@@ -577,26 +617,26 @@ private:
// will be expanded to more tokens.
// FIXME: Do we incorrectly label ":" with this?
StartsObjCMethodExpr = false;
- Left->Type = TT_Unknown;
+ Left->setType(TT_Unknown);
}
if (StartsObjCMethodExpr && CurrentToken->Previous != Left) {
- CurrentToken->Type = TT_ObjCMethodExpr;
+ CurrentToken->setType(TT_ObjCMethodExpr);
// If we haven't seen a colon yet, make sure the last identifier
// before the r_square is tagged as a selector name component.
if (!ColonFound && CurrentToken->Previous &&
CurrentToken->Previous->is(TT_Unknown) &&
canBeObjCSelectorComponent(*CurrentToken->Previous))
- CurrentToken->Previous->Type = TT_SelectorName;
+ CurrentToken->Previous->setType(TT_SelectorName);
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
if (Parent && Parent->is(TT_PointerOrReference))
- Parent->Type = TT_BinaryOperator;
+ Parent->setType(TT_BinaryOperator);
}
// An arrow after an ObjC method expression is not a lambda arrow.
- if (CurrentToken->Type == TT_ObjCMethodExpr && CurrentToken->Next &&
- CurrentToken->Next->is(TT_LambdaArrow))
- CurrentToken->Next->Type = TT_Unknown;
+ if (CurrentToken->getType() == TT_ObjCMethodExpr &&
+ CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow))
+ CurrentToken->Next->setType(TT_Unknown);
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// FirstObjCSelectorName is set when a colon is found. This does
@@ -630,21 +670,21 @@ private:
tok::kw_using)) {
// Remember that this is a [[using ns: foo]] C++ attribute, so we
// don't add a space before the colon (unlike other colons).
- CurrentToken->Type = TT_AttributeColon;
+ CurrentToken->setType(TT_AttributeColon);
} else if (Left->isOneOf(TT_ArraySubscriptLSquare,
TT_DesignatedInitializerLSquare)) {
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
StartsObjCMethodExpr = true;
Contexts.back().ColonIsObjCMethodExpr = true;
if (Parent && Parent->is(tok::r_paren))
// FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
- Parent->Type = TT_CastRParen;
+ Parent->setType(TT_CastRParen);
}
ColonFound = true;
}
if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
!ColonFound)
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
@@ -659,7 +699,7 @@ private:
Left->ParentBracket = Contexts.back().ContextKind;
if (Contexts.back().CaretFound)
- Left->Type = TT_ObjCBlockLBrace;
+ Left->setType(TT_ObjCBlockLBrace);
Contexts.back().CaretFound = false;
ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
@@ -688,18 +728,18 @@ private:
(!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
if (Previous->Tok.getIdentifierInfo() ||
Previous->is(tok::string_literal))
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
if (CurrentToken->is(tok::colon) ||
Style.Language == FormatStyle::LK_JavaScript)
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
}
if (CurrentToken->is(tok::comma) &&
Style.Language == FormatStyle::LK_JavaScript)
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
if (!consumeToken())
return false;
}
@@ -726,7 +766,7 @@ private:
bool parseConditional() {
while (CurrentToken) {
if (CurrentToken->is(tok::colon)) {
- CurrentToken->Type = TT_ConditionalExpr;
+ CurrentToken->setType(TT_ConditionalExpr);
next();
return true;
}
@@ -738,7 +778,7 @@ private:
bool parseTemplateDeclaration() {
if (CurrentToken && CurrentToken->is(tok::less)) {
- CurrentToken->Type = TT_TemplateOpener;
+ CurrentToken->setType(TT_TemplateOpener);
next();
if (!parseAngle())
return false;
@@ -756,7 +796,7 @@ private:
case tok::plus:
case tok::minus:
if (!Tok->Previous && Line.MustBeDeclaration)
- Tok->Type = TT_ObjCMethodSpecifier;
+ Tok->setType(TT_ObjCMethodSpecifier);
break;
case tok::colon:
if (!Tok->Previous)
@@ -773,21 +813,30 @@ private:
(Contexts.size() == 1 &&
Line.MustBeDeclaration)) { // method/property declaration
Contexts.back().IsExpression = false;
- Tok->Type = TT_JsTypeColon;
+ Tok->setType(TT_JsTypeColon);
+ break;
+ }
+ } else if (Style.isCSharp()) {
+ if (Contexts.back().InCSharpAttributeSpecifier) {
+ Tok->setType(TT_AttributeColon);
+ break;
+ }
+ if (Contexts.back().ContextKind == tok::l_paren) {
+ Tok->setType(TT_CSharpNamedArgumentColon);
break;
}
}
if (Contexts.back().ColonIsDictLiteral ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
- Tok->Type = TT_DictLiteral;
+ Tok->setType(TT_DictLiteral);
if (Style.Language == FormatStyle::LK_TextProto) {
if (FormatToken *Previous = Tok->getPreviousNonComment())
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
} else if (Contexts.back().ColonIsObjCMethodExpr ||
Line.startsWith(TT_ObjCMethodSpecifier)) {
- Tok->Type = TT_ObjCMethodExpr;
+ Tok->setType(TT_ObjCMethodExpr);
const FormatToken *BeforePrevious = Tok->Previous->Previous;
// Ensure we tag all identifiers in method declarations as
// TT_SelectorName.
@@ -802,7 +851,7 @@ private:
BeforePrevious->is(tok::r_square) ||
Contexts.back().LongestObjCSelectorName == 0 ||
UnknownIdentifierInMethodDeclaration) {
- Tok->Previous->Type = TT_SelectorName;
+ Tok->Previous->setType(TT_SelectorName);
if (!Contexts.back().FirstObjCSelectorName)
Contexts.back().FirstObjCSelectorName = Tok->Previous;
else if (Tok->Previous->ColumnWidth >
@@ -814,25 +863,30 @@ private:
++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
}
} else if (Contexts.back().ColonIsForRangeExpr) {
- Tok->Type = TT_RangeBasedForLoopColon;
+ Tok->setType(TT_RangeBasedForLoopColon);
} else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
- Tok->Type = TT_BitFieldColon;
+ Tok->setType(TT_BitFieldColon);
} else if (Contexts.size() == 1 &&
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
- if (Tok->getPreviousNonComment()->isOneOf(tok::r_paren,
- tok::kw_noexcept))
- Tok->Type = TT_CtorInitializerColon;
- else
- Tok->Type = TT_InheritanceColon;
+ FormatToken *Prev = Tok->getPreviousNonComment();
+ if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ Tok->setType(TT_CtorInitializerColon);
+ else if (Prev->is(tok::kw_try)) {
+ // Member initializer list within function try block.
+ FormatToken *PrevPrev = Prev->getPreviousNonComment();
+ if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ Tok->setType(TT_CtorInitializerColon);
+ } else
+ Tok->setType(TT_InheritanceColon);
} else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
(Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
(canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next &&
Tok->Next->Next->is(tok::colon)))) {
// This handles a special macro in ObjC code where selectors including
// the colon are passed as macro arguments.
- Tok->Type = TT_ObjCMethodExpr;
+ Tok->setType(TT_ObjCMethodExpr);
} else if (Contexts.back().ContextKind == tok::l_paren) {
- Tok->Type = TT_InlineASMColon;
+ Tok->setType(TT_InlineASMColon);
}
break;
case tok::pipe:
@@ -841,7 +895,7 @@ private:
// intersection types, respectively.
if (Style.Language == FormatStyle::LK_JavaScript &&
!Contexts.back().IsExpression)
- Tok->Type = TT_JsTypeOperator;
+ Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
case tok::kw_while:
@@ -877,9 +931,9 @@ private:
if (Tok->Previous && Tok->Previous->is(tok::r_paren) &&
Tok->Previous->MatchingParen &&
Tok->Previous->MatchingParen->is(TT_OverloadedOperatorLParen)) {
- Tok->Previous->Type = TT_OverloadedOperator;
- Tok->Previous->MatchingParen->Type = TT_OverloadedOperator;
- Tok->Type = TT_OverloadedOperatorLParen;
+ Tok->Previous->setType(TT_OverloadedOperator);
+ Tok->Previous->MatchingParen->setType(TT_OverloadedOperator);
+ Tok->setType(TT_OverloadedOperatorLParen);
}
if (!parseParens())
@@ -898,15 +952,15 @@ private:
case tok::l_brace:
if (Style.Language == FormatStyle::LK_TextProto) {
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->Type != TT_DictLiteral)
- Previous->Type = TT_SelectorName;
+ if (Previous && Previous->getType() != TT_DictLiteral)
+ Previous->setType(TT_SelectorName);
}
if (!parseBrace())
return false;
break;
case tok::less:
if (parseAngle()) {
- Tok->Type = TT_TemplateOpener;
+ Tok->setType(TT_TemplateOpener);
// In TT_Proto, we must distinguish between:
// map<key, value>
// msg < item: data >
@@ -915,13 +969,13 @@ private:
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Tok->Previous &&
Tok->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
- Tok->Type = TT_DictLiteral;
+ Tok->setType(TT_DictLiteral);
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->Type != TT_DictLiteral)
- Previous->Type = TT_SelectorName;
+ if (Previous && Previous->getType() != TT_DictLiteral)
+ Previous->setType(TT_SelectorName);
}
} else {
- Tok->Type = TT_BinaryOperator;
+ Tok->setType(TT_BinaryOperator);
NonTemplateLess.insert(Tok);
CurrentToken = Tok;
next();
@@ -937,7 +991,7 @@ private:
break;
case tok::greater:
if (Style.Language != FormatStyle::LK_TextProto)
- Tok->Type = TT_BinaryOperator;
+ Tok->setType(TT_BinaryOperator);
if (Tok->Previous && Tok->Previous->is(TT_TemplateCloser))
Tok->SpacesRequiredBefore = 1;
break;
@@ -948,20 +1002,29 @@ private:
while (CurrentToken &&
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
- CurrentToken->Type = TT_PointerOrReference;
+ CurrentToken->setType(TT_PointerOrReference);
consumeToken();
+ if (CurrentToken && CurrentToken->is(tok::comma) &&
+ CurrentToken->Previous->isNot(tok::kw_operator))
+ break;
if (CurrentToken && CurrentToken->Previous->isOneOf(
TT_BinaryOperator, TT_UnaryOperator, tok::comma,
tok::star, tok::arrow, tok::amp, tok::ampamp))
- CurrentToken->Previous->Type = TT_OverloadedOperator;
- }
- if (CurrentToken) {
- CurrentToken->Type = TT_OverloadedOperatorLParen;
- if (CurrentToken->Previous->is(TT_BinaryOperator))
- CurrentToken->Previous->Type = TT_OverloadedOperator;
+ CurrentToken->Previous->setType(TT_OverloadedOperator);
}
+ if (CurrentToken && CurrentToken->is(tok::l_paren))
+ CurrentToken->setType(TT_OverloadedOperatorLParen);
+ if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator))
+ CurrentToken->Previous->setType(TT_OverloadedOperator);
break;
case tok::question:
+ if (Tok->is(TT_CSharpNullConditionalLSquare)) {
+ if (!parseSquare())
+ return false;
+ break;
+ }
+ if (Tok->isOneOf(TT_CSharpNullConditional, TT_CSharpNullCoalescing))
+ break;
if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
tok::r_brace)) {
@@ -969,7 +1032,7 @@ private:
// types (fields, parameters), e.g.
// function(x?: string, y?) {...}
// class X { y?; }
- Tok->Type = TT_JsTypeOptionalQuestion;
+ Tok->setType(TT_JsTypeOptionalQuestion);
break;
}
// Declarations cannot be conditional expressions, this can only be part
@@ -977,6 +1040,18 @@ private:
if (Line.MustBeDeclaration && !Contexts.back().IsExpression &&
Style.Language == FormatStyle::LK_JavaScript)
break;
+ if (Style.isCSharp()) {
+ // `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
+ // nullable types.
+ // Line.MustBeDeclaration will be true for `Type? name;`.
+ if ((!Contexts.back().IsExpression && Line.MustBeDeclaration) ||
+ (Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::greater)) ||
+ (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
+ Tok->Next->Next->is(tok::equal))) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+ }
parseConditional();
break;
case tok::kw_template:
@@ -984,9 +1059,9 @@ private:
break;
case tok::comma:
if (Contexts.back().InCtorInitializer)
- Tok->Type = TT_CtorInitializerComma;
+ Tok->setType(TT_CtorInitializerComma);
else if (Contexts.back().InInheritanceList)
- Tok->Type = TT_InheritanceComma;
+ Tok->setType(TT_InheritanceComma);
else if (Contexts.back().FirstStartOfName &&
(Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
@@ -1000,6 +1075,11 @@ private:
Keywords.kw___has_include_next)) {
parseHasInclude();
}
+ if (Style.isCSharp() && Tok->is(Keywords.kw_where) && Tok->Next &&
+ Tok->Next->isNot(tok::l_paren)) {
+ Tok->setType(TT_CSharpGenericTypeConstraint);
+ parseCSharpGenericTypeConstraint();
+ }
break;
default:
break;
@@ -1007,6 +1087,35 @@ private:
return true;
}
+ void parseCSharpGenericTypeConstraint() {
+ int OpenAngleBracketsCount = 0;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::less)) {
+ // parseAngle is too greedy and will consume the whole line.
+ CurrentToken->setType(TT_TemplateOpener);
+ ++OpenAngleBracketsCount;
+ next();
+ } else if (CurrentToken->is(tok::greater)) {
+ CurrentToken->setType(TT_TemplateCloser);
+ --OpenAngleBracketsCount;
+ next();
+ } else if (CurrentToken->is(tok::comma) && OpenAngleBracketsCount == 0) {
+ // We allow line breaks after GenericTypeConstraintCommas, so do not flag
+ // commas inside generics as GenericTypeConstraintCommas.
+ CurrentToken->setType(TT_CSharpGenericTypeConstraintComma);
+ next();
+ } else if (CurrentToken->is(Keywords.kw_where)) {
+ CurrentToken->setType(TT_CSharpGenericTypeConstraint);
+ next();
+ } else if (CurrentToken->is(tok::colon)) {
+ CurrentToken->setType(TT_CSharpGenericTypeConstraintColon);
+ next();
+ } else {
+ next();
+ }
+ }
+ }
+
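parseCSharpGenericTypeConstraint() only treats a comma as a constraint separator when it is not nested inside <...>, which is why it keeps its own angle-bracket counter instead of calling the greedier parseAngle(). A standalone sketch of that comma classification over a character stream (an approximation; the real code works on annotated tokens):

#include <cstddef>
#include <string_view>
#include <vector>

// Return the positions of commas that separate `where` constraints, i.e.
// commas at angle-bracket depth zero. Commas inside generic argument lists
// such as IDictionary<TKey, TValue> are skipped.
static std::vector<std::size_t> constraintCommas(std::string_view Clause) {
  std::vector<std::size_t> Result;
  int AngleDepth = 0;
  for (std::size_t I = 0; I < Clause.size(); ++I) {
    if (Clause[I] == '<')
      ++AngleDepth;
    else if (Clause[I] == '>')
      --AngleDepth;
    else if (Clause[I] == ',' && AngleDepth == 0)
      Result.push_back(I);
  }
  return Result;
}

// For "where T : IDictionary<K, V>, new()" only the comma before "new()" is
// reported as a constraint separator.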
void parseIncludeDirective() {
if (CurrentToken && CurrentToken->is(tok::less)) {
next();
@@ -1015,7 +1124,7 @@ private:
// literals.
if (CurrentToken->isNot(tok::comment) &&
!CurrentToken->TokenText.startswith("//"))
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1027,7 +1136,7 @@ private:
// warning or error.
next();
while (CurrentToken) {
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1041,7 +1150,7 @@ private:
next(); // Consume first token (so we fix leading whitespace).
while (CurrentToken) {
if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1068,7 +1177,7 @@ private:
// Treat these like C++ #include directives.
while (CurrentToken) {
// Tokens cannot be comments here.
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
return LT_ImportStatement;
@@ -1228,8 +1337,8 @@ private:
TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral,
TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow, TT_NamespaceMacro,
TT_OverloadedOperator, TT_RegexLiteral, TT_TemplateString,
- TT_ObjCStringLiteral))
- CurrentToken->Type = TT_Unknown;
+ TT_ObjCStringLiteral, TT_UntouchableMacroFunc))
+ CurrentToken->setType(TT_Unknown);
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
CurrentToken->FakeLParens.clear();
@@ -1317,7 +1426,7 @@ private:
if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
Previous->Previous && Previous->Previous->isNot(tok::equal))
- Previous->Type = TT_PointerOrReference;
+ Previous->setType(TT_PointerOrReference);
}
}
} else if (Current.is(tok::lessless) &&
@@ -1339,7 +1448,7 @@ private:
for (FormatToken *Previous = Current.Previous;
Previous && Previous->isOneOf(tok::star, tok::amp);
Previous = Previous->Previous)
- Previous->Type = TT_PointerOrReference;
+ Previous->setType(TT_PointerOrReference);
if (Line.MustBeDeclaration && !Contexts.front().InCtorInitializer)
Contexts.back().IsExpression = false;
} else if (Current.is(tok::kw_new)) {
@@ -1423,19 +1532,36 @@ private:
// The token type is already known.
return;
+ if (Style.isCSharp() && CurrentToken->is(tok::question)) {
+ if (CurrentToken->TokenText == "??") {
+ Current.setType(TT_CSharpNullCoalescing);
+ return;
+ }
+ if (CurrentToken->TokenText == "?.") {
+ Current.setType(TT_CSharpNullConditional);
+ return;
+ }
+ if (CurrentToken->TokenText == "?[") {
+ Current.setType(TT_CSharpNullConditionalLSquare);
+ return;
+ }
+ }
+
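Because the lexer has already merged the question-mark pairs into single tokens, the annotator can classify C#'s null operators purely from the merged spelling. A reduced sketch of that dispatch, with a hypothetical enum in place of clang-format's TokenType values:

#include <string_view>

enum class CSharpNullOp { None, Coalescing, Conditional, ConditionalSubscript };

// "??" -> null-coalescing, "?." -> null-conditional member access,
// "?[" -> null-conditional element access; anything else is not a null op.
static CSharpNullOp classifyNullOperator(std::string_view Spelling) {
  if (Spelling == "??")
    return CSharpNullOp::Coalescing;
  if (Spelling == "?.")
    return CSharpNullOp::Conditional;
  if (Spelling == "?[")
    return CSharpNullOp::ConditionalSubscript;
  return CSharpNullOp::None;
}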
if (Style.Language == FormatStyle::LK_JavaScript) {
if (Current.is(tok::exclaim)) {
if (Current.Previous &&
- (Current.Previous->isOneOf(tok::identifier, tok::kw_namespace,
- tok::r_paren, tok::r_square,
- tok::r_brace) ||
+ (Keywords.IsJavaScriptIdentifier(
+ *Current.Previous, /* AcceptIdentifierName= */ true) ||
+ Current.Previous->isOneOf(
+ tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
+ Keywords.kw_type, Keywords.kw_get, Keywords.kw_set) ||
Current.Previous->Tok.isLiteral())) {
- Current.Type = TT_JsNonNullAssertion;
+ Current.setType(TT_JsNonNullAssertion);
return;
}
if (Current.Next &&
Current.Next->isOneOf(TT_BinaryOperator, Keywords.kw_as)) {
- Current.Type = TT_JsNonNullAssertion;
+ Current.setType(TT_JsNonNullAssertion);
return;
}
}
@@ -1445,11 +1571,11 @@ private:
// function declaration have been found. In this case, 'Current' is a
// trailing token of this declaration and thus cannot be a name.
if (Current.is(Keywords.kw_instanceof)) {
- Current.Type = TT_BinaryOperator;
+ Current.setType(TT_BinaryOperator);
} else if (isStartOfName(Current) &&
(!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
Contexts.back().FirstStartOfName = &Current;
- Current.Type = TT_StartOfName;
+ Current.setType(TT_StartOfName);
} else if (Current.is(tok::semi)) {
// Reset FirstStartOfName after finding a semicolon so that a for loop
// with multiple increment statements is not confused with a for loop
@@ -1459,57 +1585,57 @@ private:
AutoFound = true;
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
- Current.Type = TT_LambdaArrow;
+ Current.setType(TT_LambdaArrow);
} else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
Current.NestingLevel == 0 &&
!Current.Previous->is(tok::kw_operator)) {
// not auto operator->() -> xxx;
- Current.Type = TT_TrailingReturnArrow;
+ Current.setType(TT_TrailingReturnArrow);
} else if (isDeductionGuide(Current)) {
// Deduction guides trailing arrow " A(...) -> A<T>;".
- Current.Type = TT_TrailingReturnArrow;
+ Current.setType(TT_TrailingReturnArrow);
} else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
- Current.Type = determineStarAmpUsage(Current,
- Contexts.back().CanBeExpression &&
- Contexts.back().IsExpression,
- Contexts.back().InTemplateArgument);
+ Current.setType(determineStarAmpUsage(
+ Current,
+ Contexts.back().CanBeExpression && Contexts.back().IsExpression,
+ Contexts.back().InTemplateArgument));
} else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
- Current.Type = determinePlusMinusCaretUsage(Current);
+ Current.setType(determinePlusMinusCaretUsage(Current));
if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
Contexts.back().CaretFound = true;
} else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
- Current.Type = determineIncrementUsage(Current);
+ Current.setType(determineIncrementUsage(Current));
} else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
- Current.Type = TT_UnaryOperator;
+ Current.setType(TT_UnaryOperator);
} else if (Current.is(tok::question)) {
if (Style.Language == FormatStyle::LK_JavaScript &&
Line.MustBeDeclaration && !Contexts.back().IsExpression) {
// In JavaScript, `interface X { foo?(): bar; }` is an optional method
// on the interface, not a ternary expression.
- Current.Type = TT_JsTypeOptionalQuestion;
+ Current.setType(TT_JsTypeOptionalQuestion);
} else {
- Current.Type = TT_ConditionalExpr;
+ Current.setType(TT_ConditionalExpr);
}
} else if (Current.isBinaryOperator() &&
(!Current.Previous || Current.Previous->isNot(tok::l_square)) &&
(!Current.is(tok::greater) &&
Style.Language != FormatStyle::LK_TextProto)) {
- Current.Type = TT_BinaryOperator;
+ Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::comment)) {
if (Current.TokenText.startswith("/*")) {
if (Current.TokenText.endswith("*/"))
- Current.Type = TT_BlockComment;
+ Current.setType(TT_BlockComment);
else
// The lexer has for some reason determined a comment here. But we
// cannot really handle it, if it isn't properly terminated.
Current.Tok.setKind(tok::unknown);
} else {
- Current.Type = TT_LineComment;
+ Current.setType(TT_LineComment);
}
} else if (Current.is(tok::r_paren)) {
if (rParenEndsCast(Current))
- Current.Type = TT_CastRParen;
+ Current.setType(TT_CastRParen);
if (Current.MatchingParen && Current.Next &&
!Current.Next->isBinaryOperator() &&
!Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
@@ -1524,7 +1650,7 @@ private:
BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
(!BeforeParen->Previous ||
BeforeParen->Previous->ClosesTemplateDeclaration))
- Current.Type = TT_FunctionAnnotationRParen;
+ Current.setType(TT_FunctionAnnotationRParen);
}
}
} else if (Current.is(tok::at) && Current.Next &&
@@ -1536,10 +1662,10 @@ private:
case tok::objc_interface:
case tok::objc_implementation:
case tok::objc_protocol:
- Current.Type = TT_ObjCDecl;
+ Current.setType(TT_ObjCDecl);
break;
case tok::objc_property:
- Current.Type = TT_ObjCProperty;
+ Current.setType(TT_ObjCProperty);
break;
default:
break;
@@ -1548,11 +1674,11 @@ private:
FormatToken *PreviousNoComment = Current.getPreviousNonComment();
if (PreviousNoComment &&
PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
- Current.Type = TT_DesignatedInitializerPeriod;
+ Current.setType(TT_DesignatedInitializerPeriod);
else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
Current.Previous->isOneOf(TT_JavaAnnotation,
TT_LeadingJavaAnnotation)) {
- Current.Type = Current.Previous->Type;
+ Current.setType(Current.Previous->getType());
}
} else if (canBeObjCSelectorComponent(Current) &&
// FIXME(bug 36976): ObjC return types shouldn't use
@@ -1565,7 +1691,7 @@ private:
// This is the first part of an Objective-C selector name. (If there's no
// colon after this, this is the only place which annotates the identifier
// as a selector.)
- Current.Type = TT_SelectorName;
+ Current.setType(TT_SelectorName);
} else if (Current.isOneOf(tok::identifier, tok::kw_const,
tok::kw_noexcept) &&
Current.Previous &&
@@ -1573,7 +1699,7 @@ private:
Line.MightBeFunctionDecl && Contexts.size() == 1) {
// Line.MightBeFunctionDecl can only be true after the parentheses of a
// function declaration have been found.
- Current.Type = TT_TrailingAnnotation;
+ Current.setType(TT_TrailingAnnotation);
} else if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
Current.Previous) {
@@ -1582,13 +1708,13 @@ private:
const FormatToken &AtToken = *Current.Previous;
const FormatToken *Previous = AtToken.getPreviousNonComment();
if (!Previous || Previous->is(TT_LeadingJavaAnnotation))
- Current.Type = TT_LeadingJavaAnnotation;
+ Current.setType(TT_LeadingJavaAnnotation);
else
- Current.Type = TT_JavaAnnotation;
+ Current.setType(TT_JavaAnnotation);
} else if (Current.Previous->is(tok::period) &&
Current.Previous->isOneOf(TT_JavaAnnotation,
TT_LeadingJavaAnnotation)) {
- Current.Type = Current.Previous->Type;
+ Current.setType(Current.Previous->getType());
}
}
}
@@ -1640,8 +1766,9 @@ private:
/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
- // C-style casts are only used in C++ and Java.
- if (!Style.isCpp() && Style.Language != FormatStyle::LK_Java)
+ // C-style casts are only used in C++, C# and Java.
+ if (!Style.isCSharp() && !Style.isCpp() &&
+ Style.Language != FormatStyle::LK_Java)
return false;
// Empty parens aren't casts and there are no casts at the end of the line.
@@ -1676,6 +1803,10 @@ private:
if (Tok.Next->is(tok::question))
return false;
+ // `foreach((A a, B b) in someList)` should not be seen as a cast.
+ if (Tok.Next->is(Keywords.kw_in) && Style.isCSharp())
+ return false;
+
// Functions which end with decorations like volatile, noexcept are unlikely
// to be casts.
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
@@ -1749,6 +1880,10 @@ private:
if (Style.Language == FormatStyle::LK_JavaScript)
return TT_BinaryOperator;
+ // && in C# must be a binary operator.
+ if (Style.isCSharp() && Tok.is(tok::ampamp))
+ return TT_BinaryOperator;
+
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
@@ -1800,14 +1935,16 @@ private:
return TT_BinaryOperator;
// "&&(" is quite unlikely to be two successive unary "&".
- if (Tok.is(tok::ampamp) && NextToken && NextToken->is(tok::l_paren))
+ if (Tok.is(tok::ampamp) && NextToken->is(tok::l_paren))
return TT_BinaryOperator;
// This catches some cases where evaluation order is used as control flow:
// aaa && aaa->f();
- const FormatToken *NextNextToken = NextToken->getNextNonComment();
- if (NextNextToken && NextNextToken->is(tok::arrow))
- return TT_BinaryOperator;
+ if (NextToken->Tok.isAnyIdentifier()) {
+ const FormatToken *NextNextToken = NextToken->getNextNonComment();
+ if (NextNextToken && NextNextToken->is(tok::arrow))
+ return TT_BinaryOperator;
+ }
// It is very unlikely that we are going to find a pointer or reference type
// definition on the RHS of an assignment.
@@ -2281,7 +2418,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
bool InFunctionDecl = Line.MightBeFunctionDecl;
while (Current) {
if (isFunctionDeclarationName(*Current, Line))
- Current->Type = TT_FunctionDeclarationName;
+ Current->setType(TT_FunctionDeclarationName);
if (Current->is(TT_LineComment)) {
if (Current->Previous->BlockKind == BK_BracedInit &&
Current->Previous->opensScope())
@@ -2707,9 +2844,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Left.Previous &&
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
tok::l_square));
+ // Ensure right pointer alignment with ellipsis, e.g. int *...P
+ if (Left.is(tok::ellipsis) && Left.Previous &&
+ Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
+ return Style.PointerAlignment != FormatStyle::PAS_Right;
+
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
- if (Right.is(tok::star) && Left.is(tok::star))
+ if (Left.is(tok::star) && Right.isOneOf(tok::star, tok::amp, tok::ampamp))
return false;
if (Right.isOneOf(tok::star, tok::amp, tok::ampamp)) {
const FormatToken *Previous = &Left;
@@ -2738,9 +2880,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// operator std::Foo*()
// operator C<T>::D<U>*()
// dependent on PointerAlignment style.
- if (Previous && (Previous->endsSequence(tok::kw_operator) ||
- Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
- Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
+ if (Previous &&
+ (Previous->endsSequence(tok::kw_operator) ||
+ Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
+ Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
return (Style.PointerAlignment != FormatStyle::PAS_Left);
}
const auto SpaceRequiredForArrayInitializerLSquare =
@@ -2786,10 +2929,19 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// No whitespace in x(/*foo=*/1), except for JavaScript.
return Style.Language == FormatStyle::LK_JavaScript ||
!Left.TokenText.endswith("=*/");
+
+ // Space between template and attribute.
+ // e.g. template <typename T> [[nodiscard]] ...
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
+ return true;
if (Right.is(tok::l_paren)) {
if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
(Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
return true;
+ if (Style.SpaceBeforeParens ==
+ FormatStyle::SBPO_ControlStatementsExceptForEachMacros &&
+ Left.is(TT_ForEachMacro))
+ return false;
return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
(Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while,
@@ -2838,7 +2990,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
// Objective-C dictionary literal -> no space before closing brace.
return false;
- if (Right.Type == TT_TrailingAnnotation &&
+ if (Right.getType() == TT_TrailingAnnotation &&
Right.isOneOf(tok::amp, tok::ampamp) &&
Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
(!Right.Next || Right.Next->is(tok::semi)))
@@ -2886,13 +3038,83 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
return Right.WhitespaceRange.getEnd() != Right.WhitespaceRange.getBegin();
} else if (Style.isCSharp()) {
+ // Require spaces around '{' and before '}' unless they appear in
+ // interpolated strings. Interpolated strings are merged into a single token
+ // so cannot have spaces inserted by this function.
+
+ // No space between 'this' and '['
+ if (Left.is(tok::kw_this) && Right.is(tok::l_square))
+ return false;
+
+ // No space between 'new' and '('
+ if (Left.is(tok::kw_new) && Right.is(tok::l_paren))
+ return false;
+
+ // Space before { (including space within '{ {').
+ if (Right.is(tok::l_brace))
+ return true;
+
+ // Spaces inside braces.
+ if (Left.is(tok::l_brace) && Right.isNot(tok::r_brace))
+ return true;
+
+ if (Left.isNot(tok::l_brace) && Right.is(tok::r_brace))
+ return true;
+
+ // Spaces around '=>'.
+ if (Left.is(TT_JsFatArrow) || Right.is(TT_JsFatArrow))
+ return true;
+
+ // No spaces around attribute target colons
+ if (Left.is(TT_AttributeColon) || Right.is(TT_AttributeColon))
+ return false;
+
// space between type and variable e.g. Dictionary<string,string> foo;
if (Left.is(TT_TemplateCloser) && Right.is(TT_StartOfName))
return true;
+
+ // spaces inside square brackets.
+ if (Left.is(tok::l_square) || Right.is(tok::r_square))
+ return Style.SpacesInSquareBrackets;
+
+ // No space before ? in nullable types.
+ if (Right.is(TT_CSharpNullable))
+ return false;
+
+ // Require space after ? in nullable types except in generics and casts.
+ if (Left.is(TT_CSharpNullable))
+ return !Right.isOneOf(TT_TemplateCloser, tok::r_paren);
+
+ // No space before or after '?.'.
+ if (Left.is(TT_CSharpNullConditional) || Right.is(TT_CSharpNullConditional))
+ return false;
+
+ // Space before and after '??'.
+ if (Left.is(TT_CSharpNullCoalescing) || Right.is(TT_CSharpNullCoalescing))
+ return true;
+
+ // No space before '?['.
+ if (Right.is(TT_CSharpNullConditionalLSquare))
+ return false;
+
+ // No space between consecutive commas '[,,]'.
+ if (Left.is(tok::comma) && Right.is(tok::comma))
+ return false;
+
+ // Possible space inside `?[ 0 ]`.
+ if (Left.is(TT_CSharpNullConditionalLSquare))
+ return Style.SpacesInSquareBrackets;
+
+ // space after var in `var (key, value)`
+ if (Left.is(Keywords.kw_var) && Right.is(tok::l_paren))
+ return true;
+
// space between keywords and paren e.g. "using ("
if (Right.is(tok::l_paren))
- if (Left.is(tok::kw_using))
- return spaceRequiredBeforeParens(Left);
+ if (Left.isOneOf(tok::kw_using, Keywords.kw_async, Keywords.kw_when,
+ Keywords.kw_lock))
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatements ||
+ spaceRequiredBeforeParens(Right);
} else if (Style.Language == FormatStyle::LK_JavaScript) {
if (Left.is(TT_JsFatArrow))
return true;
@@ -2912,9 +3134,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
(Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
return false;
// In tagged template literals ("html`bar baz`"), there is no space between
- // the tag identifier and the template string. getIdentifierInfo makes sure
- // that the identifier is not a pseudo keyword like `yield`, either.
- if (Left.is(tok::identifier) && Keywords.IsJavaScriptIdentifier(Left) &&
+ // the tag identifier and the template string.
+ if (Keywords.IsJavaScriptIdentifier(Left,
+ /* AcceptIdentifierName= */ false) &&
Right.is(TT_TemplateString))
return false;
if (Right.is(tok::star) &&
@@ -3043,6 +3265,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpacesInContainerLiterals;
if (Right.is(TT_AttributeColon))
return false;
+ if (Right.is(TT_CSharpNamedArgumentColon))
+ return false;
return true;
}
if (Left.is(TT_UnaryOperator)) {
@@ -3093,12 +3317,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
if (Right.is(tok::coloncolon) &&
!Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
+ // Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
- Style.Standard < FormatStyle::LS_Cpp11) ||
+ (Style.Standard < FormatStyle::LS_Cpp11 || Style.SpacesInAngles)) ||
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
- tok::kw___super, TT_TemplateCloser,
- TT_TemplateOpener)) ||
- (Left.is(tok ::l_paren) && Style.SpacesInParentheses);
+ tok::kw___super, TT_TemplateOpener,
+ TT_TemplateCloser)) ||
+ (Left.is(tok::l_paren) && Style.SpacesInParentheses);
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return Style.SpacesInAngles;
// Space before TT_StructuredBindingLSquare.
@@ -3135,13 +3360,67 @@ static bool isAllmanBrace(const FormatToken &Tok) {
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
+// Returns 'true' if 'Tok' is a function argument.
+static bool IsFunctionArgument(const FormatToken &Tok) {
+ return Tok.MatchingParen && Tok.MatchingParen->Next &&
+ Tok.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren);
+}
+
+static bool
+isItAnEmptyLambdaAllowed(const FormatToken &Tok,
+ FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ return Tok.Children.empty() && ShortLambdaOption != FormatStyle::SLS_None;
+}
+
+static bool
+isItAInlineLambdaAllowed(const FormatToken &Tok,
+ FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ return (ShortLambdaOption == FormatStyle::SLS_Inline &&
+ IsFunctionArgument(Tok)) ||
+ (ShortLambdaOption == FormatStyle::SLS_All);
+}
+
+static bool isOneChildWithoutMustBreakBefore(const FormatToken &Tok) {
+ if (Tok.Children.size() != 1)
+ return false;
+ FormatToken *curElt = Tok.Children[0]->First;
+ while (curElt) {
+ if (curElt->MustBreakBefore)
+ return false;
+ curElt = curElt->Next;
+ }
+ return true;
+}
+static bool isAllmanLambdaBrace(const FormatToken &Tok) {
+ return (Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
+}
+
+static bool isAllmanBraceIncludedBreakableLambda(
+ const FormatToken &Tok, FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ if (!isAllmanLambdaBrace(Tok))
+ return false;
+
+ if (isItAnEmptyLambdaAllowed(Tok, ShortLambdaOption))
+ return false;
+
+ return !isItAInlineLambdaAllowed(Tok, ShortLambdaOption) ||
+ !isOneChildWithoutMustBreakBefore(Tok);
+}
+
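The lambda-brace helpers above boil down to one decision: an Allman-style lambda brace must break unless the configured short-lambda style allows this particular lambda (empty, inline call argument, or all) to stay on one line. A condensed standalone version of that predicate over plain booleans rather than FormatToken queries; the enum is a simplified stand-in for FormatStyle::ShortLambdaStyle:

enum class ShortLambdaStyle { None, Empty, Inline, All };

// Should an Allman-wrapped lambda brace be forced onto multiple lines?
// IsEmpty: the lambda body has no statements.
// IsFunctionArgument: the lambda appears directly as a call argument.
// SingleUnbreakableChild: the body is one child line with no forced breaks.
static bool mustBreakLambdaBrace(ShortLambdaStyle Style, bool IsEmpty,
                                 bool IsFunctionArgument,
                                 bool SingleUnbreakableChild) {
  // Empty lambdas may stay on one line under every style except SLS_None.
  if (IsEmpty && Style != ShortLambdaStyle::None)
    return false;
  // SLS_All, or SLS_Inline for call arguments, also keeps single-child
  // bodies without mandatory breaks on one line.
  bool InlineAllowed =
      Style == ShortLambdaStyle::All ||
      (Style == ShortLambdaStyle::Inline && IsFunctionArgument);
  return !(InlineAllowed && SingleUnbreakableChild);
}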
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isCSharp()) {
+ if (Right.is(TT_CSharpNamedArgumentColon) ||
+ Left.is(TT_CSharpNamedArgumentColon))
+ return false;
+ if (Right.is(TT_CSharpGenericTypeConstraint))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
Left.Previous->is(tok::string_literal))
@@ -3164,6 +3443,25 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// JavaScript top-level enum key/value pairs are put on separate lines
// instead of bin-packing.
return true;
+ if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
+ Left.Previous->is(TT_JsFatArrow)) {
+ // JS arrow function (=> {...}).
+ switch (Style.AllowShortLambdasOnASingleLine) {
+ case FormatStyle::SLS_All:
+ return false;
+ case FormatStyle::SLS_None:
+ return true;
+ case FormatStyle::SLS_Empty:
+ return !Left.Children.empty();
+ case FormatStyle::SLS_Inline:
+ // Allow one-lining inline (e.g. in function call args) and empty arrow
+ // functions.
+ return (Left.NestingLevel == 0 && Line.Level == 0) &&
+ !Left.Children.empty();
+ }
+ llvm_unreachable("Unknown FormatStyle::ShortLambdaStyle enum");
+ }
+
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
!Left.Children.empty())
// Support AllowShortFunctionsOnASingleLine for JavaScript.
@@ -3251,6 +3549,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(TT_InlineASMBrace))
return Right.HasUnescapedNewline;
+
+ auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
+ if (Style.BraceWrapping.BeforeLambdaBody &&
+ (isAllmanBraceIncludedBreakableLambda(Left, ShortLambdaOption) ||
+ isAllmanBraceIncludedBreakableLambda(Right, ShortLambdaOption))) {
+ return true;
+ }
+
if (isAllmanBrace(Left) || isAllmanBrace(Right))
return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
(Line.startsWith(tok::kw_typedef, tok::kw_enum) &&
@@ -3262,8 +3568,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Left.is(TT_LambdaLBrace)) {
- if (Left.MatchingParen && Left.MatchingParen->Next &&
- Left.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren) &&
+ if (IsFunctionArgument(Left) &&
Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline)
return false;
@@ -3274,13 +3579,6 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
- // Put multiple C# attributes on a new line.
- if (Style.isCSharp() &&
- ((Left.is(TT_AttributeSquare) && Left.is(tok::r_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare) &&
- Right.is(tok::l_square))))
- return true;
-
// Put multiple Java annotation on a new line.
if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
@@ -3407,9 +3705,15 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
-
// Language-specific stuff.
- if (Style.Language == FormatStyle::LK_Java) {
+ if (Style.isCSharp()) {
+ if (Left.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon) ||
+ Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon))
+ return false;
+ // Only break after commas for generic type constraints.
+ if (Line.First->is(TT_CSharpGenericTypeConstraint))
+ return Left.is(TT_CSharpGenericTypeConstraintComma);
+ } else if (Style.Language == FormatStyle::LK_Java) {
if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
Keywords.kw_implements))
return false;
@@ -3623,7 +3927,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::kw___attribute) ||
(Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
- return true;
+ return !Left.is(TT_AttributeSquare);
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -3668,11 +3972,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
(Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
return false;
+
+ auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
+ if (Style.BraceWrapping.BeforeLambdaBody) {
+ if (isAllmanLambdaBrace(Left))
+ return !isItAnEmptyLambdaAllowed(Left, ShortLambdaOption);
+ if (isAllmanLambdaBrace(Right))
+ return !isItAnEmptyLambdaAllowed(Right, ShortLambdaOption);
+ }
+
return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct, tok::comment) ||
Right.isMemberAccess() ||
Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
tok::colon, tok::l_square, tok::at) ||
+ (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
(Left.is(tok::l_paren) && !Right.is(tok::r_paren)) ||
@@ -3685,9 +3999,9 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
while (Tok) {
llvm::errs() << " M=" << Tok->MustBreakBefore
<< " C=" << Tok->CanBreakBefore
- << " T=" << getTokenTypeName(Tok->Type)
+ << " T=" << getTokenTypeName(Tok->getType())
<< " S=" << Tok->SpacesRequiredBefore
- << " B=" << Tok->BlockParameterCount
+ << " F=" << Tok->Finalized << " B=" << Tok->BlockParameterCount
<< " BK=" << Tok->BlockKind << " P=" << Tok->SplitPenalty
<< " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
<< " PPK=" << Tok->PackingKind << " FakeLParens=";
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index fec85f1174da..22f27a668dcc 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -64,6 +64,8 @@ public:
}
if (static_cast<int>(Indent) + Offset >= 0)
Indent += Offset;
+ if (Line.First->is(TT_CSharpGenericTypeConstraint))
+ Indent = Line.Level * Style.IndentWidth + Style.ContinuationIndentWidth;
}
/// Update the indent state given that \p Line indent should be
@@ -340,21 +342,6 @@ private:
? 1
: 0;
}
- // Try to merge either empty or one-line block if is precedeed by control
- // statement token
- if (TheLine->First->is(tok::l_brace) && TheLine->First == TheLine->Last &&
- I != AnnotatedLines.begin() &&
- I[-1]->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for)) {
- unsigned MergedLines = 0;
- if (Style.AllowShortBlocksOnASingleLine != FormatStyle::SBS_Never) {
- MergedLines = tryMergeSimpleBlock(I - 1, E, Limit);
- // If we managed to merge the block, discard the first merged line
- // since we are merging starting from I.
- if (MergedLines > 0)
- --MergedLines;
- }
- return MergedLines;
- }
// Don't merge block with left brace wrapped after ObjC special blocks
if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
I[-1]->First->is(tok::at) && I[-1]->First->Next) {
@@ -404,7 +391,7 @@ private:
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
}
- if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do)) {
return Style.AllowShortLoopsOnASingleLine
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
@@ -449,7 +436,10 @@ private:
return 0;
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
- if (Line.Last->isNot(tok::r_paren))
+ if (!Line.First->is(tok::kw_do) && Line.Last->isNot(tok::r_paren))
+ return 0;
+ // Only merge do while if do is the only statement on the line.
+ if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
return 0;
if (1 + I[1]->Last->TotalLength > Limit)
return 0;
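
A sketch of what the kw_do case above permits: when AllowShortLoopsOnASingleLine is enabled and the line holds nothing but the do, its single-statement body may be merged onto it; the trailing while line is not part of the merge. Spacing is illustrative.

  // AllowShortLoopsOnASingleLine: true
  do ++Count;
  while (Count < Limit);

  // AllowShortLoopsOnASingleLine: false
  do
    ++Count;
  while (Count < Limit);
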
@@ -593,9 +583,10 @@ private:
FormatToken *RecordTok = Line.First;
// Skip record modifiers.
while (RecordTok->Next &&
- RecordTok->isOneOf(tok::kw_typedef, tok::kw_export,
- Keywords.kw_declare, Keywords.kw_abstract,
- tok::kw_default))
+ RecordTok->isOneOf(
+ tok::kw_typedef, tok::kw_export, Keywords.kw_declare,
+ Keywords.kw_abstract, tok::kw_default, tok::kw_public,
+ tok::kw_private, tok::kw_protected, Keywords.kw_internal))
RecordTok = RecordTok->Next;
if (RecordTok &&
RecordTok->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
@@ -817,7 +808,8 @@ protected:
if (!DryRun) {
Whitespaces->replaceWhitespace(
*Child->First, /*Newlines=*/0, /*Spaces=*/1,
- /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
+ /*StartOfTokenColumn=*/State.Column, /*IsAligned=*/false,
+ State.Line->InPPDirective);
}
Penalty +=
formatLine(*Child, State.Column + 1, /*FirstStartColumn=*/0, DryRun);
@@ -1238,7 +1230,8 @@ void UnwrappedLineFormatter::formatFirstToken(
// If in Whitesmiths mode, indent start and end of blocks
if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
- if (RootToken.isOneOf(tok::l_brace, tok::r_brace, tok::kw_case))
+ if (RootToken.isOneOf(tok::l_brace, tok::r_brace, tok::kw_case,
+ tok::kw_default))
Indent += Style.IndentWidth;
}
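
In Whitesmiths style this means case and now default labels pick up the same extra indent as the braces; a sketch with placeholder calls and IndentWidth 2 assumed:

  // BreakBeforeBraces: Whitesmiths
  switch (Value)
    {
    case 0:
      HandleZero();
      break;
    default:
      HandleOther();
      break;
    }
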
@@ -1249,6 +1242,7 @@ void UnwrappedLineFormatter::formatFirstToken(
Indent = 0;
Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
+ /*IsAligned=*/false,
Line.InPPDirective &&
!RootToken.HasUnescapedNewline);
}
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index ead6b4743207..a37386425aae 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "UnwrappedLineParser.h"
+#include "FormatToken.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -323,13 +324,54 @@ void UnwrappedLineParser::parseFile() {
addUnwrappedLine();
}
+void UnwrappedLineParser::parseCSharpGenericTypeConstraint() {
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_brace:
+ return;
+ default:
+ if (FormatTok->is(Keywords.kw_where)) {
+ addUnwrappedLine();
+ nextToken();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
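
One way to exercise the new C# parsing paths (generic type constraints, attributes, property accessors) is to drive the public library API over a small buffer. This is a standalone sketch under the assumption that only getMicrosoftStyle, reformat and applyAllReplacements are needed; it is not part of this change.

  #include "clang/Format/Format.h"
  #include "clang/Tooling/Core/Replacement.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    llvm::StringRef Code =
        "class ItemFactory<T> where T : new() { public T Value { get; set; } }\n";
    clang::format::FormatStyle Style =
        clang::format::getMicrosoftStyle(clang::format::FormatStyle::LK_CSharp);
    clang::tooling::Replacements Fixes = clang::format::reformat(
        Style, Code,
        {clang::tooling::Range(0, static_cast<unsigned>(Code.size()))},
        "Example.cs");
    if (auto Result = clang::tooling::applyAllReplacements(Code, Fixes))
      llvm::outs() << *Result; // Expect 'where T : new()' on its own line.
    else
      llvm::consumeError(Result.takeError());
    return 0;
  }
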
+
+void UnwrappedLineParser::parseCSharpAttribute() {
+ int UnpairedSquareBrackets = 1;
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::r_square:
+ nextToken();
+ --UnpairedSquareBrackets;
+ if (UnpairedSquareBrackets == 0) {
+ addUnwrappedLine();
+ return;
+ }
+ break;
+ case tok::l_square:
+ ++UnpairedSquareBrackets;
+ nextToken();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
bool SwitchLabelEncountered = false;
do {
tok::TokenKind kind = FormatTok->Tok.getKind();
- if (FormatTok->Type == TT_MacroBlockBegin) {
+ if (FormatTok->getType() == TT_MacroBlockBegin) {
kind = tok::l_brace;
- } else if (FormatTok->Type == TT_MacroBlockEnd) {
+ } else if (FormatTok->getType() == TT_MacroBlockEnd) {
kind = tok::r_brace;
}
@@ -381,6 +423,13 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
SwitchLabelEncountered = true;
parseStructuralElement();
break;
+ case tok::l_square:
+ if (Style.isCSharp()) {
+ nextToken();
+ parseCSharpAttribute();
+ break;
+ }
+ LLVM_FALLTHROUGH;
default:
parseStructuralElement();
break;
@@ -851,14 +900,14 @@ void UnwrappedLineParser::parsePPUnknown() {
addUnwrappedLine();
}
-// Here we blacklist certain tokens that are not usually the first token in an
+// Here we exclude certain tokens that are not usually the first token in an
// unwrapped line. This is used in an attempt to distinguish macro calls without
// trailing semicolons from other constructs split to several lines.
-static bool tokenCanStartNewLine(const clang::Token &Tok) {
+static bool tokenCanStartNewLine(const FormatToken &Tok) {
// Semicolon can be a null-statement, l_square can be a start of a macro or
// a C++11 attribute, but this doesn't seem to be common.
return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) &&
- Tok.isNot(tok::l_square) &&
+ Tok.isNot(TT_AttributeSquare) &&
// Tokens that can only be used as binary operators and a part of
// overloaded operator names.
Tok.isNot(tok::period) && Tok.isNot(tok::periodstar) &&
@@ -984,11 +1033,11 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_asm:
nextToken();
if (FormatTok->is(tok::l_brace)) {
- FormatTok->Type = TT_InlineASMBrace;
+ FormatTok->setType(TT_InlineASMBrace);
nextToken();
while (FormatTok && FormatTok->isNot(tok::eof)) {
if (FormatTok->is(tok::r_brace)) {
- FormatTok->Type = TT_InlineASMBrace;
+ FormatTok->setType(TT_InlineASMBrace);
nextToken();
addUnwrappedLine();
break;
@@ -1011,13 +1060,22 @@ void UnwrappedLineParser::parseStructuralElement() {
parseAccessSpecifier();
return;
case tok::kw_if:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseIfThenElse();
return;
case tok::kw_for:
case tok::kw_while:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseForOrWhileLoop();
return;
case tok::kw_do:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseDoWhile();
return;
case tok::kw_switch:
@@ -1045,6 +1103,9 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
case tok::kw_try:
case tok::kw___try:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseTryCatch();
return;
case tok::kw_extern:
@@ -1052,11 +1113,16 @@ void UnwrappedLineParser::parseStructuralElement() {
if (FormatTok->Tok.is(tok::string_literal)) {
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterExternBlock) {
- addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/true);
+ if (!Style.IndentExternBlock) {
+ if (Style.BraceWrapping.AfterExternBlock) {
+ addUnwrappedLine();
+ }
+ parseBlock(/*MustBeDeclaration=*/true,
+ /*AddLevel=*/Style.BraceWrapping.AfterExternBlock);
} else {
- parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/false);
+ parseBlock(/*MustBeDeclaration=*/true,
+ /*AddLevel=*/Style.IndentExternBlock ==
+ FormatStyle::IEBS_Indent);
}
addUnwrappedLine();
return;
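
The IndentExternBlock handling above separates extern-block indentation from brace placement; roughly (sketch only, brace wrapping still follows BraceWrapping.AfterExternBlock):

  // IndentExternBlock: Indent
  extern "C" {
    void f();
  }

  // IndentExternBlock: NoIndent
  extern "C" {
  void f();
  }
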
@@ -1274,14 +1340,14 @@ void UnwrappedLineParser::parseStructuralElement() {
parseChildBlock();
break;
case tok::l_brace:
- if (!tryToParseBracedList()) {
+ if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) {
// A block outside of parentheses must be the last part of a
// structural element.
// FIXME: Figure out cases where this is not true, and add projections
// for them (the one we know is missing are lambdas).
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
- FormatTok->Type = TT_FunctionLBrace;
+ FormatTok->setType(TT_FunctionLBrace);
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
return;
@@ -1290,12 +1356,24 @@ void UnwrappedLineParser::parseStructuralElement() {
// element continues.
break;
case tok::kw_try:
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Line->MustBeDeclaration) {
+ // field/method declaration.
+ nextToken();
+ break;
+ }
// We arrive here when parsing function-try blocks.
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
parseTryCatch();
return;
case tok::identifier: {
+ if (Style.isCSharp() && FormatTok->is(Keywords.kw_where) &&
+ Line->MustBeDeclaration) {
+ addUnwrappedLine();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
if (FormatTok->is(TT_MacroBlockEnd)) {
addUnwrappedLine();
return;
@@ -1368,7 +1446,7 @@ void UnwrappedLineParser::parseStructuralElement() {
: CommentsBeforeNextToken.front()->NewlinesBefore > 0;
if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
- tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
+ tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
addUnwrappedLine();
return;
}
@@ -1381,19 +1459,30 @@ void UnwrappedLineParser::parseStructuralElement() {
// followed by a curly.
if (FormatTok->is(TT_JsFatArrow)) {
nextToken();
- if (FormatTok->is(tok::l_brace))
+ if (FormatTok->is(tok::l_brace)) {
+ // C# may break after => if the next character is a newline.
+ if (Style.isCSharp() && Style.BraceWrapping.AfterFunction) {
+ // Calling `addUnwrappedLine()` here causes odd parsing errors.
+ FormatTok->MustBreakBefore = true;
+ }
parseChildBlock();
+ }
break;
}
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
+ // Block kind should probably be set to BK_BracedInit for any language.
+ // C# needs this change to ensure that array initialisers and object
+ // initialisers are indented the same way.
+ if (Style.isCSharp())
+ FormatTok->BlockKind = BK_BracedInit;
nextToken();
parseBracedList();
} else if (Style.Language == FormatStyle::LK_Proto &&
FormatTok->Tok.is(tok::less)) {
nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false,
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
}
break;
@@ -1410,6 +1499,96 @@ void UnwrappedLineParser::parseStructuralElement() {
} while (!eof());
}
+bool UnwrappedLineParser::tryToParsePropertyAccessor() {
+ assert(FormatTok->is(tok::l_brace));
+ if (!Style.isCSharp())
+ return false;
+ // See if it's a property accessor.
+ if (FormatTok->Previous->isNot(tok::identifier))
+ return false;
+
+ // See if we are inside a property accessor.
+ //
+ // Record the current token position so that we can advance and
+ // reset the current token. `Next` is not set yet so we need
+ // another way to advance along the token stream.
+ unsigned int StoredPosition = Tokens->getPosition();
+ FormatToken *Tok = Tokens->getNextToken();
+
+ // A trivial property accessor is of the form:
+ // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set] }
+ // Track these as they do not require line breaks to be introduced.
+ bool HasGetOrSet = false;
+ bool IsTrivialPropertyAccessor = true;
+ while (!eof()) {
+ if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
+ tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
+ Keywords.kw_set)) {
+ if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_set))
+ HasGetOrSet = true;
+ Tok = Tokens->getNextToken();
+ continue;
+ }
+ if (Tok->isNot(tok::r_brace))
+ IsTrivialPropertyAccessor = false;
+ break;
+ }
+
+ if (!HasGetOrSet) {
+ Tokens->setPosition(StoredPosition);
+ return false;
+ }
+
+ // Try to parse the property accessor:
+ // https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/classes-and-structs/properties
+ Tokens->setPosition(StoredPosition);
+ if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
+ nextToken();
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::r_brace:
+ nextToken();
+ if (FormatTok->is(tok::equal)) {
+ while (!eof() && FormatTok->isNot(tok::semi))
+ nextToken();
+ nextToken();
+ }
+ addUnwrappedLine();
+ return true;
+ case tok::l_brace:
+ ++Line->Level;
+ parseBlock(/*MustBeDeclaration=*/true);
+ addUnwrappedLine();
+ --Line->Level;
+ break;
+ case tok::equal:
+ if (FormatTok->is(TT_JsFatArrow)) {
+ ++Line->Level;
+ do {
+ nextToken();
+ } while (!eof() && FormatTok->isNot(tok::semi));
+ nextToken();
+ addUnwrappedLine();
+ --Line->Level;
+ break;
+ }
+ nextToken();
+ break;
+ default:
+ if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_set) &&
+ !IsTrivialPropertyAccessor) {
+ // Non-trivial get/set needs to be on its own line.
+ addUnwrappedLine();
+ }
+ nextToken();
+ }
+ } while (!eof());
+
+ // Unreachable for well-formed code (paired '{' and '}').
+ return true;
+}
+
bool UnwrappedLineParser::tryToParseLambda() {
if (!Style.isCpp()) {
nextToken();
@@ -1480,6 +1659,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::lessequal:
case tok::question:
case tok::colon:
+ case tok::ellipsis:
case tok::kw_true:
case tok::kw_false:
if (SeenArrow) {
@@ -1491,7 +1671,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
// This might or might not actually be a lambda arrow (this could be an
// ObjC method invocation followed by a dereferencing arrow). We might
// reset this back to TT_Unknown in TokenAnnotator.
- FormatTok->Type = TT_LambdaArrow;
+ FormatTok->setType(TT_LambdaArrow);
SeenArrow = true;
nextToken();
break;
@@ -1499,8 +1679,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
return true;
}
}
- FormatTok->Type = TT_LambdaLBrace;
- LSquare.Type = TT_LambdaLSquare;
+ FormatTok->setType(TT_LambdaLBrace);
+ LSquare.setType(TT_LambdaLSquare);
parseChildBlock();
return true;
}
@@ -1533,7 +1713,7 @@ void UnwrappedLineParser::tryToParseJSFunction() {
// Consume * (generator function). Treat it like C++'s overloaded operators.
if (FormatTok->is(tok::star)) {
- FormatTok->Type = TT_OverloadedOperator;
+ FormatTok->setType(TT_OverloadedOperator);
nextToken();
}
@@ -1578,12 +1758,24 @@ bool UnwrappedLineParser::tryToParseBracedList() {
}
bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
+ bool IsEnum,
tok::TokenKind ClosingBraceKind) {
bool HasError = false;
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
// replace this by using parseAssigmentExpression() inside.
do {
+ if (Style.isCSharp()) {
+ if (FormatTok->is(TT_JsFatArrow)) {
+ nextToken();
+ // Fat arrows can be followed by simple expressions or by child blocks
+ // in curly braces.
+ if (FormatTok->is(tok::l_brace)) {
+ parseChildBlock();
+ continue;
+ }
+ }
+ }
if (Style.Language == FormatStyle::LK_JavaScript) {
if (FormatTok->is(Keywords.kw_function) ||
FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
@@ -1607,6 +1799,8 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
}
}
if (FormatTok->Tok.getKind() == ClosingBraceKind) {
+ if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
nextToken();
return !HasError;
}
@@ -1618,7 +1812,10 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
}
break;
case tok::l_square:
- tryToParseLambda();
+ if (Style.isCSharp())
+ parseSquare();
+ else
+ tryToParseLambda();
break;
case tok::l_paren:
parseParens();
@@ -1640,7 +1837,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
case tok::less:
if (Style.Language == FormatStyle::LK_Proto) {
nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false,
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
} else {
nextToken();
@@ -1662,6 +1859,8 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
break;
case tok::comma:
nextToken();
+ if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
break;
default:
nextToken();
@@ -1768,6 +1967,9 @@ void UnwrappedLineParser::parseIfThenElse() {
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
+ // handle [[likely]] / [[unlikely]]
+ if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
+ parseSquare();
bool NeedsUnwrappedLine = false;
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
@@ -1784,6 +1986,9 @@ void UnwrappedLineParser::parseIfThenElse() {
}
if (FormatTok->Tok.is(tok::kw_else)) {
nextToken();
+ // handle [[likely]] / [[unlikely]]
+ if (FormatTok->Tok.is(tok::l_square) && tryToParseSimpleAttribute())
+ parseSquare();
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
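
With the tryToParseSimpleAttribute calls above, an attribute between the condition and the brace is consumed as one unit, so C++20 branch hints no longer confuse block parsing. Identifiers below are placeholders:

  if (isCached(Key)) [[likely]] {
    return CachedValue;
  } else [[unlikely]] {
    return recompute(Key);
  }
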
@@ -1810,11 +2015,20 @@ void UnwrappedLineParser::parseTryCatch() {
if (FormatTok->is(tok::colon)) {
// We are in a function try block, what comes is an initializer list.
nextToken();
+
+ // In case identifiers were removed by clang-tidy, what might follow is
+ // multiple commas in sequence - before the first identifier.
+ while (FormatTok->is(tok::comma))
+ nextToken();
+
while (FormatTok->is(tok::identifier)) {
nextToken();
if (FormatTok->is(tok::l_paren))
parseParens();
- if (FormatTok->is(tok::comma))
+
+ // In case identifiers were removed by clang-tidy, what might follow is
+ // multiple commas in sequence - after the first identifier.
+ while (FormatTok->is(tok::comma))
nextToken();
}
}
@@ -1898,7 +2112,7 @@ void UnwrappedLineParser::parseNamespace() {
DeclarationScopeStack.size() > 1);
parseBlock(/*MustBeDeclaration=*/true, AddLevel);
// Munch the semicolon after a namespace. This is more common than one would
- // think. Puttin the semicolon into its own line is very ugly.
+ // think. Putting the semicolon into its own line is very ugly.
if (FormatTok->Tok.is(tok::semi))
nextToken();
addUnwrappedLine();
@@ -1909,6 +2123,19 @@ void UnwrappedLineParser::parseNamespace() {
void UnwrappedLineParser::parseNew() {
assert(FormatTok->is(tok::kw_new) && "'new' expected");
nextToken();
+
+ if (Style.isCSharp()) {
+ do {
+ if (FormatTok->is(tok::l_brace))
+ parseBracedList();
+
+ if (FormatTok->isOneOf(tok::semi, tok::comma))
+ return;
+
+ nextToken();
+ } while (!eof());
+ }
+
if (Style.Language != FormatStyle::LK_Java)
return;
@@ -1959,7 +2186,7 @@ void UnwrappedLineParser::parseDoWhile() {
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BraceWrapping.IndentBraces)
+ if (Style.BraceWrapping.BeforeWhile)
addUnwrappedLine();
} else {
addUnwrappedLine();
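
BraceWrapping.BeforeWhile, consulted above instead of IndentBraces, decides whether the while of a do-while starts its own line; placeholder calls, sketch only:

  // BeforeWhile: false
  do {
    step();
  } while (keepGoing());

  // BeforeWhile: true
  do {
    step();
  }
  while (keepGoing());
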
@@ -1985,15 +2212,21 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
--Line->Level;
if (LeftAlignLabel)
Line->Level = 0;
- if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
+ if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
+ FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Line->Level,
Style.BraceWrapping.AfterCaseLabel,
Style.BraceWrapping.IndentBraces);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
+ if (!Style.IndentCaseBlocks &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
+ Line->Level++;
+ }
+ }
parseStructuralElement();
}
addUnwrappedLine();
@@ -2097,9 +2330,18 @@ bool UnwrappedLineParser::parseEnum() {
return true;
}
+ if (!Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
// Parse enum body.
nextToken();
- bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
+ if (!Style.AllowShortEnumsOnASingleLine) {
+ addUnwrappedLine();
+ Line->Level += 1;
+ }
+ bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true,
+ /*IsEnum=*/true);
+ if (!Style.AllowShortEnumsOnASingleLine)
+ Line->Level -= 1;
if (HasError) {
if (FormatTok->is(tok::semi))
nextToken();
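
The AllowShortEnumsOnASingleLine checks above add breaks before the body, after each comma, and before the closing brace when the option is off; a sketch (brace placement in the false case reflects this version of the code and may interact with BraceWrapping):

  // AllowShortEnumsOnASingleLine: true
  enum { A, B, C } Flags;

  // AllowShortEnumsOnASingleLine: false
  enum
  {
    A,
    B,
    C
  } Flags;
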
@@ -2112,6 +2354,51 @@ bool UnwrappedLineParser::parseEnum() {
// "} n, m;" will end up in one unwrapped line.
}
+namespace {
+// A class used to set and restore the Token position when peeking
+// ahead in the token source.
+class ScopedTokenPosition {
+ unsigned StoredPosition;
+ FormatTokenSource *Tokens;
+
+public:
+ ScopedTokenPosition(FormatTokenSource *Tokens) : Tokens(Tokens) {
+ assert(Tokens && "Tokens expected to not be null");
+ StoredPosition = Tokens->getPosition();
+ }
+
+ ~ScopedTokenPosition() { Tokens->setPosition(StoredPosition); }
+};
+} // namespace
+
+// Look ahead to see if we have [[. If we do not, rewind to the original
+// position.
+bool UnwrappedLineParser::tryToParseSimpleAttribute() {
+ ScopedTokenPosition AutoPosition(Tokens);
+ FormatToken *Tok = Tokens->getNextToken();
+ // We already read the first '['; check for the second.
+ if (Tok && !Tok->is(tok::l_square)) {
+ return false;
+ }
+ // Double check that the attribute is just something
+ // fairly simple.
+ while (Tok) {
+ if (Tok->is(tok::r_square)) {
+ break;
+ }
+ Tok = Tokens->getNextToken();
+ }
+ Tok = Tokens->getNextToken();
+ if (Tok && !Tok->is(tok::r_square)) {
+ return false;
+ }
+ Tok = Tokens->getNextToken();
+ if (Tok && Tok->is(tok::semi)) {
+ return false;
+ }
+ return true;
+}
+
void UnwrappedLineParser::parseJavaEnumBody() {
// Determine whether the enum is simple, i.e. does not have a semicolon or
// constants with class bodies. Simple enums can be formatted like braced
@@ -2181,9 +2468,10 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// The actual identifier can be a nested name specifier, and in macros
// it is often token-pasted.
+ // An [[attribute]] can be before the identifier.
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas) ||
+ tok::kw_alignas, tok::l_square, tok::r_square) ||
((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
FormatTok->isOneOf(tok::period, tok::comma))) {
@@ -2203,8 +2491,16 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
FormatTok->TokenText != FormatTok->TokenText.upper();
nextToken();
// We can have macros or attributes in between 'class' and the class name.
- if (!IsNonMacroIdentifier && FormatTok->Tok.is(tok::l_paren))
- parseParens();
+ if (!IsNonMacroIdentifier) {
+ if (FormatTok->Tok.is(tok::l_paren)) {
+ parseParens();
+ } else if (FormatTok->is(TT_AttributeSquare)) {
+ parseSquare();
+ // Consume the closing TT_AttributeSquare.
+ if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
+ nextToken();
+ }
+ }
}
// Note that parsing away template declarations here leads to incorrectly
@@ -2226,6 +2522,12 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
if (FormatTok->Tok.is(tok::semi))
return;
+ if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
+ addUnwrappedLine();
+ nextToken();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
nextToken();
}
}
@@ -2451,8 +2753,8 @@ LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
E = Line.Tokens.end();
I != E; ++I) {
llvm::dbgs() << I->Tok->Tok.getName() << "["
- << "T=" << I->Tok->Type << ", OC=" << I->Tok->OriginalColumn
- << "] ";
+ << "T=" << I->Tok->getType()
+ << ", OC=" << I->Tok->OriginalColumn << "] ";
}
for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
E = Line.Tokens.end();
@@ -2723,18 +3025,19 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
}
- while (FormatTok->Type == TT_ConflictStart ||
- FormatTok->Type == TT_ConflictEnd ||
- FormatTok->Type == TT_ConflictAlternative) {
- if (FormatTok->Type == TT_ConflictStart) {
+ while (FormatTok->getType() == TT_ConflictStart ||
+ FormatTok->getType() == TT_ConflictEnd ||
+ FormatTok->getType() == TT_ConflictAlternative) {
+ if (FormatTok->getType() == TT_ConflictStart) {
conditionalCompilationStart(/*Unreachable=*/false);
- } else if (FormatTok->Type == TT_ConflictAlternative) {
+ } else if (FormatTok->getType() == TT_ConflictAlternative) {
conditionalCompilationAlternative();
- } else if (FormatTok->Type == TT_ConflictEnd) {
+ } else if (FormatTok->getType() == TT_ConflictEnd) {
conditionalCompilationEnd();
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
+ FormatTok->MustBreakAlignBefore = true;
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -2759,6 +3062,7 @@ void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
+ Line->Tokens.back().Tok->MustBreakAlignBefore = true;
MustBreakBeforeNextToken = false;
}
}
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index 5d9bafc429a7..8b3aa4c84edb 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -98,7 +98,7 @@ private:
void readTokenWithJavaScriptASI();
void parseStructuralElement();
bool tryToParseBracedList();
- bool parseBracedList(bool ContinueOnSemicolons = false,
+ bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
void parseParens();
void parseSquare(bool LambdaIntroducer = false);
@@ -125,9 +125,16 @@ private:
bool parseObjCProtocol();
void parseJavaScriptEs6ImportExport();
void parseStatementMacro();
+ void parseCSharpAttribute();
+ // Parse a C# generic type constraint: `where T : IComparable<T>`.
+ // See:
+ // https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/where-generic-type-constraint
+ void parseCSharpGenericTypeConstraint();
bool tryToParseLambda();
bool tryToParseLambdaIntroducer();
+ bool tryToParsePropertyAccessor();
void tryToParseJSFunction();
+ bool tryToParseSimpleAttribute();
void addUnwrappedLine();
bool eof() const;
// LevelDifference is the difference of levels after and before the current
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index 5a44500d355f..32e0b685ea0f 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -30,28 +30,29 @@ WhitespaceManager::Change::Change(const FormatToken &Tok,
int Spaces, unsigned StartOfTokenColumn,
unsigned NewlinesBefore,
StringRef PreviousLinePostfix,
- StringRef CurrentLinePrefix,
+ StringRef CurrentLinePrefix, bool IsAligned,
bool ContinuesPPDirective, bool IsInsideToken)
: Tok(&Tok), CreateReplacement(CreateReplacement),
OriginalWhitespaceRange(OriginalWhitespaceRange),
StartOfTokenColumn(StartOfTokenColumn), NewlinesBefore(NewlinesBefore),
PreviousLinePostfix(PreviousLinePostfix),
- CurrentLinePrefix(CurrentLinePrefix),
+ CurrentLinePrefix(CurrentLinePrefix), IsAligned(IsAligned),
ContinuesPPDirective(ContinuesPPDirective), Spaces(Spaces),
IsInsideToken(IsInsideToken), IsTrailingComment(false), TokenLength(0),
PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
- StartOfBlockComment(nullptr), IndentationOffset(0) {}
+ StartOfBlockComment(nullptr), IndentationOffset(0), ConditionalsLevel(0) {
+}
void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
unsigned Spaces,
unsigned StartOfTokenColumn,
- bool InPPDirective) {
+ bool IsAligned, bool InPPDirective) {
if (Tok.Finalized)
return;
Tok.Decision = (Newlines > 0) ? FD_Break : FD_Continue;
Changes.push_back(Change(Tok, /*CreateReplacement=*/true, Tok.WhitespaceRange,
Spaces, StartOfTokenColumn, Newlines, "", "",
- InPPDirective && !Tok.IsFirst,
+ IsAligned, InPPDirective && !Tok.IsFirst,
/*IsInsideToken=*/false));
}
@@ -62,7 +63,7 @@ void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
Changes.push_back(Change(Tok, /*CreateReplacement=*/false,
Tok.WhitespaceRange, /*Spaces=*/0,
Tok.OriginalColumn, Tok.NewlinesBefore, "", "",
- InPPDirective && !Tok.IsFirst,
+ /*IsAligned=*/false, InPPDirective && !Tok.IsFirst,
/*IsInsideToken=*/false));
}
@@ -82,7 +83,8 @@ void WhitespaceManager::replaceWhitespaceInToken(
Change(Tok, /*CreateReplacement=*/true,
SourceRange(Start, Start.getLocWithOffset(ReplaceChars)), Spaces,
std::max(0, Spaces), Newlines, PreviousPostfix, CurrentPrefix,
- InPPDirective && !Tok.IsFirst, /*IsInsideToken=*/true));
+ /*IsAligned=*/true, InPPDirective && !Tok.IsFirst,
+ /*IsInsideToken=*/true));
}
const tooling::Replacements &WhitespaceManager::generateReplacements() {
@@ -93,7 +95,9 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
calculateLineBreakInformation();
alignConsecutiveMacros();
alignConsecutiveDeclarations();
+ alignConsecutiveBitFields();
alignConsecutiveAssignments();
+ alignChainedConditionals();
alignTrailingComments();
alignEscapedNewlines();
generateChanges();
@@ -226,6 +230,33 @@ void WhitespaceManager::calculateLineBreakInformation() {
LastBlockComment = nullptr;
}
}
+
+ // Compute the conditional nesting level.
+ // The level is increased for each conditional, unless the conditional
+ // continues a chain of conditionals, i.e. starts immediately after the
+ // colon of another conditional.
+ SmallVector<bool, 16> ScopeStack;
+ int ConditionalsLevel = 0;
+ for (auto &Change : Changes) {
+ for (unsigned i = 0, e = Change.Tok->FakeLParens.size(); i != e; ++i) {
+ bool isNestedConditional =
+ Change.Tok->FakeLParens[e - 1 - i] == prec::Conditional &&
+ !(i == 0 && Change.Tok->Previous &&
+ Change.Tok->Previous->is(TT_ConditionalExpr) &&
+ Change.Tok->Previous->is(tok::colon));
+ if (isNestedConditional)
+ ++ConditionalsLevel;
+ ScopeStack.push_back(isNestedConditional);
+ }
+
+ Change.ConditionalsLevel = ConditionalsLevel;
+
+ for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size();
+ --i) {
+ if (ScopeStack.pop_back_val())
+ --ConditionalsLevel;
+ }
+ }
}
// Align a single sequence of tokens, see AlignTokens below.
@@ -247,6 +278,7 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// double z);
// In the above example, we need to take special care to ensure that
// 'double z' is indented along with its owning function 'b'.
+ // Special handling is required for 'nested' ternary operators.
SmallVector<unsigned, 16> ScopeStack;
for (unsigned i = Start; i != End; ++i) {
@@ -287,7 +319,10 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
unsigned ScopeStart = ScopeStack.back();
if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName) ||
(ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)))
+ Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)) ||
+ Changes[i].Tok->is(TT_ConditionalExpr) ||
+ (Changes[i].Tok->Previous &&
+ Changes[i].Tok->Previous->is(TT_ConditionalExpr)))
Changes[i].Spaces += Shift;
}
@@ -340,7 +375,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// abort when we hit any token in a higher scope than the starting one.
auto IndentAndNestingLevel = StartAt < Changes.size()
? Changes[StartAt].indentAndNestingLevel()
- : std::pair<unsigned, unsigned>(0, 0);
+ : std::tuple<unsigned, unsigned, unsigned>();
// Keep track of the number of commas before the matching tokens, we will only
// align a sequence of matching tokens if they are preceded by the same number
@@ -376,9 +411,11 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Changes[i].NewlinesBefore != 0) {
CommasBeforeMatch = 0;
EndOfSequence = i;
- // If there is a blank line, or if the last line didn't contain any
- // matching token, the sequence ends here.
- if (Changes[i].NewlinesBefore > 1 || !FoundMatchOnLine)
+ // If there is a blank line, a forced alignment break (e.g. at a
+ // preprocessor directive), or if the last line didn't contain any matching
+ // token, the sequence ends here.
+ if (Changes[i].NewlinesBefore > 1 ||
+ Changes[i].Tok->MustBreakAlignBefore || !FoundMatchOnLine)
AlignCurrentSequence();
FoundMatchOnLine = false;
@@ -408,9 +445,17 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
StartOfSequence = i;
unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- int LineLengthAfter = -Changes[i].Spaces;
- for (unsigned j = i; j != e && Changes[j].NewlinesBefore == 0; ++j)
- LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
+ int LineLengthAfter = Changes[i].TokenLength;
+ for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
+ LineLengthAfter += Changes[j].Spaces;
+ // Changes are generally 1:1 with the tokens, but a change could also be
+ // inside of a token, in which case it's counted more than once: once for
+ // the whitespace surrounding the token (!IsInsideToken) and once for
+ // each whitespace change within it (IsInsideToken).
+ // Therefore, changes inside of a token should only count the space.
+ if (!Changes[j].IsInsideToken)
+ LineLengthAfter += Changes[j].TokenLength;
+ }
unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
// If we are restricted by the maximum column width, end the sequence.
@@ -573,6 +618,26 @@ void WhitespaceManager::alignConsecutiveAssignments() {
Changes, /*StartAt=*/0);
}
+void WhitespaceManager::alignConsecutiveBitFields() {
+ if (!Style.AlignConsecutiveBitFields)
+ return;
+
+ AlignTokens(
+ Style,
+ [&](Change const &C) {
+ // Do not align on ':' that is first on a line.
+ if (C.NewlinesBefore > 0)
+ return false;
+
+ // Do not align on ':' that is last on a line.
+ if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
+ return false;
+
+ return C.Tok->is(TT_BitFieldColon);
+ },
+ Changes, /*StartAt=*/0);
+}
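
AlignConsecutiveBitFields pads the declarations so the bit-field colons line up, skipping a ':' that starts or ends a line; names and widths below are illustrative:

  // AlignConsecutiveBitFields: true
  struct Status {
    unsigned Ready    : 1;
    unsigned Error    : 1;
    unsigned Retries  : 4;
    unsigned Reserved : 26;
  };
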
+
void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Style.AlignConsecutiveDeclarations)
return;
@@ -607,6 +672,50 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
Changes, /*StartAt=*/0);
}
+void WhitespaceManager::alignChainedConditionals() {
+ if (Style.BreakBeforeTernaryOperators) {
+ AlignTokens(
+ Style,
+ [](Change const &C) {
+ // Align question operators and last colon
+ return C.Tok->is(TT_ConditionalExpr) &&
+ ((C.Tok->is(tok::question) && !C.NewlinesBefore) ||
+ (C.Tok->is(tok::colon) && C.Tok->Next &&
+ (C.Tok->Next->FakeLParens.size() == 0 ||
+ C.Tok->Next->FakeLParens.back() != prec::Conditional)));
+ },
+ Changes, /*StartAt=*/0);
+ } else {
+ static auto AlignWrappedOperand = [](Change const &C) {
+ auto Previous = C.Tok->getPreviousNonComment();
+ return C.NewlinesBefore && Previous && Previous->is(TT_ConditionalExpr) &&
+ (Previous->is(tok::question) ||
+ (Previous->is(tok::colon) &&
+ (C.Tok->FakeLParens.size() == 0 ||
+ C.Tok->FakeLParens.back() != prec::Conditional)));
+ };
+ // Ensure we keep alignment of wrapped operands with non-wrapped operands
+ // Since we actually align the operators, the wrapped operands need the
+ // extra offset to be properly aligned.
+ for (Change &C : Changes) {
+ if (AlignWrappedOperand(C))
+ C.StartOfTokenColumn -= 2;
+ }
+ AlignTokens(
+ Style,
+ [this](Change const &C) {
+ // Align question operators if next operand is not wrapped, as
+ // well as wrapped operands after question operator or last
+ // colon in conditional sequence
+ return (C.Tok->is(TT_ConditionalExpr) && C.Tok->is(tok::question) &&
+ &C != &Changes.back() && (&C + 1)->NewlinesBefore == 0 &&
+ !(&C + 1)->IsTrailingComment) ||
+ AlignWrappedOperand(C);
+ },
+ Changes, /*StartAt=*/0);
+ }
+}
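
Chained-conditional alignment pads the operators of a ternary chain so the branch values line up; with BreakBeforeTernaryOperators the '?' tokens and the final ':' act as the anchors. The exact columns below are only indicative:

  int Penalty = IsError     ? HighCost
                : IsWarning ? MidCost
                            : LowCost;
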
+
void WhitespaceManager::alignTrailingComments() {
unsigned MinColumn = 0;
unsigned MaxColumn = UINT_MAX;
@@ -617,6 +726,8 @@ void WhitespaceManager::alignTrailingComments() {
if (Changes[i].StartOfBlockComment)
continue;
Newlines += Changes[i].NewlinesBefore;
+ if (Changes[i].Tok->MustBreakAlignBefore)
+ BreakBeforeNext = true;
if (!Changes[i].IsTrailingComment)
continue;
@@ -761,9 +872,9 @@ void WhitespaceManager::generateChanges() {
C.EscapedNewlineColumn);
else
appendNewlineText(ReplacementText, C.NewlinesBefore);
- appendIndentText(ReplacementText, C.Tok->IndentLevel,
- std::max(0, C.Spaces),
- C.StartOfTokenColumn - std::max(0, C.Spaces));
+ appendIndentText(
+ ReplacementText, C.Tok->IndentLevel, std::max(0, C.Spaces),
+ C.StartOfTokenColumn - std::max(0, C.Spaces), C.IsAligned);
ReplacementText.append(C.CurrentLinePrefix);
storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
}
@@ -809,7 +920,8 @@ void WhitespaceManager::appendEscapedNewlineText(
void WhitespaceManager::appendIndentText(std::string &Text,
unsigned IndentLevel, unsigned Spaces,
- unsigned WhitespaceStartColumn) {
+ unsigned WhitespaceStartColumn,
+ bool IsAligned) {
switch (Style.UseTab) {
case FormatStyle::UT_Never:
Text.append(Spaces, ' ');
@@ -838,28 +950,39 @@ void WhitespaceManager::appendIndentText(std::string &Text,
case FormatStyle::UT_ForIndentation:
if (WhitespaceStartColumn == 0) {
unsigned Indentation = IndentLevel * Style.IndentWidth;
- // This happens, e.g. when a line in a block comment is indented less than
- // the first one.
- if (Indentation > Spaces)
- Indentation = Spaces;
- if (Style.TabWidth) {
- unsigned Tabs = Indentation / Style.TabWidth;
- Text.append(Tabs, '\t');
- Spaces -= Tabs * Style.TabWidth;
- }
+ Spaces = appendTabIndent(Text, Spaces, Indentation);
}
Text.append(Spaces, ' ');
break;
case FormatStyle::UT_ForContinuationAndIndentation:
- if (WhitespaceStartColumn == 0 && Style.TabWidth) {
- unsigned Tabs = Spaces / Style.TabWidth;
- Text.append(Tabs, '\t');
- Spaces -= Tabs * Style.TabWidth;
+ if (WhitespaceStartColumn == 0)
+ Spaces = appendTabIndent(Text, Spaces, Spaces);
+ Text.append(Spaces, ' ');
+ break;
+ case FormatStyle::UT_AlignWithSpaces:
+ if (WhitespaceStartColumn == 0) {
+ unsigned Indentation =
+ IsAligned ? IndentLevel * Style.IndentWidth : Spaces;
+ Spaces = appendTabIndent(Text, Spaces, Indentation);
}
Text.append(Spaces, ' ');
break;
}
}
+unsigned WhitespaceManager::appendTabIndent(std::string &Text, unsigned Spaces,
+ unsigned Indentation) {
+ // This happens, e.g. when a line in a block comment is indented less than the
+ // first one.
+ if (Indentation > Spaces)
+ Indentation = Spaces;
+ if (Style.TabWidth) {
+ unsigned Tabs = Indentation / Style.TabWidth;
+ Text.append(Tabs, '\t');
+ Spaces -= Tabs * Style.TabWidth;
+ }
+ return Spaces;
+}
+
} // namespace format
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index f47bf40204b3..1398a3aee2b8 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include <string>
+#include <tuple>
namespace clang {
namespace format {
@@ -49,7 +50,7 @@ public:
/// this replacement. It is needed for determining how \p Spaces is turned
/// into tabs and spaces for some format styles.
void replaceWhitespace(FormatToken &Tok, unsigned Newlines, unsigned Spaces,
- unsigned StartOfTokenColumn,
+ unsigned StartOfTokenColumn, bool IsAligned = false,
bool InPPDirective = false);
/// Adds information about an unchangeable token's whitespace.
@@ -109,7 +110,7 @@ public:
SourceRange OriginalWhitespaceRange, int Spaces,
unsigned StartOfTokenColumn, unsigned NewlinesBefore,
StringRef PreviousLinePostfix, StringRef CurrentLinePrefix,
- bool ContinuesPPDirective, bool IsInsideToken);
+ bool IsAligned, bool ContinuesPPDirective, bool IsInsideToken);
// The kind of the token whose whitespace this change replaces, or in which
// this change inserts whitespace.
@@ -125,6 +126,7 @@ public:
unsigned NewlinesBefore;
std::string PreviousLinePostfix;
std::string CurrentLinePrefix;
+ bool IsAligned;
bool ContinuesPPDirective;
// The number of spaces in front of the token or broken part of the token.
@@ -157,11 +159,16 @@ public:
const Change *StartOfBlockComment;
int IndentationOffset;
- // A combination of indent level and nesting level, which are used in
- // tandem to compute lexical scope, for the purposes of deciding
+ // Depth of conditionals. Computed by tracking fake parentheses, except
+ // it does not increase the indent for "chained" conditionals.
+ int ConditionalsLevel;
+
+ // A combination of indent, nesting and conditionals levels, which are used
+ // in tandem to compute lexical scope, for the purposes of deciding
// when to stop consecutive alignment runs.
- std::pair<unsigned, unsigned> indentAndNestingLevel() const {
- return std::make_pair(Tok->IndentLevel, Tok->NestingLevel);
+ std::tuple<unsigned, unsigned, unsigned> indentAndNestingLevel() const {
+ return std::make_tuple(Tok->IndentLevel, Tok->NestingLevel,
+ ConditionalsLevel);
}
};
@@ -177,9 +184,15 @@ private:
/// Align consecutive assignments over all \c Changes.
void alignConsecutiveAssignments();
+ /// Align consecutive bitfields over all \c Changes.
+ void alignConsecutiveBitFields();
+
/// Align consecutive declarations over all \c Changes.
void alignConsecutiveDeclarations();
+ /// Align chained conditionals over all \c Changes.
+ void alignChainedConditionals();
+
/// Align trailing comments over all \c Changes.
void alignTrailingComments();
@@ -204,7 +217,10 @@ private:
unsigned PreviousEndOfTokenColumn,
unsigned EscapedNewlineColumn);
void appendIndentText(std::string &Text, unsigned IndentLevel,
- unsigned Spaces, unsigned WhitespaceStartColumn);
+ unsigned Spaces, unsigned WhitespaceStartColumn,
+ bool IsAligned);
+ unsigned appendTabIndent(std::string &Text, unsigned Spaces,
+ unsigned Indentation);
SmallVector<Change, 16> Changes;
const SourceManager &SourceMgr;
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
index 043b2541b8f8..a73cc8876d5d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
@@ -36,10 +36,10 @@ namespace {
enum Kind { DumpFull, Dump, Print, None };
ASTPrinter(std::unique_ptr<raw_ostream> Out, Kind K,
ASTDumpOutputFormat Format, StringRef FilterString,
- bool DumpLookups = false)
+ bool DumpLookups = false, bool DumpDeclTypes = false)
: Out(Out ? *Out : llvm::outs()), OwnedOut(std::move(Out)),
OutputKind(K), OutputFormat(Format), FilterString(FilterString),
- DumpLookups(DumpLookups) {}
+ DumpLookups(DumpLookups), DumpDeclTypes(DumpDeclTypes) {}
void HandleTranslationUnit(ASTContext &Context) override {
TranslationUnitDecl *D = Context.getTranslationUnitDecl();
@@ -91,8 +91,22 @@ namespace {
} else if (OutputKind == Print) {
PrintingPolicy Policy(D->getASTContext().getLangOpts());
D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
- } else if (OutputKind != None)
+ } else if (OutputKind != None) {
D->dump(Out, OutputKind == DumpFull, OutputFormat);
+ }
+
+ if (DumpDeclTypes) {
+ Decl *InnerD = D;
+ if (auto *TD = dyn_cast<TemplateDecl>(D))
+ InnerD = TD->getTemplatedDecl();
+
+ // FIXME: Support OutputFormat in type dumping.
+ // FIXME: Support combining -ast-dump-decl-types with -ast-dump-lookups.
+ if (auto *VD = dyn_cast<ValueDecl>(InnerD))
+ VD->getType().dump(Out, VD->getASTContext());
+ if (auto *TD = dyn_cast<TypeDecl>(InnerD))
+ TD->getTypeForDecl()->dump(Out, TD->getASTContext());
+ }
}
raw_ostream &Out;
@@ -111,6 +125,9 @@ namespace {
/// results will be output with a format determined by OutputKind. This is
/// incompatible with OutputKind == Print.
bool DumpLookups;
+
+ /// Whether to dump the type for each declaration dumped.
+ bool DumpDeclTypes;
};
class ASTDeclNodeLister : public ASTConsumer,
@@ -146,13 +163,13 @@ clang::CreateASTPrinter(std::unique_ptr<raw_ostream> Out,
std::unique_ptr<ASTConsumer>
clang::CreateASTDumper(std::unique_ptr<raw_ostream> Out, StringRef FilterString,
bool DumpDecls, bool Deserialize, bool DumpLookups,
- ASTDumpOutputFormat Format) {
+ bool DumpDeclTypes, ASTDumpOutputFormat Format) {
assert((DumpDecls || Deserialize || DumpLookups) && "nothing to dump");
- return std::make_unique<ASTPrinter>(std::move(Out),
- Deserialize ? ASTPrinter::DumpFull :
- DumpDecls ? ASTPrinter::Dump :
- ASTPrinter::None, Format,
- FilterString, DumpLookups);
+ return std::make_unique<ASTPrinter>(
+ std::move(Out),
+ Deserialize ? ASTPrinter::DumpFull
+ : DumpDecls ? ASTPrinter::Dump : ASTPrinter::None,
+ Format, FilterString, DumpLookups, DumpDeclTypes);
}
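
A possible caller of the widened factory; the wrapper function and its name are assumptions, only the parameter list mirrors the signature above:

  #include "clang/AST/ASTDumperUtils.h"
  #include "clang/Frontend/ASTConsumers.h"
  #include <memory>

  // Dump every declaration and, via the new flag, the type of each one.
  std::unique_ptr<clang::ASTConsumer> makeDeclTypeDumper() {
    return clang::CreateASTDumper(/*Out=*/nullptr, // null falls back to llvm::outs()
                                  /*FilterString=*/"",
                                  /*DumpDecls=*/true,
                                  /*Deserialize=*/false,
                                  /*DumpLookups=*/false,
                                  /*DumpDeclTypes=*/true,
                                  clang::ADOF_Default);
  }
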
std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index b3264952ff47..57d025b7c32e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -224,7 +224,7 @@ struct ASTUnit::ASTWriterData {
};
void ASTUnit::clearFileLevelDecls() {
- llvm::DeleteContainerSeconds(FileDecls);
+ FileDecls.clear();
}
/// After failing to build a precompiled preamble (due to
@@ -784,7 +784,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
UserFilesAreVolatile);
AST->ModuleCache = new InMemoryModuleCache;
AST->HSOpts = std::make_shared<HeaderSearchOptions>();
- AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
+ AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormat());
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
@@ -847,7 +847,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
return nullptr;
}
- AST->OriginalSourceFile = AST->Reader->getOriginalSourceFile();
+ AST->OriginalSourceFile = std::string(AST->Reader->getOriginalSourceFile());
PP.setCounterValue(Counter);
@@ -1131,7 +1131,8 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
CICleanup(Clang.get());
Clang->setInvocation(CCInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -1260,13 +1261,13 @@ makeStandaloneDiagnostic(const LangOptions &LangOpts,
ASTUnit::StandaloneDiagnostic OutDiag;
OutDiag.ID = InDiag.getID();
OutDiag.Level = InDiag.getLevel();
- OutDiag.Message = InDiag.getMessage();
+ OutDiag.Message = std::string(InDiag.getMessage());
OutDiag.LocOffset = 0;
if (InDiag.getLocation().isInvalid())
return OutDiag;
const SourceManager &SM = InDiag.getLocation().getManager();
SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
- OutDiag.Filename = SM.getFilename(FileLoc);
+ OutDiag.Filename = std::string(SM.getFilename(FileLoc));
if (OutDiag.Filename.empty())
return OutDiag;
OutDiag.LocOffset = SM.getFileOffset(FileLoc);
@@ -1532,7 +1533,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
if (!ResourceFilesPath.empty()) {
// Override the resources path.
- CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath);
}
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -1564,7 +1565,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
CICleanup(Clang.get());
Clang->setInvocation(std::move(CI));
- AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ AST->OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -1767,13 +1769,14 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks;
// Override the resources path.
- CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath);
CI->getFrontendOpts().SkipFunctionBodies =
SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile;
if (ModuleFormat)
- CI->getHeaderSearchOpts().ModuleFormat = ModuleFormat.getValue();
+ CI->getHeaderSearchOpts().ModuleFormat =
+ std::string(ModuleFormat.getValue());
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
@@ -2165,7 +2168,7 @@ void ASTUnit::CodeComplete(
assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
- FrontendOpts.CodeCompletionAt.FileName = File;
+ FrontendOpts.CodeCompletionAt.FileName = std::string(File);
FrontendOpts.CodeCompletionAt.Line = Line;
FrontendOpts.CodeCompletionAt.Column = Column;
@@ -2185,7 +2188,8 @@ void ASTUnit::CodeComplete(
auto &Inv = *CCInvocation;
Clang->setInvocation(std::move(CCInvocation));
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
@@ -2432,9 +2436,9 @@ void ASTUnit::addFileLevelDecl(Decl *D) {
if (FID.isInvalid())
return;
- LocDeclsTy *&Decls = FileDecls[FID];
+ std::unique_ptr<LocDeclsTy> &Decls = FileDecls[FID];
if (!Decls)
- Decls = new LocDeclsTy();
+ Decls = std::make_unique<LocDeclsTy>();
std::pair<unsigned, Decl *> LocDecl(Offset, D);
diff --git a/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
index dec281529b9e..1486adf70c3f 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -189,7 +189,7 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
Clang->getASTConsumer().GetASTDeserializationListener());
if (!Reader)
return nullptr;
- Clang->setModuleManager(Reader);
+ Clang->setASTReader(Reader);
Clang->getASTContext().setExternalSource(Reader);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index 688f21dd0908..4613ed8d7f61 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -97,6 +97,10 @@ void CompilerInstance::setVerboseOutputStream(std::unique_ptr<raw_ostream> Value
void CompilerInstance::setTarget(TargetInfo *Value) { Target = Value; }
void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
+llvm::vfs::FileSystem &CompilerInstance::getVirtualFileSystem() const {
+ return getFileManager().getVirtualFileSystem();
+}
+
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
}
@@ -138,7 +142,7 @@ std::unique_ptr<Sema> CompilerInstance::takeSema() {
IntrusiveRefCntPtr<ASTReader> CompilerInstance::getASTReader() const {
return TheASTReader;
}
-void CompilerInstance::setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader) {
+void CompilerInstance::setASTReader(IntrusiveRefCntPtr<ASTReader> Reader) {
assert(ModuleCache.get() == &Reader->getModuleManager().getModuleCache() &&
"Expected ASTReader to use the same PCM cache");
TheASTReader = std::move(Reader);
@@ -379,7 +383,7 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
- // The module manager holds a reference to the old preprocessor (if any).
+ // The AST reader holds a reference to the old preprocessor (if any).
TheASTReader.reset();
// Create the Preprocessor.
@@ -474,7 +478,7 @@ std::string CompilerInstance::getSpecificModuleCachePath() {
if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache,
getInvocation().getModuleHash());
- return SpecificModuleCache.str();
+ return std::string(SpecificModuleCache.str());
}
// ASTContext
@@ -713,13 +717,13 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
std::string OutFile, TempFile;
if (!OutputPath.empty()) {
- OutFile = OutputPath;
+ OutFile = std::string(OutputPath);
} else if (InFile == "-") {
OutFile = "-";
} else if (!Extension.empty()) {
SmallString<128> Path(InFile);
llvm::sys::path::replace_extension(Path, Extension);
- OutFile = Path.str();
+ OutFile = std::string(Path.str());
} else {
OutFile = "-";
}
@@ -774,7 +778,7 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
if (!EC) {
OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
- OSFile = TempFile = TempPath.str();
+ OSFile = TempFile = std::string(TempPath.str());
}
// If we failed to create the temporary, fallback to writing to the file
// directly. This handles the corner case where we cannot write to the
@@ -811,17 +815,15 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
// Initialization Utilities
bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input){
- return InitializeSourceManager(
- Input, getDiagnostics(), getFileManager(), getSourceManager(),
- hasPreprocessor() ? &getPreprocessor().getHeaderSearchInfo() : nullptr,
- getDependencyOutputOpts(), getFrontendOpts());
+ return InitializeSourceManager(Input, getDiagnostics(), getFileManager(),
+ getSourceManager());
}
// static
-bool CompilerInstance::InitializeSourceManager(
- const FrontendInputFile &Input, DiagnosticsEngine &Diags,
- FileManager &FileMgr, SourceManager &SourceMgr, HeaderSearch *HS,
- DependencyOutputOptions &DepOpts, const FrontendOptions &Opts) {
+bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr) {
SrcMgr::CharacteristicKind Kind =
Input.getKind().getFormat() == InputKind::ModuleMap
? Input.isSystem() ? SrcMgr::C_System_ModuleMap
@@ -923,10 +925,27 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
!getFrontendOpts().AuxTriple.empty()) {
auto TO = std::make_shared<TargetOptions>();
TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
+ if (getFrontendOpts().AuxTargetCPU)
+ TO->CPU = getFrontendOpts().AuxTargetCPU.getValue();
+ if (getFrontendOpts().AuxTargetFeatures)
+ TO->FeaturesAsWritten = getFrontendOpts().AuxTargetFeatures.getValue();
TO->HostTriple = getTarget().getTriple().str();
setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
}
+ if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
+ if (getLangOpts().getFPRoundingMode() !=
+ llvm::RoundingMode::NearestTiesToEven) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
+ getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
+ }
+ if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
+ getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
+ }
+ // FIXME: can we disable FEnvAccess?
+ }
+
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
@@ -1073,7 +1092,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
ImportingInstance.getInvocation().getLangOpts()->ModuleName;
// Note the name of the module we're building.
- Invocation->getLangOpts()->CurrentModule = ModuleName;
+ Invocation->getLangOpts()->CurrentModule = std::string(ModuleName);
// Make sure that the failed-module structure has been allocated in
// the importing instance, and propagate the pointer to the newly-created
@@ -1093,7 +1112,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
FrontendOpts.DisableFree = false;
FrontendOpts.GenerateGlobalModuleIndex = false;
FrontendOpts.BuildingImplicitModule = true;
- FrontendOpts.OriginalModuleMap = OriginalModuleMapFile;
+ FrontendOpts.OriginalModuleMap = std::string(OriginalModuleMapFile);
// Force implicitly-built modules to hash the content of the module file.
HSOpts.ModulesHashContent = true;
FrontendOpts.Inputs = {Input};
@@ -1568,7 +1587,7 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
Stack.push_back(M);
while (!Stack.empty()) {
Module *Current = Stack.pop_back_val();
- if (Current->IsMissingRequirement) continue;
+ if (Current->IsUnimportable) continue;
Current->IsAvailable = true;
Stack.insert(Stack.end(),
Current->submodule_begin(), Current->submodule_end());
@@ -1630,10 +1649,10 @@ enum ModuleSource {
/// Select a source for loading the named module and compute the filename to
/// load it from.
-static ModuleSource
-selectModuleSource(Module *M, StringRef ModuleName, std::string &ModuleFilename,
- const std::map<std::string, std::string> &BuiltModules,
- HeaderSearch &HS) {
+static ModuleSource selectModuleSource(
+ Module *M, StringRef ModuleName, std::string &ModuleFilename,
+ const std::map<std::string, std::string, std::less<>> &BuiltModules,
+ HeaderSearch &HS) {
assert(ModuleFilename.empty() && "Already has a module source?");
// Check to see if the module has been built as part of this compilation
@@ -2077,7 +2096,7 @@ void CompilerInstance::createModuleFromSource(SourceLocation ImportLoc,
// Build the module, inheriting any modules that we've built locally.
if (compileModuleImpl(*this, ImportLoc, ModuleName, Input, StringRef(),
ModuleFileName, PreBuildStep, PostBuildStep)) {
- BuiltModules[ModuleName] = ModuleFileName.str();
+ BuiltModules[std::string(ModuleName)] = std::string(ModuleFileName.str());
llvm::sys::RemoveFileOnSignal(ModuleFileName);
}
}
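[Note] The selectModuleSource/BuiltModules change above gives the map a transparent comparator (std::less<>), so callers can look a module up by llvm::StringRef without first materialising a temporary std::string key. A minimal sketch of that idiom on its own; the module name and path are made up:

#include "llvm/ADT/StringRef.h"
#include <functional>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string, std::less<>> BuiltModules;
  BuiltModules["Foo"] = "/cache/Foo-1A2B.pcm";

  // std::less<> is transparent, which enables the template overload of find();
  // the StringRef key is compared directly against the stored std::string keys.
  llvm::StringRef Name = "Foo";
  auto Known = BuiltModules.find(Name);
  return Known != BuiltModules.end() ? 0 : 1;
}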
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index 11e56f2331b4..75d7cf5d26d3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -41,11 +41,13 @@
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/CodeCompleteOptions.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -116,6 +118,62 @@ CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
CompilerInvocationBase::~CompilerInvocationBase() = default;
//===----------------------------------------------------------------------===//
+// Normalizers
+//===----------------------------------------------------------------------===//
+
+#define SIMPLE_ENUM_VALUE_TABLE
+#include "clang/Driver/Options.inc"
+#undef SIMPLE_ENUM_VALUE_TABLE
+
+static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ assert(TableIndex < SimpleEnumValueTablesSize);
+ const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
+
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+
+ StringRef ArgValue = Arg->getValue();
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (ArgValue == Table.Table[I].Name)
+ return Table.Table[I].Value;
+
+ Diags.Report(diag::err_drv_invalid_value)
+ << Arg->getAsString(Args) << ArgValue;
+ return None;
+}
+
+static const char *denormalizeSimpleEnum(CompilerInvocation::StringAllocator SA,
+ unsigned TableIndex, unsigned Value) {
+ assert(TableIndex < SimpleEnumValueTablesSize);
+ const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (Value == Table.Table[I].Value)
+ return Table.Table[I].Name;
+
+ llvm_unreachable("The simple enum value was not correctly defined in "
+ "the tablegen option description");
+}
+
+static const char *denormalizeString(CompilerInvocation::StringAllocator SA,
+ unsigned TableIndex,
+ const std::string &Value) {
+ return SA(Value);
+}
+
+static Optional<std::string> normalizeTriple(OptSpecifier Opt, int TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+ return llvm::Triple::normalize(Arg->getValue());
+}
+
+//===----------------------------------------------------------------------===//
// Deserialization (from args)
//===----------------------------------------------------------------------===//
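[Note] The new normalizer/denormalizer helpers above translate between an option's command-line spelling and the value stored in CompilerInvocation, driven by enum-value tables generated into clang/Driver/Options.inc. The sketch below imitates that round trip with hand-written tables; the struct layout and table contents here are illustrative assumptions, not the generated ones.

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <cstring>

// Hand-rolled stand-ins for the generated SimpleEnumValue tables (illustrative).
struct EnumValue { const char *Name; unsigned Value; };
struct EnumValueTable { const EnumValue *Table; unsigned Size; };

static const EnumValue RelocNames[] = {{"static", 0}, {"pic", 1}, {"ropi", 2}};
static const EnumValueTable RelocTable = {RelocNames, 3};

// Normalize: spelling given on the command line -> stored integer value.
static llvm::Optional<unsigned> normalize(llvm::StringRef Spelling,
                                          const EnumValueTable &T) {
  for (unsigned I = 0; I != T.Size; ++I)
    if (Spelling == T.Table[I].Name)
      return T.Table[I].Value;
  return llvm::None;
}

// Denormalize: stored value -> canonical spelling, used to re-emit -cc1 args.
static const char *denormalize(unsigned Value, const EnumValueTable &T) {
  for (unsigned I = 0; I != T.Size; ++I)
    if (Value == T.Table[I].Value)
      return T.Table[I].Name;
  return nullptr;
}

int main() {
  llvm::Optional<unsigned> V = normalize("pic", RelocTable);
  assert(V && *V == 1);
  return std::strcmp(denormalize(*V, RelocTable), "pic") == 0 ? 0 : 1;
}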
@@ -135,7 +193,7 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
- if (S == "s" || S == "z" || S.empty())
+ if (S == "s" || S == "z")
return llvm::CodeGenOpt::Default;
if (S == "g")
@@ -170,10 +228,12 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
if (A->getOption().getKind() == Option::FlagClass) {
// The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add
// its name (minus the "W" or "R" at the beginning) to the warning list.
- Diagnostics.push_back(A->getOption().getName().drop_front(1));
+ Diagnostics.push_back(
+ std::string(A->getOption().getName().drop_front(1)));
} else if (A->getOption().matches(GroupWithValue)) {
// This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic group.
- Diagnostics.push_back(A->getOption().getName().drop_front(1).rtrim("=-"));
+ Diagnostics.push_back(
+ std::string(A->getOption().getName().drop_front(1).rtrim("=-")));
} else {
// Otherwise, add its value (for OPT_W_Joined and similar).
for (const auto *Arg : A->getValues())
@@ -307,14 +367,16 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.visualizeExplodedGraphWithGraphViz =
Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
- Opts.DumpExplodedGraphTo = Args.getLastArgValue(OPT_analyzer_dump_egraph);
+ Opts.DumpExplodedGraphTo =
+ std::string(Args.getLastArgValue(OPT_analyzer_dump_egraph));
Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
Opts.AnalyzerWerror = Args.hasArg(OPT_analyzer_werror);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
- Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
+ Opts.AnalyzeSpecificFunction =
+ std::string(Args.getLastArgValue(OPT_analyze_function));
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.maxBlockVisitOnPath =
@@ -335,7 +397,8 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
SmallVector<StringRef, 16> CheckersAndPackages;
CheckerAndPackageList.split(CheckersAndPackages, ",");
for (const StringRef &CheckerOrPackage : CheckersAndPackages)
- Opts.CheckersAndPackages.emplace_back(CheckerOrPackage, IsEnabled);
+ Opts.CheckersAndPackages.emplace_back(std::string(CheckerOrPackage),
+ IsEnabled);
}
// Go through the analyzer configuration options.
@@ -372,7 +435,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
A->claim();
- Opts.Config[key] = val;
+ Opts.Config[key] = std::string(val);
}
}
@@ -394,7 +457,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
static StringRef getStringOption(AnalyzerOptions::ConfigTable &Config,
StringRef OptionName, StringRef DefaultVal) {
- return Config.insert({OptionName, DefaultVal}).first->second;
+ return Config.insert({OptionName, std::string(DefaultVal)}).first->second;
}
static void initOption(AnalyzerOptions::ConfigTable &Config,
@@ -521,36 +584,6 @@ static void ParseCommentArgs(CommentOptions &Opts, ArgList &Args) {
Opts.ParseAllComments = Args.hasArg(OPT_fparse_all_comments);
}
-static StringRef getCodeModel(ArgList &Args, DiagnosticsEngine &Diags) {
- if (Arg *A = Args.getLastArg(OPT_mcode_model)) {
- StringRef Value = A->getValue();
- if (Value == "small" || Value == "kernel" || Value == "medium" ||
- Value == "large" || Value == "tiny")
- return Value;
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
- }
- return "default";
-}
-
-static llvm::Reloc::Model getRelocModel(ArgList &Args,
- DiagnosticsEngine &Diags) {
- if (Arg *A = Args.getLastArg(OPT_mrelocation_model)) {
- StringRef Value = A->getValue();
- auto RM = llvm::StringSwitch<llvm::Optional<llvm::Reloc::Model>>(Value)
- .Case("static", llvm::Reloc::Static)
- .Case("pic", llvm::Reloc::PIC_)
- .Case("ropi", llvm::Reloc::ROPI)
- .Case("rwpi", llvm::Reloc::RWPI)
- .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
- .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC)
- .Default(None);
- if (RM.hasValue())
- return *RM;
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
- }
- return llvm::Reloc::PIC_;
-}
-
/// Create a new Regex instance out of the string value in \p RpassArg.
/// It returns a pointer to the newly generated Regex instance.
static std::shared_ptr<llvm::Regex>
@@ -754,7 +787,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.setDebuggerTuning(static_cast<llvm::DebuggerKind>(Val));
}
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
- Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
+ Opts.DebugColumnInfo = !Args.hasArg(OPT_gno_column_info);
Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
Opts.CodeViewGHash = Args.hasArg(OPT_gcodeview_ghash);
Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
@@ -762,19 +795,21 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.VirtualFunctionElimination =
Args.hasArg(OPT_fvirtual_function_elimination);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
- Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
- Opts.SplitDwarfOutput = Args.getLastArgValue(OPT_split_dwarf_output);
+ Opts.SplitDwarfFile = std::string(Args.getLastArgValue(OPT_split_dwarf_file));
+ Opts.SplitDwarfOutput =
+ std::string(Args.getLastArgValue(OPT_split_dwarf_output));
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
Opts.EmbedSource = Args.hasArg(OPT_gembed_source);
+ Opts.ForceDwarfFrameSection = Args.hasArg(OPT_fforce_dwarf_frame);
- Opts.ForceDwarfFrameSection =
- Args.hasFlag(OPT_fforce_dwarf_frame, OPT_fno_force_dwarf_frame, false);
-
- for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
- Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
+ for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
+ auto Split = StringRef(Arg).split('=');
+ Opts.DebugPrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
if (const Arg *A =
Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
@@ -785,12 +820,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
const llvm::Triple::ArchType DebugEntryValueArchs[] = {
llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::aarch64,
- llvm::Triple::arm, llvm::Triple::armeb};
+ llvm::Triple::arm, llvm::Triple::armeb, llvm::Triple::mips,
+ llvm::Triple::mipsel, llvm::Triple::mips64, llvm::Triple::mips64el};
llvm::Triple T(TargetOpts.Triple);
if (Opts.OptimizationLevel > 0 && Opts.hasReducedDebugInfo() &&
llvm::is_contained(DebugEntryValueArchs, T.getArch()))
- Opts.EnableDebugEntryValues = Args.hasArg(OPT_femit_debug_entry_values);
+ Opts.EmitCallSiteInfo = true;
Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
@@ -805,10 +841,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.FineGrainedBitfieldAccesses =
Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
OPT_fno_fine_grained_bitfield_accesses, false);
- Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
- Opts.RecordCommandLine = Args.getLastArgValue(OPT_record_command_line);
+ Opts.DwarfDebugFlags =
+ std::string(Args.getLastArgValue(OPT_dwarf_debug_flags));
+ Opts.RecordCommandLine =
+ std::string(Args.getLastArgValue(OPT_record_command_line));
Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
- Opts.NoCommon = Args.hasArg(OPT_fno_common);
+ Opts.NoCommon = !Args.hasArg(OPT_fcommon);
Opts.NoInlineLineTables = Args.hasArg(OPT_gno_inline_line_tables);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
Opts.OptimizeSize = getOptimizationLevelSize(Args);
@@ -823,7 +861,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as);
Opts.Autolink = !Args.hasArg(OPT_fno_autolink);
- Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
+ Opts.SampleProfileFile =
+ std::string(Args.getLastArgValue(OPT_fprofile_sample_use_EQ));
Opts.DebugInfoForProfiling = Args.hasFlag(
OPT_fdebug_info_for_profiling, OPT_fno_debug_info_for_profiling, false);
Opts.DebugNameTable = static_cast<unsigned>(
@@ -836,13 +875,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
setPGOInstrumentor(Opts, Args, Diags);
Opts.InstrProfileOutput =
- Args.getLastArgValue(OPT_fprofile_instrument_path_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_instrument_path_EQ));
Opts.ProfileInstrumentUsePath =
- Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ));
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
Opts.ProfileRemappingFile =
- Args.getLastArgValue(OPT_fprofile_remapping_file_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_remapping_file_EQ));
if (!Opts.ProfileRemappingFile.empty() && !Opts.ExperimentalNewPassManager) {
Diags.Report(diag::err_drv_argument_only_allowed_with)
<< Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
@@ -852,7 +891,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CoverageMapping =
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
- Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.AsmVerbose = !Args.hasArg(OPT_fno_verbose_asm);
Opts.PreserveAsmComments = !Args.hasArg(OPT_fno_preserve_as_comments);
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
@@ -861,7 +900,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fregister_global_dtors_with_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.CodeModel = TargetOpts.CodeModel;
- Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
+ Opts.DebugPass = std::string(Args.getLastArgValue(OPT_mdebug_pass));
// Handle -mframe-pointer option.
if (Arg *A = Args.getLastArg(OPT_mframe_pointer_EQ)) {
@@ -883,49 +922,30 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.setFramePointer(FP);
}
- // -pg may override -mframe-pointer
- // TODO: This should be merged into getFramePointerKind in Clang.cpp.
- if (Args.hasArg(OPT_pg))
- Opts.setFramePointer(CodeGenOptions::FramePointerKind::All);
-
Opts.DisableFree = Args.hasArg(OPT_disable_free);
Opts.DiscardValueNames = Args.hasArg(OPT_discard_value_names);
Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
Opts.NoEscapingBlockTailCalls =
Args.hasArg(OPT_fno_escaping_block_tail_calls);
- Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
+ Opts.FloatABI = std::string(Args.getLastArgValue(OPT_mfloat_abi));
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
- Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.NoSignedZeros = (Args.hasArg(OPT_fno_signed_zeros) ||
- Args.hasArg(OPT_cl_no_signed_zeros) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.Reassociate = Args.hasArg(OPT_mreassociate);
- Opts.FlushDenorm = Args.hasArg(OPT_cl_denorms_are_zero) ||
- (Args.hasArg(OPT_fcuda_is_device) &&
- Args.hasArg(OPT_fcuda_flush_denormals_to_zero));
+ Opts.LimitFloatPrecision =
+ std::string(Args.getLastArgValue(OPT_mlimit_float_precision));
Opts.CorrectlyRoundedDivSqrt =
Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
Opts.UniformWGSize =
Args.hasArg(OPT_cl_uniform_work_group_size);
Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
- Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
- Opts.NoTrappingMath = Args.hasArg(OPT_fno_trapping_math);
Opts.StrictFloatCastOverflow =
!Args.hasArg(OPT_fno_strict_float_cast_overflow);
- Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
+ Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_fno_zero_initialized_in_bss);
Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
+ Opts.SmallDataLimit =
+ getLastArgIntValue(Args, OPT_msmall_data_limit, 0, Diags);
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.NoWarn = Args.hasArg(OPT_massembler_no_warn);
Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
@@ -942,27 +962,31 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
- Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
- Opts.RelocationModel = getRelocModel(Args, Diags);
- Opts.ThreadModel = Args.getLastArgValue(OPT_mthread_model, "posix");
+ Opts.ThreadModel =
+ std::string(Args.getLastArgValue(OPT_mthread_model, "posix"));
if (Opts.ThreadModel != "posix" && Opts.ThreadModel != "single")
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_mthread_model)->getAsString(Args)
<< Opts.ThreadModel;
- Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
+ Opts.TrapFuncName = std::string(Args.getLastArgValue(OPT_ftrap_function_EQ));
Opts.UseInitArray = !Args.hasArg(OPT_fno_use_init_array);
- Opts.FunctionSections = Args.hasFlag(OPT_ffunction_sections,
- OPT_fno_function_sections, false);
- Opts.DataSections = Args.hasFlag(OPT_fdata_sections,
- OPT_fno_data_sections, false);
- Opts.StackSizeSection =
- Args.hasFlag(OPT_fstack_size_section, OPT_fno_stack_size_section, false);
- Opts.UniqueSectionNames = Args.hasFlag(OPT_funique_section_names,
- OPT_fno_unique_section_names, true);
+ Opts.BBSections =
+ std::string(Args.getLastArgValue(OPT_fbasic_block_sections_EQ, "none"));
+
+ // Basic Block Sections implies Function Sections.
+ Opts.FunctionSections =
+ Args.hasArg(OPT_ffunction_sections) ||
+ (Opts.BBSections != "none" && Opts.BBSections != "labels");
+
+ Opts.DataSections = Args.hasArg(OPT_fdata_sections);
+ Opts.StackSizeSection = Args.hasArg(OPT_fstack_size_section);
+ Opts.UniqueSectionNames = !Args.hasArg(OPT_fno_unique_section_names);
+ Opts.UniqueBasicBlockSectionNames =
+ Args.hasArg(OPT_funique_basic_block_section_names);
+ Opts.UniqueInternalLinkageNames =
+ Args.hasArg(OPT_funique_internal_linkage_names);
Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
@@ -987,7 +1011,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (IK.getLanguage() != Language::LLVM_IR)
Diags.Report(diag::err_drv_argument_only_allowed_with)
<< A->getAsString(Args) << "-x ir";
- Opts.ThinLTOIndexFile = Args.getLastArgValue(OPT_fthinlto_index_EQ);
+ Opts.ThinLTOIndexFile =
+ std::string(Args.getLastArgValue(OPT_fthinlto_index_EQ));
}
if (Arg *A = Args.getLastArg(OPT_save_temps_EQ))
Opts.SaveTempsFilePrefix =
@@ -995,16 +1020,18 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
.Case("obj", FrontendOpts.OutputFile)
.Default(llvm::sys::path::filename(FrontendOpts.OutputFile).str());
- Opts.ThinLinkBitcodeFile = Args.getLastArgValue(OPT_fthin_link_bitcode_EQ);
+ Opts.ThinLinkBitcodeFile =
+ std::string(Args.getLastArgValue(OPT_fthin_link_bitcode_EQ));
Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp);
- Opts.PreferVectorWidth = Args.getLastArgValue(OPT_mprefer_vector_width_EQ);
+ Opts.PreferVectorWidth =
+ std::string(Args.getLastArgValue(OPT_mprefer_vector_width_EQ));
- Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
+ Opts.MainFileName = std::string(Args.getLastArgValue(OPT_main_file_name));
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
Opts.ControlFlowGuardNoChecks = Args.hasArg(OPT_cfguard_no_checks);
@@ -1014,17 +1041,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
- Opts.CoverageDataFile = Args.getLastArgValue(OPT_coverage_data_file);
- Opts.CoverageNotesFile = Args.getLastArgValue(OPT_coverage_notes_file);
- Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
- Opts.CoverageNoFunctionNamesInData =
- Args.hasArg(OPT_coverage_no_function_names_in_data);
+ Opts.CoverageDataFile =
+ std::string(Args.getLastArgValue(OPT_coverage_data_file));
+ Opts.CoverageNotesFile =
+ std::string(Args.getLastArgValue(OPT_coverage_notes_file));
Opts.ProfileFilterFiles =
- Args.getLastArgValue(OPT_fprofile_filter_files_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_filter_files_EQ));
Opts.ProfileExcludeFiles =
- Args.getLastArgValue(OPT_fprofile_exclude_files_EQ);
- Opts.CoverageExitBlockBeforeBody =
- Args.hasArg(OPT_coverage_exit_block_before_body);
+ std::string(Args.getLastArgValue(OPT_fprofile_exclude_files_EQ));
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
if (CoverageVersion.size() != 4) {
@@ -1062,8 +1086,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
A->getOption().getID() == options::OPT_INPUT ||
A->getOption().getID() == options::OPT_x ||
A->getOption().getID() == options::OPT_fembed_bitcode ||
- (A->getOption().getGroup().isValid() &&
- A->getOption().getGroup().getID() == options::OPT_W_Group))
+ A->getOption().matches(options::OPT_W_Group))
continue;
ArgStringList ASL;
A->render(Args, ASL);
@@ -1091,6 +1114,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fxray_always_emit_typedevents);
Opts.XRayInstructionThreshold =
getLastArgIntValue(Args, OPT_fxray_instruction_threshold_EQ, 200, Diags);
+ Opts.XRayIgnoreLoops = Args.hasArg(OPT_fxray_ignore_loops);
+ Opts.XRayOmitFunctionIndex = Args.hasArg(OPT_fno_xray_function_index);
auto XRayInstrBundles =
Args.getAllArgValues(OPT_fxray_instrumentation_bundle);
@@ -1143,7 +1168,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
- Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
+ Opts.DebugCompilationDir =
+ std::string(Args.getLastArgValue(OPT_fdebug_compilation_dir));
for (auto *A :
Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
@@ -1173,9 +1199,15 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeCoverageNoPrune = Args.hasArg(OPT_fsanitize_coverage_no_prune);
Opts.SanitizeCoverageInline8bitCounters =
Args.hasArg(OPT_fsanitize_coverage_inline_8bit_counters);
+ Opts.SanitizeCoverageInlineBoolFlag =
+ Args.hasArg(OPT_fsanitize_coverage_inline_bool_flag);
Opts.SanitizeCoveragePCTable = Args.hasArg(OPT_fsanitize_coverage_pc_table);
Opts.SanitizeCoverageStackDepth =
Args.hasArg(OPT_fsanitize_coverage_stack_depth);
+ Opts.SanitizeCoverageAllowlistFiles =
+ Args.getAllArgValues(OPT_fsanitize_coverage_allowlist);
+ Opts.SanitizeCoverageBlocklistFiles =
+ Args.getAllArgValues(OPT_fsanitize_coverage_blocklist);
Opts.SanitizeMemoryTrackOrigins =
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeMemoryUseAfterDtor =
@@ -1227,6 +1259,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoStackArgProbe = Args.hasArg(OPT_mno_stack_arg_probe);
+ Opts.StackClashProtector = Args.hasArg(OPT_fstack_clash_protection);
+
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
StringRef Name = A->getValue();
unsigned Method = llvm::StringSwitch<unsigned>(Name)
@@ -1275,7 +1309,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
- if (Opts.FPDenormalMode == llvm::DenormalMode::Invalid)
+ if (!Opts.FPDenormalMode.isValid())
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_f32_EQ)) {
+ StringRef Val = A->getValue();
+ Opts.FP32DenormalMode = llvm::parseDenormalFPAttribute(Val);
+ if (!Opts.FP32DenormalMode.isValid())
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
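[Note] The denormal handling above now validates the parsed mode with DenormalMode::isValid() instead of comparing against a sentinel, and gains a separate -fdenormal-fp-math-f32= override. A small standalone check of the parse-and-validate step, assuming LLVM's FloatingPointMode.h API; the spellings tested are examples, not taken from the patch:

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

int main() {
  // A recognised spelling yields a valid mode; anything else comes back as the
  // invalid sentinel, which the callers above turn into err_drv_invalid_value.
  llvm::DenormalMode Good = llvm::parseDenormalFPAttribute("preserve-sign");
  llvm::DenormalMode Bad = llvm::parseDenormalFPAttribute("not-a-mode");
  assert(Good.isValid() && !Bad.isValid());
  return Good.isValid() && !Bad.isValid() ? 0 : 1;
}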
@@ -1284,6 +1325,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (Arg *A =
Args.getLastArg(OPT_fpcc_struct_return, OPT_freg_struct_return,
OPT_maix_struct_return, OPT_msvr4_struct_return)) {
+ // TODO: We might want to consider enabling these options on AIX in the
+ // future.
+ if (T.isOSAIX())
+ Diags.Report(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << T.str();
+
const Option &O = A->getOption();
if (O.matches(OPT_fpcc_struct_return) ||
O.matches(OPT_maix_struct_return)) {
@@ -1299,7 +1346,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.LinkerOptions = Args.getAllArgValues(OPT_linker_option);
bool NeedLocTracking = false;
- Opts.OptRecordFile = Args.getLastArgValue(OPT_opt_record_file);
+ Opts.OptRecordFile = std::string(Args.getLastArgValue(OPT_opt_record_file));
if (!Opts.OptRecordFile.empty())
NeedLocTracking = true;
@@ -1372,7 +1419,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeTrap);
Opts.CudaGpuBinaryFileName =
- Args.getLastArgValue(OPT_fcuda_include_gpubinary);
+ std::string(Args.getLastArgValue(OPT_fcuda_include_gpubinary));
Opts.Backchain = Args.hasArg(OPT_mbackchain);
@@ -1383,38 +1430,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.Addrsig = Args.hasArg(OPT_faddrsig);
- if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
- StringRef SignScope = A->getValue();
-
- if (SignScope.equals_lower("none"))
- Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::None);
- else if (SignScope.equals_lower("all"))
- Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::All);
- else if (SignScope.equals_lower("non-leaf"))
- Opts.setSignReturnAddress(
- CodeGenOptions::SignReturnAddressScope::NonLeaf);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << SignScope;
-
- if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
- StringRef SignKey = A->getValue();
- if (!SignScope.empty() && !SignKey.empty()) {
- if (SignKey.equals_lower("a_key"))
- Opts.setSignReturnAddressKey(
- CodeGenOptions::SignReturnAddressKeyValue::AKey);
- else if (SignKey.equals_lower("b_key"))
- Opts.setSignReturnAddressKey(
- CodeGenOptions::SignReturnAddressKeyValue::BKey);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << SignKey;
- }
- }
- }
-
- Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
-
Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
@@ -1423,20 +1438,23 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.PassPlugins = Args.getAllArgValues(OPT_fpass_plugin_EQ);
- Opts.SymbolPartition = Args.getLastArgValue(OPT_fsymbol_partition_EQ);
+ Opts.SymbolPartition =
+ std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
+ Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
ArgList &Args) {
- Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
+ Opts.OutputFile = std::string(Args.getLastArgValue(OPT_dependency_file));
Opts.Targets = Args.getAllArgValues(OPT_MT);
Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
Opts.IncludeModuleFiles = Args.hasArg(OPT_module_file_deps);
Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
- Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
+ Opts.HeaderIncludeOutputFile =
+ std::string(Args.getLastArgValue(OPT_header_include_file));
Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
if (Args.hasArg(OPT_show_includes)) {
// Writing both /showIncludes and preprocessor output to stdout
@@ -1449,9 +1467,9 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
} else {
Opts.ShowIncludesDest = ShowIncludesDestination::None;
}
- Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
+ Opts.DOTOutputFile = std::string(Args.getLastArgValue(OPT_dependency_dot));
Opts.ModuleDependencyOutputDir =
- Args.getLastArgValue(OPT_module_dependency_dir);
+ std::string(Args.getLastArgValue(OPT_module_dependency_dir));
if (Args.hasArg(OPT_MV))
Opts.OutputFormat = DependencyOutputFormat::NMake;
// Add sanitizer blacklists as extra dependencies.
@@ -1461,13 +1479,13 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
for (const auto *A : Args.filtered(OPT_fsanitize_blacklist)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
if (Opts.IncludeSystemHeaders) {
for (const auto *A : Args.filtered(OPT_fsanitize_system_blacklist)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
}
}
@@ -1481,7 +1499,7 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
}
@@ -1540,10 +1558,11 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
- bool DefaultDiagColor, bool DefaultShowOpt) {
+ bool DefaultDiagColor) {
bool Success = true;
- Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
+ Opts.DiagnosticLogFile =
+ std::string(Args.getLastArgValue(OPT_diagnostic_log_file));
if (Arg *A =
Args.getLastArg(OPT_diagnostic_serialized_file, OPT__serialize_diags))
Opts.DiagnosticSerializationFile = A->getValue();
@@ -1553,17 +1572,11 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.PedanticErrors = Args.hasArg(OPT_pedantic_errors);
Opts.ShowCarets = !Args.hasArg(OPT_fno_caret_diagnostics);
Opts.ShowColors = parseShowColorsArgs(Args, DefaultDiagColor);
- Opts.ShowColumn = Args.hasFlag(OPT_fshow_column,
- OPT_fno_show_column,
- /*Default=*/true);
+ Opts.ShowColumn = !Args.hasArg(OPT_fno_show_column);
Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
Opts.AbsolutePath = Args.hasArg(OPT_fdiagnostics_absolute_paths);
- Opts.ShowOptionNames =
- Args.hasFlag(OPT_fdiagnostics_show_option,
- OPT_fno_diagnostics_show_option, DefaultShowOpt);
-
- llvm::sys::Process::UseANSIEscapeCodes(Args.hasArg(OPT_fansi_escape_codes));
+ Opts.ShowOptionNames = !Args.hasArg(OPT_fno_diagnostics_show_option);
// Default behavior is to not to show note include stacks.
Opts.ShowNoteIncludeStack = false;
@@ -1669,7 +1682,11 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Diags->Report(diag::warn_ignoring_ftabstop_value)
<< Opts.TabStop << DiagnosticOptions::DefaultTabStop;
}
- Opts.MessageLength = getLastArgIntValue(Args, OPT_fmessage_length, 0, Diags);
+ Opts.MessageLength =
+ getLastArgIntValue(Args, OPT_fmessage_length_EQ, 0, Diags);
+
+ Opts.UndefPrefixes = Args.getAllArgValues(OPT_Wundef_prefix_EQ);
+
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
addDiagnosticArgs(Args, OPT_R_Group, OPT_R_value_Group, Opts.Remarks);
@@ -1677,7 +1694,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
}
static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
- Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
+ Opts.WorkingDir = std::string(Args.getLastArgValue(OPT_working_directory));
}
/// Parse the argument to the -ftest-module-file-extension
@@ -1695,12 +1712,12 @@ static bool parseTestModuleFileExtensionArg(StringRef Arg,
if (Args.size() < 5)
return true;
- BlockName = Args[0];
+ BlockName = std::string(Args[0]);
if (Args[1].getAsInteger(10, MajorVersion)) return true;
if (Args[2].getAsInteger(10, MinorVersion)) return true;
if (Args[3].getAsInteger(2, Hashed)) return true;
if (Args.size() > 4)
- UserInfo = Args[4];
+ UserInfo = std::string(Args[4]);
return false;
}
@@ -1733,6 +1750,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
case OPT_ast_dump:
case OPT_ast_dump_all:
case OPT_ast_dump_lookups:
+ case OPT_ast_dump_decl_types:
Opts.ProgramAction = frontend::ASTDump; break;
case OPT_ast_print:
Opts.ProgramAction = frontend::ASTPrint; break;
@@ -1775,25 +1793,26 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
StringRef ArgStr =
Args.hasArg(OPT_interface_stub_version_EQ)
? Args.getLastArgValue(OPT_interface_stub_version_EQ)
- : "experimental-ifs-v1";
+ : "experimental-ifs-v2";
if (ArgStr == "experimental-yaml-elf-v1" ||
+ ArgStr == "experimental-ifs-v1" ||
ArgStr == "experimental-tapi-elf-v1") {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() +
" is deprecated.";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v1"
+ "-interface-stub-version=experimental-ifs-v2"
<< ErrorMessage;
- } else if (ArgStr != "experimental-ifs-v1") {
+ } else if (!ArgStr.startswith("experimental-ifs-")) {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() + ".";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v1"
+ "-interface-stub-version=experimental-ifs-v2"
<< ErrorMessage;
} else {
- Opts.ProgramAction = frontend::GenerateInterfaceIfsExpV1;
+ Opts.ProgramAction = frontend::GenerateInterfaceStubs;
}
break;
}
@@ -1868,7 +1887,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
}
Opts.DisableFree = Args.hasArg(OPT_disable_free);
- Opts.OutputFile = Args.getLastArgValue(OPT_o);
+ Opts.OutputFile = std::string(Args.getLastArgValue(OPT_o));
Opts.Plugins = Args.getAllArgValues(OPT_load);
Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
Opts.ShowHelp = Args.hasArg(OPT_help);
@@ -1887,8 +1906,9 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump, OPT_ast_dump_EQ);
Opts.ASTDumpAll = Args.hasArg(OPT_ast_dump_all, OPT_ast_dump_all_EQ);
- Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
+ Opts.ASTDumpFilter = std::string(Args.getLastArgValue(OPT_ast_dump_filter));
Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
+ Opts.ASTDumpDeclTypes = Args.hasArg(OPT_ast_dump_decl_types);
Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
@@ -1896,12 +1916,17 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ModuleFiles.push_back(Val);
+ Opts.ModuleFiles.push_back(std::string(Val));
}
Opts.ModulesEmbedFiles = Args.getAllArgValues(OPT_fmodules_embed_file_EQ);
Opts.ModulesEmbedAllFiles = Args.hasArg(OPT_fmodules_embed_all_files);
Opts.IncludeTimestamps = !Args.hasArg(OPT_fno_pch_timestamp);
Opts.UseTemporary = !Args.hasArg(OPT_fno_temp_file);
+ Opts.IsSystemModule = Args.hasArg(OPT_fsystem_module);
+
+ if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule)
+ Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module"
+ << "-emit-module";
Opts.CodeCompleteOpts.IncludeMacros
= Args.hasArg(OPT_code_completion_macros);
@@ -1916,10 +1941,14 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.CodeCompleteOpts.IncludeFixIts
= Args.hasArg(OPT_code_completion_with_fixits);
- Opts.OverrideRecordLayoutsFile
- = Args.getLastArgValue(OPT_foverride_record_layout_EQ);
- Opts.AuxTriple = Args.getLastArgValue(OPT_aux_triple);
- Opts.StatsFile = Args.getLastArgValue(OPT_stats_file);
+ Opts.OverrideRecordLayoutsFile =
+ std::string(Args.getLastArgValue(OPT_foverride_record_layout_EQ));
+ Opts.AuxTriple = std::string(Args.getLastArgValue(OPT_aux_triple));
+ if (Args.hasArg(OPT_aux_target_cpu))
+ Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu));
+ if (Args.hasArg(OPT_aux_target_feature))
+ Opts.AuxTargetFeatures = Args.getAllArgValues(OPT_aux_target_feature);
+ Opts.StatsFile = std::string(Args.getLastArgValue(OPT_stats_file));
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
OPT_arcmt_modify,
@@ -1938,9 +1967,10 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
break;
}
}
- Opts.MTMigrateDir = Args.getLastArgValue(OPT_mt_migrate_directory);
- Opts.ARCMTMigrateReportOut
- = Args.getLastArgValue(OPT_arcmt_migrate_report_output);
+ Opts.MTMigrateDir =
+ std::string(Args.getLastArgValue(OPT_mt_migrate_directory));
+ Opts.ARCMTMigrateReportOut =
+ std::string(Args.getLastArgValue(OPT_arcmt_migrate_report_output));
Opts.ARCMTMigrateEmitARCErrors
= Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
@@ -1975,7 +2005,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (Args.hasArg(OPT_objcmt_migrate_all))
Opts.ObjCMTAction |= FrontendOptions::ObjCMT_MigrateDecls;
- Opts.ObjCMTWhiteListPath = Args.getLastArgValue(OPT_objcmt_whitelist_dir_path);
+ Opts.ObjCMTWhiteListPath =
+ std::string(Args.getLastArgValue(OPT_objcmt_whitelist_dir_path));
if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
@@ -2052,12 +2083,16 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DashX = IK;
}
+ bool IsSystem = false;
+
// The -emit-module action implicitly takes a module map.
if (Opts.ProgramAction == frontend::GenerateModule &&
- IK.getFormat() == InputKind::Source)
+ IK.getFormat() == InputKind::Source) {
IK = IK.withFormat(InputKind::ModuleMap);
+ IsSystem = Opts.IsSystemModule;
+ }
- Opts.Inputs.emplace_back(std::move(Inputs[i]), IK);
+ Opts.Inputs.emplace_back(std::move(Inputs[i]), IK, IsSystem);
}
return DashX;
@@ -2072,14 +2107,14 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
const std::string &WorkingDir) {
- Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
+ Opts.Sysroot = std::string(Args.getLastArgValue(OPT_isysroot, "/"));
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
- Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
+ Opts.ResourceDir = std::string(Args.getLastArgValue(OPT_resource_dir));
// Canonicalize -fmodules-cache-path before storing it.
SmallString<128> P(Args.getLastArgValue(OPT_fmodules_cache_path));
@@ -2090,20 +2125,23 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
llvm::sys::fs::make_absolute(WorkingDir, P);
}
llvm::sys::path::remove_dots(P);
- Opts.ModuleCachePath = P.str();
+ Opts.ModuleCachePath = std::string(P.str());
- Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
+ Opts.ModuleUserBuildPath =
+ std::string(Args.getLastArgValue(OPT_fmodules_user_build_path));
// Only the -fmodule-file=<name>=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
- if (Val.find('=') != StringRef::npos)
- Opts.PrebuiltModuleFiles.insert(Val.split('='));
+ if (Val.find('=') != StringRef::npos){
+ auto Split = Val.split('=');
+ Opts.PrebuiltModuleFiles.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
}
for (const auto *A : Args.filtered(OPT_fprebuilt_module_path))
Opts.AddPrebuiltModulePath(A->getValue());
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
Opts.ModulesHashContent = Args.hasArg(OPT_fmodules_hash_content);
- Opts.ModulesStrictContextHash = Args.hasArg(OPT_fmodules_strict_context_hash);
Opts.ModulesValidateDiagnosticOptions =
!Args.hasArg(OPT_fmodules_disable_diagnostic_validation);
Opts.ImplicitModuleMaps = Args.hasArg(OPT_fimplicit_module_maps);
@@ -2150,7 +2188,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
SmallString<32> Buffer;
llvm::sys::path::append(Buffer, Opts.Sysroot,
llvm::StringRef(A->getValue()).substr(1));
- Path = Buffer.str();
+ Path = std::string(Buffer.str());
}
Opts.AddPath(Path, Group, IsFramework,
@@ -2250,7 +2288,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (T.isPS4())
LangStd = LangStandard::lang_gnu99;
else
- LangStd = LangStandard::lang_gnu11;
+ LangStd = LangStandard::lang_gnu17;
#endif
break;
case Language::ObjC:
@@ -2287,7 +2325,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.CPlusPlus11 = Std.isCPlusPlus11();
Opts.CPlusPlus14 = Std.isCPlusPlus14();
Opts.CPlusPlus17 = Std.isCPlusPlus17();
- Opts.CPlusPlus2a = Std.isCPlusPlus2a();
+ Opts.CPlusPlus20 = Std.isCPlusPlus20();
Opts.Digraphs = Std.hasDigraphs();
Opts.GNUMode = Std.isGNUMode();
Opts.GNUInline = !Opts.C99 && !Opts.CPlusPlus;
@@ -2313,7 +2351,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.AltiVec = 0;
Opts.ZVector = 0;
Opts.setLaxVectorConversions(LangOptions::LaxVectorConversionKind::None);
- Opts.setDefaultFPContractMode(LangOptions::FPC_On);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
Opts.NativeHalfType = 1;
Opts.NativeHalfArgsAndReturns = 1;
Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
@@ -2333,7 +2371,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
if (Opts.CUDA)
// Set default FP_CONTRACT to FAST.
- Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
Opts.RenderScript = IK.getLanguage() == Language::RenderScript;
if (Opts.RenderScript) {
@@ -2522,6 +2560,24 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
LangStd = OpenCLLangStd;
}
+ Opts.SYCL = Args.hasArg(options::OPT_fsycl);
+ Opts.SYCLIsDevice = Opts.SYCL && Args.hasArg(options::OPT_fsycl_is_device);
+ if (Opts.SYCL) {
+ // -sycl-std applies to any SYCL source, not only those containing kernels,
+ // but also those using the SYCL API
+ if (const Arg *A = Args.getLastArg(OPT_sycl_std_EQ)) {
+ Opts.SYCLVersion = llvm::StringSwitch<unsigned>(A->getValue())
+ .Cases("2017", "1.2.1", "121", "sycl-1.2.1", 2017)
+ .Default(0U);
+
+ if (Opts.SYCLVersion == 0U) {
+ // User has passed an invalid value to the flag, this is an error
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ }
+ }
+
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
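[Note] The new -sycl-std= handling above folds several accepted spellings into one version number with llvm::StringSwitch and treats everything else as an error. A minimal standalone version of that lookup; the helper name is made up:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Mirrors the mapping in the hunk above: several spellings -> version 2017,
// anything unrecognised -> 0, which the caller then diagnoses.
static unsigned parseSYCLStd(llvm::StringRef Value) {
  return llvm::StringSwitch<unsigned>(Value)
      .Cases("2017", "1.2.1", "121", "sycl-1.2.1", 2017)
      .Default(0U);
}

int main() {
  return (parseSYCLStd("sycl-1.2.1") == 2017 && parseSYCLStd("banana") == 0) ? 0 : 1;
}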
@@ -2708,7 +2764,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
Opts.OverflowHandler =
- Args.getLastArgValue(OPT_ftrapv_handler);
+ std::string(Args.getLastArgValue(OPT_ftrapv_handler));
}
else if (Args.hasArg(OPT_fwrapv))
Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
@@ -2759,6 +2815,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_threadsafe_statics))
Opts.ThreadsafeStatics = 0;
Opts.Exceptions = Args.hasArg(OPT_fexceptions);
+ Opts.IgnoreExceptions = Args.hasArg(OPT_fignore_exceptions);
Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
@@ -2797,7 +2854,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
- Opts.Coroutines = Opts.CPlusPlus2a || Args.hasArg(OPT_fcoroutines_ts);
+ Opts.Coroutines = Opts.CPlusPlus20 || Args.hasArg(OPT_fcoroutines_ts);
Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
Args.hasArg(OPT_fconvergent_functions);
@@ -2807,7 +2864,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
OPT_fno_double_square_bracket_attributes,
Opts.DoubleSquareBracketAttributes);
- Opts.CPlusPlusModules = Opts.CPlusPlus2a;
+ Opts.CPlusPlusModules = Opts.CPlusPlus20;
Opts.ModulesTS = Args.hasArg(OPT_fmodules_ts);
Opts.Modules =
Args.hasArg(OPT_fmodules) || Opts.ModulesTS || Opts.CPlusPlusModules;
@@ -2828,7 +2885,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
- Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus2a);
+ Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus20);
if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
.Case("char", 1)
@@ -2865,6 +2922,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
!Args.hasArg(OPT_fno_concept_satisfaction_caching);
if (Args.hasArg(OPT_fconcepts_ts))
Diags.Report(diag::warn_fe_concepts_ts_flag);
+ // Recovery AST still heavily relies on dependent-type machinery.
+ Opts.RecoveryAST =
+ Args.hasFlag(OPT_frecovery_ast, OPT_fno_recovery_ast, Opts.CPlusPlus);
+ Opts.RecoveryASTType =
+ Args.hasFlag(OPT_frecovery_ast_type, OPT_fno_recovery_ast_type, false);
Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
@@ -2885,7 +2947,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
getLastArgIntValue(Args, OPT_Wlarge_by_value_copy_EQ, 0, Diags);
Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
Opts.ObjCConstantStringClass =
- Args.getLastArgValue(OPT_fconstant_string_class);
+ std::string(Args.getLastArgValue(OPT_fconstant_string_class));
Opts.ObjCDefaultSynthProperties =
!Args.hasArg(OPT_disable_objc_default_synthesize_properties);
Opts.EncodeExtendedBlockSig =
@@ -2894,6 +2956,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
Opts.AlignDouble = Args.hasArg(OPT_malign_double);
+ Opts.DoubleSize = getLastArgIntValue(Args, OPT_mdouble_EQ, 0, Diags);
Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
? 128
: Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
@@ -2911,6 +2974,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
+ if (Opts.FastRelaxedMath)
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
@@ -2918,7 +2983,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
- Opts.ModuleName = Args.getLastArgValue(OPT_fmodule_name_EQ);
+ Opts.ModuleName = std::string(Args.getLastArgValue(OPT_fmodule_name_EQ));
Opts.CurrentModule = Opts.ModuleName;
Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
@@ -3013,6 +3078,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setDefaultCallingConv(DefaultCC);
}
+ Opts.SemanticInterposition = Args.hasArg(OPT_fsemantic_interposition);
+ // An explicit -fno-semantic-interposition infers dso_local.
+ Opts.ExplicitNoSemanticInterposition =
+ Args.hasArg(OPT_fno_semantic_interposition);
+
// -mrtd option
if (Arg *A = Args.getLastArg(OPT_mrtd)) {
if (Opts.getDefaultCallingConv() != LangOptions::DCC_None)
@@ -3028,8 +3098,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
- // Check if -fopenmp is specified and set default version to 4.5.
- Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 45 : 0;
+ // Check if -fopenmp is specified and set default version to 5.0.
+ Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 50 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -3047,10 +3117,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Opts.OpenMP || Opts.OpenMPSimd) {
if (int Version = getLastArgIntValue(
Args, OPT_fopenmp_version_EQ,
- (IsSimdSpecified || IsTargetSpecified) ? 45 : Opts.OpenMP, Diags))
+ (IsSimdSpecified || IsTargetSpecified) ? 50 : Opts.OpenMP, Diags))
Opts.OpenMP = Version;
- else if (IsSimdSpecified || IsTargetSpecified)
- Opts.OpenMP = 45;
// Provide diagnostic when a given target is not expected to be an OpenMP
// device or host.
if (!Opts.OpenMPIsDevice) {
@@ -3069,7 +3137,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- if ((Opts.OpenMPIsDevice && T.isNVPTX()) || Opts.OpenCLCPlusPlus) {
+ if ((Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN())) ||
+ Opts.OpenCLCPlusPlus) {
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
@@ -3103,6 +3172,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
TT.getArch() == llvm::Triple::ppc64le ||
TT.getArch() == llvm::Triple::nvptx ||
TT.getArch() == llvm::Triple::nvptx64 ||
+ TT.getArch() == llvm::Triple::amdgcn ||
TT.getArch() == llvm::Triple::x86 ||
TT.getArch() == llvm::Triple::x86_64))
Diags.Report(diag::err_drv_invalid_omp_target) << A->getValue(i);
@@ -3120,15 +3190,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< Opts.OMPHostIRFile;
}
- Opts.SYCLIsDevice = Args.hasArg(options::OPT_fsycl_is_device);
-
- // Set CUDA mode for OpenMP target NVPTX if specified in options
- Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && T.isNVPTX() &&
+ // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
+ Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
- // Set CUDA mode for OpenMP target NVPTX if specified in options
+ // Set CUDA support for parallel execution of target regions for OpenMP target
+ // NVPTX/AMDGCN if specified in options.
+ Opts.OpenMPCUDATargetParallel =
+ Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
+ Args.hasArg(options::OPT_fopenmp_cuda_parallel_target_regions);
+
+ // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
Opts.OpenMPCUDAForceFullRuntime =
- Opts.OpenMPIsDevice && T.isNVPTX() &&
+ Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_force_full_runtime);
// Record whether the __DEPRECATED define was requested.
@@ -3152,30 +3226,65 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (InlineArg->getOption().matches(options::OPT_fno_inline))
Opts.NoInlineDefine = true;
- Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.FastMath =
+ Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.AllowFPReassoc = Args.hasArg(OPT_mreassociate) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoHonorNaNs =
+ Args.hasArg(OPT_menable_no_nans) || Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoHonorInfs = Args.hasArg(OPT_menable_no_infinities) ||
+ Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoSignedZero = Args.hasArg(OPT_fno_signed_zeros) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_no_signed_zeros) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.AllowRecip = Args.hasArg(OPT_freciprocal_math) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ // Currently there's no clang option to enable this individually
+ Opts.ApproxFunc = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
StringRef Val = A->getValue();
if (Val == "fast")
- Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
else if (Val == "on")
- Opts.setDefaultFPContractMode(LangOptions::FPC_On);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
else if (Val == "off")
- Opts.setDefaultFPContractMode(LangOptions::FPC_Off);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Off);
else
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
- LangOptions::FPRoundingModeKind FPRM = LangOptions::FPR_ToNearest;
+ if (Args.hasArg(OPT_fexperimental_strict_floating_point))
+ Opts.ExpStrictFP = true;
+
+ auto FPRM = llvm::RoundingMode::NearestTiesToEven;
if (Args.hasArg(OPT_frounding_math)) {
- FPRM = LangOptions::FPR_Dynamic;
+ FPRM = llvm::RoundingMode::Dynamic;
}
Opts.setFPRoundingMode(FPRM);
@@ -3229,6 +3338,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
+ if (Arg *A = Args.getLastArg(OPT_ftrivial_auto_var_init_stop_after)) {
+ int Val = std::stoi(A->getValue());
+ Opts.TrivialAutoVarInitStopAfter = Val;
+ }
+
// Parse -fsanitize= arguments.
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, Opts.Sanitize);
@@ -3243,18 +3357,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
systemBlacklists.end());
// -fxray-instrument
- Opts.XRayInstrument =
- Args.hasFlag(OPT_fxray_instrument, OPT_fnoxray_instrument, false);
-
- // -fxray-always-emit-customevents
+ Opts.XRayInstrument = Args.hasArg(OPT_fxray_instrument);
Opts.XRayAlwaysEmitCustomEvents =
- Args.hasFlag(OPT_fxray_always_emit_customevents,
- OPT_fnoxray_always_emit_customevents, false);
-
- // -fxray-always-emit-typedevents
+ Args.hasArg(OPT_fxray_always_emit_customevents);
Opts.XRayAlwaysEmitTypedEvents =
- Args.hasFlag(OPT_fxray_always_emit_typedevents,
- OPT_fnoxray_always_emit_customevents, false);
+ Args.hasArg(OPT_fxray_always_emit_typedevents);
// -fxray-{always,never}-instrument= filenames.
Opts.XRayAlwaysInstrumentFiles =
@@ -3306,6 +3413,54 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.CompleteMemberPointers = Args.hasArg(OPT_fcomplete_member_pointers);
Opts.BuildingPCHWithObjectFile = Args.hasArg(OPT_building_pch_with_obj);
+ Opts.PCHInstantiateTemplates = Args.hasArg(OPT_fpch_instantiate_templates);
+
+ Opts.MatrixTypes = Args.hasArg(OPT_fenable_matrix);
+
+ Opts.MaxTokens = getLastArgIntValue(Args, OPT_fmax_tokens_EQ, 0, Diags);
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
+ StringRef SignScope = A->getValue();
+
+ if (SignScope.equals_lower("none"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::None);
+ else if (SignScope.equals_lower("all"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::All);
+ else if (SignScope.equals_lower("non-leaf"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::NonLeaf);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignScope;
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
+ StringRef SignKey = A->getValue();
+ if (!SignScope.empty() && !SignKey.empty()) {
+ if (SignKey.equals_lower("a_key"))
+ Opts.setSignReturnAddressKey(
+ LangOptions::SignReturnAddressKeyKind::AKey);
+ else if (SignKey.equals_lower("b_key"))
+ Opts.setSignReturnAddressKey(
+ LangOptions::SignReturnAddressKeyKind::BKey);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignKey;
+ }
+ }
+ }
+
+ Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
+ Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
+
+ Opts.CompatibilityQualifiedIdBlockParamTypeChecking =
+ Args.hasArg(OPT_fcompatibility_qualified_id_block_param_type_checking);
+
+ Opts.RelativeCXXABIVTables =
+ Args.hasFlag(OPT_fexperimental_relative_cxx_abi_vtables,
+ OPT_fno_experimental_relative_cxx_abi_vtables,
+ /*default=*/false);
}
static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
@@ -3326,7 +3481,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateModuleInterface:
case frontend::GenerateHeaderModule:
case frontend::GeneratePCH:
- case frontend::GenerateInterfaceIfsExpV1:
+ case frontend::GenerateInterfaceStubs:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
@@ -3355,11 +3510,12 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
- Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
+ Opts.ImplicitPCHInclude = std::string(Args.getLastArgValue(OPT_include_pch));
Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
Args.hasArg(OPT_pch_through_hdrstop_use);
Opts.PCHWithHdrStopCreate = Args.hasArg(OPT_pch_through_hdrstop_create);
- Opts.PCHThroughHeader = Args.getLastArgValue(OPT_pch_through_header_EQ);
+ Opts.PCHThroughHeader =
+ std::string(Args.getLastArgValue(OPT_pch_through_header_EQ));
Opts.UsePredefines = !Args.hasArg(OPT_undef);
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
@@ -3369,8 +3525,11 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (const auto *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
- for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ))
- Opts.MacroPrefixMap.insert(StringRef(A).split('='));
+ for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ)) {
+ auto Split = StringRef(A).split('=');
+ Opts.MacroPrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
StringRef Value(A->getValue());
@@ -3470,8 +3629,8 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- Opts.CodeModel = getCodeModel(Args, Diags);
- Opts.ABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.CodeModel = std::string(Args.getLastArgValue(OPT_mcmodel_EQ, "default"));
+ Opts.ABI = std::string(Args.getLastArgValue(OPT_target_abi));
if (Arg *A = Args.getLastArg(OPT_meabi)) {
StringRef Value = A->getValue();
llvm::EABI EABIVersion = llvm::StringSwitch<llvm::EABI>(Value)
@@ -3486,15 +3645,11 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
else
Opts.EABIVersion = EABIVersion;
}
- Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
- Opts.FPMath = Args.getLastArgValue(OPT_mfpmath);
+ Opts.CPU = std::string(Args.getLastArgValue(OPT_target_cpu));
+ Opts.FPMath = std::string(Args.getLastArgValue(OPT_mfpmath));
Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
- Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
- Opts.Triple = Args.getLastArgValue(OPT_triple);
- // Use the default target triple if unspecified.
- if (Opts.Triple.empty())
- Opts.Triple = llvm::sys::getDefaultTargetTriple();
- Opts.Triple = llvm::Triple::normalize(Opts.Triple);
+ Opts.LinkerVersion =
+ std::string(Args.getLastArgValue(OPT_target_linker_version));
Opts.OpenCLExtensionsAsWritten = Args.getAllArgValues(OPT_cl_ext_EQ);
Opts.ForceEnableInt128 = Args.hasArg(OPT_fforce_enable_int128);
Opts.NVPTXUseShortPointers = Args.hasFlag(
@@ -3509,9 +3664,35 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
}
}
+bool CompilerInvocation::parseSimpleArgs(const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
+ ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
+ METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
+ this->KEYPATH = Args.hasArg(OPT_##ID) && IS_POSITIVE;
+
+#define OPTION_WITH_MARSHALLING_STRING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ TYPE, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
+ { \
+ if (auto MaybeValue = NORMALIZER(OPT_##ID, TABLE_INDEX, Args, Diags)) \
+ this->KEYPATH = static_cast<TYPE>(*MaybeValue); \
+ else \
+ this->KEYPATH = DEFAULT_VALUE; \
+ }
+
+#include "clang/Driver/Options.inc"
+#undef OPTION_WITH_MARSHALLING_STRING
+#undef OPTION_WITH_MARSHALLING_FLAG
+ return true;
+}
+
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ const char *Argv0) {
bool Success = true;
// Parse the arguments.
@@ -3541,6 +3722,11 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success = false;
}
+ Success &= Res.parseSimpleArgs(Args, Diags);
+
+ llvm::sys::Process::UseANSIEscapeCodes(
+ Res.DiagnosticOpts->UseANSIEscapeCodes);
+
Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
Success &= ParseMigratorArgs(Res.getMigratorOpts(), Args);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
@@ -3549,9 +3735,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Diags.Report(diag::err_fe_dependency_file_requires_MT);
Success = false;
}
- Success &=
- ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
- false /*DefaultDiagColor*/, false /*DefaultShowOpt*/);
+ Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
+ /*DefaultDiagColor=*/false);
ParseCommentArgs(LangOpts.CommentOpts, Args);
ParseFileSystemArgs(Res.getFileSystemOpts(), Args);
// FIXME: We shouldn't have to pass the DashX option around here
@@ -3632,6 +3817,11 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getCodeGenOpts().FineGrainedBitfieldAccesses = false;
Diags.Report(diag::warn_drv_fine_grained_bitfield_accesses_ignored);
}
+
+ // Store the command-line for using in the CodeView backend.
+ Res.getCodeGenOpts().Argv0 = Argv0;
+ Res.getCodeGenOpts().CommandLineArgs = CommandLineArgs;
+
return Success;
}
@@ -3648,6 +3838,11 @@ std::string CompilerInvocation::getModuleHash() const {
// CityHash, but this will do for now.
hash_code code = hash_value(getClangFullRepositoryVersion());
+ // Also include the serialization version, in case LLVM_APPEND_VC_REV is off
+ // and getClangFullRepositoryVersion() doesn't include git revision.
+ code = hash_combine(code, serialization::VERSION_MAJOR,
+ serialization::VERSION_MINOR);
+
// Extend the signature with the language options
#define LANGOPT(Name, Bits, Default, Description) \
code = hash_combine(code, LangOpts->Name);
@@ -3660,6 +3855,10 @@ std::string CompilerInvocation::getModuleHash() const {
for (StringRef Feature : LangOpts->ModuleFeatures)
code = hash_combine(code, Feature);
+ code = hash_combine(code, LangOpts->ObjCRuntime);
+ const auto &BCN = LangOpts->CommentOpts.BlockCommandNames;
+ code = hash_combine(code, hash_combine_range(BCN.begin(), BCN.end()));
+
// Extend the signature with the target options.
code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
TargetOpts->ABI);
@@ -3739,6 +3938,33 @@ std::string CompilerInvocation::getModuleHash() const {
return llvm::APInt(64, code).toString(36, /*Signed=*/false);
}
+void CompilerInvocation::generateCC1CommandLine(
+ SmallVectorImpl<const char *> &Args, StringAllocator SA) const {
+#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
+ ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
+ METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
+ if ((FLAGS) & options::CC1Option && \
+ (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) \
+ Args.push_back(SPELLING);
+
+#define OPTION_WITH_MARSHALLING_STRING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ NORMALIZER_RET_TY, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
+ if (((FLAGS) & options::CC1Option) && \
+ (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) { \
+ if (Option::KIND##Class == Option::SeparateClass) { \
+ Args.push_back(SPELLING); \
+ Args.push_back(DENORMALIZER(SA, TABLE_INDEX, this->KEYPATH)); \
+ } \
+ }
+
+#include "clang/Driver/Options.inc"
+#undef OPTION_WITH_MARSHALLING_STRING
+#undef OPTION_WITH_MARSHALLING_FLAG
+}
+
namespace clang {
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
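Annotation: the parseSimpleArgs/generateCC1CommandLine additions above drive option handling from a single macro-expanded table in Options.inc, once to fill key paths from flags and once to print back only the non-default ones. Below is a minimal standalone sketch of that X-macro round trip; SIMPLE_OPTIONS, Opts, parseSimpleArgs and generateCommandLine are illustration names, not clang API.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical option table, analogous in spirit to Options.inc:
// spelling, key path, default value.
#define SIMPLE_OPTIONS(X)                                                      \
  X("-fxray-instrument", XRayInstrument, false)                               \
  X("-fno-access-control", NoAccessControl, false)

struct Opts {
  bool XRayInstrument = false;
  bool NoAccessControl = false;
};

// Expansion #1: parse flags into key paths (cf. OPTION_WITH_MARSHALLING_FLAG).
void parseSimpleArgs(Opts &O, const std::vector<std::string> &Args) {
#define PARSE_FLAG(SPELLING, KEYPATH, DEFAULT)                                 \
  O.KEYPATH = std::find(Args.begin(), Args.end(), SPELLING) != Args.end();
  SIMPLE_OPTIONS(PARSE_FLAG)
#undef PARSE_FLAG
}

// Expansion #2: emit only flags that differ from their defaults,
// mirroring what generateCC1CommandLine does for the real table.
std::vector<std::string> generateCommandLine(const Opts &O) {
  std::vector<std::string> Out;
#define EMIT_FLAG(SPELLING, KEYPATH, DEFAULT)                                  \
  if (O.KEYPATH != DEFAULT)                                                    \
    Out.push_back(SPELLING);
  SIMPLE_OPTIONS(EMIT_FLAG)
#undef EMIT_FLAG
  return Out;
}

int main() {
  Opts O;
  parseSimpleArgs(O, {"-fxray-instrument"});
  for (const std::string &S : generateCommandLine(O))
    std::cout << S << "\n"; // prints -fxray-instrument
}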
diff --git a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 18c4814bbd5c..1d5a6c06b34f 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -93,7 +93,7 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
if (CC1Args)
*CC1Args = {CCArgs.begin(), CCArgs.end()};
auto CI = std::make_unique<CompilerInvocation>();
- if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags) &&
+ if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags, Args[0]) &&
!ShouldRecoverOnErorrs)
return nullptr;
return CI;
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
index 4bb0167bd597..c9240f4122a7 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
@@ -137,16 +137,17 @@ struct DepCollectorASTListener : public ASTReaderListener {
};
} // end anonymous namespace
-void DependencyCollector::maybeAddDependency(StringRef Filename, bool FromModule,
- bool IsSystem, bool IsModuleFile,
- bool IsMissing) {
+void DependencyCollector::maybeAddDependency(StringRef Filename,
+ bool FromModule, bool IsSystem,
+ bool IsModuleFile,
+ bool IsMissing) {
if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
addDependency(Filename);
}
bool DependencyCollector::addDependency(StringRef Filename) {
if (Seen.insert(Filename).second) {
- Dependencies.push_back(Filename);
+ Dependencies.push_back(std::string(Filename));
return true;
}
return false;
@@ -160,8 +161,8 @@ static bool isSpecialFilename(StringRef Filename) {
}
bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
- bool IsSystem, bool IsModuleFile,
- bool IsMissing) {
+ bool IsSystem, bool IsModuleFile,
+ bool IsMissing) {
return !isSpecialFilename(Filename) &&
(needSystemDependencies() || !IsSystem);
}
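Annotation: addDependency above keeps its existing behaviour (append each filename only the first time it is seen) and merely makes the StringRef-to-std::string copy explicit. A tiny standalone sketch of that dedup, with Collector standing in for DependencyCollector:

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Collector {
  std::set<std::string> Seen;
  std::vector<std::string> Dependencies;

  // Append a filename to the ordered dependency list only on first sight.
  bool addDependency(const std::string &Filename) {
    if (Seen.insert(Filename).second) {
      Dependencies.push_back(Filename); // explicit std::string copy
      return true;
    }
    return false;
  }
};

int main() {
  Collector C;
  C.addDependency("a.h");
  C.addDependency("a.h"); // duplicate, ignored
  std::cout << C.Dependencies.size() << "\n"; // 1
}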
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
index ccf7a2785510..8a6e491def45 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
@@ -119,8 +119,7 @@ void DependencyGraphCallback::OutputGraphFile() {
if (FileName.startswith(SysRoot))
FileName = FileName.substr(SysRoot.size());
- OS << DOT::EscapeString(FileName)
- << "\"];\n";
+ OS << DOT::EscapeString(std::string(FileName)) << "\"];\n";
}
// Write the edges
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
index 934d17b3c925..59a968b5c709 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
@@ -157,10 +157,9 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
bool FoundAllPlugins = true;
for (const std::string &Arg : CI.getFrontendOpts().AddPluginActions) {
bool Found = false;
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- if (it->getName() == Arg)
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ if (Plugin.getName() == Arg)
Found = true;
}
if (!Found) {
@@ -183,25 +182,24 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
// or after it (in AfterConsumers)
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
std::vector<std::unique_ptr<ASTConsumer>> AfterConsumers;
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- std::unique_ptr<PluginASTAction> P = it->instantiate();
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ std::unique_ptr<PluginASTAction> P = Plugin.instantiate();
PluginASTAction::ActionType ActionType = P->getActionType();
if (ActionType == PluginASTAction::Cmdline) {
// This is O(|plugins| * |add_plugins|), but since both numbers are
// way below 50 in practice, that's ok.
- for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
- i != e; ++i) {
- if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
- ActionType = PluginASTAction::AddAfterMainAction;
- break;
- }
- }
+ if (llvm::any_of(CI.getFrontendOpts().AddPluginActions,
+ [&](const std::string &PluginAction) {
+ return PluginAction == Plugin.getName();
+ }))
+ ActionType = PluginASTAction::AddAfterMainAction;
}
if ((ActionType == PluginASTAction::AddBeforeMainAction ||
ActionType == PluginASTAction::AddAfterMainAction) &&
- P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs[it->getName()])) {
+ P->ParseArgs(
+ CI,
+ CI.getFrontendOpts().PluginArgs[std::string(Plugin.getName())])) {
std::unique_ptr<ASTConsumer> PluginConsumer = P->CreateASTConsumer(CI, InFile);
if (ActionType == PluginASTAction::AddBeforeMainAction) {
Consumers.push_back(std::move(PluginConsumer));
@@ -363,6 +361,7 @@ static std::error_code collectModuleHeaderIncludes(
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
+ SmallVector<std::pair<std::string, const FileEntry *>, 8> Headers;
for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
@@ -393,13 +392,25 @@ static std::error_code collectModuleHeaderIncludes(
++It)
llvm::sys::path::append(RelativeHeader, *It);
- // Include this header as part of the umbrella directory.
- Module->addTopHeader(*Header);
- addHeaderInclude(RelativeHeader, Includes, LangOpts, Module->IsExternC);
+ std::string RelName = RelativeHeader.c_str();
+ Headers.push_back(std::make_pair(RelName, *Header));
}
if (EC)
return EC;
+
+ // Sort header paths and make the header inclusion order deterministic
+ // across different OSs and filesystems.
+ llvm::sort(Headers.begin(), Headers.end(), [](
+ const std::pair<std::string, const FileEntry *> &LHS,
+ const std::pair<std::string, const FileEntry *> &RHS) {
+ return LHS.first < RHS.first;
+ });
+ for (auto &H : Headers) {
+ // Include this header as part of the umbrella directory.
+ Module->addTopHeader(H.second);
+ addHeaderInclude(H.first, Includes, LangOpts, Module->IsExternC);
+ }
}
// Recurse into submodules.
@@ -564,8 +575,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- InputFile, CI.getPCHContainerReader(), ASTUnit::LoadPreprocessorOnly,
- ASTDiags, CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
+ std::string(InputFile), CI.getPCHContainerReader(),
+ ASTUnit::LoadPreprocessorOnly, ASTDiags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
goto failure;
@@ -592,10 +604,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (&MF != &PrimaryModule)
CI.getFrontendOpts().ModuleFiles.push_back(MF.FileName);
- ASTReader->visitTopLevelModuleMaps(PrimaryModule,
- [&](const FileEntry *FE) {
- CI.getFrontendOpts().ModuleMapFiles.push_back(FE->getName());
- });
+ ASTReader->visitTopLevelModuleMaps(
+ PrimaryModule, [&](const FileEntry *FE) {
+ CI.getFrontendOpts().ModuleMapFiles.push_back(
+ std::string(FE->getName()));
+ });
}
// Set up the input file for replay purposes.
@@ -630,8 +643,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- InputFile, CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
- CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
+ std::string(InputFile), CI.getPCHContainerReader(),
+ ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
goto failure;
@@ -725,7 +739,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
Dir->path(), FileMgr, CI.getPCHContainerReader(),
CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
SpecificModuleCachePath)) {
- PPOpts.ImplicitPCHInclude = Dir->path();
+ PPOpts.ImplicitPCHInclude = std::string(Dir->path());
Found = true;
break;
}
@@ -783,7 +797,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
auto Kind = CurrentModule->IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
auto &SourceMgr = CI.getSourceManager();
auto BufferID = SourceMgr.createFileID(std::move(Buffer), Kind);
- assert(BufferID.isValid() && "couldn't creaate module buffer ID");
+ assert(BufferID.isValid() && "couldn't create module buffer ID");
SourceMgr.setMainFileID(BufferID);
}
}
@@ -817,7 +831,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// For preprocessed files, check if the first line specifies the original
// source file name with a linemarker.
- std::string PresumedInputFile = getCurrentFileOrBufferName();
+ std::string PresumedInputFile = std::string(getCurrentFileOrBufferName());
if (Input.isPreprocessed())
ReadOriginalFileName(CI, PresumedInputFile);
@@ -836,7 +850,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
source = createChainedIncludesSource(CI, FinalReader);
if (!source)
goto failure;
- CI.setModuleManager(static_cast<ASTReader *>(FinalReader.get()));
+ CI.setASTReader(static_cast<ASTReader *>(FinalReader.get()));
CI.getASTContext().setExternalSource(source);
} else if (CI.getLangOpts().Modules ||
!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
@@ -866,7 +880,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.getASTContext().getExternalSource())
goto failure;
}
- // If modules are enabled, create the module manager before creating
+ // If modules are enabled, create the AST reader before creating
// any builtins, so that all declarations know that they might be
// extended by an external source.
if (CI.getLangOpts().Modules || !CI.hasASTContext() ||
@@ -1077,6 +1091,9 @@ void WrapperFrontendAction::ExecuteAction() {
void WrapperFrontendAction::EndSourceFileAction() {
WrappedAction->EndSourceFileAction();
}
+bool WrapperFrontendAction::shouldEraseOutputFiles() {
+ return WrappedAction->shouldEraseOutputFiles();
+}
bool WrapperFrontendAction::usesPreprocessorOnly() const {
return WrappedAction->usesPreprocessorOnly();
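Annotation: one hunk above buffers the umbrella-directory headers and sorts them by relative path before emitting includes, so the generated module content no longer depends on filesystem iteration order. A standalone sketch of that pattern; FileEntryStub is a stand-in for clang's FileEntry.

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct FileEntryStub { std::string Name; };

int main() {
  // Directory iteration order differs across OSs and filesystems,
  // so collect (relative path, entry) pairs first...
  std::vector<std::pair<std::string, const FileEntryStub *>> Headers;
  FileEntryStub A{"zeta.h"}, B{"alpha.h"}, C{"beta/impl.h"};
  Headers.push_back({"zeta.h", &A});
  Headers.push_back({"alpha.h", &B});
  Headers.push_back({"beta/impl.h", &C});

  // ...then sort by path so the emitted include order is deterministic.
  std::sort(Headers.begin(), Headers.end(),
            [](const auto &LHS, const auto &RHS) { return LHS.first < RHS.first; });

  for (const auto &H : Headers)
    std::cout << "#include \"" << H.first << "\"\n";
}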
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index 935c64a0fa13..711e7336c820 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -9,6 +9,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
@@ -78,7 +79,8 @@ ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
const FrontendOptions &Opts = CI.getFrontendOpts();
return CreateASTDumper(nullptr /*Dump to stdout.*/, Opts.ASTDumpFilter,
Opts.ASTDumpDecls, Opts.ASTDumpAll,
- Opts.ASTDumpLookups, Opts.ASTDumpFormat);
+ Opts.ASTDumpLookups, Opts.ASTDumpDeclTypes,
+ Opts.ASTDumpFormat);
}
std::unique_ptr<ASTConsumer>
@@ -115,7 +117,7 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
FrontendOpts.IncludeTimestamps, +CI.getLangOpts().CacheGeneratedPCH));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI, InFile, OutputFile, std::move(OS), Buffer));
+ CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -181,7 +183,7 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
/*ShouldCacheASTInMemory=*/
+CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI, InFile, OutputFile, std::move(OS), Buffer));
+ CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -266,7 +268,7 @@ bool GenerateHeaderModuleAction::PrepareToExecuteAction(
HeaderContents += "#include \"";
HeaderContents += FIF.getFile();
HeaderContents += "\"\n";
- ModuleHeaders.push_back(FIF.getFile());
+ ModuleHeaders.push_back(std::string(FIF.getFile()));
}
Buffer = llvm::MemoryBuffer::getMemBufferCopy(
HeaderContents, Module::getModuleInputBufferName());
@@ -295,7 +297,7 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
<< Name;
continue;
}
- Headers.push_back({Name, &FE->getFileEntry()});
+ Headers.push_back({std::string(Name), &FE->getFileEntry()});
}
HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
@@ -433,6 +435,10 @@ private:
return "RequirementInstantiation";
case CodeSynthesisContext::NestedRequirementConstraintsCheck:
return "NestedRequirementConstraintsCheck";
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ return "InitializingStructuredBinding";
+ case CodeSynthesisContext::MarkingClassDllexported:
+ return "MarkingClassDllexported";
}
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
index 5c1fbf889c23..9f080db733f1 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
@@ -25,11 +25,12 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Cases("mm", "M", Language::ObjCXX)
.Case("mii", InputKind(Language::ObjCXX).getPreprocessed())
.Cases("C", "cc", "cp", Language::CXX)
- .Cases("cpp", "CPP", "c++", "cxx", "hpp", Language::CXX)
+ .Cases("cpp", "CPP", "c++", "cxx", "hpp", "hxx", Language::CXX)
.Case("cppm", Language::CXX)
.Case("iim", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
.Case("cu", Language::CUDA)
+ .Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
.Default(Language::Unknown);
}
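Annotation: the hunk above extends the extension-to-language table so that .hxx is treated as C++ and .hip as the HIP language. A reduced standalone sketch of that mapping, limited to entries visible in the hunk (the enum and function names here are illustrative, not clang's):

#include <iostream>
#include <map>
#include <string>

enum class Language { Unknown, CXX, CUDA, HIP };

Language inputKindForExtension(const std::string &Ext) {
  static const std::map<std::string, Language> Table = {
      {"cpp", Language::CXX}, {"cxx", Language::CXX}, {"hpp", Language::CXX},
      {"hxx", Language::CXX}, // newly recognized as C++
      {"cu", Language::CUDA},
      {"hip", Language::HIP}, // newly recognized as HIP
  };
  auto It = Table.find(Ext);
  return It == Table.end() ? Language::Unknown : It->second;
}

int main() {
  std::cout << (inputKindForExtension("hxx") == Language::CXX) << "\n"; // 1
  std::cout << (inputKindForExtension("hip") == Language::HIP) << "\n"; // 1
}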
diff --git a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
index 5f91157816b0..97fac8a26fae 100644
--- a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -127,8 +127,8 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
- SrcMgr::CharacteristicKind NewFileType,
- FileID PrevFID) {
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
// Unless we are exiting a #include, make sure to skip ahead to the line the
// #include directive was at.
PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
@@ -167,6 +167,9 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
else if (!DepOpts.ShowIncludesPretendHeader.empty())
++IncludeDepth; // Pretend inclusion by ShowIncludesPretendHeader.
+ if (!DepOpts.IncludeSystemHeaders && isSystem(NewFileType))
+ ShowHeader = false;
+
// Dump the header include information we are past the predefines buffer or
// are showing all headers and this isn't the magic implicit <command line>
// header.
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp
index 5d877ee9c0d7..16f1f1670e8d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -47,11 +47,9 @@ class InitHeaderSearch {
bool HasSysroot;
public:
-
InitHeaderSearch(HeaderSearch &HS, bool verbose, StringRef sysroot)
- : Headers(HS), Verbose(verbose), IncludeSysroot(sysroot),
- HasSysroot(!(sysroot.empty() || sysroot == "/")) {
- }
+ : Headers(HS), Verbose(verbose), IncludeSysroot(std::string(sysroot)),
+ HasSysroot(!(sysroot.empty() || sysroot == "/")) {}
/// AddPath - Add the specified path to the specified group list, prefixing
/// the sysroot if used.
@@ -67,7 +65,7 @@ public:
/// AddSystemHeaderPrefix - Add the specified prefix to the system header
/// prefix list.
void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
- SystemHeaderPrefixes.emplace_back(Prefix, IsSystemHeader);
+ SystemHeaderPrefixes.emplace_back(std::string(Prefix), IsSystemHeader);
}
/// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
@@ -355,7 +353,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
// files is <SDK_DIR>/host_tools/lib/clang
SmallString<128> P = StringRef(HSOpts.ResourceDir);
llvm::sys::path::append(P, "../../..");
- BaseSDKPath = P.str();
+ BaseSDKPath = std::string(P.str());
}
}
AddPath(BaseSDKPath + "/target/include", System, false);
@@ -383,6 +381,7 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
case llvm::Triple::Linux:
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
+ case llvm::Triple::AIX:
llvm_unreachable("Include management is handled in the driver.");
break;
case llvm::Triple::Win32:
@@ -426,6 +425,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
case llvm::Triple::WASI:
+ case llvm::Triple::AIX:
return;
case llvm::Triple::Win32:
@@ -435,8 +435,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break;
case llvm::Triple::UnknownOS:
- if (triple.getArch() == llvm::Triple::wasm32 ||
- triple.getArch() == llvm::Triple::wasm64)
+ if (triple.isWasm())
return;
break;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index c273cb96d9b9..6eef1e2376f6 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -80,9 +80,9 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
const PCHContainerReader &PCHContainerRdr,
StringRef ImplicitIncludePCH) {
- std::string OriginalFile =
- ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
- PCHContainerRdr, PP.getDiagnostics());
+ std::string OriginalFile = ASTReader::getOriginalSourceFile(
+ std::string(ImplicitIncludePCH), PP.getFileManager(), PCHContainerRdr,
+ PP.getDiagnostics());
if (OriginalFile.empty())
return;
@@ -377,7 +377,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
} else {
// -- __cplusplus
// [C++20] The integer literal 202002L.
- if (LangOpts.CPlusPlus2a)
+ if (LangOpts.CPlusPlus20)
Builder.defineMacro("__cplusplus", "202002L");
// [C++17] The integer literal 201703L.
else if (LangOpts.CPlusPlus17)
@@ -460,6 +460,13 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
}
+
+ if (LangOpts.SYCL) {
+ // SYCL Version is set to a value when building SYCL applications
+ if (LangOpts.SYCLVersion == 2017)
+ Builder.defineMacro("CL_SYCL_LANGUAGE_VERSION", "121");
+ }
+
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
@@ -491,7 +498,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_user_defined_literals", "200809L");
Builder.defineMacro("__cpp_lambdas", "200907L");
Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus2a ? "201907L" :
+ LangOpts.CPlusPlus20 ? "201907L" :
LangOpts.CPlusPlus17 ? "201603L" :
LangOpts.CPlusPlus14 ? "201304L" : "200704");
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
@@ -518,9 +525,9 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_binary_literals", "201304L");
Builder.defineMacro("__cpp_digit_separators", "201309L");
Builder.defineMacro("__cpp_init_captures",
- LangOpts.CPlusPlus2a ? "201803L" : "201304L");
+ LangOpts.CPlusPlus20 ? "201803L" : "201304L");
Builder.defineMacro("__cpp_generic_lambdas",
- LangOpts.CPlusPlus2a ? "201707L" : "201304L");
+ LangOpts.CPlusPlus20 ? "201707L" : "201304L");
Builder.defineMacro("__cpp_decltype_auto", "201304L");
Builder.defineMacro("__cpp_return_type_deduction", "201304L");
Builder.defineMacro("__cpp_aggregate_nsdmi", "201304L");
@@ -556,7 +563,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_template_template_args", "201611L");
// C++20 features.
- if (LangOpts.CPlusPlus2a) {
+ if (LangOpts.CPlusPlus20) {
//Builder.defineMacro("__cpp_aggregate_paren_init", "201902L");
Builder.defineMacro("__cpp_concepts", "201907L");
Builder.defineMacro("__cpp_conditional_explicit", "201806L");
@@ -1069,12 +1076,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
case 40:
Builder.defineMacro("_OPENMP", "201307");
break;
- case 50:
- Builder.defineMacro("_OPENMP", "201811");
+ case 45:
+ Builder.defineMacro("_OPENMP", "201511");
break;
default:
- // Default version is OpenMP 4.5
- Builder.defineMacro("_OPENMP", "201511");
+ // Default version is OpenMP 5.0
+ Builder.defineMacro("_OPENMP", "201811");
break;
}
}
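Annotation: the _OPENMP hunk above flips the fall-through default from the 4.5 date code (201511) to the 5.0 date code (201811), while an explicit 4.5 keeps its own value. A minimal sketch of just the versions visible in this hunk; other versions clang supports are omitted, and the real code only defines the macro when OpenMP is actually enabled.

#include <iostream>
#include <string>

std::string openmpDateMacro(int Version) {
  switch (Version) {
  case 40: return "201307";
  case 45: return "201511";
  default: return "201811"; // OpenMP 5.0 is the new default
  }
}

int main() {
  std::cout << "_OPENMP=" << openmpDateMacro(45) << "\n"; // 201511
  std::cout << "_OPENMP=" << openmpDateMacro(50) << "\n"; // 201811
}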
diff --git a/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
index 7241081d6cc0..b7c1e693413b 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
@@ -8,6 +8,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -289,7 +290,7 @@ public:
const ASTContext &context, StringRef Format,
raw_ostream &OS) -> void {
OS << "--- !" << Format << "\n";
- OS << "IfsVersion: 1.0\n";
+ OS << "IfsVersion: 2.0\n";
OS << "Triple: " << T.str() << "\n";
OS << "ObjectFileFormat: "
<< "ELF"
@@ -298,11 +299,11 @@ public:
for (const auto &E : Symbols) {
const MangledSymbol &Symbol = E.second;
for (auto Name : Symbol.Names) {
- OS << " \""
+ OS << " - { Name: \""
<< (Symbol.ParentName.empty() || Instance.getLangOpts().CPlusPlus
? ""
: (Symbol.ParentName + "."))
- << Name << "\" : { Type: ";
+ << Name << "\", Type: ";
switch (Symbol.Type) {
default:
llvm_unreachable(
@@ -329,15 +330,15 @@ public:
OS.flush();
};
- assert(Format == "experimental-ifs-v1" && "Unexpected IFS Format.");
+ assert(Format == "experimental-ifs-v2" && "Unexpected IFS Format.");
writeIfsV1(Instance.getTarget().getTriple(), Symbols, context, Format, *OS);
}
};
} // namespace
std::unique_ptr<ASTConsumer>
-GenerateInterfaceIfsExpV1Action::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+GenerateInterfaceStubsAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
return std::make_unique<InterfaceStubFunctionsConsumer>(
- CI, InFile, "experimental-ifs-v1");
+ CI, InFile, "experimental-ifs-v2");
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index 4bac17553999..df8b23691a7d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -120,7 +120,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
- MainFilename = FE->getName();
+ MainFilename = std::string(FE->getName());
}
}
@@ -129,12 +129,13 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DE.DiagnosticID = Info.getID();
DE.DiagnosticLevel = Level;
- DE.WarningOption = DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID);
+ DE.WarningOption =
+ std::string(DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID));
// Format the message.
SmallString<100> MessageStr;
Info.FormatDiagnostic(MessageStr);
- DE.Message = MessageStr.str();
+ DE.Message = std::string(MessageStr.str());
// Set the location information.
DE.Filename = "";
@@ -149,7 +150,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
- DE.Filename = FE->getName();
+ DE.Filename = std::string(FE->getName());
}
} else {
DE.Filename = PLoc.getFilename();
diff --git a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
index fd22433d31bd..b54eb97d6c47 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -170,7 +170,7 @@ bool ModuleDependencyCollector::getRealPath(StringRef SrcPath,
if (DirWithSymLink == SymLinkMap.end()) {
if (llvm::sys::fs::real_path(Dir, RealPath))
return false;
- SymLinkMap[Dir] = RealPath.str();
+ SymLinkMap[Dir] = std::string(RealPath.str());
} else {
RealPath = DirWithSymLink->second;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
index 0e5a8e504dc5..6cdfc595dcae 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -12,21 +12,26 @@
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTWriter.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <limits>
@@ -73,6 +78,68 @@ public:
bool needSystemDependencies() override { return true; }
};
+// Collects files whose existence would invalidate the preamble.
+// Collecting *all* of these would make validating it too slow though, so we
+// just find all the candidates for 'file not found' diagnostics.
+//
+// A caveat that may be significant for generated files: we'll omit files under
+// search path entries whose roots don't exist when the preamble is built.
+// These are pruned by InitHeaderSearch and so we don't see the search path.
+// It would be nice to include them but we don't want to duplicate all the rest
+// of the InitHeaderSearch logic to reconstruct them.
+class MissingFileCollector : public PPCallbacks {
+ llvm::StringSet<> &Out;
+ const HeaderSearch &Search;
+ const SourceManager &SM;
+
+public:
+ MissingFileCollector(llvm::StringSet<> &Out, const HeaderSearch &Search,
+ const SourceManager &SM)
+ : Out(Out), Search(Search), SM(SM) {}
+
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override {
+ // File is null if it wasn't found.
+ // (We have some false negatives if PP recovered e.g. <foo> -> "foo")
+ if (File != nullptr)
+ return;
+
+ // If it's a rare absolute include, we know the full path already.
+ if (llvm::sys::path::is_absolute(FileName)) {
+ Out.insert(FileName);
+ return;
+ }
+
+ // Reconstruct the filenames that would satisfy this directive...
+ llvm::SmallString<256> Buf;
+ auto NotFoundRelativeTo = [&](const DirectoryEntry *DE) {
+ Buf = DE->getName();
+ llvm::sys::path::append(Buf, FileName);
+ llvm::sys::path::remove_dots(Buf, /*remove_dot_dot=*/true);
+ Out.insert(Buf);
+ };
+ // ...relative to the including file.
+ if (!IsAngled) {
+ if (const FileEntry *IncludingFile =
+ SM.getFileEntryForID(SM.getFileID(IncludeTok.getLocation())))
+ if (IncludingFile->getDir())
+ NotFoundRelativeTo(IncludingFile->getDir());
+ }
+ // ...relative to the search paths.
+ for (const auto &Dir : llvm::make_range(
+ IsAngled ? Search.angled_dir_begin() : Search.search_dir_begin(),
+ Search.search_dir_end())) {
+ // No support for frameworks or header maps yet.
+ if (Dir.isNormalDir())
+ NotFoundRelativeTo(Dir.getDir());
+ }
+ }
+};
+
/// Keeps a track of files to be deleted in destructor.
class TemporaryFiles {
public:
@@ -188,6 +255,10 @@ public:
Action.setEmittedPreamblePCH(getWriter());
}
+ bool shouldSkipFunctionBody(Decl *D) override {
+ return Action.Callbacks.shouldSkipFunctionBody(D);
+ }
+
private:
PrecompilePreambleAction &Action;
std::unique_ptr<raw_ostream> Out;
@@ -227,7 +298,7 @@ template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
} // namespace
PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
- llvm::MemoryBuffer *Buffer,
+ const llvm::MemoryBuffer *Buffer,
unsigned MaxLines) {
return Lexer::ComputePreamble(Buffer->getBuffer(), LangOpts, MaxLines);
}
@@ -269,8 +340,9 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
- FrontendOpts.OutputFile = StoreInMemory ? getInMemoryPreamblePath()
- : Storage.asFile().getFilePath();
+ FrontendOpts.OutputFile =
+ std::string(StoreInMemory ? getInMemoryPreamblePath()
+ : Storage.asFile().getFilePath());
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Inform preprocessor to record conditional stack when building the preamble.
@@ -351,6 +423,11 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
Clang->getPreprocessor().addPPCallbacks(std::move(DelegatedPPCallbacks));
if (auto CommentHandler = Callbacks.getCommentHandler())
Clang->getPreprocessor().addCommentHandler(CommentHandler);
+ llvm::StringSet<> MissingFiles;
+ Clang->getPreprocessor().addPPCallbacks(
+ std::make_unique<MissingFileCollector>(
+ MissingFiles, Clang->getPreprocessor().getHeaderSearchInfo(),
+ Clang->getSourceManager()));
if (llvm::Error Err = Act->Execute())
return errorToErrorCode(std::move(Err));
@@ -385,9 +462,9 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
}
}
- return PrecompiledPreamble(std::move(Storage), std::move(PreambleBytes),
- PreambleEndsAtStartOfLine,
- std::move(FilesInPreamble));
+ return PrecompiledPreamble(
+ std::move(Storage), std::move(PreambleBytes), PreambleEndsAtStartOfLine,
+ std::move(FilesInPreamble), std::move(MissingFiles));
}
PreambleBounds PrecompiledPreamble::getBounds() const {
@@ -444,6 +521,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// First, make a record of those files that have been overridden via
// remapping or unsaved_files.
std::map<llvm::sys::fs::UniqueID, PreambleFileHash> OverriddenFiles;
+ llvm::StringSet<> OverriddenAbsPaths; // Either by buffers or files.
for (const auto &R : PreprocessorOpts.RemappedFiles) {
llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(R.second), Status)) {
@@ -451,6 +529,10 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// horrible happened.
return false;
}
+ // If a mapped file was previously missing, then it has changed.
+ llvm::SmallString<128> MappedPath(R.first);
+ if (!VFS->makeAbsolute(MappedPath))
+ OverriddenAbsPaths.insert(MappedPath);
OverriddenFiles[Status.getUniqueID()] = PreambleFileHash::createForFile(
Status.getSize(), llvm::sys::toTimeT(Status.getLastModificationTime()));
@@ -466,6 +548,10 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
OverriddenFiles[Status.getUniqueID()] = PreambleHash;
else
OverridenFileBuffers[RB.first] = PreambleHash;
+
+ llvm::SmallString<128> MappedPath(RB.first);
+ if (!VFS->makeAbsolute(MappedPath))
+ OverriddenAbsPaths.insert(MappedPath);
}
// Check whether anything has changed.
@@ -503,6 +589,17 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
F.second.ModTime)
return false;
}
+ for (const auto &F : MissingFiles) {
+ // A missing file may be "provided" by an override buffer or file.
+ if (OverriddenAbsPaths.count(F.getKey()))
+ return false;
+ // If a file previously recorded as missing exists as a regular file, then
+ // consider the preamble out-of-date.
+ if (auto Status = VFS->status(F.getKey())) {
+ if (Status->isRegularFile())
+ return false;
+ }
+ }
return true;
}
@@ -523,8 +620,10 @@ void PrecompiledPreamble::OverridePreamble(
PrecompiledPreamble::PrecompiledPreamble(
PCHStorage Storage, std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
- llvm::StringMap<PreambleFileHash> FilesInPreamble)
+ llvm::StringMap<PreambleFileHash> FilesInPreamble,
+ llvm::StringSet<> MissingFiles)
: Storage(std::move(Storage)), FilesInPreamble(std::move(FilesInPreamble)),
+ MissingFiles(std::move(MissingFiles)),
PreambleBytes(std::move(PreambleBytes)),
PreambleEndsAtStartOfLine(PreambleEndsAtStartOfLine) {
assert(this->Storage.getKind() != PCHStorage::Kind::Empty);
@@ -548,7 +647,7 @@ PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile() {
return EC;
// We only needed to make sure the file exists, close the file right away.
llvm::sys::Process::SafelyCloseFileDescriptor(FD);
- return TempPCHFile(std::move(File).str());
+ return TempPCHFile(std::string(std::move(File).str()));
}
PrecompiledPreamble::TempPCHFile::TempPCHFile(std::string FilePath)
@@ -715,7 +814,7 @@ void PrecompiledPreamble::setupPreambleStorage(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
const TempPCHFile &PCHFile = Storage.asFile();
- PreprocessorOpts.ImplicitPCHInclude = PCHFile.getFilePath();
+ PreprocessorOpts.ImplicitPCHInclude = std::string(PCHFile.getFilePath());
// Make sure we can access the PCH file even if we're using a VFS
IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
@@ -739,7 +838,7 @@ void PrecompiledPreamble::setupPreambleStorage(
// For in-memory preamble, we have to provide a VFS overlay that makes it
// accessible.
StringRef PCHPath = getInMemoryPreamblePath();
- PreprocessorOpts.ImplicitPCHInclude = PCHPath;
+ PreprocessorOpts.ImplicitPCHInclude = std::string(PCHPath);
auto Buf = llvm::MemoryBuffer::getMemBuffer(Storage.asMemory().Data);
VFS = createVFSOverlayForPreamblePCH(PCHPath, std::move(Buf), VFS);
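Annotation: the PrecompiledPreamble hunks above record the #include targets that were missing when the preamble was built, and CanReuse now declares the preamble stale if any of those paths has since appeared or is supplied by an override buffer. A standalone sketch of that rule; plain POSIX stat() stands in for clang's VFS, and the function name is illustrative.

#include <iostream>
#include <set>
#include <string>
#include <sys/stat.h>

bool preambleStillValid(const std::set<std::string> &MissingFiles,
                        const std::set<std::string> &OverriddenPaths) {
  for (const std::string &Path : MissingFiles) {
    if (OverriddenPaths.count(Path))
      return false; // an unsaved buffer now "provides" the missing header
    struct stat St;
    if (::stat(Path.c_str(), &St) == 0 && S_ISREG(St.st_mode))
      return false; // the header has appeared on disk since the build
  }
  return true;
}

int main() {
  std::set<std::string> Missing = {"/tmp/generated/config.h"};
  std::cout << preambleStillValid(Missing, {}) << "\n";                            // 1 if still absent
  std::cout << preambleStillValid(Missing, {"/tmp/generated/config.h"}) << "\n";   // 0: overridden
}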
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
index 0217b3385a51..4fe64b96cb15 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -95,7 +95,8 @@ bool FixItRewriter::WriteFixedFiles(
for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
int fd;
- std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
+ std::string Filename =
+ FixItOpts->RewriteFilename(std::string(Entry->getName()), fd);
std::error_code EC;
std::unique_ptr<llvm::raw_fd_ostream> OS;
if (fd != -1) {
@@ -113,7 +114,8 @@ bool FixItRewriter::WriteFixedFiles(
OS->flush();
if (RewrittenFiles)
- RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
+ RewrittenFiles->push_back(
+ std::make_pair(std::string(Entry->getName()), Filename));
}
return false;
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index aaffbde3309b..5351ff0593ed 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -77,7 +77,7 @@ public:
SmallString<128> Path(Filename);
llvm::sys::path::replace_extension(Path,
NewSuffix + llvm::sys::path::extension(Path));
- return Path.str();
+ return std::string(Path.str());
}
};
@@ -88,7 +88,7 @@ public:
llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
llvm::sys::path::extension(Filename).drop_front(), fd,
Path);
- return Path.str();
+ return std::string(Path.str());
}
};
} // end anonymous namespace
@@ -166,11 +166,11 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
return CreateModernObjCRewriter(
- InFile, std::move(OS), CI.getDiagnostics(), CI.getLangOpts(),
- CI.getDiagnosticOpts().NoRewriteMacros,
+ std::string(InFile), std::move(OS), CI.getDiagnostics(),
+ CI.getLangOpts(), CI.getDiagnosticOpts().NoRewriteMacros,
(CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo));
- return CreateObjCRewriter(InFile, std::move(OS), CI.getDiagnostics(),
- CI.getLangOpts(),
+ return CreateObjCRewriter(std::string(InFile), std::move(OS),
+ CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
}
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 831f95e8c6be..e122b10e76d3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -860,7 +860,7 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
// ivar in class extensions requires special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
- std::string RecName = CDecl->getName();
+ std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
RecordDecl *RD =
RecordDecl::Create(*Context, TTK_Struct, TUDecl, SourceLocation(),
@@ -941,9 +941,10 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
unsigned Attributes = PD->getPropertyAttributes();
if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) {
- bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
- (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy));
+ bool GenGetProperty =
+ !(Attributes & ObjCPropertyAttribute::kind_nonatomic) &&
+ (Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
@@ -1002,8 +1003,8 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'setter' function.
std::string Setr;
- bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy);
+ bool GenSetProperty = Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
@@ -1022,11 +1023,11 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
Setr += "1)";
else
Setr += "0)";
@@ -2586,9 +2587,10 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
- Expr *Unop = new (Context)
- UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *Unop = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2688,7 +2690,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
- const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
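
The getAs<FunctionType>() to castAs<FunctionType>() switch above is not cosmetic: getAs returns null when the type is not a FunctionType, which this call site never checked, while castAs asserts the cast succeeds and returns a non-null pointer. A brief sketch of when to use each:

```c++
#include "clang/AST/Type.h"

// Sketch: getAs<> for a cast that may fail and whose null result is checked;
// castAs<> when the dynamic type is already known.
static clang::QualType returnTypeIfFunction(clang::QualType T) {
  if (const auto *FT = T->getAs<clang::FunctionType>()) // may be null
    return FT->getReturnType();
  return clang::QualType();
}

static clang::QualType returnTypeOfKnownFunction(clang::QualType T) {
  return T->castAs<clang::FunctionType>()->getReturnType(); // asserts on mismatch
}
```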
@@ -3282,10 +3284,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -3300,10 +3302,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
superType, VK_LValue,
ILE, false);
// struct __rw_objc_super *
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
break;
@@ -3377,10 +3379,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -4442,7 +4444,7 @@ void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
- Name = IFace->getName();
+ Name = std::string(IFace->getName());
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
@@ -4704,9 +4706,9 @@ Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
- Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
- VK_LValue, OK_Ordinary,
- DRE->getLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary, DRE->getLocation(), false, FPOptionsOverride());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -5292,11 +5294,12 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ UnaryOperator *DescRefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -5313,9 +5316,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -5330,9 +5333,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
}
@@ -5370,10 +5373,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -5397,9 +5400,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
NewRep = DRE;
}
- NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
- Context->getPointerType(NewRep->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ NewRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
// Put Paren around the call.
@@ -7484,10 +7488,10 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, Context->UnsignedLongTy,
VK_LValue, SourceLocation());
- BinaryOperator *addExpr =
- new (Context) BinaryOperator(castExpr, DRE, BO_Add,
- Context->getPointerType(Context->CharTy),
- VK_RValue, OK_Ordinary, SourceLocation(), FPOptions());
+ BinaryOperator *addExpr = BinaryOperator::Create(
+ *Context, castExpr, DRE, BO_Add,
+ Context->getPointerType(Context->CharTy), VK_RValue, OK_Ordinary,
+ SourceLocation(), FPOptionsOverride());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
SourceLocation(),
@@ -7501,12 +7505,11 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
- ObjCContainerDecl *CDecl =
- dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ auto *CDecl = cast<ObjCContainerDecl>(D->getDeclContext());
// ivar in class extensions requires special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
- std::string RecName = CDecl->getName();
+ std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
RecordDecl *RD = RecordDecl::Create(
*Context, TTK_Struct, TUDecl, SourceLocation(), SourceLocation(),
@@ -7539,10 +7542,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
CK_BitCast,
PE);
-
- Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
- VK_LValue, OK_Ordinary,
- SourceLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), castExpr, UO_Deref, IvarT,
+ VK_LValue, OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
PE = new (Context) ParenExpr(OldRange.getBegin(),
OldRange.getEnd(),
Exp);
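
Most of the remaining churn in this file swaps placement-new construction of UnaryOperator for the UnaryOperator::Create factory, which also takes the expression's FPOptionsOverride (empty here, since address-of and dereference carry no floating-point state). A minimal sketch of the address-of case, assuming the argument order shown in the hunks above:

```c++
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"

using namespace clang;

// Sketch: build "&E" the way the rewriter now does.
static Expr *buildAddrOf(ASTContext &Ctx, Expr *E) {
  return UnaryOperator::Create(Ctx, E, UO_AddrOf,
                               Ctx.getPointerType(E->getType()), VK_RValue,
                               OK_Ordinary, SourceLocation(),
                               /*CanOverflow=*/false, FPOptionsOverride());
}
```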
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 0cb7592b9982..3f320dc57aa6 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -789,9 +789,10 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
unsigned Attributes = PD->getPropertyAttributes();
if (PID->getGetterMethodDecl() && !PID->getGetterMethodDecl()->isDefined()) {
- bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
- (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy));
+ bool GenGetProperty =
+ !(Attributes & ObjCPropertyAttribute::kind_nonatomic) &&
+ (Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
@@ -850,8 +851,8 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'setter' function.
std::string Setr;
- bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy);
+ bool GenSetProperty = Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
@@ -870,11 +871,11 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
Setr += "1)";
else
Setr += "0)";
@@ -2513,9 +2514,10 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
- Expr *Unop = new (Context)
- UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *Unop = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2713,10 +2715,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -2731,10 +2733,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
superType, VK_LValue,
ILE, false);
// struct objc_super *
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
break;
@@ -2808,10 +2810,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -2995,10 +2997,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::APInt(IntSize, 8),
Context->IntTy,
SourceLocation());
- BinaryOperator *lessThanExpr =
- new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
- VK_RValue, OK_Ordinary, SourceLocation(),
- FPOptions());
+ BinaryOperator *lessThanExpr = BinaryOperator::Create(
+ *Context, sizeofExpr, limit, BO_LE, Context->IntTy, VK_RValue,
+ OK_Ordinary, SourceLocation(), FPOptionsOverride());
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
@@ -3048,9 +3049,10 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
nullptr, SC_Extern);
DeclRefExpr *DRE = new (Context) DeclRefExpr(
*Context, VD, false, getProtocolType(), VK_LValue, SourceLocation());
- Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *DerefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
CK_BitCast,
DerefExpr);
@@ -3631,7 +3633,7 @@ void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
- Name = IFace->getName();
+ Name = std::string(IFace->getName());
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
@@ -3875,9 +3877,9 @@ Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
- Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
- VK_LValue, OK_Ordinary,
- DRE->getLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary, DRE->getLocation(), false, FPOptionsOverride());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -4432,11 +4434,12 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ UnaryOperator *DescRefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -4453,9 +4456,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -4470,9 +4473,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
}
InitExprs.push_back(Exp);
@@ -4509,9 +4512,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
- Exp = new (Context) UnaryOperator(
- Exp, UO_AddrOf, Context->getPointerType(Exp->getType()), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -4527,9 +4531,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
SourceLocation());
- NewRep = new (Context) UnaryOperator(
- NewRep, UO_AddrOf, Context->getPointerType(NewRep->getType()), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ NewRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
@@ -5819,7 +5824,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
+ std::string RecName =
+ std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
@@ -5859,7 +5865,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
+ std::string RecName =
+ std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
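
RewriteObjC mirrors the RewriteModernObjC changes above; its BinaryOperator hunk likewise moves to the BinaryOperator::Create factory, whose trailing parameter is now FPOptionsOverride rather than FPOptions. A sketch of the "sizeof(...) <= 8" comparison built earlier in this file, under the same signature assumption:

```c++
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"

using namespace clang;

// Sketch: an int-typed rvalue comparison with no floating-point overrides.
static BinaryOperator *buildLE(ASTContext &Ctx, Expr *LHS, Expr *RHS) {
  return BinaryOperator::Create(Ctx, LHS, RHS, BO_LE, Ctx.IntTy, VK_RValue,
                                OK_Ordinary, SourceLocation(),
                                FPOptionsOverride());
}
```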
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 8042b52ddc03..462aeda6e027 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
@@ -238,6 +239,9 @@ private:
/// generated from child processes.
bool MergeChildRecords;
+ /// Whether we've started finishing and tearing down this instance.
+ bool IsFinishing = false;
+
/// State that is shared among the various clones of this diagnostic
/// consumer.
struct SharedState {
@@ -567,6 +571,17 @@ unsigned SDiagsWriter::getEmitDiagnosticFlag(StringRef FlagName) {
void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
+ assert(!IsFinishing &&
+ "Received a diagnostic after we've already started teardown.");
+ if (IsFinishing) {
+ SmallString<256> diagnostic;
+ Info.FormatDiagnostic(diagnostic);
+ getMetaDiags()->Report(
+ diag::warn_fe_serialized_diag_failure_during_finalisation)
+ << diagnostic;
+ return;
+ }
+
// Enter the block for a non-note diagnostic immediately, rather than waiting
// for beginDiagnostic, in case associated notes are emitted before we get
// there.
@@ -760,6 +775,9 @@ void SDiagsWriter::RemoveOldDiagnostics() {
}
void SDiagsWriter::finish() {
+ assert(!IsFinishing);
+ IsFinishing = true;
+
// The original instance is responsible for writing the file.
if (!OriginalInstance)
return;
@@ -785,12 +803,20 @@ void SDiagsWriter::finish() {
if (EC) {
getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
<< State->OutputFile << EC.message();
+ OS->clear_error();
return;
}
// Write the generated bitstream to "Out".
OS->write((char *)&State->Buffer.front(), State->Buffer.size());
OS->flush();
+
+ assert(!OS->has_error());
+ if (OS->has_error()) {
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
+ << State->OutputFile << OS->error().message();
+ OS->clear_error();
+ }
}
std::error_code SDiagsMerger::visitStartOfDiagnostic() {
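
The serialized-diagnostics writer gains two defences here: an IsFinishing flag so diagnostics arriving after finish() has started are reported rather than corrupting the output, and a check of the output stream's error state after the write. A sketch of the raw_fd_ostream error-handling idiom, with a plain callback standing in for the meta-diagnostics engine:

```c++
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <functional>
#include <string>
#include <system_error>

// Sketch: surface open and write failures instead of dropping them, and clear
// the error state so the stream destructor does not report it again.
static bool writeBuffer(llvm::StringRef Path, llvm::StringRef Buffer,
                        const std::function<void(std::string)> &Warn) {
  std::error_code EC;
  llvm::raw_fd_ostream OS(Path, EC, llvm::sys::fs::OF_None);
  if (EC) {
    Warn(EC.message());
    return false;
  }
  OS << Buffer;
  OS.flush();
  if (OS.has_error()) {
    Warn(OS.error().message());
    OS.clear_error();
    return false;
  }
  return true;
}
```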
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
index b2497f56cbcd..90f273e65f88 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -32,20 +32,20 @@ void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
"Diagnostic not handled during diagnostic buffering!");
case DiagnosticsEngine::Note:
All.emplace_back(Level, Notes.size());
- Notes.emplace_back(Info.getLocation(), Buf.str());
+ Notes.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Warning:
All.emplace_back(Level, Warnings.size());
- Warnings.emplace_back(Info.getLocation(), Buf.str());
+ Warnings.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Remark:
All.emplace_back(Level, Remarks.size());
- Remarks.emplace_back(Info.getLocation(), Buf.str());
+ Remarks.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Error:
case DiagnosticsEngine::Fatal:
All.emplace_back(Level, Errors.size());
- Errors.emplace_back(Info.getLocation(), Buf.str());
+ Errors.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 82c2af87706e..56e05242f7c9 100644
--- a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -89,9 +89,10 @@ namespace {
class StandardDirective : public Directive {
public:
StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text, unsigned Min,
- unsigned Max)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) {}
+ bool MatchAnyFileAndLine, bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max) {}
bool isValid(std::string &Error) override {
// all strings are considered valid; even empty ones
@@ -107,9 +108,10 @@ public:
class RegexDirective : public Directive {
public:
RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max,
- StringRef RegexStr)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
+ bool MatchAnyFileAndLine, bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max, StringRef RegexStr)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max),
Regex(RegexStr) {}
bool isValid(std::string &Error) override {
@@ -294,11 +296,13 @@ struct UnattachedDirective {
// Attach the specified directive to the line of code indicated by
// \p ExpectedLoc.
void attachDirective(DiagnosticsEngine &Diags, const UnattachedDirective &UD,
- SourceLocation ExpectedLoc, bool MatchAnyLine = false) {
+ SourceLocation ExpectedLoc,
+ bool MatchAnyFileAndLine = false,
+ bool MatchAnyLine = false) {
// Construct new directive.
- std::unique_ptr<Directive> D =
- Directive::create(UD.RegexKind, UD.DirectivePos, ExpectedLoc,
- MatchAnyLine, UD.Text, UD.Min, UD.Max);
+ std::unique_ptr<Directive> D = Directive::create(
+ UD.RegexKind, UD.DirectivePos, ExpectedLoc, MatchAnyFileAndLine,
+ MatchAnyLine, UD.Text, UD.Min, UD.Max);
std::string Error;
if (!D->isValid(Error)) {
@@ -498,6 +502,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// Next optional token: @
SourceLocation ExpectedLoc;
StringRef Marker;
+ bool MatchAnyFileAndLine = false;
bool MatchAnyLine = false;
if (!PH.Next("@")) {
ExpectedLoc = Pos;
@@ -526,26 +531,39 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
StringRef Filename(PH.C, PH.P-PH.C);
PH.Advance();
- // Lookup file via Preprocessor, like a #include.
- const DirectoryLookup *CurDir;
- Optional<FileEntryRef> File =
- PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
- nullptr, nullptr, nullptr, nullptr, nullptr);
- if (!File) {
- Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
- diag::err_verify_missing_file) << Filename << KindStr;
- continue;
- }
-
- const FileEntry *FE = &File->getFileEntry();
- if (SM.translateFile(FE).isInvalid())
- SM.createFileID(FE, Pos, SrcMgr::C_User);
-
- if (PH.Next(Line) && Line > 0)
- ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
- else if (PH.Next("*")) {
+ if (Filename == "*") {
+ MatchAnyFileAndLine = true;
+ if (!PH.Next("*")) {
+ Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
+ diag::err_verify_missing_line)
+ << "'*'";
+ continue;
+ }
MatchAnyLine = true;
- ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ ExpectedLoc = SourceLocation();
+ } else {
+ // Lookup file via Preprocessor, like a #include.
+ const DirectoryLookup *CurDir;
+ Optional<FileEntryRef> File =
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ nullptr, nullptr, nullptr, nullptr, nullptr);
+ if (!File) {
+ Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
+ diag::err_verify_missing_file)
+ << Filename << KindStr;
+ continue;
+ }
+
+ const FileEntry *FE = &File->getFileEntry();
+ if (SM.translateFile(FE).isInvalid())
+ SM.createFileID(FE, Pos, SrcMgr::C_User);
+
+ if (PH.Next(Line) && Line > 0)
+ ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
+ else if (PH.Next("*")) {
+ MatchAnyLine = true;
+ ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ }
}
} else if (PH.Next("*")) {
MatchAnyLine = true;
@@ -631,7 +649,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
}
if (Marker.empty())
- attachDirective(Diags, D, ExpectedLoc, MatchAnyLine);
+ attachDirective(Diags, D, ExpectedLoc, MatchAnyFileAndLine, MatchAnyLine);
else
Markers.addDirective(Marker, D);
FoundDirective = true;
@@ -877,7 +895,7 @@ static unsigned PrintExpected(DiagnosticsEngine &Diags,
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (const auto *D : DL) {
- if (D->DiagnosticLoc.isInvalid())
+ if (D->DiagnosticLoc.isInvalid() || D->MatchAnyFileAndLine)
OS << "\n File *";
else
OS << "\n File " << SourceMgr.getFilename(D->DiagnosticLoc);
@@ -937,7 +955,7 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
continue;
}
- if (!D.DiagnosticLoc.isInvalid() &&
+ if (!D.DiagnosticLoc.isInvalid() && !D.MatchAnyFileAndLine &&
!IsFromSameFile(SourceMgr, D.DiagnosticLoc, II->first))
continue;
@@ -1114,11 +1132,13 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
std::unique_ptr<Directive> Directive::create(bool RegexKind,
SourceLocation DirectiveLoc,
SourceLocation DiagnosticLoc,
+ bool MatchAnyFileAndLine,
bool MatchAnyLine, StringRef Text,
unsigned Min, unsigned Max) {
if (!RegexKind)
return std::make_unique<StandardDirective>(DirectiveLoc, DiagnosticLoc,
- MatchAnyLine, Text, Min, Max);
+ MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max);
// Parse the directive into a regular expression.
std::string RegexStr;
@@ -1143,6 +1163,7 @@ std::unique_ptr<Directive> Directive::create(bool RegexKind,
}
}
- return std::make_unique<RegexDirective>(
- DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max, RegexStr);
+ return std::make_unique<RegexDirective>(DirectiveLoc, DiagnosticLoc,
+ MatchAnyFileAndLine, MatchAnyLine,
+ Text, Min, Max, RegexStr);
}
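
Threading MatchAnyFileAndLine through the directives lets a -verify check match a diagnostic no matter which file or line it is attached to; judging from the parser above, the spelling is a '*' filename followed by '*' for the line. A hypothetical test snippet using that form (the header name and diagnostic text are made up for illustration):

```c++
// RUN: %clang_cc1 -fsyntax-only -verify %s
// Matches the warning wherever it is emitted -- any file, any line.
// expected-warning@*:* {{some text produced by an included header}}
#include "header_that_warns.h" // hypothetical include
```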
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 9bf70b793d9b..ac64e1708da6 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -23,6 +23,7 @@
#include "clang/Frontend/Utils.h"
#include "clang/FrontendTool/Utils.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
@@ -64,8 +65,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case GenerateHeaderModule:
return std::make_unique<GenerateHeaderModuleAction>();
case GeneratePCH: return std::make_unique<GeneratePCHAction>();
- case GenerateInterfaceIfsExpV1:
- return std::make_unique<GenerateInterfaceIfsExpV1Action>();
+ case GenerateInterfaceStubs:
+ return std::make_unique<GenerateInterfaceStubsAction>();
case InitOnly: return std::make_unique<InitOnlyAction>();
case ParseSyntaxOnly: return std::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return std::make_unique<DumpModuleInfoAction>();
@@ -73,14 +74,15 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case TemplightDump: return std::make_unique<TemplightDumpAction>();
case PluginAction: {
- for (FrontendPluginRegistry::iterator it =
- FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- if (it->getName() == CI.getFrontendOpts().ActionName) {
- std::unique_ptr<PluginASTAction> P(it->instantiate());
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ if (Plugin.getName() == CI.getFrontendOpts().ActionName) {
+ std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
if ((P->getActionType() != PluginASTAction::ReplaceAction &&
P->getActionType() != PluginASTAction::Cmdline) ||
- !P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs[it->getName()]))
+ !P->ParseArgs(
+ CI,
+ CI.getFrontendOpts().PluginArgs[std::string(Plugin.getName())]))
return nullptr;
return std::move(P);
}
@@ -202,9 +204,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
}
// Load any requested plugins.
- for (unsigned i = 0,
- e = Clang->getFrontendOpts().Plugins.size(); i != e; ++i) {
- const std::string &Path = Clang->getFrontendOpts().Plugins[i];
+ for (const std::string &Path : Clang->getFrontendOpts().Plugins) {
std::string Error;
if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
@@ -212,13 +212,12 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
}
// Check if any of the loaded plugins replaces the main AST action
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- std::unique_ptr<PluginASTAction> P(it->instantiate());
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
if (P->getActionType() == PluginASTAction::ReplaceAction) {
Clang->getFrontendOpts().ProgramAction = clang::frontend::PluginAction;
- Clang->getFrontendOpts().ActionName = it->getName();
+ Clang->getFrontendOpts().ActionName = Plugin.getName().str();
break;
}
}
@@ -241,35 +240,24 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// These should happen AFTER plugins have been loaded!
AnalyzerOptions &AnOpts = *Clang->getAnalyzerOpts();
+
// Honor -analyzer-checker-help and -analyzer-checker-help-hidden.
if (AnOpts.ShowCheckerHelp || AnOpts.ShowCheckerHelpAlpha ||
AnOpts.ShowCheckerHelpDeveloper) {
- ento::printCheckerHelp(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- AnOpts,
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printCheckerHelp(llvm::outs(), *Clang);
return true;
}
// Honor -analyzer-checker-option-help.
if (AnOpts.ShowCheckerOptionList || AnOpts.ShowCheckerOptionAlphaList ||
AnOpts.ShowCheckerOptionDeveloperList) {
- ento::printCheckerConfigList(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- *Clang->getAnalyzerOpts(),
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printCheckerConfigList(llvm::outs(), *Clang);
return true;
}
// Honor -analyzer-list-enabled-checkers.
if (AnOpts.ShowEnabledCheckerList) {
- ento::printEnabledCheckerList(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- AnOpts,
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printEnabledCheckerList(llvm::outs(), *Clang);
return true;
}
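
The plugin loops now use the Registry's entries() range instead of explicit begin()/end() iterators, and getName()'s StringRef is copied with .str() where a std::string is required. A minimal sketch of the range-for pattern:

```c++
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "llvm/Support/raw_ostream.h"

// Sketch: enumerate registered plugin AST actions by name.
static void listPlugins() {
  for (const clang::FrontendPluginRegistry::entry &Plugin :
       clang::FrontendPluginRegistry::entries())
    llvm::outs() << Plugin.getName() << "\n";
}
```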
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_cmath.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_cmath.h
index 834a2e3fd134..8ba182689a4f 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_cmath.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_cmath.h
@@ -12,7 +12,9 @@
#error "This file is for CUDA compilation only."
#endif
+#ifndef __OPENMP_NVPTX__
#include <limits>
+#endif
// CUDA lets us use various std math functions on the device side. This file
// works in concert with __clang_cuda_math_forward_declares.h to make this work.
@@ -30,32 +32,16 @@
// implementation. Declaring in the global namespace and pulling into namespace
// std covers all of the known knowns.
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
#endif
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
__DEVICE__ long abs(long __n) { return ::labs(__n); }
__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
__DEVICE__ double abs(double __x) { return ::fabs(__x); }
-#endif
-// TODO: remove once variat is supported.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
-__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
-#endif
__DEVICE__ float acos(float __x) { return ::acosf(__x); }
__DEVICE__ float asin(float __x) { return ::asinf(__x); }
__DEVICE__ float atan(float __x) { return ::atanf(__x); }
@@ -64,11 +50,9 @@ __DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
__DEVICE__ float cos(float __x) { return ::cosf(__x); }
__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
__DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
__DEVICE__ float floor(float __x) { return ::floorf(__x); }
__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-// TODO: remove when variant is supported
-#ifndef _OPENMP
__DEVICE__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
@@ -77,14 +61,15 @@ __DEVICE__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
-#endif
__DEVICE__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
// For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows.
-#ifndef _MSC_VER
+// Windows. For OpenMP we omit these as some old system headers have
+// non-conforming `isinf(float)` and `isnan(float)` implementations that return
+// an `int`. The system versions of these functions should be fine anyway.
+#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -161,6 +146,8 @@ __DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
// libdevice doesn't provide an implementation, and we don't want to be in the
// business of implementing tricky libm functions in this header.
+#ifndef __OPENMP_NVPTX__
+
// Now we've defined everything we promised we'd define in
// __clang_cuda_math_forward_declares.h. We need to do two additional things to
// fix up our math functions.
@@ -457,10 +444,7 @@ using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
-// TODO: remove once variant is supported
-#ifndef _OPENMP
using ::scalblnf;
-#endif
using ::scalbnf;
using ::sinf;
using ::sinhf;
@@ -479,7 +463,8 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
-#undef __NOEXCEPT
+#endif // __OPENMP_NVPTX__
+
#undef __DEVICE__
#endif
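
The CUDA wrapper header now keys its device declarations on __OPENMP_NVPTX__, which the OpenMP NVPTX device toolchain defines, instead of plain _OPENMP, so host-side OpenMP compilations stop picking up the device-only definitions and the __NOEXCEPT workaround goes away. A sketch of adding one more wrapper under the same __DEVICE__ convention (the overload chosen here is only an illustration, not part of the header):

```c++
// Sketch only: the real header defines __DEVICE__ exactly as in the hunk above.
#ifdef __OPENMP_NVPTX__
#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
#endif

// Hypothetical extra float overload forwarding to the CUDA libm entry point.
__DEVICE__ float log2(float __x) { return ::log2f(__x); }

#undef __DEVICE__
```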
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
index 576a958b16bb..8c10ff6b461f 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
@@ -13,10 +13,57 @@
// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. These are
// libgcc functions that clang assumes are available when compiling c99 complex
// operations. (These implementations come from libc++, and have been modified
-// to work with CUDA.)
+// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
-extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
- double __c, double __d) {
+#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#pragma omp declare target
+#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
+#else
+#define __DEVICE__ __device__ inline
+#endif
+
+// To make the algorithms available for C and C++ in CUDA and OpenMP we select
+// different but equivalent function versions. TODO: For OpenMP we currently
+// select the native builtins as the overload support for templates is lacking.
+#if !defined(_OPENMP)
+#define _ISNANd std::isnan
+#define _ISNANf std::isnan
+#define _ISINFd std::isinf
+#define _ISINFf std::isinf
+#define _ISFINITEd std::isfinite
+#define _ISFINITEf std::isfinite
+#define _COPYSIGNd std::copysign
+#define _COPYSIGNf std::copysign
+#define _SCALBNd std::scalbn
+#define _SCALBNf std::scalbn
+#define _ABSd std::abs
+#define _ABSf std::abs
+#define _LOGBd std::logb
+#define _LOGBf std::logb
+#else
+#define _ISNANd __nv_isnand
+#define _ISNANf __nv_isnanf
+#define _ISINFd __nv_isinfd
+#define _ISINFf __nv_isinff
+#define _ISFINITEd __nv_isfinited
+#define _ISFINITEf __nv_finitef
+#define _COPYSIGNd __nv_copysign
+#define _COPYSIGNf __nv_copysignf
+#define _SCALBNd __nv_scalbn
+#define _SCALBNf __nv_scalbnf
+#define _ABSd __nv_fabs
+#define _ABSf __nv_fabsf
+#define _LOGBd __nv_logb
+#define _LOGBf __nv_logbf
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
+ double __d) {
double __ac = __a * __c;
double __bd = __b * __d;
double __ad = __a * __d;
@@ -24,50 +71,49 @@ extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
double _Complex z;
__real__(z) = __ac - __bd;
__imag__(z) = __ad + __bc;
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
int __recalc = 0;
- if (std::isinf(__a) || std::isinf(__b)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (_ISINFd(__a) || _ISINFd(__b)) {
+ __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
+ if (_ISNANd(__c))
+ __c = _COPYSIGNd(0, __c);
+ if (_ISNANd(__d))
+ __d = _COPYSIGNd(0, __d);
__recalc = 1;
}
- if (std::isinf(__c) || std::isinf(__d)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
+ if (_ISINFd(__c) || _ISINFd(__d)) {
+ __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
+ if (_ISNANd(__a))
+ __a = _COPYSIGNd(0, __a);
+ if (_ISNANd(__b))
+ __b = _COPYSIGNd(0, __b);
__recalc = 1;
}
- if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
- std::isinf(__ad) || std::isinf(__bc))) {
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (!__recalc &&
+ (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {
+ if (_ISNANd(__a))
+ __a = _COPYSIGNd(0, __a);
+ if (_ISNANd(__b))
+ __b = _COPYSIGNd(0, __b);
+ if (_ISNANd(__c))
+ __c = _COPYSIGNd(0, __c);
+ if (_ISNANd(__d))
+ __d = _COPYSIGNd(0, __d);
__recalc = 1;
}
if (__recalc) {
// Can't use std::numeric_limits<double>::infinity() -- that doesn't have
// a device overload (and isn't constexpr before C++11, naturally).
- __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
- __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+ __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
}
}
return z;
}
-extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
- float __c, float __d) {
+__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
float __ac = __a * __c;
float __bd = __b * __d;
float __ad = __a * __d;
@@ -75,36 +121,36 @@ extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
float _Complex z;
__real__(z) = __ac - __bd;
__imag__(z) = __ad + __bc;
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
int __recalc = 0;
- if (std::isinf(__a) || std::isinf(__b)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (_ISINFf(__a) || _ISINFf(__b)) {
+ __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+ if (_ISNANf(__c))
+ __c = _COPYSIGNf(0, __c);
+ if (_ISNANf(__d))
+ __d = _COPYSIGNf(0, __d);
__recalc = 1;
}
- if (std::isinf(__c) || std::isinf(__d)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
+ if (_ISINFf(__c) || _ISINFf(__d)) {
+ __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+ if (_ISNANf(__a))
+ __a = _COPYSIGNf(0, __a);
+ if (_ISNANf(__b))
+ __b = _COPYSIGNf(0, __b);
__recalc = 1;
}
- if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
- std::isinf(__ad) || std::isinf(__bc))) {
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (!__recalc &&
+ (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {
+ if (_ISNANf(__a))
+ __a = _COPYSIGNf(0, __a);
+ if (_ISNANf(__b))
+ __b = _COPYSIGNf(0, __b);
+ if (_ISNANf(__c))
+ __c = _COPYSIGNf(0, __c);
+ if (_ISNANf(__d))
+ __d = _COPYSIGNf(0, __d);
__recalc = 1;
}
if (__recalc) {
@@ -115,36 +161,36 @@ extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
return z;
}
-extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
- double __c, double __d) {
+__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
+ double __d) {
int __ilogbw = 0;
// Can't use std::max, because that's defined in <algorithm>, and we don't
// want to pull that in for every compile. The CUDA headers define
// ::max(float, float) and ::max(double, double), which is sufficient for us.
- double __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
- if (std::isfinite(__logbw)) {
+ double __logbw = _LOGBd(max(_ABSd(__c), _ABSd(__d)));
+ if (_ISFINITEd(__logbw)) {
__ilogbw = (int)__logbw;
- __c = std::scalbn(__c, -__ilogbw);
- __d = std::scalbn(__d, -__ilogbw);
+ __c = _SCALBNd(__c, -__ilogbw);
+ __d = _SCALBNd(__d, -__ilogbw);
}
double __denom = __c * __c + __d * __d;
double _Complex z;
- __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
- __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
- if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) {
- __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
- __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
- } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
- std::isfinite(__d)) {
- __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a);
- __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b);
- __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
- __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
- } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) &&
- std::isfinite(__b)) {
- __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c);
- __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d);
+ __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+ if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
+ __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
+ __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
+ } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
+ _ISFINITEd(__d)) {
+ __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
+ __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
+ __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+ } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
+ _ISFINITEd(__b)) {
+ __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
+ __d = _COPYSIGNd(_ISINFd(__d) ? 1.0 : 0.0, __d);
__real__(z) = 0.0 * (__a * __c + __b * __d);
__imag__(z) = 0.0 * (__b * __c - __a * __d);
}
@@ -152,33 +198,32 @@ extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
return z;
}
-extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
- float __c, float __d) {
+__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
int __ilogbw = 0;
- float __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
- if (std::isfinite(__logbw)) {
+ float __logbw = _LOGBf(max(_ABSf(__c), _ABSf(__d)));
+ if (_ISFINITEf(__logbw)) {
__ilogbw = (int)__logbw;
- __c = std::scalbn(__c, -__ilogbw);
- __d = std::scalbn(__d, -__ilogbw);
+ __c = _SCALBNf(__c, -__ilogbw);
+ __d = _SCALBNf(__d, -__ilogbw);
}
float __denom = __c * __c + __d * __d;
float _Complex z;
- __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
- __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
- if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) {
- __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
- __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
- } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
- std::isfinite(__d)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
+ __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+ if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {
+ __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;
+ __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;
+ } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&
+ _ISFINITEf(__d)) {
+ __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
__real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
__imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
- } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) &&
- std::isfinite(__b)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
+ } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&
+ _ISFINITEf(__b)) {
+ __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
__real__(z) = 0 * (__a * __c + __b * __d);
__imag__(z) = 0 * (__b * __c - __a * __d);
}
@@ -186,4 +231,29 @@ extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
return z;
}
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
+#undef _ISNANd
+#undef _ISNANf
+#undef _ISINFd
+#undef _ISINFf
+#undef _COPYSIGNd
+#undef _COPYSIGNf
+#undef _ISFINITEd
+#undef _ISFINITEf
+#undef _SCALBNd
+#undef _SCALBNf
+#undef _ABSd
+#undef _ABSf
+#undef _LOGBd
+#undef _LOGBf
+
+#ifdef _OPENMP
+#pragma omp end declare target
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
#endif // __CLANG_CUDA_COMPLEX_BUILTINS
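
With the extern "C" wrappers and the _ISNAN*/_COPYSIGN*/_SCALBN* macro indirection, the same helper bodies now serve CUDA C++ and OpenMP target offloading in both C and C++ modes. These helpers back the NaN/Inf-correct paths of _Complex multiply and divide (absent -ffast-math), so ordinary complex arithmetic in offloaded code is what exercises them; a sketch:

```c++
// Sketch: full complex multiply and divide; the checked slow paths lower to
// the __muldc3 / __divdc3 helpers defined in this header.
static double _Complex scale(double _Complex a, double _Complex b) {
  return (a * b) / (b + 1.0);
}
```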
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
index 50ad674f9483..f801e5426aa4 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
@@ -10,7 +10,7 @@
#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
#if CUDA_VERSION < 9000
#error This file is intended to be used with CUDA-9+ only.
#endif
@@ -20,32 +20,12 @@
// we implement in this file. We need static in order to avoid emitting unused
// functions and __forceinline__ helps inlining these wrappers at -O1.
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ __forceinline__
#endif
-// libdevice provides fast low precision and slow full-recision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -359,10 +339,10 @@ __DEVICE__ int __iAtomicAdd(int *__p, int __v) {
return __nvvm_atom_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
- __nvvm_atom_cta_add_gen_i(__p, __v);
+ return __nvvm_atom_cta_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
- __nvvm_atom_sys_add_gen_i(__p, __v);
+ return __nvvm_atom_sys_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAnd(int *__p, int __v) {
return __nvvm_atom_and_gen_i(__p, __v);
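
The two-line fix above matters: __iAtomicAdd_block and __iAtomicAdd_system computed the atomic add but fell off the end of a non-void function without returning it, which is undefined behaviour. A sketch of the corrected shape, using a portable builtin in place of the NVVM intrinsic:

```c++
// Sketch: the wrapper must return the builtin's result; falling off the end of
// a non-void function leaves callers with an indeterminate value.
static int atomicAddFetchOld(int *p, int v) {
  return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
}
```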
@@ -1483,152 +1463,17 @@ __DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
return r;
}
#endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-#ifndef _OPENMP
-__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
-__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
-#endif
-__DEVICE__ double copysign(double __a, double __b) {
- return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
- return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
- return __nv_fast_fdividef(__a, __b);
-#else
- return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
- return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
- return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
- return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
- return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
- return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+
+// For OpenMP we require the user to include <time.h> as we need to know what
+// clock_t is on the system.
+#ifndef __OPENMP_NVPTX__
+__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); }
#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
+
// These functions shouldn't be declared when including this header
// for math function resolution purposes.
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
return __builtin_memcpy(__a, __b, __c);
}
@@ -1636,158 +1481,6 @@ __DEVICE__ void *memset(void *__a, int __b, size_t __c) {
return __builtin_memset(__a, __b, __c);
}
#endif
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
- return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
- return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
- return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
- return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
- return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
- return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
- return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
- return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
- return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
- return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
- return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
- return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
- return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
- return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
- return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
- return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
- return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
- return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
- return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
- return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-// TODO: remove once variant is supported
-#ifndef _OPENMP
-__DEVICE__ double scalbln(double __a, long __b) {
- if (__b > INT_MAX)
- return __a > 0 ? HUGE_VAL : -HUGE_VAL;
- if (__b < INT_MIN)
- return __a > 0 ? 0.0 : -0.0;
- return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
- if (__b > INT_MAX)
- return __a > 0 ? HUGE_VALF : -HUGE_VALF;
- if (__b < INT_MIN)
- return __a > 0 ? 0.f : -0.f;
- return scalbnf(__a, (int)__b);
-}
-#endif
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__s, double *__c) {
- return __nv_sincos(__a, __s, __c);
-}
-__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
- return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
- return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
- return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
- unsigned long long __b) {
- return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
- unsigned long long __b) {
- return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
- return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
- return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__FAST_OR_SLOW")
#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
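The hunks above key this header off __OPENMP_NVPTX__ instead of the generic _OPENMP and pick the wrapper attributes accordingly. A minimal sketch of that selection pattern, for illustration only: __my_abs is an invented wrapper, its body stands in for the __nv_abs libdevice call, and the snippet assumes either CUDA device compilation or the OpenMP NVPTX overlay so the chosen attributes apply.

#pragma push_macro("__DEVICE__")
#ifdef __OPENMP_NVPTX__
// OpenMP NVPTX offload overlay: no __device__ keyword; inline aggressively
// and promise not to throw so the wrappers match libm-style declarations.
#define __DEVICE__ static __attribute__((always_inline, nothrow))
#else
// Plain CUDA device-side compilation.
#define __DEVICE__ static __device__ __forceinline__
#endif

// Hand-written body standing in for the real __nv_abs libdevice call.
__DEVICE__ int __my_abs(int __a) { return __a < 0 ? -__a : __a; }

#pragma pop_macro("__DEVICE__")

Keying on __OPENMP_NVPTX__ rather than _OPENMP presumably ensures that only the OpenMP NVPTX device-offload overlay (which defines that macro) takes the special path, while a CUDA compilation that merely has -fopenmp on the command line does not.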
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
index 4d70353394c8..6173b589e3ef 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
@@ -14,7 +14,7 @@
extern "C" {
#endif
-#if defined(_OPENMP)
+#if defined(__OPENMP_NVPTX__)
#define __DEVICE__
#elif defined(__CUDA__)
#define __DEVICE__ __device__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
new file mode 100644
index 000000000000..332e616702ac
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
@@ -0,0 +1,347 @@
+/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_H__
+#define __CLANG_CUDA_MATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+#endif
+
+// __DEVICE__ is a helper macro with a common set of attributes for the
+// wrappers we implement in this file. We need static to avoid emitting unused
+// functions, and __forceinline__ helps inline these wrappers at -O1.
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+#else
+#define __DEVICE__ static __device__ __forceinline__
+#endif
+
+// Specialized version of __DEVICE__ for functions with a void return type. It
+// is needed because the OpenMP overlay requires constexpr functions here, but
+// prior to C++14 a constexpr function could not return void.
+#pragma push_macro("__DEVICE_VOID__")
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
+#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE_VOID__ __DEVICE__
+#endif
+
+// libdevice provides fast low precision and slow full-precision implementations
+// for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
+
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ double copysign(double __a, double __b) {
+ return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+ return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
+__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
+__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
+__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
+__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
+__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
+__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
+__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
+__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
+__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
+__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
+__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
+__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
+__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
+__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
+__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
+__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
+__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
+__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
+__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
+__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
+__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
+__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
+__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
+__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
+__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
+__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
+__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
+__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
+__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
+__DEVICE__ float fdividef(float __a, float __b) {
+#if __FAST_MATH__ && !__CUDA_PREC_DIV
+ return __nv_fast_fdividef(__a, __b);
+#else
+ return __a / __b;
+#endif
+}
+__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
+__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
+__DEVICE__ double fma(double __a, double __b, double __c) {
+ return __nv_fma(__a, __b, __c);
+}
+__DEVICE__ float fmaf(float __a, float __b, float __c) {
+ return __nv_fmaf(__a, __b, __c);
+}
+__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
+__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
+__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
+__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
+__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
+__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
+__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
+__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
+__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
+__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
+__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
+__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
+__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
+__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
+__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
+__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
+__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };
+#else
+__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
+#endif
+__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
+__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
+__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
+__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
+__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llmax(long long __a, long long __b) {
+ return __nv_llmax(__a, __b);
+}
+__DEVICE__ long long llmin(long long __a, long long __b) {
+ return __nv_llmin(__a, __b);
+}
+__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
+__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
+__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
+__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double log(double __a) { return __nv_log(__a); }
+__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
+__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
+__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
+__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
+__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
+__DEVICE__ float log2f(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
+}
+__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
+__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
+__DEVICE__ float logf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
+}
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long lrint(double __a) { return llrint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
+__DEVICE__ long lround(double __a) { return llround(__a); }
+__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
+#else
+__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
+__DEVICE__ long lround(double __a) { return round(__a); }
+__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+#endif
+__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
+__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
+__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
+__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nextafter(double __a, double __b) {
+ return __nv_nextafter(__a, __b);
+}
+__DEVICE__ float nextafterf(float __a, float __b) {
+ return __nv_nextafterf(__a, __b);
+}
+__DEVICE__ double norm(int __dim, const double *__t) {
+ return __nv_norm(__dim, __t);
+}
+__DEVICE__ double norm3d(double __a, double __b, double __c) {
+ return __nv_norm3d(__a, __b, __c);
+}
+__DEVICE__ float norm3df(float __a, float __b, float __c) {
+ return __nv_norm3df(__a, __b, __c);
+}
+__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
+ return __nv_norm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
+ return __nv_norm4df(__a, __b, __c, __d);
+}
+__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
+__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
+__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
+__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
+__DEVICE__ float normf(int __dim, const float *__t) {
+ return __nv_normf(__dim, __t);
+}
+__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
+__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
+__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
+__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
+__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
+__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
+__DEVICE__ double remainder(double __a, double __b) {
+ return __nv_remainder(__a, __b);
+}
+__DEVICE__ float remainderf(float __a, float __b) {
+ return __nv_remainderf(__a, __b);
+}
+__DEVICE__ double remquo(double __a, double __b, int *__c) {
+ return __nv_remquo(__a, __b, __c);
+}
+__DEVICE__ float remquof(float __a, float __b, int *__c) {
+ return __nv_remquof(__a, __b, __c);
+}
+__DEVICE__ double rhypot(double __a, double __b) {
+ return __nv_rhypot(__a, __b);
+}
+__DEVICE__ float rhypotf(float __a, float __b) {
+ return __nv_rhypotf(__a, __b);
+}
+__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+__DEVICE__ double rnorm(int __a, const double *__b) {
+ return __nv_rnorm(__a, __b);
+}
+__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
+ return __nv_rnorm3d(__a, __b, __c);
+}
+__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
+ return __nv_rnorm3df(__a, __b, __c);
+}
+__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
+ return __nv_rnorm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
+ return __nv_rnorm4df(__a, __b, __c, __d);
+}
+__DEVICE__ float rnormf(int __dim, const float *__t) {
+ return __nv_rnormf(__dim, __t);
+}
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
+__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
+__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
+__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
+__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+__DEVICE__ double scalbln(double __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VAL : -HUGE_VAL;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.0 : -0.0;
+ return scalbn(__a, (int)__b);
+}
+__DEVICE__ float scalblnf(float __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VALF : -HUGE_VALF;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.f : -0.f;
+ return scalbnf(__a, (int)__b);
+}
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
+ return __nv_sincos(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
+ return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
+ return __nv_sincospi(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
+ return __nv_sincospif(__a, __s, __c);
+}
+__DEVICE__ float sinf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+ return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+ return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__DEVICE_VOID__")
+#pragma pop_macro("__FAST_OR_SLOW")
+
+#endif // __CLANG_CUDA_MATH_H__
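To make the __FAST_OR_SLOW dispatch in the new header concrete, here is a small self-contained sketch. The __fake_* functions and __my_logf are invented placeholders; the real header passes __nv_fast_logf and __nv_logf from libdevice.

// Placeholder pair standing in for libdevice's fast/precise implementations.
static float __fake_fast_logf(float __a) { return __a; }
static float __fake_logf(float __a) { return __a; }

#pragma push_macro("__FAST_OR_SLOW")
#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
#define __FAST_OR_SLOW(fast, slow) fast // -ffast-math / approx transcendentals
#else
#define __FAST_OR_SLOW(fast, slow) slow // default: full precision
#endif

// Expands to __fake_fast_logf(__a) or __fake_logf(__a) during preprocessing.
static float __my_logf(float __a) {
  return __FAST_OR_SLOW(__fake_fast_logf, __fake_logf)(__a);
}
#pragma pop_macro("__FAST_OR_SLOW")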
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math_forward_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math_forward_declares.h
index 0afe4db556db..8a270859e4a5 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math_forward_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math_forward_declares.h
@@ -8,8 +8,8 @@
*/
#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
+#if !defined(__CUDA__) && !__HIP__
+#error "This file is for CUDA/HIP compilation only."
#endif
// This file forward-declares of some math functions we (or the CUDA headers)
@@ -20,37 +20,14 @@
// would preclude the use of our own __device__ overloads for these functions.
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __inline__ __attribute__((always_inline))
-#else
#define __DEVICE__ \
static __inline__ __attribute__((always_inline)) __attribute__((device))
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long abs(long);
__DEVICE__ long long abs(long long);
__DEVICE__ double abs(double);
__DEVICE__ float abs(float);
-#endif
-// While providing the CUDA declarations and definitions for math functions,
-// we may manually define additional functions.
-// TODO: Once variant is supported the additional functions will have
-// to be removed.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const double abs(const double);
-__DEVICE__ const float abs(const float);
-#endif
-__DEVICE__ int abs(int) __NOEXCEPT;
+__DEVICE__ int abs(int);
__DEVICE__ double acos(double);
__DEVICE__ float acos(float);
__DEVICE__ double acosh(double);
@@ -85,8 +62,8 @@ __DEVICE__ double exp(double);
__DEVICE__ float exp(float);
__DEVICE__ double expm1(double);
__DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double) __NOEXCEPT;
-__DEVICE__ float fabs(float) __NOEXCEPT;
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
__DEVICE__ double fdim(double, double);
__DEVICE__ float fdim(float, float);
__DEVICE__ double floor(double);
@@ -136,12 +113,12 @@ __DEVICE__ bool isnormal(double);
__DEVICE__ bool isnormal(float);
__DEVICE__ bool isunordered(double, double);
__DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long) __NOEXCEPT;
+__DEVICE__ long labs(long);
__DEVICE__ double ldexp(double, int);
__DEVICE__ float ldexp(float, int);
__DEVICE__ double lgamma(double);
__DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long) __NOEXCEPT;
+__DEVICE__ long long llabs(long long);
__DEVICE__ long long llrint(double);
__DEVICE__ long long llrint(float);
__DEVICE__ double log10(double);
@@ -152,9 +129,6 @@ __DEVICE__ double log2(double);
__DEVICE__ float log2(float);
__DEVICE__ double logb(double);
__DEVICE__ float logb(float);
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ long double log(long double);
-#endif
__DEVICE__ double log(double);
__DEVICE__ float log(float);
__DEVICE__ long lrint(double);
@@ -302,7 +276,6 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
-#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
#endif
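As the context in this hunk notes, these declarations exist so the compiler sees __device__ overloads for the math functions before any other header declares them; with the OpenMP-specific branches and the __NOEXCEPT shim removed, every declaration now uses the single __device__ form. A rough sketch of the idea with invented names (__EXAMPLE_DEVICE__, __example_exp), assuming CUDA compilation where __attribute__((device)) is the attribute behind __device__:

#define __EXAMPLE_DEVICE__ \
  static __inline__ __attribute__((always_inline)) __attribute__((device))

__EXAMPLE_DEVICE__ double __example_exp(double); // forward declaration, seen
                                                 // before <cmath> and friends
__EXAMPLE_DEVICE__ double __example_exp(double __a) {
  return 1.0 + __a; // placeholder body, not a real exp
}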
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
index e91de3c81dbd..f43ed55de489 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -31,11 +31,17 @@
// Include some forward declares that must come before cmath.
#include <__clang_cuda_math_forward_declares.h>
+// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
+// enabled depend on it to avoid using __float128, which is unsupported in
+// CUDA.
+#define __CUDACC__
+
// Include some standard headers to avoid CUDA headers including them
// while some required macros (like __THROW) are in a weird state.
#include <cmath>
#include <cstdlib>
#include <stdlib.h>
+#undef __CUDACC__
// Preserve common macros that will be changed below by us or by CUDA
// headers.
@@ -83,13 +89,15 @@
#if CUDA_VERSION < 9000
#define __CUDABE__
#else
+#define __CUDACC__
#define __CUDA_LIBDEVICE__
#endif
// Disables definitions of device-side runtime support stubs in
// cuda_device_runtime_api.h
+#include "host_defines.h"
+#undef __CUDACC__
#include "driver_types.h"
#include "host_config.h"
-#include "host_defines.h"
// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
@@ -141,11 +149,12 @@ inline __host__ double __signbitd(double x) {
// to provide our own.
#include <__clang_cuda_libdevice_declares.h>
-// Wrappers for many device-side standard library functions became compiler
-// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
-// provides its own implementation of the wrappers.
+// Wrappers for many device-side standard library functions, incl. math
+// functions, became compiler builtins in CUDA-9 and have been removed from the
+// CUDA headers. Clang now provides its own implementation of the wrappers.
#if CUDA_VERSION >= 9000
#include <__clang_cuda_device_functions.h>
+#include <__clang_cuda_math.h>
#endif
// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
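The __CUDACC__ handling added above can be summarized with a short, simplified sketch; <cmath> and <cstdlib> stand in for the full set of standard headers the wrapper pulls in, and the rationale (libstdc++ with GNU extensions checks __CUDACC__ to stay away from __float128) is taken from the comment in the hunk.

// Simplified outline of the include bracketing in the runtime wrapper.
#define __CUDACC__ // libstdc++ now avoids __float128, which CUDA lacks
#include <cmath>
#include <cstdlib>
#undef __CUDACC__  // dropped again so later CUDA headers see the usual state

As the same hunk shows, the define/undef pair is repeated around host_defines.h for CUDA 9 and newer.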
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
new file mode 100644
index 000000000000..e1cd49a39c65
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -0,0 +1,326 @@
+/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+
+extern "C" {
+
+// BEGIN FLOAT
+__device__ __attribute__((const)) float __ocml_acos_f32(float);
+__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
+__device__ __attribute__((const)) float __ocml_asin_f32(float);
+__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
+__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
+__device__ __attribute__((const)) float __ocml_atan_f32(float);
+__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
+__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_ceil_f32(float);
+__device__ __attribute__((const)) float __ocml_copysign_f32(float, float);
+__device__ float __ocml_cos_f32(float);
+__device__ float __ocml_native_cos_f32(float);
+__device__ __attribute__((pure)) float __ocml_cosh_f32(float);
+__device__ float __ocml_cospi_f32(float);
+__device__ float __ocml_i0_f32(float);
+__device__ float __ocml_i1_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
+__device__ __attribute__((pure)) float __ocml_erf_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_expm1_f32(float);
+__device__ __attribute__((const)) float __ocml_fabs_f32(float);
+__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);
+__device__ __attribute__((const)) float __ocml_floor_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmod_f32(float, float);
+__device__ float __ocml_frexp_f32(float,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);
+__device__ __attribute__((const)) int __ocml_ilogb_f32(float);
+__device__ __attribute__((const)) int __ocml_isfinite_f32(float);
+__device__ __attribute__((const)) int __ocml_isinf_f32(float);
+__device__ __attribute__((const)) int __ocml_isnan_f32(float);
+__device__ float __ocml_j0_f32(float);
+__device__ float __ocml_j1_f32(float);
+__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);
+__device__ float __ocml_lgamma_f32(float);
+__device__ __attribute__((pure)) float __ocml_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_log1p_f32(float);
+__device__ __attribute__((pure)) float __ocml_log2_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);
+__device__ __attribute__((const)) float __ocml_logb_f32(float);
+__device__ __attribute__((pure)) float __ocml_log_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log_f32(float);
+__device__ float __ocml_modf_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);
+__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);
+__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
+ float);
+__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
+__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
+__device__ float __ocml_remquo_f32(float, float,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);
+__device__ __attribute__((const)) float __ocml_rint_f32(float);
+__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,
+ float);
+__device__ __attribute__((const)) float __ocml_round_f32(float);
+__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);
+__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);
+__device__ __attribute__((const)) int __ocml_signbit_f32(float);
+__device__ float __ocml_sincos_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ float __ocml_sincospi_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ float __ocml_sin_f32(float);
+__device__ float __ocml_native_sin_f32(float);
+__device__ __attribute__((pure)) float __ocml_sinh_f32(float);
+__device__ float __ocml_sinpi_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);
+__device__ float __ocml_tan_f32(float);
+__device__ __attribute__((pure)) float __ocml_tanh_f32(float);
+__device__ float __ocml_tgamma_f32(float);
+__device__ __attribute__((const)) float __ocml_trunc_f32(float);
+__device__ float __ocml_y0_f32(float);
+__device__ float __ocml_y1_f32(float);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
+
+__device__ __attribute__((const)) float
+__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__device__ __attribute__((const)) double __ocml_acos_f64(double);
+__device__ __attribute__((pure)) double __ocml_acosh_f64(double);
+__device__ __attribute__((const)) double __ocml_asin_f64(double);
+__device__ __attribute__((pure)) double __ocml_asinh_f64(double);
+__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);
+__device__ __attribute__((const)) double __ocml_atan_f64(double);
+__device__ __attribute__((pure)) double __ocml_atanh_f64(double);
+__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_ceil_f64(double);
+__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);
+__device__ double __ocml_cos_f64(double);
+__device__ __attribute__((pure)) double __ocml_cosh_f64(double);
+__device__ double __ocml_cospi_f64(double);
+__device__ double __ocml_i0_f64(double);
+__device__ double __ocml_i1_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfc_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);
+__device__ __attribute__((pure)) double __ocml_erf_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp10_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp2_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp_f64(double);
+__device__ __attribute__((pure)) double __ocml_expm1_f64(double);
+__device__ __attribute__((const)) double __ocml_fabs_f64(double);
+__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);
+__device__ __attribute__((const)) double __ocml_floor_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);
+__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);
+__device__ double __ocml_frexp_f64(double,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);
+__device__ __attribute__((const)) int __ocml_ilogb_f64(double);
+__device__ __attribute__((const)) int __ocml_isfinite_f64(double);
+__device__ __attribute__((const)) int __ocml_isinf_f64(double);
+__device__ __attribute__((const)) int __ocml_isnan_f64(double);
+__device__ double __ocml_j0_f64(double);
+__device__ double __ocml_j1_f64(double);
+__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);
+__device__ double __ocml_lgamma_f64(double);
+__device__ __attribute__((pure)) double __ocml_log10_f64(double);
+__device__ __attribute__((pure)) double __ocml_log1p_f64(double);
+__device__ __attribute__((pure)) double __ocml_log2_f64(double);
+__device__ __attribute__((const)) double __ocml_logb_f64(double);
+__device__ __attribute__((pure)) double __ocml_log_f64(double);
+__device__ double __ocml_modf_f64(double,
+ __attribute__((address_space(5))) double *);
+__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);
+__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);
+__device__ __attribute__((const)) double __ocml_len3_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
+ double);
+__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
+__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
+__device__ double __ocml_remquo_f64(double, double,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);
+__device__ __attribute__((const)) double __ocml_rint_f64(double);
+__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,
+ double, double);
+__device__ __attribute__((const)) double __ocml_round_f64(double);
+__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);
+__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);
+__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);
+__device__ __attribute__((const)) int __ocml_signbit_f64(double);
+__device__ double __ocml_sincos_f64(double,
+ __attribute__((address_space(5))) double *);
+__device__ double
+__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);
+__device__ double __ocml_sin_f64(double);
+__device__ __attribute__((pure)) double __ocml_sinh_f64(double);
+__device__ double __ocml_sinpi_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_f64(double);
+__device__ double __ocml_tan_f64(double);
+__device__ __attribute__((pure)) double __ocml_tanh_f64(double);
+__device__ double __ocml_tgamma_f64(double);
+__device__ __attribute__((const)) double __ocml_trunc_f64(double);
+__device__ double __ocml_y0_f64(double);
+__device__ double __ocml_y1_f64(double);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
+ double);
+
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
+
+__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
+__device__ _Float16 __ocml_cos_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
+ _Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
+__device__ _Float16 __ocml_sin_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+
+typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
+typedef short __2i16 __attribute__((ext_vector_type(2)));
+
+__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
+ float c, bool s);
+__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
+__device__ __2f16 __ocml_cos_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);
+__device__ __attribute__((const))
+__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+__device__ inline __2f16
+__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+{
+ return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+}
+__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
+__device__ __2f16 __ocml_sin_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+
+} // extern "C"
+
+#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
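These declarations are consumed by the wrappers in __clang_hip_math.h below. A hedged sketch of that pattern, assuming HIP device compilation with the ROCm device library providing __ocml_sqrt_f32 at link time; my_sqrtf is an invented name:

// __attribute__((const)) on the declaration tells the optimizer the call has
// no side effects, so repeated calls with the same argument can be merged.
extern "C" __device__ __attribute__((const)) float __ocml_sqrt_f32(float);

static __device__ inline float my_sqrtf(float __x) {
  return __ocml_sqrt_f32(__x);
}

The pointer parameters spelled __attribute__((address_space(5))) (frexp, modf, remquo, sincos) refer to AMDGPU's private/scratch address space, so callers are expected to pass the address of a local stack object rather than an arbitrary pointer.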
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
new file mode 100644
index 000000000000..cf7014b9aefe
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
@@ -0,0 +1,1185 @@
+/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#include <algorithm>
+#include <limits.h>
+#include <limits>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#pragma push_macro("__RETURN_TYPE")
+
+// To be consistent with __clang_cuda_math_forward_declares.
+#define __DEVICE__ static __device__
+#define __RETURN_TYPE bool
+
+__DEVICE__
+inline uint64_t __make_mantissa_base8(const char *__tagp) {
+ uint64_t __r = 0;
+  while (*__tagp != '\0') {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '7')
+ __r = (__r * 8u) + __tmp - '0';
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base10(const char *__tagp) {
+ uint64_t __r = 0;
+  while (*__tagp != '\0') {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '9')
+ __r = (__r * 10u) + __tmp - '0';
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base16(const char *__tagp) {
+ uint64_t __r = 0;
+  while (*__tagp != '\0') {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '9')
+ __r = (__r * 16u) + __tmp - '0';
+ else if (__tmp >= 'a' && __tmp <= 'f')
+ __r = (__r * 16u) + __tmp - 'a' + 10;
+ else if (__tmp >= 'A' && __tmp <= 'F')
+ __r = (__r * 16u) + __tmp - 'A' + 10;
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa(const char *__tagp) {
+ if (!__tagp)
+ return 0u;
+
+ if (*__tagp == '0') {
+ ++__tagp;
+
+ if (*__tagp == 'x' || *__tagp == 'X')
+ return __make_mantissa_base16(__tagp);
+ else
+ return __make_mantissa_base8(__tagp);
+ }
+
+ return __make_mantissa_base10(__tagp);
+}
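+
+// As a usage note, the parsed value becomes the NaN payload in nanf()/nan()
+// below. For example (assuming the tag parses in the selected base):
+//
+//   float __q = nanf("2");     // quiet NaN with mantissa payload 2
+//   double __p = nan("0777");  // tag parsed as octal, payload 0x1FF
+//
+// A tag containing a character outside the selected base yields a payload of
+// 0.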
+
+// BEGIN FLOAT
+__DEVICE__
+inline float abs(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float acosf(float __x) { return __ocml_acos_f32(__x); }
+__DEVICE__
+inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+__DEVICE__
+inline float asinf(float __x) { return __ocml_asin_f32(__x); }
+__DEVICE__
+inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+__DEVICE__
+inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+__DEVICE__
+inline float atanf(float __x) { return __ocml_atan_f32(__x); }
+__DEVICE__
+inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+__DEVICE__
+inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+__DEVICE__
+inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+__DEVICE__
+inline float copysignf(float __x, float __y) {
+ return __ocml_copysign_f32(__x, __y);
+}
+__DEVICE__
+inline float cosf(float __x) { return __ocml_cos_f32(__x); }
+__DEVICE__
+inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
+__DEVICE__
+inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+__DEVICE__
+inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+__DEVICE__
+inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+__DEVICE__
+inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+__DEVICE__
+inline float erff(float __x) { return __ocml_erf_f32(__x); }
+__DEVICE__
+inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+__DEVICE__
+inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+__DEVICE__
+inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+__DEVICE__
+inline float expf(float __x) { return __ocml_exp_f32(__x); }
+__DEVICE__
+inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+__DEVICE__
+inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+__DEVICE__
+inline float fdividef(float __x, float __y) { return __x / __y; }
+__DEVICE__
+inline float floorf(float __x) { return __ocml_floor_f32(__x); }
+__DEVICE__
+inline float fmaf(float __x, float __y, float __z) {
+ return __ocml_fma_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+__DEVICE__
+inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+__DEVICE__
+inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+__DEVICE__
+inline float frexpf(float __x, int *__nptr) {
+ int __tmp;
+ float __r =
+ __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+ *__nptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+__DEVICE__
+inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
+__DEVICE__
+inline float j0f(float __x) { return __ocml_j0_f32(__x); }
+__DEVICE__
+inline float j1f(float __x) { return __ocml_j1_f32(__x); }
+__DEVICE__
+inline float jnf(int __n,
+ float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case.
+ if (__n == 0)
+ return j0f(__x);
+ if (__n == 1)
+ return j1f(__x);
+
+ float __x0 = j0f(__x);
+ float __x1 = j1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ float __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
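+// Note: jnf() above and ynf() below use the standard forward recurrence for
+// Bessel functions, C(n+1, x) = (2n/x) * C(n, x) - C(n-1, x), seeded with the
+// order-0 and order-1 values, which takes O(n) steps.
+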
+__DEVICE__
+inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+__DEVICE__
+inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+__DEVICE__
+inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float log10f(float __x) { return __ocml_log10_f32(__x); }
+__DEVICE__
+inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+__DEVICE__
+inline float log2f(float __x) { return __ocml_log2_f32(__x); }
+__DEVICE__
+inline float logbf(float __x) { return __ocml_logb_f32(__x); }
+__DEVICE__
+inline float logf(float __x) { return __ocml_log_f32(__x); }
+__DEVICE__
+inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float modff(float __x, float *__iptr) {
+ float __tmp;
+ float __r =
+ __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__iptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float nanf(const char *__tagp) {
+ union {
+ float val;
+ struct ieee_float {
+ uint32_t mantissa : 22;
+ uint32_t quiet : 1;
+ uint32_t exponent : 8;
+ uint32_t sign : 1;
+ } bits;
+
+ static_assert(sizeof(float) == sizeof(ieee_float), "");
+ } __tmp;
+
+ __tmp.bits.sign = 0u;
+ __tmp.bits.exponent = ~0u;
+ __tmp.bits.quiet = 1u;
+ __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+ return __tmp.val;
+}
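+// Note: the bit-field above assumes the IEEE-754 binary32 layout on a
+// little-endian target: 23 mantissa bits (22 payload bits plus the quiet
+// bit), 8 exponent bits and 1 sign bit. Setting all exponent bits while
+// keeping the quiet bit nonzero is what makes the result a quiet NaN.
+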
+__DEVICE__
+inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+__DEVICE__
+inline float nextafterf(float __x, float __y) {
+ return __ocml_nextafter_f32(__x, __y);
+}
+__DEVICE__
+inline float norm3df(float __x, float __y, float __z) {
+ return __ocml_len3_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float norm4df(float __x, float __y, float __z, float __w) {
+ return __ocml_len4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+__DEVICE__
+inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+__DEVICE__
+inline float
+normf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
+ float __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_sqrt_f32(__r);
+}
+__DEVICE__
+inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+__DEVICE__
+inline float remainderf(float __x, float __y) {
+ return __ocml_remainder_f32(__x, __y);
+}
+__DEVICE__
+inline float remquof(float __x, float __y, int *__quo) {
+ int __tmp;
+ float __r = __ocml_remquo_f32(
+ __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+ *__quo = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float rhypotf(float __x, float __y) {
+ return __ocml_rhypot_f32(__x, __y);
+}
+__DEVICE__
+inline float rintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline float rnorm3df(float __x, float __y, float __z) {
+ return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+inline float rnorm4df(float __x, float __y, float __z, float __w) {
+ return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float
+rnormf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
+ float __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_rsqrt_f32(__r);
+}
+__DEVICE__
+inline float roundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+__DEVICE__
+inline float scalblnf(float __x, long int __n) {
+ return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+ : __ocml_scalb_f32(__x, __n);
+}
+__DEVICE__
+inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+__DEVICE__
+inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
+__DEVICE__
+inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
+ float __tmp;
+
+ *__sinptr =
+ __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
+ float __tmp;
+
+ *__sinptr = __ocml_sincospi_f32(
+ __x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline float sinf(float __x) { return __ocml_sin_f32(__x); }
+__DEVICE__
+inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+__DEVICE__
+inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+__DEVICE__
+inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+__DEVICE__
+inline float tanf(float __x) { return __ocml_tan_f32(__x); }
+__DEVICE__
+inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+__DEVICE__
+inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+__DEVICE__
+inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
+__DEVICE__
+inline float y0f(float __x) { return __ocml_y0_f32(__x); }
+__DEVICE__
+inline float y1f(float __x) { return __ocml_y1_f32(__x); }
+__DEVICE__
+inline float ynf(int __n,
+ float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+ if (__n == 0)
+ return y0f(__x);
+ if (__n == 1)
+ return y1f(__x);
+
+ float __x0 = y0f(__x);
+ float __x1 = y1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ float __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+
+// BEGIN INTRINSICS
+__DEVICE__
+inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+__DEVICE__
+inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+__DEVICE__
+inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_rd(float __x, float __y) {
+ return __ocml_add_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fadd_rn(float __x, float __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_ru(float __x, float __y) {
+ return __ocml_add_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fadd_rz(float __x, float __y) {
+ return __ocml_add_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rd(float __x, float __y) {
+ return __ocml_div_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fdiv_ru(float __x, float __y) {
+ return __ocml_div_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rz(float __x, float __y) {
+ return __ocml_div_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_rd(float __x, float __y, float __z) {
+ return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline float __fmaf_rn(float __x, float __y, float __z) {
+ return __ocml_fma_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_ru(float __x, float __y, float __z) {
+ return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmaf_rz(float __x, float __y, float __z) {
+ return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmul_rd(float __x, float __y) {
+ return __ocml_mul_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fmul_rn(float __x, float __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmul_ru(float __x, float __y) {
+ return __ocml_mul_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fmul_rz(float __x, float __y) {
+ return __ocml_mul_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+__DEVICE__
+inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+__DEVICE__
+inline float __fsub_rd(float __x, float __y) {
+ return __ocml_sub_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fsub_rn(float __x, float __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsub_ru(float __x, float __y) {
+ return __ocml_sub_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fsub_rz(float __x, float __y) {
+ return __ocml_sub_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+__DEVICE__
+inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+__DEVICE__
+inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
+__DEVICE__
+inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float __saturatef(float __x) {
+ return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
+}
+__DEVICE__
+inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+ *__sinptr = __ocml_native_sin_f32(__x);
+ *__cosptr = __ocml_native_cos_f32(__x);
+}
+__DEVICE__
+inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+__DEVICE__
+inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+inline double abs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double acos(double __x) { return __ocml_acos_f64(__x); }
+__DEVICE__
+inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
+__DEVICE__
+inline double asin(double __x) { return __ocml_asin_f64(__x); }
+__DEVICE__
+inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
+__DEVICE__
+inline double atan(double __x) { return __ocml_atan_f64(__x); }
+__DEVICE__
+inline double atan2(double __x, double __y) {
+ return __ocml_atan2_f64(__x, __y);
+}
+__DEVICE__
+inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
+__DEVICE__
+inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+__DEVICE__
+inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
+__DEVICE__
+inline double copysign(double __x, double __y) {
+ return __ocml_copysign_f64(__x, __y);
+}
+__DEVICE__
+inline double cos(double __x) { return __ocml_cos_f64(__x); }
+__DEVICE__
+inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
+__DEVICE__
+inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+__DEVICE__
+inline double erf(double __x) { return __ocml_erf_f64(__x); }
+__DEVICE__
+inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
+__DEVICE__
+inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+__DEVICE__
+inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+__DEVICE__
+inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+__DEVICE__
+inline double exp(double __x) { return __ocml_exp_f64(__x); }
+__DEVICE__
+inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
+__DEVICE__
+inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
+__DEVICE__
+inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
+__DEVICE__
+inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+__DEVICE__
+inline double floor(double __x) { return __ocml_floor_f64(__x); }
+__DEVICE__
+inline double fma(double __x, double __y, double __z) {
+ return __ocml_fma_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+__DEVICE__
+inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+__DEVICE__
+inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+__DEVICE__
+inline double frexp(double __x, int *__nptr) {
+ int __tmp;
+ double __r =
+ __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
+ *__nptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline double hypot(double __x, double __y) {
+ return __ocml_hypot_f64(__x, __y);
+}
+__DEVICE__
+inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
+__DEVICE__
+inline double j0(double __x) { return __ocml_j0_f64(__x); }
+__DEVICE__
+inline double j1(double __x) { return __ocml_j1_f64(__x); }
+__DEVICE__
+inline double jn(int __n,
+ double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+  if (__n == 0)
+    return j0(__x);
+  if (__n == 1)
+    return j1(__x);
+
+  double __x0 = j0(__x);
+  double __x1 = j1(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ double __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+__DEVICE__
+inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+__DEVICE__
+inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+__DEVICE__
+inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long long int llround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double log(double __x) { return __ocml_log_f64(__x); }
+__DEVICE__
+inline double log10(double __x) { return __ocml_log10_f64(__x); }
+__DEVICE__
+inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
+__DEVICE__
+inline double log2(double __x) { return __ocml_log2_f64(__x); }
+__DEVICE__
+inline double logb(double __x) { return __ocml_logb_f64(__x); }
+__DEVICE__
+inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long int lround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double modf(double __x, double *__iptr) {
+ double __tmp;
+ double __r =
+ __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__iptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline double nan(const char *__tagp) {
+#if !_WIN32
+ union {
+ double val;
+ struct ieee_double {
+ uint64_t mantissa : 51;
+ uint32_t quiet : 1;
+ uint32_t exponent : 11;
+ uint32_t sign : 1;
+ } bits;
+ static_assert(sizeof(double) == sizeof(ieee_double), "");
+ } __tmp;
+
+ __tmp.bits.sign = 0u;
+ __tmp.bits.exponent = ~0u;
+ __tmp.bits.quiet = 1u;
+ __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+ return __tmp.val;
+#else
+  static_assert(sizeof(uint64_t) == sizeof(double), "");
+  uint64_t val = __make_mantissa(__tagp);
+  // Use a 64-bit constant so the shift by 51 is well defined.
+  val |= 0xFFFull << 51;
+ return *reinterpret_cast<double *>(&val);
+#endif
+}
+__DEVICE__
+inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+__DEVICE__
+inline double nextafter(double __x, double __y) {
+ return __ocml_nextafter_f64(__x, __y);
+}
+__DEVICE__
+inline double
+norm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
+ double __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_sqrt_f64(__r);
+}
+__DEVICE__
+inline double norm3d(double __x, double __y, double __z) {
+ return __ocml_len3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double norm4d(double __x, double __y, double __z, double __w) {
+ return __ocml_len4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+__DEVICE__
+inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+__DEVICE__
+inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+__DEVICE__
+inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+__DEVICE__
+inline double remainder(double __x, double __y) {
+ return __ocml_remainder_f64(__x, __y);
+}
+__DEVICE__
+inline double remquo(double __x, double __y, int *__quo) {
+ int __tmp;
+ double __r = __ocml_remquo_f64(
+ __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+ *__quo = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline double rhypot(double __x, double __y) {
+ return __ocml_rhypot_f64(__x, __y);
+}
+__DEVICE__
+inline double rint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline double
+rnorm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
+ double __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_rsqrt_f64(__r);
+}
+__DEVICE__
+inline double rnorm3d(double __x, double __y, double __z) {
+ return __ocml_rlen3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double rnorm4d(double __x, double __y, double __z, double __w) {
+ return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double round(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+__DEVICE__
+inline double scalbln(double __x, long int __n) {
+ return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+ : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+inline double scalbn(double __x, int __n) {
+ return __ocml_scalbn_f64(__x, __n);
+}
+__DEVICE__
+inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
+__DEVICE__
+inline double sin(double __x) { return __ocml_sin_f64(__x); }
+__DEVICE__
+inline void sincos(double __x, double *__sinptr, double *__cosptr) {
+ double __tmp;
+ *__sinptr = __ocml_sincos_f64(
+ __x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
+ double __tmp;
+ *__sinptr = __ocml_sincospi_f64(
+ __x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
+__DEVICE__
+inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+__DEVICE__
+inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+__DEVICE__
+inline double tan(double __x) { return __ocml_tan_f64(__x); }
+__DEVICE__
+inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
+__DEVICE__
+inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+__DEVICE__
+inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
+__DEVICE__
+inline double y0(double __x) { return __ocml_y0_f64(__x); }
+__DEVICE__
+inline double y1(double __x) { return __ocml_y1_f64(__x); }
+__DEVICE__
+inline double yn(int __n,
+ double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+  if (__n == 0)
+    return y0(__x);
+  if (__n == 1)
+    return y1(__x);
+
+  double __x0 = y0(__x);
+  double __x1 = y1(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ double __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+
+// BEGIN INTRINSICS
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_rd(double __x, double __y) {
+ return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dadd_rn(double __x, double __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_ru(double __x, double __y) {
+ return __ocml_add_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dadd_rz(double __x, double __y) {
+ return __ocml_add_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rd(double __x, double __y) {
+ return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __ddiv_ru(double __x, double __y) {
+ return __ocml_div_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rz(double __x, double __y) {
+ return __ocml_div_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rd(double __x, double __y) {
+ return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dmul_rn(double __x, double __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dmul_ru(double __x, double __y) {
+ return __ocml_mul_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rz(double __x, double __y) {
+ return __ocml_mul_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+__DEVICE__
+inline double __dsub_rd(double __x, double __y) {
+ return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dsub_rn(double __x, double __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsub_ru(double __x, double __y) {
+ return __ocml_sub_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dsub_rz(double __x, double __y) {
+ return __ocml_sub_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __fma_rd(double __x, double __y, double __z) {
+ return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline double __fma_rn(double __x, double __y, double __z) {
+ return __ocml_fma_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __fma_ru(double __x, double __y, double __z) {
+ return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double __fma_rz(double __x, double __y, double __z) {
+ return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// BEGIN INTEGER
+__DEVICE__
+inline int abs(int __x) {
+ int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long labs(long __x) {
+ long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long long llabs(long long __x) {
+ long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
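+
+// Note: the sign-mask trick above relies on two's complement: __sgn is 0 for
+// non-negative __x and -1 (all bits set) for negative __x, so
+// (__x ^ __sgn) - __sgn leaves __x unchanged in the first case and yields
+// ~__x + 1 == -__x in the second. For example, __x = -5: __sgn = -1,
+// __x ^ __sgn = 4, 4 - (-1) = 5.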
+
+#if defined(__cplusplus)
+__DEVICE__
+inline long abs(long __x) { return labs(__x); }
+__DEVICE__
+inline long long abs(long long __x) { return llabs(__x); }
+#endif
+// END INTEGER
+
+__DEVICE__
+inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+ return __ocml_fma_f16(__x, __y, __z);
+}
+
+__DEVICE__
+inline float fma(float __x, float __y, float __z) {
+ return fmaf(__x, __y, __z);
+}
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUNI")
+#pragma push_macro("__DEF_FLOAT_FUN2I")
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved to
+// floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn) \
+ template <typename __T> \
+ __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, \
+ __retty>::type \
+ __fn(__T __x) { \
+ return ::__fn((double)__x); \
+ }
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid a compilation error due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved to max(double, double).
+#define __HIP_OVERLOAD2(__retty, __fn) \
+ template <typename __T1, typename __T2> \
+ __DEVICE__ \
+ typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized && \
+ std::numeric_limits<__T2>::is_specialized, \
+ __retty>::type \
+ __fn(__T1 __x, __T2 __y) { \
+ return __fn((double)__x, (double)__y); \
+ }
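+
+// As an illustrative sketch, with these helpers a call such as
+//
+//   __device__ void __example(void) {
+//     double __a = floor(5);        // integral argument -> floor(double)
+//     double __b = max(5.0f, 6.0);  // mixed float/double -> max(double, double)
+//   }
+//
+// compiles unambiguously instead of failing overload resolution, because the
+// generated templates promote the arguments to double.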
+
+// Define cmath functions that take a float argument and return float.
+#define __DEF_FUN1(__retty, __func) \
+ __DEVICE__ \
+ inline float __func(float __x) { return __func##f(__x); } \
+ __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions that take a float argument and return __retty.
+#define __DEF_FUNI(__retty, __func) \
+ __DEVICE__ \
+ inline __retty __func(float __x) { return __func##f(__x); } \
+ __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions that take two float arguments.
+#define __DEF_FUN2(__retty, __func) \
+ __DEVICE__ \
+ inline float __func(float __x, float __y) { return __func##f(__x, __y); } \
+ __HIP_OVERLOAD2(__retty, __func)
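+
+// For example, __DEF_FUN1(double, sin) expands (roughly) to
+//
+//   __DEVICE__ inline float sin(float __x) { return sinf(__x); }
+//   template <typename __T>
+//   __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,
+//                                       double>::type
+//   sin(__T __x) { return ::sin((double)__x); }
+//
+// i.e. a float overload forwarding to sinf() plus an integer overload that
+// promotes to double.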
+
+__DEF_FUN1(double, acos)
+__DEF_FUN1(double, acosh)
+__DEF_FUN1(double, asin)
+__DEF_FUN1(double, asinh)
+__DEF_FUN1(double, atan)
+__DEF_FUN2(double, atan2);
+__DEF_FUN1(double, atanh)
+__DEF_FUN1(double, cbrt)
+__DEF_FUN1(double, ceil)
+__DEF_FUN2(double, copysign);
+__DEF_FUN1(double, cos)
+__DEF_FUN1(double, cosh)
+__DEF_FUN1(double, erf)
+__DEF_FUN1(double, erfc)
+__DEF_FUN1(double, exp)
+__DEF_FUN1(double, exp2)
+__DEF_FUN1(double, expm1)
+__DEF_FUN1(double, fabs)
+__DEF_FUN2(double, fdim);
+__DEF_FUN1(double, floor)
+__DEF_FUN2(double, fmax);
+__DEF_FUN2(double, fmin);
+__DEF_FUN2(double, fmod);
+//__HIP_OVERLOAD1(int, fpclassify)
+__DEF_FUN2(double, hypot);
+__DEF_FUNI(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater);
+__HIP_OVERLOAD2(bool, isgreaterequal);
+__HIP_OVERLOAD1(bool, isinf);
+__HIP_OVERLOAD2(bool, isless);
+__HIP_OVERLOAD2(bool, islessequal);
+__HIP_OVERLOAD2(bool, islessgreater);
+__HIP_OVERLOAD1(bool, isnan);
+//__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered);
+__DEF_FUN1(double, lgamma)
+__DEF_FUN1(double, log)
+__DEF_FUN1(double, log10)
+__DEF_FUN1(double, log1p)
+__DEF_FUN1(double, log2)
+__DEF_FUN1(double, logb)
+__DEF_FUNI(long long, llrint)
+__DEF_FUNI(long long, llround)
+__DEF_FUNI(long, lrint)
+__DEF_FUNI(long, lround)
+__DEF_FUN1(double, nearbyint);
+__DEF_FUN2(double, nextafter);
+__DEF_FUN2(double, pow);
+__DEF_FUN2(double, remainder);
+__DEF_FUN1(double, rint);
+__DEF_FUN1(double, round);
+__HIP_OVERLOAD1(bool, signbit)
+__DEF_FUN1(double, sin)
+__DEF_FUN1(double, sinh)
+__DEF_FUN1(double, sqrt)
+__DEF_FUN1(double, tan)
+__DEF_FUN1(double, tanh)
+__DEF_FUN1(double, tgamma)
+__DEF_FUN1(double, trunc);
+
+// Define cmath functions that take a float and an integer argument.
+#define __DEF_FLOAT_FUN2I(__func) \
+ __DEVICE__ \
+ inline float __func(float __x, int __y) { return __func##f(__x, __y); }
+__DEF_FLOAT_FUN2I(scalbn)
+
+template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
+ return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
+ return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ inline int min(int __arg1, int __arg2) {
+ return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ inline int max(int __arg1, int __arg2) {
+ return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+inline float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+inline double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+inline float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+inline double min(double __x, double __y) { return fmin(__x, __y); }
+
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+__host__ inline static int min(int __arg1, int __arg2) {
+ return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+ return std::max(__arg1, __arg2);
+}
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUNI")
+#pragma pop_macro("__DEF_FLOAT_FUN2I")
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
new file mode 100644
index 000000000000..addae5605a5b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -0,0 +1,64 @@
+/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ */
+
+#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
+#define __CLANG_HIP_RUNTIME_WRAPPER_H__
+
+#if __HIP__
+
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+
+#define __host__ __attribute__((host))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+#if __HIP_ENABLE_DEVICE_MALLOC__
+extern "C" __device__ void *__hip_malloc(size_t __size);
+extern "C" __device__ void *__hip_free(void *__ptr);
+static inline __device__ void *malloc(size_t __size) {
+ return __hip_malloc(__size);
+}
+static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
+#else
+static inline __device__ void *malloc(size_t __size) {
+ __builtin_trap();
+ return nullptr;
+}
+static inline __device__ void *free(void *__ptr) {
+ __builtin_trap();
+ return nullptr;
+}
+#endif
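+
+// Illustrative sketch: when __HIP_ENABLE_DEVICE_MALLOC__ is defined, device
+// code may allocate heap storage directly, e.g.
+//
+//   __global__ void __example_kernel(int __n) {
+//     int *__buf = (int *)malloc(__n * sizeof(int));
+//     if (__buf)
+//       free(__buf);
+//   }
+//
+// Without that macro, the stubs above trap, so device-side malloc()/free()
+// must not be reached at run time.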
+
+#include <__clang_hip_libdevice_declares.h>
+#include <__clang_hip_math.h>
+
+#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#include <__clang_cuda_math_forward_declares.h>
+#include <__clang_cuda_complex_builtins.h>
+
+#include <algorithm>
+#include <complex>
+#include <new>
+#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+
+#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
+
+#endif // __HIP__
+#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/altivec.h b/contrib/llvm-project/clang/lib/Headers/altivec.h
index 7e231a2a428e..ac5f43836316 100644
--- a/contrib/llvm-project/clang/lib/Headers/altivec.h
+++ b/contrib/llvm-project/clang/lib/Headers/altivec.h
@@ -16761,6 +16761,408 @@ static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
return __builtin_altivec_vminsb(__a, -__a);
}
+
+#ifdef __POWER10_VECTOR__
+/* vec_pdep */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vpdepd(__a, __b);
+}
+
+/* vec_pext */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vpextd(__a, __b);
+}
+
+/* vec_cfuge */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vcfuged(__a, __b);
+}
+
+/* vec_gnb */
+
+#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
+
+/* vec_ternarylogic */
+#ifdef __VSX__
+#define vec_ternarylogic(__a, __b, __c, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned __int128 \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
+#endif /* __VSX__ */
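+
+/* As an illustrative sketch, vec_ternarylogic computes an arbitrary bitwise
+   function of its three vector operands, selected by the 8-bit immediate,
+   which acts as a truth table over the corresponding bits of the inputs (bit
+   ordering as defined for xxeval). Assuming __TT is the truth-table constant
+   for a three-way XOR:
+
+     vector unsigned int __r = vec_ternarylogic(__a, __b, __c, __TT);
+
+   computes __a ^ __b ^ __c bit by bit. */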
+
+/* vec_genpcvm */
+
+#ifdef __VSX__
+#define vec_genpcvm(__a, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+#endif /* __VSX__ */
+
+/* vec_clrl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrl(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrrb(__a, __n);
+#else
+  return __builtin_altivec_vclrlb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrl(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#else
+ return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_clrr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrr(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrlb(__a, __n);
+#else
+  return __builtin_altivec_vclrrb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrr(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#else
+ return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_cntlzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vclzdm(__a, __b);
+}
+
+/* vec_cnttzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vctzdm(__a, __b);
+}
+
+/* vec_sldbi */
+
+#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
+
+/* vec_srdbi */
+
+#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+
+/* vec_insertl */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsblx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_insertl(unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsdrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsdlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(vector unsigned char __a, vector unsigned char __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(vector unsigned short __a, vector unsigned short __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(vector unsigned int __a, vector unsigned int __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswvlx(__b, __c, __a);
+#endif
+}
+
+/* vec_inserth */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsblx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_inserth(unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsdlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsdrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(vector unsigned char __a, vector unsigned char __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(vector unsigned short __a, vector unsigned short __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(vector unsigned int __a, vector unsigned int __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswvrx(__b, __c, __a);
+#endif
+}
+
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d) \
+ __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_blendv(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_blendv(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_blendv(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_blendv(vector double __a, vector double __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+/* vec_splati */
+
+#define vec_splati(__a) \
+ _Generic((__a), signed int \
+ : ((vector signed int)__a), unsigned int \
+ : ((vector unsigned int)__a), float \
+ : ((vector float)__a))
+
+/* vec_splatid */
+
+static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
+ return ((vector double)((double)__a));
+}
+
+/* vec_splati_ins */
+
+static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
+ vector signed int __a, const unsigned int __b, const signed int __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
+ vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
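+
+/* Note: the vec_splati_ins overloads above write __c into word element __b of
+   each doubleword half of __a, using big-endian element numbering; the
+   __LITTLE_ENDIAN__ branch mirrors the indices so the same lanes are updated
+   regardless of endianness. */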
+
+/* vec_test_lsbb_all_ones */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_ones(vector unsigned char __a) {
+ return __builtin_vsx_xvtlsbb(__a, 1);
+}
+
+/* vec_test_lsbb_all_zeros */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_zeros(vector unsigned char __a) {
+ return __builtin_vsx_xvtlsbb(__a, 0);
+}
+#endif /* __VSX__ */
+#endif /* __POWER10_VECTOR__ */
+
#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/amxintrin.h b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
new file mode 100644
index 000000000000..58254e21c81a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMXINTRIN_H
+#define __AMXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+
+/// Load tile configuration from a 64-byte memory location specified by
+/// "mem_addr". The tile configuration includes the tile type palette, the
+/// number of bytes per row, and the number of rows. If the specified
+/// palette_id is zero, that signifies the init state for both the tile
+/// config and the tile data, and the tiles are zeroed. Any invalid
+/// configurations will result in a #GP fault.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
+///
+/// \param __config
+/// A pointer to 512-bits configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_loadconfig(const void *__config)
+{
+ __builtin_ia32_tile_loadconfig(__config);
+}
+
+/// Stores the current tile configuration to a 64-byte memory location
+/// specified by "mem_addr". The tile configuration includes the tile type
+/// palette, the number of bytes per row, and the number of rows. If tiles
+/// are not configured, all zeroes will be stored to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
+///
+/// \param __config
+/// A pointer to 512-bits configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_storeconfig(void *__config)
+{
+ __builtin_ia32_tile_storeconfig(__config);
+}
+
+/// Release the tile configuration to return to the init state, which
+/// releases all storage it currently holds.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_release(void)
+{
+ __builtin_ia32_tilerelease();
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
+#define _tile_loadd(dst, base, stride) \
+ __builtin_ia32_tileloadd64((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
+#define _tile_stream_loadd(dst, base, stride) \
+ __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride" using the tile configuration previously configured via
+/// "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be stored in memory.
+#define _tile_stored(dst, base, stride) \
+ __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Zero the tile specified by "tile".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
+///
+/// \param tile
+///   The destination tile to be zeroed. Max size is 1024 Bytes.
+#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbssd(dst, src0, src1) __builtin_ia32_tdpbssd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbsud(dst, src0, src1) __builtin_ia32_tdpbsud((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbusd(dst, src0, src1) __builtin_ia32_tdpbusd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbuud(dst, src0, src1) __builtin_ia32_tdpbuud((dst), (src0), (src1))
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbf16ps(dst, src0, src1) \
+ __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __x86_64__ */
+#endif /* __AMXINTRIN_H */
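For illustration only (not taken from this patch): a minimal sketch of how the
new AMX tile intrinsics fit together, assuming x86-64 code built with
-mamx-tile -mamx-int8 and a caller-prepared 64-byte tile configuration whose
row/column settings match the buffers used below. The function name is
hypothetical.

#include <immintrin.h>

/* Accumulate one signed-byte by signed-byte tile product into a 32-bit tile. */
static void amx_dpbssd_sketch(const signed char *a, const signed char *b,
                              int *c, const void *tilecfg) {
  _tile_loadconfig(tilecfg); /* program the tile palette/shape registers   */
  _tile_zero(0);             /* tmm0 accumulates the 32-bit results        */
  _tile_loadd(1, a, 64);     /* tmm1 <- A, 64-byte row stride              */
  _tile_loadd(2, b, 64);     /* tmm2 <- B                                  */
  _tile_dpbssd(0, 1, 2);     /* tmm0 += signed-by-signed byte dot-products */
  _tile_stored(0, c, 64);    /* write the accumulator tile back to memory  */
  _tile_release();           /* return the tile state to INIT              */
}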
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_acle.h b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
index 596ea03cff2f..de568b4ff9c5 100644
--- a/contrib/llvm-project/clang/lib/Headers/arm_acle.h
+++ b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
@@ -22,31 +22,43 @@ extern "C" {
/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
+#endif
+#if !__has_builtin(__dsb)
#define __dsb(i) __builtin_arm_dsb(i)
+#endif
+#if !__has_builtin(__isb)
#define __isb(i) __builtin_arm_isb(i)
#endif
/* 8.4 Hints */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
__builtin_arm_wfi();
}
+#endif
+#if !__has_builtin(__wfe)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
__builtin_arm_wfe();
}
+#endif
+#if !__has_builtin(__sev)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
__builtin_arm_sev();
}
+#endif
+#if !__has_builtin(__sevl)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
__builtin_arm_sevl();
}
+#endif
+#if !__has_builtin(__yield)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
__builtin_arm_yield();
}
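For illustration only (not part of the patch): a minimal producer/consumer
sketch using the barrier and hint intrinsics above, assuming an Arm target
where <arm_acle.h> is available; the function names are hypothetical and 0xF
selects the full-system (SY) barrier domain.

#include <arm_acle.h>
#include <stdint.h>

static void publish_flag(volatile uint32_t *flag) {
  *flag = 1;
  __dsb(0xF); /* make the store visible before signalling           */
  __sev();    /* wake any core currently sleeping in __wfe()        */
}

static void wait_for_flag(volatile uint32_t *flag) {
  while (*flag == 0)
    __wfe();  /* low-power wait until an event (e.g. __sev) arrives */
  __isb(0xF); /* synchronize context before acting on the flag      */
}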
diff --git a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
index 162e83ea2fbc..cc16720949ea 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
@@ -740,6 +740,8 @@ _mm256_broadcastsi128_si256(__m128i __X)
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
}
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
#define _mm_blend_epi32(V1, V2, M) \
(__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (int)(M))
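Illustrative only: the added _mm_broadcastsi128_si256 spelling is simply an
alias for the existing _mm256_broadcastsi128_si256 (requires AVX2); the
function name below is hypothetical.

#include <immintrin.h>

static __m256i duplicate_low_lane(__m128i x) {
  return _mm_broadcastsi128_si256(x); /* result lanes are { x, x } */
}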
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
index 376558407683..4281a33d375c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
@@ -1504,13 +1504,14 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, int __B)
+_mm512_slli_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1518,7 +1519,7 @@ _mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1595,13 +1596,14 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, int __B)
+_mm512_srai_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1609,7 +1611,7 @@ _mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1639,13 +1641,14 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, int __B)
+_mm512_srli_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srli_epi16(__A, __B),
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index 7465da379bdd..fa22ef3fdd18 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -5111,13 +5111,14 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512())
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
+_mm512_slli_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
@@ -5125,20 +5126,20 @@ _mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
+_mm512_slli_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
@@ -5146,7 +5147,7 @@ _mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
@@ -5154,13 +5155,14 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
+_mm512_srli_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
@@ -5168,20 +5170,21 @@ _mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
+_mm512_srli_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
@@ -5189,7 +5192,8 @@ _mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
@@ -6593,13 +6597,14 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
(int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
+_mm512_srai_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
@@ -6607,20 +6612,21 @@ _mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
+_mm512_srai_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
@@ -6628,7 +6634,7 @@ _mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
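For illustration (not from the patch): the immediate-count shift helpers now
take an unsigned int count, and behavior for in-range counts is unchanged. A
small sketch assuming AVX-512F is enabled; the function name is hypothetical.

#include <immintrin.h>

static __m512i shift_count_sketch(__m512i v, __mmask16 m, __m512i merge) {
  __m512i a = _mm512_slli_epi32(v, 3u);                /* plain shift        */
  __m512i b = _mm512_mask_srli_epi32(merge, m, v, 3u); /* merge-masked shift */
  __m512i c = _mm512_maskz_srai_epi32(m, v, 3u);       /* zero-masked shift  */
  return _mm512_add_epi32(_mm512_add_epi32(a, b), c);
}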
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
index cd9f2400daa0..6ed10ed9803b 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
@@ -1939,7 +1939,7 @@ _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_slli_epi16(__A, __B),
@@ -1947,7 +1947,7 @@ _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_slli_epi16(__A, __B),
@@ -1955,7 +1955,8 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+ unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, __B),
@@ -1963,7 +1964,7 @@ _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, __B),
@@ -2091,7 +2092,7 @@ _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, __B),
@@ -2099,7 +2100,7 @@ _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, __B),
@@ -2107,7 +2108,8 @@ _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+ unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, __B),
@@ -2115,7 +2117,7 @@ _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, __B),
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
index 9d1d791bb248..968c10efeac0 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
@@ -4522,7 +4522,7 @@ _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_slli_epi32(__A, __B),
@@ -4530,7 +4530,7 @@ _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_slli_epi32(__A, __B),
@@ -4538,7 +4538,7 @@ _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, __B),
@@ -4546,7 +4546,7 @@ _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, __B),
@@ -4586,7 +4586,7 @@ _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_slli_epi64(__A, __B),
@@ -4594,7 +4594,7 @@ _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_slli_epi64(__A, __B),
@@ -4602,7 +4602,7 @@ _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, __B),
@@ -4610,7 +4610,7 @@ _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, __B),
@@ -4866,7 +4866,7 @@ _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srli_epi32(__A, __B),
@@ -4874,7 +4874,7 @@ _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srli_epi32(__A, __B),
@@ -4882,7 +4882,7 @@ _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, __B),
@@ -4890,7 +4890,7 @@ _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, __B),
@@ -4930,7 +4930,7 @@ _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srli_epi64(__A, __B),
@@ -4938,7 +4938,7 @@ _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srli_epi64(__A, __B),
@@ -4946,7 +4946,7 @@ _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, __B),
@@ -4954,7 +4954,7 @@ _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, __B),
@@ -6405,7 +6405,7 @@ _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, __B),
@@ -6413,7 +6413,7 @@ _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, __B),
@@ -6421,7 +6421,7 @@ _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, __B),
@@ -6429,7 +6429,7 @@ _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, __B),
@@ -6481,13 +6481,13 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, int __imm)
+_mm_srai_epi64(__m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6495,7 +6495,7 @@ _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6503,13 +6503,14 @@ _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, int __imm)
+_mm256_srai_epi64(__m256i __A, unsigned int __imm)
{
return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
@@ -6517,7 +6518,7 @@ _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
diff --git a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
index 841bd84070e8..f583c215f919 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
@@ -111,7 +111,8 @@ _mm_tzcnt_64(unsigned long long __X)
#undef __RELAXED_FN_ATTRS
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__BMI__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
@@ -192,6 +193,28 @@ _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned integer used to specify which bits are extracted. Bits [7:0]
+/// specify the index of the least significant bit. Bits [15:8] specify the
+/// number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+/// extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr2_u32(unsigned int __X, unsigned int __Y) {
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
@@ -321,6 +344,28 @@ _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned 64-bit integer used to specify which bits are extracted. Bits
+/// [7:0] specify the index of the least significant bit. Bits [15:8] specify
+/// the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+/// extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
@@ -376,6 +421,7 @@ __blsr_u64(unsigned long long __X)
#undef __DEFAULT_FN_ATTRS
-#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+ || defined(__BMI__) */
#endif /* __BMIINTRIN_H */
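For illustration (not part of the patch): the new _bextr2_u32/_bextr2_u64
intrinsics take the packed start/length control word directly, matching the
register form of BEXTR. A sketch assuming BMI is enabled; the function name is
hypothetical.

#include <immintrin.h>

static unsigned int extract_field(unsigned int word,
                                  unsigned int start, unsigned int len) {
  unsigned int ctl = (start & 0xff) | ((len & 0xff) << 8);
  return _bextr2_u32(word, ctl); /* same as _bextr_u32(word, start, len) */
}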
diff --git a/contrib/llvm-project/clang/lib/Headers/cet.h b/contrib/llvm-project/clang/lib/Headers/cet.h
new file mode 100644
index 000000000000..ffb19dec8f2b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cet.h
@@ -0,0 +1,66 @@
+/*===------ cet.h - Control-flow Enforcement Technology feature -----------===
+ * Adds the x86 feature with the IBT and/or SHSTK bits to the ELF program
+ * property if they are enabled. Otherwise, the contents of this header file
+ * are unused. This file is mainly designed for assembly source code that
+ * wants to enable CET.
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CET_H
+#define __CET_H
+
+#ifdef __ASSEMBLER__
+
+#ifndef __CET__
+# define _CET_ENDBR
+#endif
+
+#ifdef __CET__
+
+# ifdef __LP64__
+# if __CET__ & 0x1
+# define _CET_ENDBR endbr64
+# else
+# define _CET_ENDBR
+# endif
+# else
+# if __CET__ & 0x1
+# define _CET_ENDBR endbr32
+# else
+# define _CET_ENDBR
+# endif
+# endif
+
+
+# ifdef __LP64__
+# define __PROPERTY_ALIGN 3
+# else
+# define __PROPERTY_ALIGN 2
+# endif
+
+ .pushsection ".note.gnu.property", "a"
+ .p2align __PROPERTY_ALIGN
+ .long 1f - 0f /* name length. */
+ .long 4f - 1f /* data length. */
+ /* NT_GNU_PROPERTY_TYPE_0. */
+ .long 5 /* note type. */
+0:
+ .asciz "GNU" /* vendor name. */
+1:
+ .p2align __PROPERTY_ALIGN
+ /* GNU_PROPERTY_X86_FEATURE_1_AND. */
+ .long 0xc0000002 /* pr_type. */
+ .long 3f - 2f /* pr_datasz. */
+2:
+ /* GNU_PROPERTY_X86_FEATURE_1_XXX. */
+ .long __CET__
+3:
+ .p2align __PROPERTY_ALIGN
+4:
+ .popsection
+#endif
+#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/cldemoteintrin.h b/contrib/llvm-project/clang/lib/Headers/cldemoteintrin.h
index 2413e7dea7a1..cfb951c1b4a9 100644
--- a/contrib/llvm-project/clang/lib/Headers/cldemoteintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/cldemoteintrin.h
@@ -18,11 +18,19 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("cldemote")))
+/// Hint to hardware that the cache line that contains \p __P should be demoted
+/// from the cache closest to the processor core to a level more distant from
+/// the processor core.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CLDEMOTE </c> instruction.
static __inline__ void __DEFAULT_FN_ATTRS
_cldemote(const void * __P) {
__builtin_ia32_cldemote(__P);
}
+#define _mm_cldemote(p) _cldemote(p)
#undef __DEFAULT_FN_ATTRS
#endif
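For illustration (not part of the patch): a sketch of the new _mm_cldemote
spelling, assuming the CLDEMOTE feature is enabled; the function name is
hypothetical. The demotion is only a performance hint.

#include <immintrin.h>

static void publish_line(int *slot, int value) {
  *slot = value;      /* produce the data                                */
  _mm_cldemote(slot); /* hint: push the line toward a more distant cache */
}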
diff --git a/contrib/llvm-project/clang/lib/Headers/cpuid.h b/contrib/llvm-project/clang/lib/Headers/cpuid.h
index 4ddd64847c32..2a88c042d046 100644
--- a/contrib/llvm-project/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm-project/clang/lib/Headers/cpuid.h
@@ -24,6 +24,10 @@
#define signature_CYRIX_ebx 0x69727943
#define signature_CYRIX_edx 0x736e4978
#define signature_CYRIX_ecx 0x64616574
+/* HYGON: "HygonGenuine" */
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
/* INTEL: "GenuineIntel" */
#define signature_INTEL_ebx 0x756e6547
#define signature_INTEL_edx 0x49656e69
@@ -182,8 +186,13 @@
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
+#define bit_SERIALIZE 0x00004000
+#define bit_TSXLDTRK 0x00010000
#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
+#define bit_AMXBF16 0x00400000
+#define bit_AMXTILE 0x01000000
+#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
#define bit_AVX512BF16 0x00000020
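For illustration (not part of the patch): detecting one of the new leaf 7
sub-leaf 0 feature bits with the __get_cpuid_count helper already provided by
this header; the function name is hypothetical.

#include <cpuid.h>

static int cpu_has_amx_tile(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    return 0; /* leaf 7 not supported */
  return (edx & bit_AMXTILE) != 0;
}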
diff --git a/contrib/llvm-project/clang/lib/Headers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
index 993c688ce818..73a777b107c6 100644
--- a/contrib/llvm-project/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
@@ -4970,10 +4970,10 @@ void _mm_pause(void);
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-#define _MM_DENORMALS_ZERO_ON (0x0040)
-#define _MM_DENORMALS_ZERO_OFF (0x0000)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
+#define _MM_DENORMALS_ZERO_MASK (0x0040U)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
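For illustration (not part of the patch): the denormals-are-zero macros are now
unsigned, matching the _mm_getcsr()/_mm_setcsr() interface, so existing usage
such as the sketch below is unchanged; the function names are hypothetical.

#include <emmintrin.h>

static void enable_daz(void) {
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); /* flush input denormals */
}

static int daz_enabled(void) {
  return _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON;
}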
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index edf8c42ec491..e9dff2310fdf 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -10,198 +10,231 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MMX__)
#include <mmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE__)
#include <xmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE2__)
#include <emmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE3__)
#include <pmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSSE3__)
#include <tmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__SSE4_2__) || defined(__SSE4_1__))
#include <smmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AES__) || defined(__PCLMUL__))
#include <wmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLFLUSHOPT__)
#include <clflushoptintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLWB__)
#include <clwbintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX__)
#include <avxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX2__)
#include <avx2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__F16C__)
#include <f16cintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
#endif
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__BMI2__)
#include <bmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__LZCNT__)
#include <lzcntintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__POPCNT__)
#include <popcntintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FMA__)
#include <fmaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512F__)
#include <avx512fintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VL__)
#include <avx512vlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BW__)
#include <avx512bwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BITALG__)
#include <avx512bitalgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512CD__)
#include <avx512cdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VPOPCNTDQ__)
#include <avx512vpopcntdqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
#include <avx512vpopcntdqvlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VNNI__)
#include <avx512vnniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VNNI__))
#include <avx512vlvnniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BITALG__))
#include <avx512vlbitalgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BW__))
#include <avx512vlbwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512CD__))
#include <avx512vlcdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512DQ__))
#include <avx512vldqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512ER__)
#include <avx512erintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512IFMA__) && defined(__AVX512VL__))
#include <avx512ifmavlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VBMI__) && defined(__AVX512VL__))
#include <avx512vbmivlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VBMI2__)
#include <avx512vbmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VBMI2__) && defined(__AVX512VL__))
#include <avx512vlvbmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512PF__)
#include <avx512pfintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BF16__))
#include <avx512vlbf16intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PKU__)
#include <pkuintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__VAES__)
#include <vaesintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__GFNI__)
#include <gfniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDPID__)
/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
@@ -213,7 +246,8 @@ _rdpid_u32(void) {
}
#endif // __RDPID__
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDRND__)
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
@@ -235,7 +269,8 @@ _rdrand64_step(unsigned long long *__p)
#endif
#endif /* __RDRND__ */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FSGSBASE__)
#ifdef __x86_64__
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
@@ -288,7 +323,8 @@ _writegsbase_u64(unsigned long long __V)
#endif
#endif /* __FSGSBASE__ */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MOVBE__)
/* The structs used below are to force the load/store to be unaligned. This
* is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -347,35 +383,42 @@ _storebe_i64(void * __P, long long __D) {
#endif
#endif /* __MOVBE */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHA__)
#include <shaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FXSR__)
#include <fxsrintrin.h>
#endif
/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVEC__)
#include <xsavecintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVES__)
#include <xsavesintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHSTK__)
#include <cetintrin.h>
#endif
@@ -383,57 +426,81 @@ _storebe_i64(void * __P, long long __D) {
* whereas others are also available at all times. */
#include <adxintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDSEED__)
#include <rdseedintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__WBNOINVD__)
#include <wbnoinvdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLDEMOTE__)
#include <cldemoteintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__WAITPKG__)
#include <waitpkgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MOVDIRI__) || defined(__MOVDIR64B__)
#include <movdirintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PCONFIG__)
#include <pconfigintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SGX__)
#include <sgxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PTWRITE__)
#include <ptwriteintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- defined(__AVX512VP2INTERSECT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+#include <amxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
#include <avx512vlvp2intersectintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__ENQCMD__)
#include <enqcmdintrin.h>
#endif
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SERIALIZE__)
+#include <serializeintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__TSXLDTRK__)
+#include <tsxldtrkintrin.h>
+#endif
+
#if defined(_MSC_VER) && __has_extension(gnu_asm)
/* Define the default attributes for these intrinsics */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
diff --git a/contrib/llvm-project/clang/lib/Headers/intrin.h b/contrib/llvm-project/clang/lib/Headers/intrin.h
index f85f7a2beb49..871b47ca8267 100644
--- a/contrib/llvm-project/clang/lib/Headers/intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/intrin.h
@@ -289,6 +289,9 @@ unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
static __inline__
__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
static __inline__
diff --git a/contrib/llvm-project/clang/lib/Headers/module.modulemap b/contrib/llvm-project/clang/lib/Headers/module.modulemap
index 7954a77a4125..6894672ef052 100644
--- a/contrib/llvm-project/clang/lib/Headers/module.modulemap
+++ b/contrib/llvm-project/clang/lib/Headers/module.modulemap
@@ -27,6 +27,12 @@ module _Builtin_intrinsics [system] [extern_c] {
header "arm_fp16.h"
export *
}
+
+ explicit module sve {
+ requires sve
+ header "arm_sve.h"
+ export *
+ }
}
explicit module intel {
diff --git a/contrib/llvm-project/clang/lib/Headers/msa.h b/contrib/llvm-project/clang/lib/Headers/msa.h
index 19ea6071aa93..0ca4900870f1 100644
--- a/contrib/llvm-project/clang/lib/Headers/msa.h
+++ b/contrib/llvm-project/clang/lib/Headers/msa.h
@@ -212,10 +212,14 @@ typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));
#define __msa_ld_h __builtin_msa_ld_h
#define __msa_ld_w __builtin_msa_ld_w
#define __msa_ld_d __builtin_msa_ld_d
+#define __msa_ldr_d __builtin_msa_ldr_d
+#define __msa_ldr_w __builtin_msa_ldrq_w
#define __msa_st_b __builtin_msa_st_b
#define __msa_st_h __builtin_msa_st_h
#define __msa_st_w __builtin_msa_st_w
#define __msa_st_d __builtin_msa_st_d
+#define __msa_str_d __builtin_msa_str_d
+#define __msa_str_w __builtin_msa_strq_w
#define __msa_sat_s_b __builtin_msa_sat_s_b
#define __msa_sat_s_h __builtin_msa_sat_s_h
#define __msa_sat_s_w __builtin_msa_sat_s_w
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index 06c5ab6a72f0..66e18bdd47bb 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -13432,18 +13432,12 @@ int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, m
uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
@@ -13482,18 +13476,12 @@ long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand
ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
// OpenCL v2.0 s6.13.11.7.5:
@@ -14682,7 +14670,7 @@ void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, flo
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
+#if defined(cl_khr_mipmap_image_writes)
void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
@@ -14699,15 +14687,16 @@ void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int
void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
#ifdef cl_khr_3d_image_writes
void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif
-#endif //cl_khr_mipmap_image
+#endif //cl_khr_3d_image_writes
+
+#endif //defined(cl_khr_mipmap_image_writes)
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
@@ -14756,7 +14745,7 @@ void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, flo
#endif //cl_khr_depth_images
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
+#if defined(cl_khr_mipmap_image_writes)
void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
@@ -14780,8 +14769,9 @@ void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int
void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif
-#endif //cl_khr_mipmap_image
+#endif //cl_khr_3d_image_writes
+
+#endif //cl_khr_mipmap_image_writes
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
@@ -15470,6 +15460,674 @@ double __ovld __conv sub_group_scan_inclusive_max(double x);
#endif //cl_khr_subgroups cl_intel_subgroups
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_extended_types
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int __ovld sub_group_elect(void);
+int __ovld sub_group_non_uniform_all( int predicate );
+int __ovld sub_group_non_uniform_any( int predicate );
+
+int __ovld sub_group_non_uniform_all_equal( char value );
+int __ovld sub_group_non_uniform_all_equal( uchar value );
+int __ovld sub_group_non_uniform_all_equal( short value );
+int __ovld sub_group_non_uniform_all_equal( ushort value );
+int __ovld sub_group_non_uniform_all_equal( int value );
+int __ovld sub_group_non_uniform_all_equal( uint value );
+int __ovld sub_group_non_uniform_all_equal( long value );
+int __ovld sub_group_non_uniform_all_equal( ulong value );
+int __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
+
+#if defined(cl_khr_subgroup_ballot)
+char __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2 __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3 __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4 __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8 __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16 __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2 __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3 __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4 __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8 __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2 __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3 __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4 __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8 __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2 __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3 __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4 __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8 __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16 __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2 __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3 __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4 __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8 __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16 __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2 __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3 __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4 __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8 __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16 __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2 __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3 __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4 __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8 __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2 __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3 __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4 __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8 __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char __ovld sub_group_broadcast_first( char value );
+uchar __ovld sub_group_broadcast_first( uchar value );
+short __ovld sub_group_broadcast_first( short value );
+ushort __ovld sub_group_broadcast_first( ushort value );
+int __ovld sub_group_broadcast_first( int value );
+uint __ovld sub_group_broadcast_first( uint value );
+long __ovld sub_group_broadcast_first( long value );
+ulong __ovld sub_group_broadcast_first( ulong value );
+float __ovld sub_group_broadcast_first( float value );
+
+uint4 __ovld sub_group_ballot( int predicate );
+int __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint __ovld sub_group_ballot_find_lsb( uint4 value );
+uint __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4 __ovld __cnfn get_sub_group_eq_mask(void);
+uint4 __ovld __cnfn get_sub_group_ge_mask(void);
+uint4 __ovld __cnfn get_sub_group_gt_mask(void);
+uint4 __ovld __cnfn get_sub_group_le_mask(void);
+uint4 __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2 __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3 __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4 __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8 __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16 __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2 __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3 __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4 __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8 __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char __ovld sub_group_non_uniform_reduce_add( char value );
+uchar __ovld sub_group_non_uniform_reduce_add( uchar value );
+short __ovld sub_group_non_uniform_reduce_add( short value );
+ushort __ovld sub_group_non_uniform_reduce_add( ushort value );
+int __ovld sub_group_non_uniform_reduce_add( int value );
+uint __ovld sub_group_non_uniform_reduce_add( uint value );
+long __ovld sub_group_non_uniform_reduce_add( long value );
+ulong __ovld sub_group_non_uniform_reduce_add( ulong value );
+float __ovld sub_group_non_uniform_reduce_add( float value );
+
+char __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int __ovld sub_group_non_uniform_reduce_mul( int value );
+uint __ovld sub_group_non_uniform_reduce_mul( uint value );
+long __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char __ovld sub_group_non_uniform_reduce_min( char value );
+uchar __ovld sub_group_non_uniform_reduce_min( uchar value );
+short __ovld sub_group_non_uniform_reduce_min( short value );
+ushort __ovld sub_group_non_uniform_reduce_min( ushort value );
+int __ovld sub_group_non_uniform_reduce_min( int value );
+uint __ovld sub_group_non_uniform_reduce_min( uint value );
+long __ovld sub_group_non_uniform_reduce_min( long value );
+ulong __ovld sub_group_non_uniform_reduce_min( ulong value );
+float __ovld sub_group_non_uniform_reduce_min( float value );
+
+char __ovld sub_group_non_uniform_reduce_max( char value );
+uchar __ovld sub_group_non_uniform_reduce_max( uchar value );
+short __ovld sub_group_non_uniform_reduce_max( short value );
+ushort __ovld sub_group_non_uniform_reduce_max( ushort value );
+int __ovld sub_group_non_uniform_reduce_max( int value );
+uint __ovld sub_group_non_uniform_reduce_max( uint value );
+long __ovld sub_group_non_uniform_reduce_max( long value );
+ulong __ovld sub_group_non_uniform_reduce_max( ulong value );
+float __ovld sub_group_non_uniform_reduce_max( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char __ovld sub_group_non_uniform_reduce_and( char value );
+uchar __ovld sub_group_non_uniform_reduce_and( uchar value );
+short __ovld sub_group_non_uniform_reduce_and( short value );
+ushort __ovld sub_group_non_uniform_reduce_and( ushort value );
+int __ovld sub_group_non_uniform_reduce_and( int value );
+uint __ovld sub_group_non_uniform_reduce_and( uint value );
+long __ovld sub_group_non_uniform_reduce_and( long value );
+ulong __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char __ovld sub_group_non_uniform_reduce_or( char value );
+uchar __ovld sub_group_non_uniform_reduce_or( uchar value );
+short __ovld sub_group_non_uniform_reduce_or( short value );
+ushort __ovld sub_group_non_uniform_reduce_or( ushort value );
+int __ovld sub_group_non_uniform_reduce_or( int value );
+uint __ovld sub_group_non_uniform_reduce_or( uint value );
+long __ovld sub_group_non_uniform_reduce_or( long value );
+ulong __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int __ovld sub_group_non_uniform_reduce_xor( int value );
+uint __ovld sub_group_non_uniform_reduce_xor( uint value );
+long __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_non_uniform_reduce_add( half value );
+half __ovld sub_group_non_uniform_reduce_mul( half value );
+half __ovld sub_group_non_uniform_reduce_min( half value );
+half __ovld sub_group_non_uniform_reduce_max( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_non_uniform_reduce_add( double value );
+double __ovld sub_group_non_uniform_reduce_mul( double value );
+double __ovld sub_group_non_uniform_reduce_min( double value );
+double __ovld sub_group_non_uniform_reduce_max( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
+
+#if defined(cl_khr_subgroup_shuffle)
+char __ovld sub_group_shuffle( char value, uint index );
+uchar __ovld sub_group_shuffle( uchar value, uint index );
+short __ovld sub_group_shuffle( short value, uint index );
+ushort __ovld sub_group_shuffle( ushort value, uint index );
+int __ovld sub_group_shuffle( int value, uint index );
+uint __ovld sub_group_shuffle( uint value, uint index );
+long __ovld sub_group_shuffle( long value, uint index );
+ulong __ovld sub_group_shuffle( ulong value, uint index );
+float __ovld sub_group_shuffle( float value, uint index );
+
+char __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int __ovld sub_group_shuffle_xor( int value, uint mask );
+uint __ovld sub_group_shuffle_xor( uint value, uint mask );
+long __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_shuffle( half value, uint index );
+half __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_shuffle( double value, uint index );
+double __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char __ovld sub_group_shuffle_up( char value, uint delta );
+uchar __ovld sub_group_shuffle_up( uchar value, uint delta );
+short __ovld sub_group_shuffle_up( short value, uint delta );
+ushort __ovld sub_group_shuffle_up( ushort value, uint delta );
+int __ovld sub_group_shuffle_up( int value, uint delta );
+uint __ovld sub_group_shuffle_up( uint value, uint delta );
+long __ovld sub_group_shuffle_up( long value, uint delta );
+ulong __ovld sub_group_shuffle_up( ulong value, uint delta );
+float __ovld sub_group_shuffle_up( float value, uint delta );
+
+char __ovld sub_group_shuffle_down( char value, uint delta );
+uchar __ovld sub_group_shuffle_down( uchar value, uint delta );
+short __ovld sub_group_shuffle_down( short value, uint delta );
+ushort __ovld sub_group_shuffle_down( ushort value, uint delta );
+int __ovld sub_group_shuffle_down( int value, uint delta );
+uint __ovld sub_group_shuffle_down( uint value, uint delta );
+long __ovld sub_group_shuffle_down( long value, uint delta );
+ulong __ovld sub_group_shuffle_down( ulong value, uint delta );
+float __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_shuffle_up( half value, uint delta );
+half __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_shuffle_up( double value, uint delta );
+double __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_clustered_reduce
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
float __ovld __conv intel_sub_group_shuffle( float x, uint c );
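The declarations added above expose the cl_khr_subgroup_* built-ins (extended-type broadcasts, non-uniform votes, ballots, non-uniform arithmetic, shuffles and clustered reductions). A minimal kernel sketch using a few of them follows; it assumes a device and compiler that support cl_khr_subgroup_ballot and cl_khr_subgroup_clustered_reduce, and the kernel and buffer names are illustrative only.

// Hypothetical OpenCL C 2.0 kernel exercising some of the new sub-group built-ins.
__kernel void subgroup_demo(__global const int *in, __global int *out) {
    size_t gid = get_global_id(0);
    int v = in[gid];

    // Sum over clusters of 4 adjacent sub-group lanes (cl_khr_subgroup_clustered_reduce).
    int cluster_sum = sub_group_clustered_reduce_add(v, 4);

    // Value held by the first active work-item in the sub-group (cl_khr_subgroup_ballot).
    int first = sub_group_broadcast_first(v);

    // Bitmask of lanes whose predicate is true, and its population count.
    uint4 mask = sub_group_ballot(v > 0);
    uint positive_lanes = sub_group_ballot_bit_count(mask);

    out[gid] = cluster_sum + first + (int)positive_lanes;
}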
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index a422c98bf97d..406c9748e286 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -1,4 +1,4 @@
-/*===---- __clang_openmp_math_declares.h - OpenMP math declares ------------===
+/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,27 +7,36 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __CLANG_OPENMP_MATH_DECLARES_H__
-#define __CLANG_OPENMP_MATH_DECLARES_H__
+#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif
-#if defined(__NVPTX__) && defined(_OPENMP)
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
-#define __CUDA__
-
-#if defined(__cplusplus)
- #include <__clang_cuda_math_forward_declares.h>
+#ifdef __cplusplus
+extern "C" {
#endif
+#define __CUDA__
+#define __OPENMP_NVPTX__
+
/// Include declarations for libdevice functions.
#include <__clang_cuda_libdevice_declares.h>
+
/// Provide definitions for these functions.
#include <__clang_cuda_device_functions.h>
+#undef __OPENMP_NVPTX__
#undef __CUDA__
+#ifdef __cplusplus
+} // extern "C"
#endif
+
+#pragma omp end declare variant
+
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h
deleted file mode 100644
index 5d7ce9a965d3..000000000000
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===---- __clang_openmp_math.h - OpenMP target math support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-/// TODO:
-/// We are currently reusing the functionality of the Clang-CUDA code path
-/// as an alternative to the host declarations provided by math.h and cmath.
-/// This is suboptimal.
-///
-/// We should instead declare the device functions in a similar way, e.g.,
-/// through OpenMP 5.0 variants, and afterwards populate the module with the
-/// host declarations by unconditionally including the host math.h or cmath,
-/// respectively. This is actually what the Clang-CUDA code path does, using
-/// __device__ instead of variants to avoid redeclarations and get the desired
-/// overload resolution.
-
-#define __CUDA__
-
-#if defined(__cplusplus)
- #include <__clang_cuda_cmath.h>
-#endif
-
-#undef __CUDA__
-
-/// Magic macro for stopping the math.h/cmath host header from being included.
-#define __CLANG_NO_HOST_MATH__
-
-#endif
-
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
index a5183a1d8d1b..bd6011eb6f6d 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
@@ -1,4 +1,4 @@
-/*===-------------- cmath - Alternative cmath header -----------------------===
+/*===---- openmp_wrapper/cmath --------- OpenMP cmath intercept ------- c++ -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,10 +7,69 @@
*===-----------------------------------------------------------------------===
*/
-#include <__clang_openmp_math.h>
+#ifndef __CLANG_OPENMP_CMATH_H__
+#define __CLANG_OPENMP_CMATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
-#ifndef __CLANG_NO_HOST_MATH__
#include_next <cmath>
-#else
-#undef __CLANG_NO_HOST_MATH__
+
+// Make sure we include our math.h overlay; it probably happened already, but
+// we need to be sure.
+#include <math.h>
+
+// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`
+// which might live in cstdlib.
+#include <cstdlib>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_cmath.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
+// Since we do not include the latter we define them ourselves.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+ return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+ return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+ return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+
#endif
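With this cmath overlay on the include path, <cmath> calls made inside an OpenMP target region resolve to the CUDA-derived device implementations pulled in by the declare-variant block above when offloading to nvptx/nvptx64. A minimal sketch (the function name is illustrative; it assumes a working NVPTX offload toolchain):

#include <cmath>

// Computes hypot on the device; std::hypot binds to the device variant
// declared by the overlay when this file is compiled for nvptx64 offloading.
double device_hypot(double x, double y) {
  double r = 0.0;
  #pragma omp target map(from : r)
  { r = std::hypot(x, y); }
  return r;
}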
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
new file mode 100644
index 000000000000..1ed0b14879ef
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require std::math functions in the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex>
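This wrapper pulls in __clang_cuda_complex_builtins.h so that complex multiplication and division inside a target region lower to the builtins it defines. A minimal sketch (illustrative function name; assumes NVPTX offloading):

#include <complex>

// Multiplies two complex numbers on the device and returns the real part.
double complex_demo() {
  double re = 0.0;
  #pragma omp target map(from : re)
  {
    std::complex<double> a(1.0, 2.0), b(3.0, -1.0);
    re = (a * b).real();
  }
  return re;
}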
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
new file mode 100644
index 000000000000..829c7a785725
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX_H__
+#define __CLANG_OPENMP_COMPLEX_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require math functions in the complex builtins below.
+#include <math.h>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/math.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/math.h
index d2786ecb2424..c64af8b13ece 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/math.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/math.h
@@ -1,4 +1,4 @@
-/*===------------- math.h - Alternative math.h header ----------------------===
+/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,11 +7,45 @@
*===-----------------------------------------------------------------------===
*/
-#include <__clang_openmp_math.h>
+// If we are in C++ mode and include <math.h> (not <cmath>) first, we still need
+// to make sure <cmath> is read first. The problem otherwise is that we haven't
+// seen the declarations of the math.h functions when the system math.h includes
+// our cmath overlay. However, our cmath overlay, or better the underlying
+// overlay, e.g. CUDA, uses the math.h functions. Since we haven't declared them
+// yet we get errors. CUDA avoids this by eagerly declaring all math functions
+// (in the __device__ space) but we cannot do this. Instead we break the
+// dependence by forcing cmath to go first. While our cmath will in turn include
+// this file, the cmath guards will prevent recursion.
+#ifdef __cplusplus
+#include <cmath>
+#endif
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <math.h>
-#else
-#undef __CLANG_NO_HOST_MATH__
+#ifndef __CLANG_OPENMP_MATH_H__
+#define __CLANG_OPENMP_MATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
#endif
+#include_next <math.h>
+
+// We need limits.h for __clang_cuda_math.h below, and since it should not
+// hurt we include it eagerly here.
+#include <limits.h>
+
+// We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs`
+// which should live in stdlib.h.
+#include <stdlib.h>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_math.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#endif
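The comment block above covers the case where a C++ translation unit includes <math.h> directly; the wrapper forces <cmath> in first so the device overlay sees the declarations it needs. A minimal sketch of that usage (illustrative function name):

#include <math.h>   // C++ TU including math.h directly; the wrapper pulls cmath in first

// sqrtf binds to the NVPTX device variant from __clang_cuda_math.h when this
// translation unit is compiled for OpenMP offloading.
float device_sqrt(float x) {
  float r = 0.0f;
  #pragma omp target map(from : r)
  { r = sqrtf(x); }
  return r;
}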
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
new file mode 100644
index 000000000000..1387d925b126
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
@@ -0,0 +1,70 @@
+//===--------- new - OPENMP wrapper for <new> ------------------------------===
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===-----------------------------------------------------------------------===
+
+#ifndef __CLANG_OPENMP_WRAPPERS_NEW
+#define __CLANG_OPENMP_WRAPPERS_NEW
+
+#include_next <new>
+
+#if defined(__NVPTX__) && defined(_OPENMP)
+
+#include <cstdlib>
+
+#pragma push_macro("OPENMP_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define OPENMP_NOEXCEPT noexcept
+#else
+#define OPENMP_NOEXCEPT
+#endif
+
+// Device overrides for non-placement new and delete.
+inline void *operator new(__SIZE_TYPE__ size) {
+ if (size == 0)
+ size = 1;
+ return ::malloc(size);
+}
+inline void *operator new(__SIZE_TYPE__ size,
+ const std::nothrow_t &) OPENMP_NOEXCEPT {
+ return ::operator new(size);
+}
+
+inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
+inline void *operator new[](__SIZE_TYPE__ size, const std::nothrow_t &) {
+ return ::operator new(size);
+}
+
+inline void operator delete(void *ptr)OPENMP_NOEXCEPT {
+ if (ptr)
+ ::free(ptr);
+}
+inline void operator delete(void *ptr, const std::nothrow_t &)OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+
+inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr,
+ const std::nothrow_t &) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+
+// Sized delete, C++14 only.
+#if __cplusplus >= 201402L
+inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+#endif
+
+#pragma pop_macro("OPENMP_NOEXCEPT")
+#endif
+
+#endif // include guard
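With these overrides, plain new/delete used inside an NVPTX target region maps onto the device's malloc/free. A minimal sketch (illustrative function name; assumes device-side malloc is available):

#include <new>

int new_delete_demo() {
  int out = 0;
  #pragma omp target map(from : out)
  {
    int *p = new int(7);  // routed to ::malloc by the override above
    out = *p;
    delete p;             // routed to ::free
  }
  return out;
}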
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/time.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/time.h
new file mode 100644
index 000000000000..c760dd1ed963
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/time.h
@@ -0,0 +1,32 @@
+/*===---- time.h - OpenMP time header wrapper ------------------------ c ---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_TIME_H__
+#define __CLANG_OPENMP_TIME_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+
+#include_next <time.h>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+__DEVICE__ clock_t clock() { return __nvvm_read_ptx_sreg_clock(); }
+
+#pragma omp end declare variant
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/serializeintrin.h b/contrib/llvm-project/clang/lib/Headers/serializeintrin.h
new file mode 100644
index 000000000000..b774e5a24a0b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/serializeintrin.h
@@ -0,0 +1,30 @@
+/*===--------------- serializeintrin.h - serialize intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <serializeintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SERIALIZEINTRIN_H
+#define __SERIALIZEINTRIN_H
+
+/// Serialize instruction fetch and execution.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> SERIALIZE </c> instruction.
+///
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("serialize")))
+_serialize (void)
+{
+ __builtin_ia32_serialize ();
+}
+
+#endif /* __SERIALIZEINTRIN_H */
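A minimal usage sketch for the new intrinsic (illustrative helper; compile with -mserialize or a suitable -march): _serialize() ensures earlier instructions have completed and buffered writes are drained before any later instruction is fetched, which is the kind of fence needed after patching code bytes at run time.

#include <immintrin.h>

// Hypothetical helper: after writing new instruction bytes, force a
// serializing point before control flow may reach the patched code.
static void publish_patched_code(void) {
  /* ... store the new instruction bytes ... */
  _serialize();
  /* ... safe to execute the patched code from this thread ... */
}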
diff --git a/contrib/llvm-project/clang/lib/Headers/tsxldtrkintrin.h b/contrib/llvm-project/clang/lib/Headers/tsxldtrkintrin.h
new file mode 100644
index 000000000000..491823e93fc0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/tsxldtrkintrin.h
@@ -0,0 +1,56 @@
+/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <tsxldtrkintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __TSXLDTRKINTRIN_H
+#define __TSXLDTRKINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk")))
+
+/// Marks the start of a TSX (RTM) suspend load address tracking region. If
+/// this intrinsic is used inside a transactional region, subsequent loads
+/// are not added to the read set of the transaction. If it's used inside a
+/// suspend load address tracking region, it will cause a transaction abort.
+/// If it's used outside of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XSUSLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xsusldtrk (void)
+{
+ __builtin_ia32_xsusldtrk();
+}
+
+/// Marks the end of a TSX (RTM) suspend load address tracking region. If this
+/// intrinsic is used inside a suspend load address tracking region, it will
+/// end the suspend region and all following load addresses will be added to
+/// the transaction read set. If it is used inside an active transaction but
+/// not in a suspend region, it will cause a transaction abort. If it is used
+/// outside of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XRESLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xresldtrk (void)
+{
+ __builtin_ia32_xresldtrk();
+}
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __TSXLDTRKINTRIN_H */
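
A hedged usage sketch, not part of the patch: these intrinsics bracket loads whose addresses should stay out of an RTM transaction's read set. The example pairs them with the RTM intrinsics _xbegin()/_xend() from <immintrin.h>; it assumes both the rtm and tsxldtrk features are enabled, and transactional_update is a hypothetical helper whose caller handles the abort path.

    #include <immintrin.h>

    __attribute__((target("rtm,tsxldtrk")))
    static int transactional_update(int *hot, const int *cold) {
      if (_xbegin() == _XBEGIN_STARTED) {
        _xsusldtrk();            /* loads below are not added to the read set */
        int observed = *cold;    /* e.g. rarely-changing configuration data */
        _xresldtrk();            /* resume normal read-set tracking */
        *hot += observed;
        _xend();
        return 1;                /* committed */
      }
      return 0;                  /* aborted; caller falls back to a lock */
    }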
diff --git a/contrib/llvm-project/clang/lib/Headers/vecintrin.h b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
index c71b76a3ee39..e58c9769e8cb 100644
--- a/contrib/llvm-project/clang/lib/Headers/vecintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
@@ -43,1281 +43,1341 @@ __lcbb(const void *__ptr, unsigned short __len)
/*-- vec_extract ------------------------------------------------------------*/
static inline __ATTRS_o_ai signed char
-vec_extract(vector signed char __vec, int __index) {
+vec_extract(__vector signed char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai unsigned char
-vec_extract(vector bool char __vec, int __index) {
+vec_extract(__vector __bool char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai unsigned char
-vec_extract(vector unsigned char __vec, int __index) {
+vec_extract(__vector unsigned char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai signed short
-vec_extract(vector signed short __vec, int __index) {
+vec_extract(__vector signed short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai unsigned short
-vec_extract(vector bool short __vec, int __index) {
+vec_extract(__vector __bool short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai unsigned short
-vec_extract(vector unsigned short __vec, int __index) {
+vec_extract(__vector unsigned short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai signed int
-vec_extract(vector signed int __vec, int __index) {
+vec_extract(__vector signed int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai unsigned int
-vec_extract(vector bool int __vec, int __index) {
+vec_extract(__vector __bool int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai unsigned int
-vec_extract(vector unsigned int __vec, int __index) {
+vec_extract(__vector unsigned int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai signed long long
-vec_extract(vector signed long long __vec, int __index) {
+vec_extract(__vector signed long long __vec, int __index) {
return __vec[__index & 1];
}
static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector bool long long __vec, int __index) {
+vec_extract(__vector __bool long long __vec, int __index) {
return __vec[__index & 1];
}
static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector unsigned long long __vec, int __index) {
+vec_extract(__vector unsigned long long __vec, int __index) {
return __vec[__index & 1];
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai float
-vec_extract(vector float __vec, int __index) {
+vec_extract(__vector float __vec, int __index) {
return __vec[__index & 3];
}
#endif
static inline __ATTRS_o_ai double
-vec_extract(vector double __vec, int __index) {
+vec_extract(__vector double __vec, int __index) {
return __vec[__index & 1];
}
/*-- vec_insert -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed char
+vec_insert(signed char __scalar, __vector signed char __vec, int __index) {
__vec[__index & 15] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
- vector unsigned char __newvec = (vector unsigned char)__vec;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector __bool char __vec, int __index) {
+ __vector unsigned char __newvec = (__vector unsigned char)__vec;
__newvec[__index & 15] = (unsigned char)__scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector unsigned char __vec, int __index) {
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
-vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed short
+vec_insert(signed short __scalar, __vector signed short __vec, int __index) {
__vec[__index & 7] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
- vector unsigned short __newvec = (vector unsigned short)__vec;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector __bool short __vec,
+ int __index) {
+ __vector unsigned short __newvec = (__vector unsigned short)__vec;
__newvec[__index & 7] = (unsigned short)__scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector unsigned short __vec,
+ int __index) {
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
-vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed int
+vec_insert(signed int __scalar, __vector signed int __vec, int __index) {
__vec[__index & 3] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
- vector unsigned int __newvec = (vector unsigned int)__vec;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector __bool int __vec, int __index) {
+ __vector unsigned int __newvec = (__vector unsigned int)__vec;
__newvec[__index & 3] = __scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector unsigned int __vec, int __index) {
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_insert(signed long long __scalar, vector signed long long __vec,
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert(signed long long __scalar, __vector signed long long __vec,
int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector bool long long __vec,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector __bool long long __vec,
int __index) {
- vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __vector unsigned long long __newvec = (__vector unsigned long long)__vec;
__newvec[__index & 1] = __scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector unsigned long long __vec,
int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert(float __scalar, vector float __vec, int __index) {
+static inline __ATTRS_o_ai __vector float
+vec_insert(float __scalar, __vector float __vec, int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_insert(double __scalar, vector double __vec, int __index) {
+static inline __ATTRS_o_ai __vector double
+vec_insert(double __scalar, __vector double __vec, int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
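
Not part of the patch: a small, hedged illustration of the element accessors above after the rename to the __vector keyword. It assumes an s390x target with the vector facility and the zvector language extension enabled (e.g. -march=z13 -mzvector); demo_insert_extract is a hypothetical name.

    #include <vecintrin.h>

    __vector signed int demo_insert_extract(void) {
      __vector signed int v = {1, 2, 3, 4};
      signed int third = vec_extract(v, 2);  /* 3; the index is masked with & 3 */
      v = vec_insert(third + 39, v, 0);      /* v becomes {42, 2, 3, 4} */
      return v;
    }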
/*-- vec_promote ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_promote(signed char __scalar, int __index) {
- const vector signed char __zero = (vector signed char)0;
- vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector signed char __zero = (__vector signed char)0;
+ __vector signed char __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_promote(unsigned char __scalar, int __index) {
- const vector unsigned char __zero = (vector unsigned char)0;
- vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector unsigned char __zero = (__vector unsigned char)0;
+ __vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_promote(signed short __scalar, int __index) {
- const vector signed short __zero = (vector signed short)0;
- vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector signed short __zero = (__vector signed short)0;
+ __vector signed short __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_promote(unsigned short __scalar, int __index) {
- const vector unsigned short __zero = (vector unsigned short)0;
- vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector unsigned short __zero = (__vector unsigned short)0;
+ __vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_promote(signed int __scalar, int __index) {
- const vector signed int __zero = (vector signed int)0;
- vector signed int __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1, -1, -1);
+ const __vector signed int __zero = (__vector signed int)0;
+ __vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_promote(unsigned int __scalar, int __index) {
- const vector unsigned int __zero = (vector unsigned int)0;
- vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1, -1, -1);
+ const __vector unsigned int __zero = (__vector unsigned int)0;
+ __vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_promote(signed long long __scalar, int __index) {
- const vector signed long long __zero = (vector signed long long)0;
- vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1);
+ const __vector signed long long __zero = (__vector signed long long)0;
+ __vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_promote(unsigned long long __scalar, int __index) {
- const vector unsigned long long __zero = (vector unsigned long long)0;
- vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1);
+ const __vector unsigned long long __zero = (__vector unsigned long long)0;
+ __vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_promote(float __scalar, int __index) {
- const vector float __zero = (vector float)0;
- vector float __vec = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
+ const __vector float __zero = (__vector float)0.0f;
+ __vector float __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_promote(double __scalar, int __index) {
- const vector double __zero = (vector double)0;
- vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ const __vector double __zero = (__vector double)0.0;
+ __vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
/*-- vec_insert_and_zero ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_insert_and_zero(const signed char *__ptr) {
- vector signed char __vec = (vector signed char)0;
+ __vector signed char __vec = (__vector signed char)0;
__vec[7] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_insert_and_zero(const unsigned char *__ptr) {
- vector unsigned char __vec = (vector unsigned char)0;
+ __vector unsigned char __vec = (__vector unsigned char)0;
__vec[7] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_insert_and_zero(const signed short *__ptr) {
- vector signed short __vec = (vector signed short)0;
+ __vector signed short __vec = (__vector signed short)0;
__vec[3] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_insert_and_zero(const unsigned short *__ptr) {
- vector unsigned short __vec = (vector unsigned short)0;
+ __vector unsigned short __vec = (__vector unsigned short)0;
__vec[3] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_insert_and_zero(const signed int *__ptr) {
- vector signed int __vec = (vector signed int)0;
+ __vector signed int __vec = (__vector signed int)0;
__vec[1] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_insert_and_zero(const unsigned int *__ptr) {
- vector unsigned int __vec = (vector unsigned int)0;
+ __vector unsigned int __vec = (__vector unsigned int)0;
__vec[1] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_insert_and_zero(const signed long long *__ptr) {
- vector signed long long __vec = (vector signed long long)0;
+ __vector signed long long __vec = (__vector signed long long)0;
__vec[0] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_insert_and_zero(const unsigned long long *__ptr) {
- vector unsigned long long __vec = (vector unsigned long long)0;
+ __vector unsigned long long __vec = (__vector unsigned long long)0;
__vec[0] = *__ptr;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_insert_and_zero(const float *__ptr) {
- vector float __vec = (vector float)0;
+ __vector float __vec = (__vector float)0.0f;
__vec[1] = *__ptr;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_insert_and_zero(const double *__ptr) {
- vector double __vec = (vector double)0;
+ __vector double __vec = (__vector double)0.0;
__vec[0] = *__ptr;
return __vec;
}
/*-- vec_perm ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_perm(vector signed char __a, vector signed char __b,
- vector unsigned char __c) {
- return (vector signed char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed char
+vec_perm(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c) {
+ return (__vector signed char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_perm(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector unsigned char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_perm(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool char
-vec_perm(vector bool char __a, vector bool char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool char
+vec_perm(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed short
-vec_perm(vector signed short __a, vector signed short __b,
- vector unsigned char __c) {
- return (vector signed short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed short
+vec_perm(__vector signed short __a, __vector signed short __b,
+ __vector unsigned char __c) {
+ return (__vector signed short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_perm(vector unsigned short __a, vector unsigned short __b,
- vector unsigned char __c) {
- return (vector unsigned short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_perm(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool short
-vec_perm(vector bool short __a, vector bool short __b,
- vector unsigned char __c) {
- return (vector bool short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool short
+vec_perm(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned char __c) {
+ return (__vector __bool short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_perm(vector signed int __a, vector signed int __b,
- vector unsigned char __c) {
- return (vector signed int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed int
+vec_perm(__vector signed int __a, __vector signed int __b,
+ __vector unsigned char __c) {
+ return (__vector signed int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_perm(vector unsigned int __a, vector unsigned int __b,
- vector unsigned char __c) {
- return (vector unsigned int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_perm(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool int
-vec_perm(vector bool int __a, vector bool int __b,
- vector unsigned char __c) {
- return (vector bool int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool int
+vec_perm(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned char __c) {
+ return (__vector __bool int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_perm(vector signed long long __a, vector signed long long __b,
- vector unsigned char __c) {
- return (vector signed long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed long long
+vec_perm(__vector signed long long __a, __vector signed long long __b,
+ __vector unsigned char __c) {
+ return (__vector signed long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
- vector unsigned char __c) {
- return (vector unsigned long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_perm(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_perm(vector bool long long __a, vector bool long long __b,
- vector unsigned char __c) {
- return (vector bool long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_perm(__vector __bool long long __a, __vector __bool long long __b,
+ __vector unsigned char __c) {
+ return (__vector __bool long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_perm(vector float __a, vector float __b,
- vector unsigned char __c) {
- return (vector float)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector float
+vec_perm(__vector float __a, __vector float __b,
+ __vector unsigned char __c) {
+ return (__vector float)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_perm(vector double __a, vector double __b,
- vector unsigned char __c) {
- return (vector double)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector double
+vec_perm(__vector double __a, __vector double __b,
+ __vector unsigned char __c) {
+ return (__vector double)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
/*-- vec_permi --------------------------------------------------------------*/
// This prototype is deprecated.
-extern __ATTRS_o vector signed long long
-vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_permi(__vector signed long long __a, __vector signed long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector unsigned long long
-vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_permi(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector bool long long
-vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+extern __ATTRS_o __vector __bool long long
+vec_permi(__vector __bool long long __a, __vector __bool long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_permi(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_permi(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 3);
#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
- __builtin_s390_vpdi((vector unsigned long long)(X), \
- (vector unsigned long long)(Y), \
+ __builtin_s390_vpdi((__vector unsigned long long)(X), \
+ (__vector unsigned long long)(Y), \
(((Z) & 2) << 1) | ((Z) & 1)))
/*-- vec_bperm_u128 ---------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned long long
-vec_bperm_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned long long
+vec_bperm_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vbperm(__a, __b);
}
#endif
/*-- vec_revb ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_revb(vector signed short __vec) {
- return (vector signed short)
- __builtin_s390_vlbrh((vector unsigned short)__vec);
+static inline __ATTRS_o_ai __vector signed short
+vec_revb(__vector signed short __vec) {
+ return (__vector signed short)
+ __builtin_s390_vlbrh((__vector unsigned short)__vec);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_revb(vector unsigned short __vec) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_revb(__vector unsigned short __vec) {
return __builtin_s390_vlbrh(__vec);
}
-static inline __ATTRS_o_ai vector signed int
-vec_revb(vector signed int __vec) {
- return (vector signed int)
- __builtin_s390_vlbrf((vector unsigned int)__vec);
+static inline __ATTRS_o_ai __vector signed int
+vec_revb(__vector signed int __vec) {
+ return (__vector signed int)
+ __builtin_s390_vlbrf((__vector unsigned int)__vec);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_revb(vector unsigned int __vec) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_revb(__vector unsigned int __vec) {
return __builtin_s390_vlbrf(__vec);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_revb(vector signed long long __vec) {
- return (vector signed long long)
- __builtin_s390_vlbrg((vector unsigned long long)__vec);
+static inline __ATTRS_o_ai __vector signed long long
+vec_revb(__vector signed long long __vec) {
+ return (__vector signed long long)
+ __builtin_s390_vlbrg((__vector unsigned long long)__vec);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_revb(vector unsigned long long __vec) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_revb(__vector unsigned long long __vec) {
return __builtin_s390_vlbrg(__vec);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_revb(vector float __vec) {
- return (vector float)
- __builtin_s390_vlbrf((vector unsigned int)__vec);
+static inline __ATTRS_o_ai __vector float
+vec_revb(__vector float __vec) {
+ return (__vector float)
+ __builtin_s390_vlbrf((__vector unsigned int)__vec);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_revb(vector double __vec) {
- return (vector double)
- __builtin_s390_vlbrg((vector unsigned long long)__vec);
+static inline __ATTRS_o_ai __vector double
+vec_revb(__vector double __vec) {
+ return (__vector double)
+ __builtin_s390_vlbrg((__vector unsigned long long)__vec);
}
/*-- vec_reve ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_reve(vector signed char __vec) {
- return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
- __vec[11], __vec[10], __vec[9], __vec[8],
- __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __vec) {
- return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+static inline __ATTRS_o_ai __vector signed char
+vec_reve(__vector signed char __vec) {
+ return (__vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
__vec[11], __vec[10], __vec[9], __vec[8],
__vec[7], __vec[6], __vec[5], __vec[4],
__vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool char
-vec_reve(vector bool char __vec) {
- return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
- __vec[11], __vec[10], __vec[9], __vec[8],
- __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned char
+vec_reve(__vector unsigned char __vec) {
+ return (__vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __vec) {
- return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool char
+vec_reve(__vector __bool char __vec) {
+ return (__vector __bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __vec) {
- return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+static inline __ATTRS_o_ai __vector signed short
+vec_reve(__vector signed short __vec) {
+ return (__vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
__vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool short
-vec_reve(vector bool short __vec) {
- return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned short
+vec_reve(__vector unsigned short __vec) {
+ return (__vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_reve(__vector __bool short __vec) {
+ return (__vector __bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed int
-vec_reve(vector signed int __vec) {
- return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector signed int
+vec_reve(__vector signed int __vec) {
+ return (__vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __vec) {
- return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned int
+vec_reve(__vector unsigned int __vec) {
+ return (__vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool int
-vec_reve(vector bool int __vec) {
- return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool int
+vec_reve(__vector __bool int __vec) {
+ return (__vector __bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __vec) {
- return (vector signed long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector signed long long
+vec_reve(__vector signed long long __vec) {
+ return (__vector signed long long) { __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __vec) {
- return (vector unsigned long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_reve(__vector unsigned long long __vec) {
+ return (__vector unsigned long long) { __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __vec) {
- return (vector bool long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool long long
+vec_reve(__vector __bool long long __vec) {
+ return (__vector __bool long long) { __vec[1], __vec[0] };
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_reve(vector float __vec) {
- return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector float
+vec_reve(__vector float __vec) {
+ return (__vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_reve(vector double __vec) {
- return (vector double) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector double
+vec_reve(__vector double __vec) {
+ return (__vector double) { __vec[1], __vec[0] };
}
/*-- vec_sel ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b,
- vector unsigned char __c) {
- return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c) {
+ return (((__vector signed char)__c & __b) |
+ (~(__vector signed char)__c & __a));
}
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
- return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+ __vector __bool char __c) {
+ return (((__vector signed char)__c & __b) |
+ (~(__vector signed char)__c & __a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
- return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c) {
+ return (((__vector __bool char)__c & __b) |
+ (~(__vector __bool char)__c & __a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+ __vector __bool char __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
- vector bool char __c) {
- return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+ __vector __bool char __c) {
+ return (((__vector unsigned char)__c & __b) |
+ (~(__vector unsigned char)__c & __a));
}
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
- vector unsigned short __c) {
- return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+ __vector unsigned short __c) {
+ return (((__vector signed short)__c & __b) |
+ (~(__vector signed short)__c & __a));
}
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
- vector bool short __c) {
- return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+ __vector __bool short __c) {
+ return (((__vector signed short)__c & __b) |
+ (~(__vector signed short)__c & __a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b,
- vector unsigned short __c) {
- return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned short __c) {
+ return (((__vector __bool short)__c & __b) |
+ (~(__vector __bool short)__c & __a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+ __vector __bool short __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
- vector bool short __c) {
- return (((vector unsigned short)__c & __b) |
- (~(vector unsigned short)__c & __a));
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+ __vector __bool short __c) {
+ return (((__vector unsigned short)__c & __b) |
+ (~(__vector unsigned short)__c & __a));
}
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b,
- vector unsigned int __c) {
- return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+ __vector unsigned int __c) {
+ return (((__vector signed int)__c & __b) |
+ (~(__vector signed int)__c & __a));
}
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
- return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+ __vector __bool int __c) {
+ return (((__vector signed int)__c & __b) |
+ (~(__vector signed int)__c & __a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
- return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned int __c) {
+ return (((__vector __bool int)__c & __b) |
+ (~(__vector __bool int)__c & __a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+ __vector __bool int __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
- return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+ __vector __bool int __c) {
+ return (((__vector unsigned int)__c & __b) |
+ (~(__vector unsigned int)__c & __a));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
- vector unsigned long long __c) {
- return (((vector signed long long)__c & __b) |
- (~(vector signed long long)__c & __a));
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+ __vector unsigned long long __c) {
+ return (((__vector signed long long)__c & __b) |
+ (~(__vector signed long long)__c & __a));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
- vector bool long long __c) {
- return (((vector signed long long)__c & __b) |
- (~(vector signed long long)__c & __a));
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+ __vector __bool long long __c) {
+ return (((__vector signed long long)__c & __b) |
+ (~(__vector signed long long)__c & __a));
}
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
- vector unsigned long long __c) {
- return (((vector bool long long)__c & __b) |
- (~(vector bool long long)__c & __a));
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+ __vector unsigned long long __c) {
+ return (((__vector __bool long long)__c & __b) |
+ (~(__vector __bool long long)__c & __a));
}
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
- vector bool long long __c) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+ __vector __bool long long __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector unsigned long long __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
- vector bool long long __c) {
- return (((vector unsigned long long)__c & __b) |
- (~(vector unsigned long long)__c & __a));
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector __bool long long __c) {
+ return (((__vector unsigned long long)__c & __b) |
+ (~(__vector unsigned long long)__c & __a));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector unsigned int __c) {
- return (vector float)((__c & (vector unsigned int)__b) |
- (~__c & (vector unsigned int)__a));
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector unsigned int __c) {
+ return (__vector float)((__c & (__vector unsigned int)__b) |
+ (~__c & (__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector bool int __c) {
- vector unsigned int __ac = (vector unsigned int)__a;
- vector unsigned int __bc = (vector unsigned int)__b;
- vector unsigned int __cc = (vector unsigned int)__c;
- return (vector float)((__cc & __bc) | (~__cc & __ac));
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector __bool int __c) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
+ __vector unsigned int __bc = (__vector unsigned int)__b;
+ __vector unsigned int __cc = (__vector unsigned int)__c;
+ return (__vector float)((__cc & __bc) | (~__cc & __ac));
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
- return (vector double)((__c & (vector unsigned long long)__b) |
- (~__c & (vector unsigned long long)__a));
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+ __vector unsigned long long __c) {
+ return (__vector double)((__c & (__vector unsigned long long)__b) |
+ (~__c & (__vector unsigned long long)__a));
}
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
- vector unsigned long long __bc = (vector unsigned long long)__b;
- vector unsigned long long __cc = (vector unsigned long long)__c;
- return (vector double)((__cc & __bc) | (~__cc & __ac));
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+ __vector __bool long long __c) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
+ __vector unsigned long long __cc = (__vector unsigned long long)__c;
+ return (__vector double)((__cc & __bc) | (~__cc & __ac));
}
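
Not part of the patch: a hedged sketch of the bitwise-select semantics implemented above, where mask bits that are set pick from the second operand and clear bits keep the first. Only vec_sel and a hand-written mask are used, so no other helpers are assumed; demo_select is a hypothetical name.

    #include <vecintrin.h>

    __vector unsigned int demo_select(void) {
      __vector unsigned int a = { 1U,  2U,  3U,  4U};
      __vector unsigned int b = {10U, 20U, 30U, 40U};
      /* All-ones lanes take the element from b; all-zero lanes keep a. */
      __vector unsigned int mask = {~0U, 0U, ~0U, 0U};
      return vec_sel(a, b, mask);            /* {10, 2, 30, 4} */
    }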
/*-- vec_gather_element -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed int
-vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector signed int
+vec_gather_element(__vector signed int __vec,
+ __vector unsigned int __offset,
const signed int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const signed int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector bool int
-vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector __bool int
+vec_gather_element(__vector __bool int __vec,
+ __vector unsigned int __offset,
const unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const unsigned int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gather_element(__vector unsigned int __vec,
+ __vector unsigned int __offset,
const unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const unsigned int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_gather_element(vector signed long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector signed long long
+vec_gather_element(__vector signed long long __vec,
+ __vector unsigned long long __offset,
const signed long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const signed long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_gather_element(vector bool long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector __bool long long
+vec_gather_element(__vector __bool long long __vec,
+ __vector unsigned long long __offset,
const unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const unsigned long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gather_element(vector unsigned long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gather_element(__vector unsigned long long __vec,
+ __vector unsigned long long __offset,
const unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const unsigned long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_gather_element(vector float __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector float
+vec_gather_element(__vector float __vec,
+ __vector unsigned int __offset,
const float *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const float *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_gather_element(vector double __vec, vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector double
+vec_gather_element(__vector double __vec,
+ __vector unsigned long long __offset,
const double *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const double *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
/*-- vec_scatter_element ----------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector signed int __vec,
+ __vector unsigned int __offset,
signed int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(signed int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector __bool int __vec,
+ __vector unsigned int __offset,
unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector unsigned int __vec,
+ __vector unsigned int __offset,
unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector signed long long __vec,
+ __vector unsigned long long __offset,
signed long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(signed long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector __bool long long __vec,
+ __vector unsigned long long __offset,
unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector unsigned long long __vec,
+ __vector unsigned long long __offset,
unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_scatter_element(vector float __vec, vector unsigned int __offset,
+vec_scatter_element(__vector float __vec,
+ __vector unsigned int __offset,
float *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(float *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(float *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
#endif
static inline __ATTRS_o_ai void
-vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+vec_scatter_element(__vector double __vec,
+ __vector unsigned long long __offset,
double *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(double *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
/*-- vec_xl -----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xl(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xl(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xl(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xl(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xl(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xl(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_xl(long __offset, const signed long long *__ptr) {
- return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed long long *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_xl(long __offset, const unsigned long long *__ptr) {
- return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned long long *)
+ ((const char *)__ptr + __offset);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_xl(long __offset, const float *__ptr) {
- return *(const vector float *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector float *)
+ ((const char *)__ptr + __offset);
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_xl(long __offset, const double *__ptr) {
- return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector double *)
+ ((const char *)__ptr + __offset);
}
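A minimal sketch of the load side, assuming -mzvector and the hypothetical function name below: vec_xl takes a byte offset (added through char * arithmetic, exactly as the new casts above spell out) and returns the unaligned 16-byte vector at that address.
#include <vecintrin.h>
/* Hypothetical example: load two consecutive 16-byte blocks of ints,
   at byte offsets 0 and 16, and add them element-wise. */
static __vector signed int sum_two_blocks(const signed int *p) {
  __vector signed int lo = vec_xl(0, p);
  __vector signed int hi = vec_xl(16, p);
  return lo + hi;
}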
/*-- vec_xld2 ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xld2(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xld2(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xld2(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xld2(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xld2(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xld2(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_xld2(long __offset, const signed long long *__ptr) {
- return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed long long *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_xld2(long __offset, const unsigned long long *__ptr) {
- return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned long long *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_xld2(long __offset, const double *__ptr) {
- return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector double *)
+ ((const char *)__ptr + __offset);
}
/*-- vec_xlw4 ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xlw4(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xlw4(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xlw4(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xlw4(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xlw4(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xlw4(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
/*-- vec_xst ----------------------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_xst(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed long long __vec, long __offset,
+vec_xst(__vector signed long long __vec, long __offset,
signed long long *__ptr) {
- *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+ *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned long long __vec, long __offset,
+vec_xst(__vector unsigned long long __vec, long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
- __vec;
+ *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_xst(vector float __vec, long __offset, float *__ptr) {
- *(vector float *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector float __vec, long __offset, float *__ptr) {
+ *(__vector float *)((char *)__ptr + __offset) = __vec;
}
#endif
static inline __ATTRS_o_ai void
-vec_xst(vector double __vec, long __offset, double *__ptr) {
- *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector double __vec, long __offset, double *__ptr) {
+ *(__vector double *)((char *)__ptr + __offset) = __vec;
}
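The matching store-side sketch, under the same assumptions: vec_xst writes the whole 16-byte vector to __ptr plus the byte offset, with no alignment requirement.
#include <vecintrin.h>
/* Hypothetical example: copy the 16-byte block at src + 16 (bytes)
   to dst + 32 (bytes). */
static void copy_block(unsigned char *dst, const unsigned char *src) {
  __vector unsigned char v = vec_xl(16, src);
  vec_xst(v, 32, dst);
}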
/*-- vec_xstd2 --------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed long long __vec, long __offset,
+vec_xstd2(__vector signed long long __vec, long __offset,
signed long long *__ptr) {
- *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+ *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned long long __vec, long __offset,
+vec_xstd2(__vector unsigned long long __vec, long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
- __vec;
+ *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector double __vec, long __offset, double *__ptr) {
- *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
+ *(__vector double *)((char *)__ptr + __offset) = __vec;
}
/*-- vec_xstw4 --------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
/*-- vec_load_bndry ---------------------------------------------------------*/
-extern __ATTRS_o vector signed char
+extern __ATTRS_o __vector signed char
vec_load_bndry(const signed char *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned char
+extern __ATTRS_o __vector unsigned char
vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed short
+extern __ATTRS_o __vector signed short
vec_load_bndry(const signed short *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned short
+extern __ATTRS_o __vector unsigned short
vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed int
+extern __ATTRS_o __vector signed int
vec_load_bndry(const signed int *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned int
+extern __ATTRS_o __vector unsigned int
vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed long long
+extern __ATTRS_o __vector signed long long
vec_load_bndry(const signed long long *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned long long
+extern __ATTRS_o __vector unsigned long long
vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
#if __ARCH__ >= 12
-extern __ATTRS_o vector float
+extern __ATTRS_o __vector float
vec_load_bndry(const float *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
#endif
-extern __ATTRS_o vector double
+extern __ATTRS_o __vector double
vec_load_bndry(const double *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
@@ -1332,159 +1392,159 @@ vec_load_bndry(const double *__ptr, unsigned short __len)
/*-- vec_load_len -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_load_len(const signed char *__ptr, unsigned int __len) {
- return (vector signed char)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed char)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_load_len(const unsigned char *__ptr, unsigned int __len) {
- return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned char)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_load_len(const signed short *__ptr, unsigned int __len) {
- return (vector signed short)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed short)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_load_len(const unsigned short *__ptr, unsigned int __len) {
- return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned short)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_load_len(const signed int *__ptr, unsigned int __len) {
- return (vector signed int)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed int)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_load_len(const unsigned int *__ptr, unsigned int __len) {
- return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned int)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_load_len(const signed long long *__ptr, unsigned int __len) {
- return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed long long)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
- return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned long long)__builtin_s390_vll(__len, __ptr);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_load_len(const float *__ptr, unsigned int __len) {
- return (vector float)__builtin_s390_vll(__len, __ptr);
+ return (__vector float)__builtin_s390_vll(__len, __ptr);
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_load_len(const double *__ptr, unsigned int __len) {
- return (vector double)__builtin_s390_vll(__len, __ptr);
+ return (__vector double)__builtin_s390_vll(__len, __ptr);
}
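A sketch of a length-controlled load, assuming the usual vll semantics where the second argument names the highest byte index to load (so n bytes correspond to n - 1, capped at 16 bytes): vec_load_len reads only those low bytes and zeroes the rest of the vector, which makes it safe at the end of a buffer.
#include <vecintrin.h>
/* Hypothetical example: load the first n bytes of a short tail
   (1 <= n <= 16) without touching memory past it; the remaining
   lanes of the result are zero. */
static __vector unsigned char load_tail(const unsigned char *p,
                                        unsigned int n) {
  return vec_load_len(p, n - 1);
}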
/*-- vec_load_len_r ---------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned char
+static inline __ATTRS_ai __vector unsigned char
vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
- return (vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
+ return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
}
#endif
/*-- vec_store_len ----------------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_store_len(vector signed char __vec, signed char *__ptr,
+vec_store_len(__vector signed char __vec, signed char *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+vec_store_len(__vector unsigned char __vec, unsigned char *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed short __vec, signed short *__ptr,
+vec_store_len(__vector signed short __vec, signed short *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+vec_store_len(__vector unsigned short __vec, unsigned short *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed int __vec, signed int *__ptr,
+vec_store_len(__vector signed int __vec, signed int *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+vec_store_len(__vector unsigned int __vec, unsigned int *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed long long __vec, signed long long *__ptr,
+vec_store_len(__vector signed long long __vec, signed long long *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+vec_store_len(__vector unsigned long long __vec, unsigned long long *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_store_len(vector float __vec, float *__ptr,
+vec_store_len(__vector float __vec, float *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
#endif
static inline __ATTRS_o_ai void
-vec_store_len(vector double __vec, double *__ptr,
+vec_store_len(__vector double __vec, double *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
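And the store counterpart, again only as a sketch under the same byte-count assumption: vec_store_len writes just the selected low bytes, which is why the header reinterprets every element type as a signed char vector before calling the builtin.
#include <vecintrin.h>
/* Hypothetical example: write the first n bytes (1 <= n <= 16) of a
   byte vector, leaving the rest of the destination untouched. */
static void store_tail(__vector unsigned char v, unsigned char *p,
                       unsigned int n) {
  vec_store_len(v, p, n - 1);
}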
/*-- vec_store_len_r --------------------------------------------------------*/
#if __ARCH__ >= 12
static inline __ATTRS_ai void
-vec_store_len_r(vector unsigned char __vec, unsigned char *__ptr,
+vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr,
unsigned int __len) {
- __builtin_s390_vstrl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr);
}
#endif
/*-- vec_load_pair ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_load_pair(signed long long __a, signed long long __b) {
- return (vector signed long long)(__a, __b);
+ return (__vector signed long long)(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_load_pair(unsigned long long __a, unsigned long long __b) {
- return (vector unsigned long long)(__a, __b);
+ return (__vector unsigned long long)(__a, __b);
}
/*-- vec_genmask ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_genmask(unsigned short __mask)
__constant(__mask) {
- return (vector unsigned char)(
+ return (__vector unsigned char)(
__mask & 0x8000 ? 0xff : 0,
__mask & 0x4000 ? 0xff : 0,
__mask & 0x2000 ? 0xff : 0,
@@ -1505,7 +1565,7 @@ vec_genmask(unsigned short __mask)
/*-- vec_genmasks_* ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_genmasks_8(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 7;
@@ -1515,10 +1575,10 @@ vec_genmasks_8(unsigned char __first, unsigned char __last)
unsigned char __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned char)__value;
+ return (__vector unsigned char)__value;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_genmasks_16(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 15;
@@ -1528,10 +1588,10 @@ vec_genmasks_16(unsigned char __first, unsigned char __last)
unsigned short __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned short)__value;
+ return (__vector unsigned short)__value;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_genmasks_32(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 31;
@@ -1541,10 +1601,10 @@ vec_genmasks_32(unsigned char __first, unsigned char __last)
unsigned int __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned int)__value;
+ return (__vector unsigned int)__value;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_genmasks_64(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 63;
@@ -1554,978 +1614,986 @@ vec_genmasks_64(unsigned char __first, unsigned char __last)
unsigned long long __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned long long)__value;
+ return (__vector unsigned long long)__value;
}
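A worked example of the bit-range logic above, offered as an illustration rather than a normative statement: vec_genmasks_32(__first, __last) splats a 32-bit value whose bits __first through __last are set, counting bit 0 as the most significant bit, and the selection wraps around when __first > __last.
#include <vecintrin.h>
/* Hypothetical check: bits 8..15 (from the MSB) give 0x00ff0000 in
   every element of the result. */
static __vector unsigned int second_byte_mask(void) {
  return vec_genmasks_32(8, 15);
}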
/*-- vec_splat --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_splat(vector signed char __vec, int __index)
+static inline __ATTRS_o_ai __vector signed char
+vec_splat(__vector signed char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector signed char)__vec[__index];
+ return (__vector signed char)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool char
-vec_splat(vector bool char __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool char
+vec_splat(__vector __bool char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector bool char)(vector unsigned char)__vec[__index];
+ return (__vector __bool char)(__vector unsigned char)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_splat(vector unsigned char __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splat(__vector unsigned char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector unsigned char)__vec[__index];
+ return (__vector unsigned char)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed short
-vec_splat(vector signed short __vec, int __index)
+static inline __ATTRS_o_ai __vector signed short
+vec_splat(__vector signed short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector signed short)__vec[__index];
+ return (__vector signed short)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool short
-vec_splat(vector bool short __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool short
+vec_splat(__vector __bool short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector bool short)(vector unsigned short)__vec[__index];
+ return (__vector __bool short)(__vector unsigned short)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_splat(vector unsigned short __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splat(__vector unsigned short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector unsigned short)__vec[__index];
+ return (__vector unsigned short)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed int
-vec_splat(vector signed int __vec, int __index)
+static inline __ATTRS_o_ai __vector signed int
+vec_splat(__vector signed int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector signed int)__vec[__index];
+ return (__vector signed int)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool int
-vec_splat(vector bool int __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool int
+vec_splat(__vector __bool int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector bool int)(vector unsigned int)__vec[__index];
+ return (__vector __bool int)(__vector unsigned int)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_splat(vector unsigned int __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splat(__vector unsigned int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector unsigned int)__vec[__index];
+ return (__vector unsigned int)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed long long
-vec_splat(vector signed long long __vec, int __index)
+static inline __ATTRS_o_ai __vector signed long long
+vec_splat(__vector signed long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector signed long long)__vec[__index];
+ return (__vector signed long long)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool long long
-vec_splat(vector bool long long __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool long long
+vec_splat(__vector __bool long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector bool long long)(vector unsigned long long)__vec[__index];
+ return ((__vector __bool long long)
+ (__vector unsigned long long)__vec[__index]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splat(vector unsigned long long __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splat(__vector unsigned long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector unsigned long long)__vec[__index];
+ return (__vector unsigned long long)__vec[__index];
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splat(vector float __vec, int __index)
+static inline __ATTRS_o_ai __vector float
+vec_splat(__vector float __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector float)__vec[__index];
+ return (__vector float)__vec[__index];
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_splat(vector double __vec, int __index)
+static inline __ATTRS_o_ai __vector double
+vec_splat(__vector double __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector double)__vec[__index];
+ return (__vector double)__vec[__index];
}
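An illustrative sketch: vec_splat broadcasts one existing lane across the whole vector, and the lane index must be a compile-time constant within the range enforced by __constant_range.
#include <vecintrin.h>
/* Hypothetical example: broadcast lane 2 of a 4 x int vector, giving
   (v[2], v[2], v[2], v[2]). */
static __vector signed int broadcast_lane2(__vector signed int v) {
  return vec_splat(v, 2);
}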
/*-- vec_splat_s* -----------------------------------------------------------*/
-static inline __ATTRS_ai vector signed char
+static inline __ATTRS_ai __vector signed char
vec_splat_s8(signed char __scalar)
__constant(__scalar) {
- return (vector signed char)__scalar;
+ return (__vector signed char)__scalar;
}
-static inline __ATTRS_ai vector signed short
+static inline __ATTRS_ai __vector signed short
vec_splat_s16(signed short __scalar)
__constant(__scalar) {
- return (vector signed short)__scalar;
+ return (__vector signed short)__scalar;
}
-static inline __ATTRS_ai vector signed int
+static inline __ATTRS_ai __vector signed int
vec_splat_s32(signed short __scalar)
__constant(__scalar) {
- return (vector signed int)(signed int)__scalar;
+ return (__vector signed int)(signed int)__scalar;
}
-static inline __ATTRS_ai vector signed long long
+static inline __ATTRS_ai __vector signed long long
vec_splat_s64(signed short __scalar)
__constant(__scalar) {
- return (vector signed long long)(signed long)__scalar;
+ return (__vector signed long long)(signed long)__scalar;
}
/*-- vec_splat_u* -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
+static inline __ATTRS_ai __vector unsigned char
vec_splat_u8(unsigned char __scalar)
__constant(__scalar) {
- return (vector unsigned char)__scalar;
+ return (__vector unsigned char)__scalar;
}
-static inline __ATTRS_ai vector unsigned short
+static inline __ATTRS_ai __vector unsigned short
vec_splat_u16(unsigned short __scalar)
__constant(__scalar) {
- return (vector unsigned short)__scalar;
+ return (__vector unsigned short)__scalar;
}
-static inline __ATTRS_ai vector unsigned int
+static inline __ATTRS_ai __vector unsigned int
vec_splat_u32(signed short __scalar)
__constant(__scalar) {
- return (vector unsigned int)(signed int)__scalar;
+ return (__vector unsigned int)(signed int)__scalar;
}
-static inline __ATTRS_ai vector unsigned long long
+static inline __ATTRS_ai __vector unsigned long long
vec_splat_u64(signed short __scalar)
__constant(__scalar) {
- return (vector unsigned long long)(signed long long)__scalar;
+ return (__vector unsigned long long)(signed long long)__scalar;
}
/*-- vec_splats -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_splats(signed char __scalar) {
- return (vector signed char)__scalar;
+ return (__vector signed char)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_splats(unsigned char __scalar) {
- return (vector unsigned char)__scalar;
+ return (__vector unsigned char)__scalar;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_splats(signed short __scalar) {
- return (vector signed short)__scalar;
+ return (__vector signed short)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_splats(unsigned short __scalar) {
- return (vector unsigned short)__scalar;
+ return (__vector unsigned short)__scalar;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_splats(signed int __scalar) {
- return (vector signed int)__scalar;
+ return (__vector signed int)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_splats(unsigned int __scalar) {
- return (vector unsigned int)__scalar;
+ return (__vector unsigned int)__scalar;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_splats(signed long long __scalar) {
- return (vector signed long long)__scalar;
+ return (__vector signed long long)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_splats(unsigned long long __scalar) {
- return (vector unsigned long long)__scalar;
+ return (__vector unsigned long long)__scalar;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_splats(float __scalar) {
- return (vector float)__scalar;
+ return (__vector float)__scalar;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_splats(double __scalar) {
- return (vector double)__scalar;
+ return (__vector double)__scalar;
}
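The scalar counterpart, again as a sketch: vec_splats replicates a scalar into every lane, with the element type deduced from the argument, which combines naturally with the element-wise operators of the vector extension.
#include <vecintrin.h>
/* Hypothetical example: scale every lane of a 2 x double vector by x. */
static __vector double scale(__vector double v, double x) {
  return v * vec_splats(x);
}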
/*-- vec_extend_s64 ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed char __a) {
- return (vector signed long long)(__a[7], __a[15]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed char __a) {
+ return (__vector signed long long)(__a[7], __a[15]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed short __a) {
- return (vector signed long long)(__a[3], __a[7]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed short __a) {
+ return (__vector signed long long)(__a[3], __a[7]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed int __a) {
- return (vector signed long long)(__a[1], __a[3]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed int __a) {
+ return (__vector signed long long)(__a[1], __a[3]);
}
/*-- vec_mergeh -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mergeh(vector signed char __a, vector signed char __b) {
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_mergeh(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_mergeh(vector bool char __a, vector bool char __b) {
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergeh(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergeh(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector unsigned char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mergeh(vector signed short __a, vector signed short __b) {
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_mergeh(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_mergeh(vector bool short __a, vector bool short __b) {
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergeh(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergeh(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mergeh(vector signed int __a, vector signed int __b) {
- return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector signed int
+vec_mergeh(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_mergeh(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergeh(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergeh(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergeh(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)(__a[0], __b[0]);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergeh(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a[0], __b[0]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergeh(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)(__a[0], __b[0]);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergeh(vector float __a, vector float __b) {
- return (vector float)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector float
+vec_mergeh(__vector float __a, __vector float __b) {
+ return (__vector float)(__a[0], __b[0], __a[1], __b[1]);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_mergeh(vector double __a, vector double __b) {
- return (vector double)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector double
+vec_mergeh(__vector double __a, __vector double __b) {
+ return (__vector double)(__a[0], __b[0]);
}
/*-- vec_mergel -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mergel(vector signed char __a, vector signed char __b) {
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_mergel(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_mergel(vector bool char __a, vector bool char __b) {
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergel(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergel(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector unsigned char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mergel(vector signed short __a, vector signed short __b) {
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_mergel(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_mergel(vector bool short __a, vector bool short __b) {
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergel(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergel(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mergel(vector signed int __a, vector signed int __b) {
- return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector signed int
+vec_mergel(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_mergel(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergel(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergel(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mergel(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergel(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)(__a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_mergel(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergel(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergel(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)(__a[1], __b[1]);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergel(vector float __a, vector float __b) {
- return (vector float)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector float
+vec_mergel(__vector float __a, __vector float __b) {
+ return (__vector float)(__a[2], __b[2], __a[3], __b[3]);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_mergel(vector double __a, vector double __b) {
- return (vector double)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector double
+vec_mergel(__vector double __a, __vector double __b) {
+ return (__vector double)(__a[1], __b[1]);
}
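A sketch of how the merge pair is typically used together: vec_mergeh interleaves the high-half lanes of its operands and vec_mergel the low-half lanes, so the two calls zip two vectors completely.
#include <vecintrin.h>
/* Hypothetical example: zip a = (a0,a1,a2,a3) and b = (b0,b1,b2,b3)
   into hi = (a0,b0,a1,b1) and lo = (a2,b2,a3,b3). */
static void zip_ints(__vector signed int a, __vector signed int b,
                     __vector signed int *hi, __vector signed int *lo) {
  *hi = vec_mergeh(a, b);
  *lo = vec_mergel(a, b);
}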
/*-- vec_pack ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_pack(vector signed short __a, vector signed short __b) {
- vector signed char __ac = (vector signed char)__a;
- vector signed char __bc = (vector signed char)__b;
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_pack(__vector signed short __a, __vector signed short __b) {
+ __vector signed char __ac = (__vector signed char)__a;
+ __vector signed char __bc = (__vector signed char)__b;
+ return (__vector signed char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_pack(vector bool short __a, vector bool short __b) {
- vector bool char __ac = (vector bool char)__a;
- vector bool char __bc = (vector bool char)__b;
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_pack(__vector __bool short __a, __vector __bool short __b) {
+ __vector __bool char __ac = (__vector __bool char)__a;
+ __vector __bool char __bc = (__vector __bool char)__b;
+ return (__vector __bool char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
- vector unsigned char __bc = (vector unsigned char)__b;
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_pack(__vector unsigned short __a, __vector unsigned short __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
+ __vector unsigned char __bc = (__vector unsigned char)__b;
+ return (__vector unsigned char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_pack(vector signed int __a, vector signed int __b) {
- vector signed short __ac = (vector signed short)__a;
- vector signed short __bc = (vector signed short)__b;
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_pack(__vector signed int __a, __vector signed int __b) {
+ __vector signed short __ac = (__vector signed short)__a;
+ __vector signed short __bc = (__vector signed short)__b;
+ return (__vector signed short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_pack(vector bool int __a, vector bool int __b) {
- vector bool short __ac = (vector bool short)__a;
- vector bool short __bc = (vector bool short)__b;
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_pack(__vector __bool int __a, __vector __bool int __b) {
+ __vector __bool short __ac = (__vector __bool short)__a;
+ __vector __bool short __bc = (__vector __bool short)__b;
+ return (__vector __bool short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
- vector unsigned short __bc = (vector unsigned short)__b;
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_pack(__vector unsigned int __a, __vector unsigned int __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
+ __vector unsigned short __bc = (__vector unsigned short)__b;
+ return (__vector unsigned short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_pack(vector signed long long __a, vector signed long long __b) {
- vector signed int __ac = (vector signed int)__a;
- vector signed int __bc = (vector signed int)__b;
- return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector signed int
+vec_pack(__vector signed long long __a, __vector signed long long __b) {
+ __vector signed int __ac = (__vector signed int)__a;
+ __vector signed int __bc = (__vector signed int)__b;
+ return (__vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_pack(vector bool long long __a, vector bool long long __b) {
- vector bool int __ac = (vector bool int)__a;
- vector bool int __bc = (vector bool int)__b;
- return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_pack(__vector __bool long long __a, __vector __bool long long __b) {
+ __vector __bool int __ac = (__vector __bool int)__a;
+ __vector __bool int __bc = (__vector __bool int)__b;
+ return (__vector __bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
- vector unsigned int __bc = (vector unsigned int)__b;
- return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_pack(__vector unsigned long long __a, __vector unsigned long long __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
+ __vector unsigned int __bc = (__vector unsigned int)__b;
+ return (__vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
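An illustrative sketch of the plain pack: as the element selection above shows, vec_pack keeps only the low half of each element's bits (modular truncation) and concatenates the narrowed __a followed by the narrowed __b.
#include <vecintrin.h>
/* Hypothetical example: narrow two 4 x int vectors into one 8 x short
   vector; values outside the short range wrap modulo 2^16. */
static __vector signed short pack_ints(__vector signed int a,
                                       __vector signed int b) {
  return vec_pack(a, b);
}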
/*-- vec_packs --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_packs(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_packs(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vpksh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vpklsh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_packs(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_packs(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vpksf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vpklsf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_packs(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_packs(__vector signed long long __a, __vector signed long long __b) {
return __builtin_s390_vpksg(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vpklsg(__a, __b);
}
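By contrast, vec_packs saturates instead of truncating (and vec_packsu below additionally clamps to the unsigned range); a sketch of the signed case, assuming the same target flags as above:
#include <vecintrin.h>
/* Hypothetical example: with saturation, an element holding 100000
   becomes 32767 (SHRT_MAX) instead of wrapping as vec_pack would. */
static __vector signed short pack_saturating(__vector signed int a,
                                             __vector signed int b) {
  return vec_packs(a, b);
}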
/*-- vec_packs_cc -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector signed char
+vec_packs_cc(__vector signed short __a, __vector signed short __b, int *__cc) {
return __builtin_s390_vpkshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs_cc(__vector unsigned short __a, __vector unsigned short __b,
+ int *__cc) {
return __builtin_s390_vpklshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector signed short
+vec_packs_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
return __builtin_s390_vpksfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
return __builtin_s390_vpklsfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_packs_cc(vector signed long long __a, vector signed long long __b,
+static inline __ATTRS_o_ai __vector signed int
+vec_packs_cc(__vector signed long long __a, __vector signed long long __b,
int *__cc) {
return __builtin_s390_vpksgs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs_cc(__vector unsigned long long __a, __vector unsigned long long __b,
int *__cc) {
return __builtin_s390_vpklsgs(__a, __b, __cc);
}
/*-- vec_packsu -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector signed short __a, vector signed short __b) {
- const vector signed short __zero = (vector signed short)0;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector signed short __a, __vector signed short __b) {
+ const __vector signed short __zero = (__vector signed short)0;
return __builtin_s390_vpklsh(
- (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
- (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+ (__vector unsigned short)(__a >= __zero) & (__vector unsigned short)__a,
+ (__vector unsigned short)(__b >= __zero) & (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vpklsh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector signed int __a, vector signed int __b) {
- const vector signed int __zero = (vector signed int)0;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector signed int __a, __vector signed int __b) {
+ const __vector signed int __zero = (__vector signed int)0;
return __builtin_s390_vpklsf(
- (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
- (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+ (__vector unsigned int)(__a >= __zero) & (__vector unsigned int)__a,
+ (__vector unsigned int)(__b >= __zero) & (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vpklsf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector signed long long __a, vector signed long long __b) {
- const vector signed long long __zero = (vector signed long long)0;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector signed long long __a, __vector signed long long __b) {
+ const __vector signed long long __zero = (__vector signed long long)0;
return __builtin_s390_vpklsg(
- (vector unsigned long long)(__a >= __zero) &
- (vector unsigned long long)__a,
- (vector unsigned long long)(__b >= __zero) &
- (vector unsigned long long)__b);
+ (__vector unsigned long long)(__a >= __zero) &
+ (__vector unsigned long long)__a,
+ (__vector unsigned long long)(__b >= __zero) &
+ (__vector unsigned long long)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vpklsg(__a, __b);
}
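
  In the signed-input vec_packsu overloads above, only an unsigned saturating
  pack builtin is used, so negative lanes are clamped to zero first: the
  comparison __a >= __zero produces an all-ones mask for non-negative lanes,
  and AND-ing that mask with __a zeroes the negative lanes before the unsigned
  pack. A small sketch, assuming -mzvector on z13 or later (names are
  illustrative):

    __vector signed short s = { -5, 300, 7, -1, 200, 0, 42, 256 };
    // Negative lanes become 0; lanes above 255 saturate to 255.
    __vector unsigned char u = vec_packsu(s, s);
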
/*-- vec_packsu_cc ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu_cc(__vector unsigned short __a, __vector unsigned short __b,
+ int *__cc) {
return __builtin_s390_vpklshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
return __builtin_s390_vpklsfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu_cc(__vector unsigned long long __a, __vector unsigned long long __b,
int *__cc) {
return __builtin_s390_vpklsgs(__a, __b, __cc);
}
/*-- vec_unpackh ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_unpackh(vector signed char __a) {
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackh(__vector signed char __a) {
return __builtin_s390_vuphb(__a);
}
-static inline __ATTRS_o_ai vector bool short
-vec_unpackh(vector bool char __a) {
- return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackh(__vector __bool char __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vuphb((__vector signed char)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackh(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackh(__vector unsigned char __a) {
return __builtin_s390_vuplhb(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_unpackh(vector signed short __a) {
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackh(__vector signed short __a) {
return __builtin_s390_vuphh(__a);
}
-static inline __ATTRS_o_ai vector bool int
-vec_unpackh(vector bool short __a) {
- return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackh(__vector __bool short __a) {
+ return (__vector __bool int)__builtin_s390_vuphh((__vector signed short)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackh(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackh(__vector unsigned short __a) {
return __builtin_s390_vuplhh(__a);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackh(vector signed int __a) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackh(__vector signed int __a) {
return __builtin_s390_vuphf(__a);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackh(vector bool int __a) {
- return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackh(__vector __bool int __a) {
+ return ((__vector __bool long long)
+ __builtin_s390_vuphf((__vector signed int)__a));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackh(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackh(__vector unsigned int __a) {
return __builtin_s390_vuplhf(__a);
}
/*-- vec_unpackl ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_unpackl(vector signed char __a) {
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackl(__vector signed char __a) {
return __builtin_s390_vuplb(__a);
}
-static inline __ATTRS_o_ai vector bool short
-vec_unpackl(vector bool char __a) {
- return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackl(__vector __bool char __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vuplb((__vector signed char)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackl(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackl(__vector unsigned char __a) {
return __builtin_s390_vupllb(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_unpackl(vector signed short __a) {
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackl(__vector signed short __a) {
return __builtin_s390_vuplhw(__a);
}
-static inline __ATTRS_o_ai vector bool int
-vec_unpackl(vector bool short __a) {
- return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackl(__vector __bool short __a) {
+ return ((__vector __bool int)
+ __builtin_s390_vuplhw((__vector signed short)__a));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackl(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackl(__vector unsigned short __a) {
return __builtin_s390_vupllh(__a);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackl(vector signed int __a) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackl(__vector signed int __a) {
return __builtin_s390_vuplf(__a);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackl(vector bool int __a) {
- return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackl(__vector __bool int __a) {
+ return ((__vector __bool long long)
+ __builtin_s390_vuplf((__vector signed int)__a));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackl(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackl(__vector unsigned int __a) {
return __builtin_s390_vupllf(__a);
}
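
  For the vec_unpackh and vec_unpackl overloads above, the signed variants use
  the sign-extending unpack builtins and the unsigned variants the
  zero-extending ("logical") ones; the __bool overloads go through the
  sign-extending builtin and cast back, so an all-ones mask lane widens to an
  all-ones lane and the result remains a valid mask. A small sketch, assuming
  -mzvector (names are illustrative):

    __vector signed char c = { -1, 2, -3, 4, -5, 6, -7, 8,
                               9, -10, 11, -12, 13, -14, 15, -16 };
    __vector signed short hi = vec_unpackh(c);  // one half of c, sign-extended
    __vector signed short lo = vec_unpackl(c);  // the other half, sign-extended
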
/*-- vec_cmpeq --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector bool char __a, vector bool char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector bool short __a, vector bool short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector float __a, vector float __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a == __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector double __a, vector double __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a == __b);
}
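
  vec_cmpeq above, like the vec_cmpge/vec_cmpgt/vec_cmple/vec_cmplt families
  that follow, compares element-wise: each result lane is all ones where the
  relation holds and all zeros where it does not, typed as the matching
  __vector __bool variant. A small sketch, assuming -mzvector (names are
  illustrative):

    __vector signed int a = { 1, 2, 3, 4 };
    __vector signed int b = { 1, 0, 3, 0 };
    __vector __bool int m = vec_cmpeq(a, b);                // { ~0, 0, ~0, 0 }
    __vector signed int keep = a & (__vector signed int)m;  // zero unequal lanes
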
/*-- vec_cmpge --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a >= __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector float __a, vector float __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a >= __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector double __a, vector double __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a >= __b);
}
/*-- vec_cmpgt --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a > __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector float __a, vector float __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a > __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector double __a, vector double __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a > __b);
}
/*-- vec_cmple --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a <= __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector float __a, vector float __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a <= __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector double __a, vector double __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a <= __b);
}
/*-- vec_cmplt --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a < __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector float __a, vector float __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a < __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector double __a, vector double __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a < __b);
}
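
  The compare sections above produce mask vectors; the vec_all_* predicates
  that follow instead reduce a comparison to a single int by reading the
  condition code set by the *s-suffixed builtins: __cc == 0 when the relation
  holds for every element and __cc == 3 when it holds for no element, which is
  why vec_all_eq tests for 0 while vec_all_ne tests for 3 on the same vceq*
  builtins. The overloads that mix a __bool operand with a signed or unsigned
  operand are the ones marked deprecated. A small sketch, assuming -mzvector
  (names are illustrative):

    __vector unsigned int x = { 1, 2, 3, 4 };
    __vector unsigned int y = { 1, 2, 3, 4 };
    if (vec_all_eq(x, y)) {
      /* every lane compared equal */
    }
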
/*-- vec_all_eq -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector signed char __b) {
+vec_all_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc == 0;
@@ -2533,56 +2601,56 @@ vec_all_eq(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector bool char __b) {
+vec_all_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector signed char __b) {
+vec_all_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector bool char __b) {
+vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector unsigned char __b) {
+vec_all_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector bool char __b) {
+vec_all_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector signed short __b) {
+vec_all_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 0;
@@ -2590,56 +2658,56 @@ vec_all_eq(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector bool short __b) {
+vec_all_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector signed short __b) {
+vec_all_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector bool short __b) {
+vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector unsigned short __b) {
+vec_all_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector bool short __b) {
+vec_all_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector signed int __b) {
+vec_all_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 0;
@@ -2647,56 +2715,56 @@ vec_all_eq(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector bool int __b) {
+vec_all_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector signed int __b) {
+vec_all_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector bool int __b) {
+vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector unsigned int __b) {
+vec_all_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector bool int __b) {
+vec_all_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector signed long long __b) {
+vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 0;
@@ -2704,57 +2772,57 @@ vec_all_eq(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector bool long long __b) {
+vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector signed long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector bool long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_eq(vector float __a, vector float __b) {
+vec_all_eq(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc == 0;
@@ -2762,7 +2830,7 @@ vec_all_eq(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_eq(vector double __a, vector double __b) {
+vec_all_eq(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc == 0;
@@ -2771,7 +2839,7 @@ vec_all_eq(vector double __a, vector double __b) {
/*-- vec_all_ne -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector signed char __b) {
+vec_all_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc == 3;
@@ -2779,56 +2847,56 @@ vec_all_ne(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector bool char __b) {
+vec_all_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector signed char __b) {
+vec_all_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector bool char __b) {
+vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector unsigned char __b) {
+vec_all_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector bool char __b) {
+vec_all_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector signed short __b) {
+vec_all_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 3;
@@ -2836,56 +2904,56 @@ vec_all_ne(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector bool short __b) {
+vec_all_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector signed short __b) {
+vec_all_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector bool short __b) {
+vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector unsigned short __b) {
+vec_all_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector bool short __b) {
+vec_all_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector signed int __b) {
+vec_all_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 3;
@@ -2893,56 +2961,56 @@ vec_all_ne(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector bool int __b) {
+vec_all_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector signed int __b) {
+vec_all_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector bool int __b) {
+vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector unsigned int __b) {
+vec_all_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector bool int __b) {
+vec_all_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector signed long long __b) {
+vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 3;
@@ -2950,57 +3018,57 @@ vec_all_ne(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector bool long long __b) {
+vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector signed long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector bool long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ne(vector float __a, vector float __b) {
+vec_all_ne(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc == 3;
@@ -3008,7 +3076,7 @@ vec_all_ne(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ne(vector double __a, vector double __b) {
+vec_all_ne(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc == 3;
@@ -3017,7 +3085,7 @@ vec_all_ne(vector double __a, vector double __b) {
/*-- vec_all_ge -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector signed char __b) {
+vec_all_ge(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc == 3;
@@ -3025,22 +3093,22 @@ vec_all_ge(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector bool char __b) {
+vec_all_ge(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector signed char __b) {
+vec_all_ge(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+vec_all_ge(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc == 3;
@@ -3048,31 +3116,31 @@ vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector bool char __b) {
+vec_all_ge(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector unsigned char __b) {
+vec_all_ge(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector bool char __b) {
+vec_all_ge(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector signed short __b) {
+vec_all_ge(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc == 3;
@@ -3080,22 +3148,22 @@ vec_all_ge(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector bool short __b) {
+vec_all_ge(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector signed short __b) {
+vec_all_ge(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+vec_all_ge(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc == 3;
@@ -3103,31 +3171,31 @@ vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector bool short __b) {
+vec_all_ge(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector unsigned short __b) {
+vec_all_ge(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector bool short __b) {
+vec_all_ge(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector signed int __b) {
+vec_all_ge(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc == 3;
@@ -3135,22 +3203,22 @@ vec_all_ge(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector bool int __b) {
+vec_all_ge(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector signed int __b) {
+vec_all_ge(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+vec_all_ge(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc == 3;
@@ -3158,31 +3226,31 @@ vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector bool int __b) {
+vec_all_ge(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector unsigned int __b) {
+vec_all_ge(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector bool int __b) {
+vec_all_ge(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector signed long long __b) {
+vec_all_ge(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc == 3;
@@ -3190,22 +3258,22 @@ vec_all_ge(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector bool long long __b) {
+vec_all_ge(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector signed long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc == 3;
@@ -3213,32 +3281,32 @@ vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+vec_all_ge(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector bool long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ge(vector float __a, vector float __b) {
+vec_all_ge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc == 0;
@@ -3246,7 +3314,7 @@ vec_all_ge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ge(vector double __a, vector double __b) {
+vec_all_ge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc == 0;
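/* Illustrative usage sketch -- not part of the upstream diff; the helper
 * name and the assumption of a SystemZ build with vector support are
 * hypothetical.  It only restates what the vec_all_ge overloads above
 * show: the operands are handed to the signed/unsigned "compare high"
 * builtins with the arguments swapped, and condition code 3 is tested,
 * so the call returns nonzero only when no element of __b exceeds the
 * corresponding element of __a. */
static inline int example_all_ge(__vector signed int __x,
                                 __vector signed int __y) {
  return vec_all_ge(__x, __y);  /* 1 iff every __x[i] >= __y[i] */
}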
@@ -3255,7 +3323,7 @@ vec_all_ge(vector double __a, vector double __b) {
/*-- vec_all_gt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector signed char __b) {
+vec_all_gt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc == 0;
@@ -3263,22 +3331,22 @@ vec_all_gt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector bool char __b) {
+vec_all_gt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector signed char __b) {
+vec_all_gt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+vec_all_gt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc == 0;
@@ -3286,31 +3354,31 @@ vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector bool char __b) {
+vec_all_gt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector unsigned char __b) {
+vec_all_gt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector bool char __b) {
+vec_all_gt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector signed short __b) {
+vec_all_gt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc == 0;
@@ -3318,22 +3386,22 @@ vec_all_gt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector bool short __b) {
+vec_all_gt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector signed short __b) {
+vec_all_gt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+vec_all_gt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc == 0;
@@ -3341,31 +3409,31 @@ vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector bool short __b) {
+vec_all_gt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector unsigned short __b) {
+vec_all_gt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector bool short __b) {
+vec_all_gt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector signed int __b) {
+vec_all_gt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc == 0;
@@ -3373,22 +3441,22 @@ vec_all_gt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector bool int __b) {
+vec_all_gt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector signed int __b) {
+vec_all_gt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+vec_all_gt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc == 0;
@@ -3396,31 +3464,31 @@ vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector bool int __b) {
+vec_all_gt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector unsigned int __b) {
+vec_all_gt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector bool int __b) {
+vec_all_gt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector signed long long __b) {
+vec_all_gt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc == 0;
@@ -3428,22 +3496,22 @@ vec_all_gt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector bool long long __b) {
+vec_all_gt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector signed long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc == 0;
@@ -3451,32 +3519,32 @@ vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+vec_all_gt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector bool long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_gt(vector float __a, vector float __b) {
+vec_all_gt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc == 0;
@@ -3484,7 +3552,7 @@ vec_all_gt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_gt(vector double __a, vector double __b) {
+vec_all_gt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc == 0;
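/* Illustrative note -- not part of the upstream diff.  The overloads
 * marked "This prototype is deprecated." are the ones that mix a __bool
 * vector with a signed or unsigned vector; their bodies above simply cast
 * the __bool operand, which suggests new code should compare like-typed
 * vectors and make that cast explicit.  A hypothetical sketch: */
static inline int example_gt_with_mask(__vector unsigned char __x,
                                       __vector __bool char __mask) {
  /* Explicit conversion instead of the deprecated mixed-type prototype. */
  return vec_all_gt(__x, (__vector unsigned char)__mask);
}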
@@ -3493,7 +3561,7 @@ vec_all_gt(vector double __a, vector double __b) {
/*-- vec_all_le -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector signed char __b) {
+vec_all_le(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc == 3;
@@ -3501,22 +3569,22 @@ vec_all_le(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector bool char __b) {
+vec_all_le(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector signed char __b) {
+vec_all_le(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+vec_all_le(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc == 3;
@@ -3524,31 +3592,31 @@ vec_all_le(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector bool char __b) {
+vec_all_le(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector unsigned char __b) {
+vec_all_le(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector bool char __b) {
+vec_all_le(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector signed short __b) {
+vec_all_le(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc == 3;
@@ -3556,22 +3624,22 @@ vec_all_le(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector bool short __b) {
+vec_all_le(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector signed short __b) {
+vec_all_le(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+vec_all_le(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc == 3;
@@ -3579,31 +3647,31 @@ vec_all_le(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector bool short __b) {
+vec_all_le(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector unsigned short __b) {
+vec_all_le(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector bool short __b) {
+vec_all_le(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector signed int __b) {
+vec_all_le(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc == 3;
@@ -3611,22 +3679,22 @@ vec_all_le(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector bool int __b) {
+vec_all_le(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector signed int __b) {
+vec_all_le(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+vec_all_le(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc == 3;
@@ -3634,31 +3702,31 @@ vec_all_le(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector bool int __b) {
+vec_all_le(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector unsigned int __b) {
+vec_all_le(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector bool int __b) {
+vec_all_le(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector signed long long __b) {
+vec_all_le(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc == 3;
@@ -3666,22 +3734,22 @@ vec_all_le(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector bool long long __b) {
+vec_all_le(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector signed long long __b) {
+vec_all_le(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_le(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc == 3;
@@ -3689,32 +3757,32 @@ vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+vec_all_le(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+vec_all_le(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector bool long long __b) {
+vec_all_le(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_le(vector float __a, vector float __b) {
+vec_all_le(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc == 0;
@@ -3722,7 +3790,7 @@ vec_all_le(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_le(vector double __a, vector double __b) {
+vec_all_le(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc == 0;
@@ -3731,7 +3799,7 @@ vec_all_le(vector double __a, vector double __b) {
/*-- vec_all_lt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector signed char __b) {
+vec_all_lt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc == 0;
@@ -3739,22 +3807,22 @@ vec_all_lt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector bool char __b) {
+vec_all_lt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector signed char __b) {
+vec_all_lt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+vec_all_lt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc == 0;
@@ -3762,31 +3830,31 @@ vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector bool char __b) {
+vec_all_lt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector unsigned char __b) {
+vec_all_lt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector bool char __b) {
+vec_all_lt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector signed short __b) {
+vec_all_lt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc == 0;
@@ -3794,22 +3862,22 @@ vec_all_lt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector bool short __b) {
+vec_all_lt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector signed short __b) {
+vec_all_lt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+vec_all_lt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc == 0;
@@ -3817,31 +3885,31 @@ vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector bool short __b) {
+vec_all_lt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector unsigned short __b) {
+vec_all_lt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector bool short __b) {
+vec_all_lt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector signed int __b) {
+vec_all_lt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc == 0;
@@ -3849,22 +3917,22 @@ vec_all_lt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector bool int __b) {
+vec_all_lt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector signed int __b) {
+vec_all_lt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+vec_all_lt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc == 0;
@@ -3872,31 +3940,31 @@ vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector bool int __b) {
+vec_all_lt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector unsigned int __b) {
+vec_all_lt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector bool int __b) {
+vec_all_lt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector signed long long __b) {
+vec_all_lt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc == 0;
@@ -3904,22 +3972,22 @@ vec_all_lt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector bool long long __b) {
+vec_all_lt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector signed long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc == 0;
@@ -3927,32 +3995,32 @@ vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+vec_all_lt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector bool long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_lt(vector float __a, vector float __b) {
+vec_all_lt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc == 0;
@@ -3960,7 +4028,7 @@ vec_all_lt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_lt(vector double __a, vector double __b) {
+vec_all_lt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc == 0;
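/* Illustrative sketch -- not part of the upstream diff.  As the bodies
 * above show, the integer vec_all_lt/vec_all_gt overloads reuse the
 * vch*s/vchl*s compare-high builtins with the operand order chosen to
 * match the predicate, while the floating-point overloads use the
 * dedicated vfchsbs/vfchdbs (>) and vfchesbs/vfchedbs (>=) builtins; the
 * __vector float forms are only compiled for __ARCH__ >= 12 targets,
 * whereas __vector double is always available.  Hypothetical example: */
static inline int example_all_lt(__vector double __x, __vector double __y) {
  return vec_all_lt(__x, __y);  /* 1 iff every __x[i] < __y[i] */
}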
@@ -3970,7 +4038,7 @@ vec_all_lt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nge(vector float __a, vector float __b) {
+vec_all_nge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc == 3;
@@ -3978,7 +4046,7 @@ vec_all_nge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nge(vector double __a, vector double __b) {
+vec_all_nge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc == 3;
@@ -3988,7 +4056,7 @@ vec_all_nge(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ngt(vector float __a, vector float __b) {
+vec_all_ngt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc == 3;
@@ -3996,7 +4064,7 @@ vec_all_ngt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ngt(vector double __a, vector double __b) {
+vec_all_ngt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc == 3;
@@ -4006,7 +4074,7 @@ vec_all_ngt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nle(vector float __a, vector float __b) {
+vec_all_nle(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc == 3;
@@ -4014,7 +4082,7 @@ vec_all_nle(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nle(vector double __a, vector double __b) {
+vec_all_nle(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc == 3;
@@ -4024,7 +4092,7 @@ vec_all_nle(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nlt(vector float __a, vector float __b) {
+vec_all_nlt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc == 3;
@@ -4032,7 +4100,7 @@ vec_all_nlt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nlt(vector double __a, vector double __b) {
+vec_all_nlt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc == 3;
@@ -4042,7 +4110,7 @@ vec_all_nlt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nan(vector float __a) {
+vec_all_nan(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc == 0;
@@ -4050,7 +4118,7 @@ vec_all_nan(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nan(vector double __a) {
+vec_all_nan(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc == 0;
@@ -4060,7 +4128,7 @@ vec_all_nan(vector double __a) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_numeric(vector float __a) {
+vec_all_numeric(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc == 3;
@@ -4068,7 +4136,7 @@ vec_all_numeric(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_all_numeric(vector double __a) {
+vec_all_numeric(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc == 3;
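/* Illustrative sketch -- not part of the upstream diff.  vec_all_nan and
 * vec_all_numeric run the same test-data-class builtin with mask 15;
 * the former returns nonzero when every element matches the test
 * (condition code 0), the latter when none do (condition code 3).
 * Hypothetical example: */
static inline int example_classify(__vector double __x) {
  /* At most one of the two can be nonzero; both are 0 when __x mixes
   * NaN and numeric elements. */
  return vec_all_nan(__x) || vec_all_numeric(__x);
}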
@@ -4077,7 +4145,7 @@ vec_all_numeric(vector double __a) {
/*-- vec_any_eq -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector signed char __b) {
+vec_any_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4085,56 +4153,56 @@ vec_any_eq(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector bool char __b) {
+vec_any_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector signed char __b) {
+vec_any_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector bool char __b) {
+vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector unsigned char __b) {
+vec_any_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector bool char __b) {
+vec_any_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector signed short __b) {
+vec_any_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4142,56 +4210,56 @@ vec_any_eq(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector bool short __b) {
+vec_any_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector signed short __b) {
+vec_any_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector bool short __b) {
+vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector unsigned short __b) {
+vec_any_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector bool short __b) {
+vec_any_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector signed int __b) {
+vec_any_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4199,56 +4267,56 @@ vec_any_eq(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector bool int __b) {
+vec_any_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector signed int __b) {
+vec_any_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector bool int __b) {
+vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector unsigned int __b) {
+vec_any_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector bool int __b) {
+vec_any_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector signed long long __b) {
+vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc <= 1;
@@ -4256,57 +4324,57 @@ vec_any_eq(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector bool long long __b) {
+vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector signed long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector bool long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_eq(vector float __a, vector float __b) {
+vec_any_eq(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4314,7 +4382,7 @@ vec_any_eq(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_eq(vector double __a, vector double __b) {
+vec_any_eq(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc <= 1;
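/* Illustrative sketch -- not part of the upstream diff.  The "any"
 * predicates reuse the same compare builtins as the "all" forms but
 * accept condition codes 0 or 1 (__cc <= 1), so they return nonzero as
 * soon as at least one element pair satisfies the comparison.
 * Hypothetical example: */
static inline int example_any_eq(__vector unsigned int __x,
                                 __vector unsigned int __y) {
  return vec_any_eq(__x, __y);  /* 1 iff some __x[i] == __y[i] */
}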
@@ -4323,7 +4391,7 @@ vec_any_eq(vector double __a, vector double __b) {
/*-- vec_any_ne -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector signed char __b) {
+vec_any_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc != 0;
@@ -4331,56 +4399,56 @@ vec_any_ne(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector bool char __b) {
+vec_any_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector signed char __b) {
+vec_any_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector bool char __b) {
+vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector unsigned char __b) {
+vec_any_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector bool char __b) {
+vec_any_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector signed short __b) {
+vec_any_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc != 0;
@@ -4388,56 +4456,56 @@ vec_any_ne(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector bool short __b) {
+vec_any_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector signed short __b) {
+vec_any_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector bool short __b) {
+vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector unsigned short __b) {
+vec_any_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector bool short __b) {
+vec_any_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector signed int __b) {
+vec_any_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc != 0;
@@ -4445,56 +4513,56 @@ vec_any_ne(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector bool int __b) {
+vec_any_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector signed int __b) {
+vec_any_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector bool int __b) {
+vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector unsigned int __b) {
+vec_any_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector bool int __b) {
+vec_any_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector signed long long __b) {
+vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc != 0;
@@ -4502,57 +4570,57 @@ vec_any_ne(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector bool long long __b) {
+vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector signed long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector bool long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ne(vector float __a, vector float __b) {
+vec_any_ne(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc != 0;
@@ -4560,7 +4628,7 @@ vec_any_ne(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ne(vector double __a, vector double __b) {
+vec_any_ne(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc != 0;
@@ -4569,7 +4637,7 @@ vec_any_ne(vector double __a, vector double __b) {
/*-- vec_any_ge -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector signed char __b) {
+vec_any_ge(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc != 0;
@@ -4577,22 +4645,22 @@ vec_any_ge(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector bool char __b) {
+vec_any_ge(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector signed char __b) {
+vec_any_ge(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+vec_any_ge(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc != 0;
@@ -4600,31 +4668,31 @@ vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector bool char __b) {
+vec_any_ge(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector unsigned char __b) {
+vec_any_ge(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector bool char __b) {
+vec_any_ge(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector signed short __b) {
+vec_any_ge(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc != 0;
@@ -4632,22 +4700,22 @@ vec_any_ge(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector bool short __b) {
+vec_any_ge(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector signed short __b) {
+vec_any_ge(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+vec_any_ge(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc != 0;
@@ -4655,31 +4723,31 @@ vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector bool short __b) {
+vec_any_ge(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector unsigned short __b) {
+vec_any_ge(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector bool short __b) {
+vec_any_ge(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector signed int __b) {
+vec_any_ge(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc != 0;
@@ -4687,22 +4755,22 @@ vec_any_ge(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector bool int __b) {
+vec_any_ge(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector signed int __b) {
+vec_any_ge(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+vec_any_ge(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc != 0;
@@ -4710,31 +4778,31 @@ vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector bool int __b) {
+vec_any_ge(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector unsigned int __b) {
+vec_any_ge(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector bool int __b) {
+vec_any_ge(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector signed long long __b) {
+vec_any_ge(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc != 0;
@@ -4742,22 +4810,22 @@ vec_any_ge(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector bool long long __b) {
+vec_any_ge(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector signed long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc != 0;
@@ -4765,32 +4833,32 @@ vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+vec_any_ge(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector bool long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ge(vector float __a, vector float __b) {
+vec_any_ge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4798,7 +4866,7 @@ vec_any_ge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ge(vector double __a, vector double __b) {
+vec_any_ge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4807,7 +4875,7 @@ vec_any_ge(vector double __a, vector double __b) {
/*-- vec_any_gt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector signed char __b) {
+vec_any_gt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4815,22 +4883,22 @@ vec_any_gt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector bool char __b) {
+vec_any_gt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector signed char __b) {
+vec_any_gt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+vec_any_gt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4838,31 +4906,31 @@ vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector bool char __b) {
+vec_any_gt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector unsigned char __b) {
+vec_any_gt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector bool char __b) {
+vec_any_gt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector signed short __b) {
+vec_any_gt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4870,22 +4938,22 @@ vec_any_gt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector bool short __b) {
+vec_any_gt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector signed short __b) {
+vec_any_gt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+vec_any_gt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4893,31 +4961,31 @@ vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector bool short __b) {
+vec_any_gt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector unsigned short __b) {
+vec_any_gt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector bool short __b) {
+vec_any_gt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector signed int __b) {
+vec_any_gt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4925,22 +4993,22 @@ vec_any_gt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector bool int __b) {
+vec_any_gt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector signed int __b) {
+vec_any_gt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+vec_any_gt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4948,31 +5016,31 @@ vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector bool int __b) {
+vec_any_gt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector unsigned int __b) {
+vec_any_gt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector bool int __b) {
+vec_any_gt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector signed long long __b) {
+vec_any_gt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc <= 1;
@@ -4980,22 +5048,22 @@ vec_any_gt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector bool long long __b) {
+vec_any_gt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector signed long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc <= 1;
@@ -5003,32 +5071,32 @@ vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+vec_any_gt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector bool long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_gt(vector float __a, vector float __b) {
+vec_any_gt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc <= 1;
@@ -5036,7 +5104,7 @@ vec_any_gt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_gt(vector double __a, vector double __b) {
+vec_any_gt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc <= 1;
@@ -5045,7 +5113,7 @@ vec_any_gt(vector double __a, vector double __b) {
/*-- vec_any_le -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector signed char __b) {
+vec_any_le(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc != 0;
@@ -5053,22 +5121,22 @@ vec_any_le(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector bool char __b) {
+vec_any_le(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector signed char __b) {
+vec_any_le(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+vec_any_le(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc != 0;
@@ -5076,31 +5144,31 @@ vec_any_le(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector bool char __b) {
+vec_any_le(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector unsigned char __b) {
+vec_any_le(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector bool char __b) {
+vec_any_le(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector signed short __b) {
+vec_any_le(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc != 0;
@@ -5108,22 +5176,22 @@ vec_any_le(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector bool short __b) {
+vec_any_le(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector signed short __b) {
+vec_any_le(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+vec_any_le(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc != 0;
@@ -5131,31 +5199,31 @@ vec_any_le(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector bool short __b) {
+vec_any_le(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector unsigned short __b) {
+vec_any_le(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector bool short __b) {
+vec_any_le(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector signed int __b) {
+vec_any_le(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc != 0;
@@ -5163,22 +5231,22 @@ vec_any_le(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector bool int __b) {
+vec_any_le(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector signed int __b) {
+vec_any_le(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+vec_any_le(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc != 0;
@@ -5186,31 +5254,31 @@ vec_any_le(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector bool int __b) {
+vec_any_le(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector unsigned int __b) {
+vec_any_le(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector bool int __b) {
+vec_any_le(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector signed long long __b) {
+vec_any_le(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc != 0;
@@ -5218,22 +5286,22 @@ vec_any_le(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector bool long long __b) {
+vec_any_le(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector signed long long __b) {
+vec_any_le(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_le(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc != 0;
@@ -5241,32 +5309,32 @@ vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+vec_any_le(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+vec_any_le(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector bool long long __b) {
+vec_any_le(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_le(vector float __a, vector float __b) {
+vec_any_le(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5274,7 +5342,7 @@ vec_any_le(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_le(vector double __a, vector double __b) {
+vec_any_le(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5283,7 +5351,7 @@ vec_any_le(vector double __a, vector double __b) {
/*-- vec_any_lt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector signed char __b) {
+vec_any_lt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5291,22 +5359,22 @@ vec_any_lt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector bool char __b) {
+vec_any_lt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector signed char __b) {
+vec_any_lt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+vec_any_lt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5314,31 +5382,31 @@ vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector bool char __b) {
+vec_any_lt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector unsigned char __b) {
+vec_any_lt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector bool char __b) {
+vec_any_lt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector signed short __b) {
+vec_any_lt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc <= 1;
@@ -5346,22 +5414,22 @@ vec_any_lt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector bool short __b) {
+vec_any_lt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector signed short __b) {
+vec_any_lt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+vec_any_lt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc <= 1;
@@ -5369,31 +5437,31 @@ vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector bool short __b) {
+vec_any_lt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector unsigned short __b) {
+vec_any_lt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector bool short __b) {
+vec_any_lt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector signed int __b) {
+vec_any_lt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc <= 1;
@@ -5401,22 +5469,22 @@ vec_any_lt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector bool int __b) {
+vec_any_lt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector signed int __b) {
+vec_any_lt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+vec_any_lt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc <= 1;
@@ -5424,31 +5492,31 @@ vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector bool int __b) {
+vec_any_lt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector unsigned int __b) {
+vec_any_lt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector bool int __b) {
+vec_any_lt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector signed long long __b) {
+vec_any_lt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc <= 1;
@@ -5456,22 +5524,22 @@ vec_any_lt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector bool long long __b) {
+vec_any_lt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector signed long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc <= 1;
@@ -5479,32 +5547,32 @@ vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+vec_any_lt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector bool long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_lt(vector float __a, vector float __b) {
+vec_any_lt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5512,7 +5580,7 @@ vec_any_lt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_lt(vector double __a, vector double __b) {
+vec_any_lt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5522,7 +5590,7 @@ vec_any_lt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nge(vector float __a, vector float __b) {
+vec_any_nge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc != 0;
@@ -5530,7 +5598,7 @@ vec_any_nge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nge(vector double __a, vector double __b) {
+vec_any_nge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc != 0;
@@ -5540,7 +5608,7 @@ vec_any_nge(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ngt(vector float __a, vector float __b) {
+vec_any_ngt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc != 0;
@@ -5548,7 +5616,7 @@ vec_any_ngt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ngt(vector double __a, vector double __b) {
+vec_any_ngt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc != 0;
@@ -5558,7 +5626,7 @@ vec_any_ngt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nle(vector float __a, vector float __b) {
+vec_any_nle(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc != 0;
@@ -5566,7 +5634,7 @@ vec_any_nle(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nle(vector double __a, vector double __b) {
+vec_any_nle(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc != 0;
@@ -5576,7 +5644,7 @@ vec_any_nle(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nlt(vector float __a, vector float __b) {
+vec_any_nlt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc != 0;
@@ -5584,7 +5652,7 @@ vec_any_nlt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nlt(vector double __a, vector double __b) {
+vec_any_nlt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc != 0;
@@ -5594,7 +5662,7 @@ vec_any_nlt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nan(vector float __a) {
+vec_any_nan(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc != 3;
@@ -5602,7 +5670,7 @@ vec_any_nan(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nan(vector double __a) {
+vec_any_nan(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc != 3;
@@ -5612,7 +5680,7 @@ vec_any_nan(vector double __a) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_numeric(vector float __a) {
+vec_any_numeric(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc != 0;
@@ -5620,7 +5688,7 @@ vec_any_numeric(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_any_numeric(vector double __a) {
+vec_any_numeric(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc != 0;
@@ -5628,2389 +5696,2393 @@ vec_any_numeric(vector double __a) {
/*-- vec_andc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_andc(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_andc(__vector __bool char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector signed char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector bool char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector __bool char __a, __vector signed char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector unsigned char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector bool char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector __bool char __a, __vector unsigned char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool short
-vec_andc(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_andc(__vector __bool short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector signed short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector bool short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector __bool short __a, __vector signed short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector unsigned short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector bool short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector __bool short __a, __vector unsigned short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool int
-vec_andc(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_andc(__vector __bool int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector signed int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector bool int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector __bool int __a, __vector signed int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector unsigned int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector bool int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector __bool int __a, __vector unsigned int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_andc(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_andc(__vector __bool long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector signed long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector bool long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector __bool long long __a, __vector signed long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector __bool long long __a, __vector unsigned long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_andc(vector float __a, vector float __b) {
- return (vector float)((vector unsigned int)__a &
- ~(vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_andc(__vector float __a, __vector float __b) {
+ return (__vector float)((__vector unsigned int)__a &
+ ~(__vector unsigned int)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector bool long long __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector __bool long long __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector bool long long __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector __bool long long __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
/*-- vec_nor ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_nor(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_nor(__vector __bool char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector signed char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector bool char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector __bool char __a, __vector signed char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector bool char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector __bool char __a, __vector unsigned char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_nor(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_nor(__vector __bool short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector signed short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector bool short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector __bool short __a, __vector signed short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector bool short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector __bool short __a, __vector unsigned short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_nor(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_nor(__vector __bool int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector signed int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector bool int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector __bool int __a, __vector signed int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector bool int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector __bool int __a, __vector unsigned int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_nor(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nor(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector signed long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector bool long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector __bool long long __a, __vector signed long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector bool long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector __bool long long __a, __vector unsigned long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nor(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a |
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_nor(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a |
+ (__vector unsigned int)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector bool long long __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector __bool long long __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector bool long long __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector __bool long long __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
/*-- vec_orc ----------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_orc(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_orc(__vector __bool char __a, __vector __bool char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed char
-vec_orc(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_orc(__vector signed char __a, __vector signed char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_orc(__vector unsigned char __a, __vector unsigned char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool short
-vec_orc(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_orc(__vector __bool short __a, __vector __bool short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed short
-vec_orc(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_orc(__vector signed short __a, __vector signed short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_orc(__vector unsigned short __a, __vector unsigned short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool int
-vec_orc(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_orc(__vector __bool int __a, __vector __bool int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed int
-vec_orc(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_orc(__vector signed int __a, __vector signed int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_orc(__vector unsigned int __a, __vector unsigned int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_orc(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_orc(__vector __bool long long __a, __vector __bool long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_orc(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_orc(__vector signed long long __a, __vector signed long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_orc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector float
-vec_orc(vector float __a, vector float __b) {
- return (vector float)((vector unsigned int)__a |
- ~(vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_orc(__vector float __a, __vector float __b) {
+ return (__vector float)((__vector unsigned int)__a |
+ ~(__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_orc(vector double __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a |
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_orc(__vector double __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a |
+ ~(__vector unsigned long long)__b);
}
#endif
/*-- vec_nand ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_nand(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_nand(__vector __bool char __a, __vector __bool char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_nand(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nand(__vector signed char __a, __vector signed char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nand(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_nand(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_nand(__vector __bool short __a, __vector __bool short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_nand(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nand(__vector signed short __a, __vector signed short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nand(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_nand(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_nand(__vector __bool int __a, __vector __bool int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_nand(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nand(__vector signed int __a, __vector signed int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nand(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_nand(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nand(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_nand(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nand(__vector signed long long __a, __vector signed long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nand(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector float
-vec_nand(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a &
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_nand(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a &
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_nand(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a &
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nand(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a &
+ (__vector unsigned long long)__b);
}
#endif
/*-- vec_eqv ----------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_eqv(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_eqv(__vector __bool char __a, __vector __bool char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_eqv(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_eqv(__vector signed char __a, __vector signed char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_eqv(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_eqv(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_eqv(__vector __bool short __a, __vector __bool short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_eqv(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_eqv(__vector signed short __a, __vector signed short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_eqv(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_eqv(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_eqv(__vector __bool int __a, __vector __bool int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_eqv(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_eqv(__vector signed int __a, __vector signed int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_eqv(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_eqv(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_eqv(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_eqv(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_eqv(__vector signed long long __a, __vector signed long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_eqv(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector float
-vec_eqv(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a ^
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_eqv(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a ^
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_eqv(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a ^
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_eqv(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a ^
+ (__vector unsigned long long)__b);
}
#endif
/*-- vec_cntlz --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector signed char __a) {
- return __builtin_s390_vclzb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector signed char __a) {
+ return __builtin_s390_vclzb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector unsigned char __a) {
return __builtin_s390_vclzb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector signed short __a) {
- return __builtin_s390_vclzh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector signed short __a) {
+ return __builtin_s390_vclzh((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector unsigned short __a) {
return __builtin_s390_vclzh(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector signed int __a) {
- return __builtin_s390_vclzf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector signed int __a) {
+ return __builtin_s390_vclzf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector unsigned int __a) {
return __builtin_s390_vclzf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector signed long long __a) {
- return __builtin_s390_vclzg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector signed long long __a) {
+ return __builtin_s390_vclzg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector unsigned long long __a) {
return __builtin_s390_vclzg(__a);
}
/*-- vec_cnttz --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector signed char __a) {
- return __builtin_s390_vctzb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector signed char __a) {
+ return __builtin_s390_vctzb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector unsigned char __a) {
return __builtin_s390_vctzb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector signed short __a) {
- return __builtin_s390_vctzh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector signed short __a) {
+ return __builtin_s390_vctzh((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector unsigned short __a) {
return __builtin_s390_vctzh(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector signed int __a) {
- return __builtin_s390_vctzf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector signed int __a) {
+ return __builtin_s390_vctzf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector unsigned int __a) {
return __builtin_s390_vctzf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector signed long long __a) {
- return __builtin_s390_vctzg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector signed long long __a) {
+ return __builtin_s390_vctzg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector unsigned long long __a) {
return __builtin_s390_vctzg(__a);
}
/*-- vec_popcnt -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector signed char __a) {
- return __builtin_s390_vpopctb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector signed char __a) {
+ return __builtin_s390_vpopctb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector unsigned char __a) {
return __builtin_s390_vpopctb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector signed short __a) {
- return __builtin_s390_vpopcth((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector signed short __a) {
+ return __builtin_s390_vpopcth((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector unsigned short __a) {
return __builtin_s390_vpopcth(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector signed int __a) {
- return __builtin_s390_vpopctf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector signed int __a) {
+ return __builtin_s390_vpopctf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector unsigned int __a) {
return __builtin_s390_vpopctf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector signed long long __a) {
- return __builtin_s390_vpopctg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector signed long long __a) {
+ return __builtin_s390_vpopctg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector unsigned long long __a) {
return __builtin_s390_vpopctg(__a);
}
/*-- vec_rl -----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_rl(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_verllvb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_rl(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_verllvb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rl(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_verllvb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_rl(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_verllvh(
- (vector unsigned short)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_rl(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_verllvh(
+ (__vector unsigned short)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rl(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_verllvh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_rl(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_verllvf(
- (vector unsigned int)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_rl(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_verllvf(
+ (__vector unsigned int)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rl(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_verllvf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_verllvg(
- (vector unsigned long long)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_rl(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_verllvg(
+ (__vector unsigned long long)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_verllvg(__a, __b);
}
/*-- vec_rli ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_rli(vector signed char __a, unsigned long __b) {
- return (vector signed char)__builtin_s390_verllb(
- (vector unsigned char)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_rli(__vector signed char __a, unsigned long __b) {
+ return (__vector signed char)__builtin_s390_verllb(
+ (__vector unsigned char)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_rli(vector unsigned char __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rli(__vector unsigned char __a, unsigned long __b) {
return __builtin_s390_verllb(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_rli(vector signed short __a, unsigned long __b) {
- return (vector signed short)__builtin_s390_verllh(
- (vector unsigned short)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_rli(__vector signed short __a, unsigned long __b) {
+ return (__vector signed short)__builtin_s390_verllh(
+ (__vector unsigned short)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_rli(vector unsigned short __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rli(__vector unsigned short __a, unsigned long __b) {
return __builtin_s390_verllh(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_rli(vector signed int __a, unsigned long __b) {
- return (vector signed int)__builtin_s390_verllf(
- (vector unsigned int)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_rli(__vector signed int __a, unsigned long __b) {
+ return (__vector signed int)__builtin_s390_verllf(
+ (__vector unsigned int)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_rli(vector unsigned int __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rli(__vector unsigned int __a, unsigned long __b) {
return __builtin_s390_verllf(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_rli(vector signed long long __a, unsigned long __b) {
- return (vector signed long long)__builtin_s390_verllg(
- (vector unsigned long long)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_rli(__vector signed long long __a, unsigned long __b) {
+ return (__vector signed long long)__builtin_s390_verllg(
+ (__vector unsigned long long)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rli(vector unsigned long long __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rli(__vector unsigned long long __a, unsigned long __b) {
return __builtin_s390_verllg(__a, (int)__b);
}
/*-- vec_rl_mask ------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_rl_mask(vector signed char __a, vector unsigned char __b,
+extern __ATTRS_o __vector signed char
+vec_rl_mask(__vector signed char __a, __vector unsigned char __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned char
-vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+extern __ATTRS_o __vector unsigned char
+vec_rl_mask(__vector unsigned char __a, __vector unsigned char __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed short
-vec_rl_mask(vector signed short __a, vector unsigned short __b,
+extern __ATTRS_o __vector signed short
+vec_rl_mask(__vector signed short __a, __vector unsigned short __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned short
-vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+extern __ATTRS_o __vector unsigned short
+vec_rl_mask(__vector unsigned short __a, __vector unsigned short __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed int
-vec_rl_mask(vector signed int __a, vector unsigned int __b,
+extern __ATTRS_o __vector signed int
+vec_rl_mask(__vector signed int __a, __vector unsigned int __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned int
-vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+extern __ATTRS_o __vector unsigned int
+vec_rl_mask(__vector unsigned int __a, __vector unsigned int __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed long long
-vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+extern __ATTRS_o __vector signed long long
+vec_rl_mask(__vector signed long long __a, __vector unsigned long long __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned long long
-vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+extern __ATTRS_o __vector unsigned long long
+vec_rl_mask(__vector unsigned long long __a, __vector unsigned long long __b,
unsigned char __c) __constant(__c);
#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
__extension__ ({ \
- vector unsigned char __res; \
- vector unsigned char __x = (vector unsigned char)(X); \
- vector unsigned char __y = (vector unsigned char)(Y); \
+ __vector unsigned char __res; \
+ __vector unsigned char __x = (__vector unsigned char)(X); \
+ __vector unsigned char __y = (__vector unsigned char)(Y); \
switch (sizeof ((X)[0])) { \
- case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
- (vector unsigned char)__x, (vector unsigned char)__x, \
- (vector unsigned char)__y, (Z)); break; \
- case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
- (vector unsigned short)__x, (vector unsigned short)__x, \
- (vector unsigned short)__y, (Z)); break; \
- case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
- (vector unsigned int)__x, (vector unsigned int)__x, \
- (vector unsigned int)__y, (Z)); break; \
- default: __res = (vector unsigned char) __builtin_s390_verimg( \
- (vector unsigned long long)__x, (vector unsigned long long)__x, \
- (vector unsigned long long)__y, (Z)); break; \
+ case 1: __res = (__vector unsigned char) __builtin_s390_verimb( \
+ (__vector unsigned char)__x, (__vector unsigned char)__x, \
+ (__vector unsigned char)__y, (Z)); break; \
+ case 2: __res = (__vector unsigned char) __builtin_s390_verimh( \
+ (__vector unsigned short)__x, (__vector unsigned short)__x, \
+ (__vector unsigned short)__y, (Z)); break; \
+ case 4: __res = (__vector unsigned char) __builtin_s390_verimf( \
+ (__vector unsigned int)__x, (__vector unsigned int)__x, \
+ (__vector unsigned int)__y, (Z)); break; \
+ default: __res = (__vector unsigned char) __builtin_s390_verimg( \
+ (__vector unsigned long long)__x, (__vector unsigned long long)__x, \
+ (__vector unsigned long long)__y, (Z)); break; \
} __res; }))
/*-- vec_sll ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsl(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_slb ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vslb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vslb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vslb(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vslb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
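
/* Illustrative usage sketch for the vec_slb family above, assuming an s390x
 * target built with -mzvector; the helper name and the splatted-count idiom
 * are this sketch's assumptions, not part of the header. Combined with
 * vec_sll, vec_slb gives a whole-register left shift by an arbitrary bit
 * count: vec_slb is expected to consume the whole-byte part of the count and
 * vec_sll the remaining 0..7 bits. */
#include <vecintrin.h>

static inline __vector unsigned char
shift_left_128_sketch(__vector unsigned char __x, unsigned char __bits)
{
  /* Splat the bit count once and feed it to both intrinsics. */
  __vector unsigned char __count = vec_splats(__bits);
  return vec_sll(vec_slb(__x, __count), __count);
}
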
/*-- vec_sld ----------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_sld(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sld(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool char
-vec_sld(vector bool char __a, vector bool char __b, int __c)
+extern __ATTRS_o __vector __bool char
+vec_sld(__vector __bool char __a, __vector __bool char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned char
-vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sld(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed short
-vec_sld(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sld(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool short
-vec_sld(vector bool short __a, vector bool short __b, int __c)
+extern __ATTRS_o __vector __bool short
+vec_sld(__vector __bool short __a, __vector __bool short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned short
-vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sld(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed int
-vec_sld(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sld(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool int
-vec_sld(vector bool int __a, vector bool int __b, int __c)
+extern __ATTRS_o __vector __bool int
+vec_sld(__vector __bool int __a, __vector __bool int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned int
-vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sld(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed long long
-vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sld(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool long long
-vec_sld(vector bool long long __a, vector bool long long __b, int __c)
+extern __ATTRS_o __vector __bool long long
+vec_sld(__vector __bool long long __a, __vector __bool long long __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned long long
-vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sld(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 15);
#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_sld(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_sld(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 15);
#endif
-extern __ATTRS_o vector double
-vec_sld(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sld(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 15);
#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
- __builtin_s390_vsldb((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsldb((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
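
/* Illustrative sketch: vec_sld selects 16 bytes starting at a constant byte
 * offset from the concatenation of its operands, so passing the same vector
 * twice rotates it by whole bytes. The helper name is hypothetical; the
 * offset must be a compile-time constant in 0..15, per the __constant_range
 * annotations above. */
#include <vecintrin.h>

static inline __vector unsigned char
rotate_bytes_left_3_sketch(__vector unsigned char __x)
{
  return vec_sld(__x, __x, 3);  /* bytes 3..18 of (__x:__x) == rotate left by 3 */
}
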
/*-- vec_sldw ---------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_sldw(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sldw(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned char
-vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sldw(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed short
-vec_sldw(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sldw(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned short
-vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sldw(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed int
-vec_sldw(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sldw(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned int
-vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sldw(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed long long
-vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sldw(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned long long
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sldw(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_sldw(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sldw(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 3);
#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
- __builtin_s390_vsldb((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z) * 4))
+ __builtin_s390_vsldb((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z) * 4))
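
/* Illustrative sketch: vec_sldw is the word-granular form of vec_sld, with
 * the constant offset counted in 4-byte words (0..3) and expanded to vsldb's
 * byte offset via the (Z) * 4 in the macro above. The helper name is
 * hypothetical. */
#include <vecintrin.h>

static inline __vector unsigned int
window_at_word_1_sketch(__vector unsigned int __hi, __vector unsigned int __lo)
{
  return vec_sldw(__hi, __lo, 1);  /* 16 bytes starting 4 bytes into (__hi:__lo) */
}
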
/*-- vec_sldb ---------------------------------------------------------------*/
#if __ARCH__ >= 13
-extern __ATTRS_o vector signed char
-vec_sldb(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sldb(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned char
-vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sldb(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed short
-vec_sldb(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sldb(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned short
-vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sldb(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed int
-vec_sldb(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sldb(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned int
-vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sldb(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed long long
-vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sldb(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned long long
-vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sldb(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector float
-vec_sldb(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_sldb(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector double
-vec_sldb(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sldb(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 7);
#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
- __builtin_s390_vsld((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsld((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
#endif
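
/* Illustrative sketch for the arch13-only family above: as I read it,
 * vec_sldb keeps the leftmost 128 bits of the operand pair shifted left by a
 * constant bit count in 0..7. The helper name is hypothetical, and the
 * snippet is guarded the same way as the declarations it exercises. */
#include <vecintrin.h>

#if __ARCH__ >= 13
static inline __vector unsigned char
shift_pair_left_5_bits_sketch(__vector unsigned char __a,
                              __vector unsigned char __b)
{
  return vec_sldb(__a, __b, 5);
}
#endif
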
/*-- vec_sral ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsra(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
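
/* Illustrative sketch of the non-deprecated vec_sral form: the whole 128-bit
 * value is shifted right arithmetically by a small bit count (0..7) carried
 * in a splatted byte vector. The helper name and the use of vec_splats are
 * this sketch's assumptions. */
#include <vecintrin.h>

static inline __vector signed int
sra_three_bits_sketch(__vector signed int __x)
{
  __vector unsigned char __count = vec_splats((unsigned char)3);
  return vec_sral(__x, __count);
}
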
/*-- vec_srab ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrab(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrab(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
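
/* Illustrative sketch: pairing vec_srab with vec_sral mirrors the usual
 * byte-plus-bit shift idiom, giving an arithmetic right shift of the whole
 * register by an arbitrary bit count. Which part of the splatted count each
 * intrinsic consumes is my assumption here, as is the helper name. */
#include <vecintrin.h>

static inline __vector signed long long
arith_shift_right_128_sketch(__vector signed long long __x, unsigned char __bits)
{
  __vector unsigned char __count = vec_splats(__bits);
  return vec_sral(vec_srab(__x, (__vector unsigned long long)__count), __count);
}
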
/*-- vec_srl ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrl(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
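
/* Illustrative sketch of the non-deprecated vec_srl form: a logical right
 * shift of the full 128-bit value by one bit. The helper name and the use of
 * vec_splats for the count are assumptions of the sketch. */
#include <vecintrin.h>

static inline __vector unsigned int
srl_one_bit_sketch(__vector unsigned int __x)
{
  return vec_srl(__x, vec_splats((unsigned char)1));
}
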
/*-- vec_srb ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrlb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrlb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
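Illustrative usage sketch for the vec_srb overloads above (not part of the diff; same assumptions as before: s390x, -mzvector, -march=z13 or newer, <vecintrin.h>, invented names):

#include <vecintrin.h>

__vector unsigned char
shift_right_bytes(__vector unsigned char x, __vector unsigned char count) {
  /* Shifts the full 128-bit value right; the byte-granular shift amount is
     taken from the second operand as defined for VSRLB. */
  return vec_srb(x, count);
}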
/*-- vec_srdb ---------------------------------------------------------------*/
#if __ARCH__ >= 13
-extern __ATTRS_o vector signed char
-vec_srdb(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_srdb(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned char
-vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_srdb(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed short
-vec_srdb(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_srdb(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned short
-vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_srdb(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed int
-vec_srdb(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_srdb(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned int
-vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_srdb(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed long long
-vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_srdb(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned long long
-vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_srdb(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector float
-vec_srdb(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_srdb(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector double
-vec_srdb(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_srdb(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 7);
#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
- __builtin_s390_vsrd((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsrd((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
#endif
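Illustrative usage sketch for vec_srdb (not part of the diff; it assumes a -march=arch13 (z15) or newer target with -mzvector and <vecintrin.h>; the helper name is invented):

#include <vecintrin.h>

__vector unsigned char
shift_right_double(__vector unsigned char hi, __vector unsigned char lo) {
  /* The third argument must be a compile-time constant in the range 0..7;
     per VSRD, the result comes from the concatenation of the two operands
     shifted right by that bit count. */
  return vec_srdb(hi, lo, 3);
}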
/*-- vec_abs ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_abs(vector signed char __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+static inline __ATTRS_o_ai __vector signed char
+vec_abs(__vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed char)0));
}
-static inline __ATTRS_o_ai vector signed short
-vec_abs(vector signed short __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+static inline __ATTRS_o_ai __vector signed short
+vec_abs(__vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed short)0));
}
-static inline __ATTRS_o_ai vector signed int
-vec_abs(vector signed int __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+static inline __ATTRS_o_ai __vector signed int
+vec_abs(__vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed int)0));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_abs(vector signed long long __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+static inline __ATTRS_o_ai __vector signed long long
+vec_abs(__vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed long long)0));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_abs(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_abs(__vector float __a) {
return __builtin_s390_vflpsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_abs(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_abs(__vector double __a) {
return __builtin_s390_vflpdb(__a);
}
/*-- vec_nabs ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nabs(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_nabs(__vector float __a) {
return __builtin_s390_vflnsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_nabs(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_nabs(__vector double __a) {
return __builtin_s390_vflndb(__a);
}
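Illustrative usage sketch for vec_abs / vec_nabs (not part of the diff; assumes s390x with -mzvector, -march=z13 or newer, <vecintrin.h>, invented names):

#include <vecintrin.h>

__vector signed int abs_i32(__vector signed int x) {
  return vec_abs(x);   /* expanded via vec_sel/vec_cmplt, as shown above */
}

__vector double neg_abs_f64(__vector double x) {
  return vec_nabs(x);  /* element-wise -|x| (VFLNDB) */
}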
/*-- vec_max ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector signed char __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector bool char __b) {
- vector signed char __bc = (vector signed char)__b;
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector __bool char __b) {
+ __vector signed char __bc = (__vector signed char)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector bool char __a, vector signed char __b) {
- vector signed char __ac = (vector signed char)__a;
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector __bool char __a, __vector signed char __b) {
+ __vector signed char __ac = (__vector signed char)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector unsigned char __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector bool char __b) {
- vector unsigned char __bc = (vector unsigned char)__b;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector __bool char __b) {
+ __vector unsigned char __bc = (__vector unsigned char)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector bool char __a, vector unsigned char __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector __bool char __a, __vector unsigned char __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector signed short __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector bool short __b) {
- vector signed short __bc = (vector signed short)__b;
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector __bool short __b) {
+ __vector signed short __bc = (__vector signed short)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector bool short __a, vector signed short __b) {
- vector signed short __ac = (vector signed short)__a;
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector __bool short __a, __vector signed short __b) {
+ __vector signed short __ac = (__vector signed short)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector unsigned short __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector bool short __b) {
- vector unsigned short __bc = (vector unsigned short)__b;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector __bool short __b) {
+ __vector unsigned short __bc = (__vector unsigned short)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector bool short __a, vector unsigned short __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector __bool short __a, __vector unsigned short __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector signed int __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector bool int __b) {
- vector signed int __bc = (vector signed int)__b;
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector __bool int __b) {
+ __vector signed int __bc = (__vector signed int)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector bool int __a, vector signed int __b) {
- vector signed int __ac = (vector signed int)__a;
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector __bool int __a, __vector signed int __b) {
+ __vector signed int __ac = (__vector signed int)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector unsigned int __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector bool int __b) {
- vector unsigned int __bc = (vector unsigned int)__b;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector __bool int __b) {
+ __vector unsigned int __bc = (__vector unsigned int)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector bool int __a, vector unsigned int __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector __bool int __a, __vector unsigned int __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector signed long long __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector bool long long __b) {
- vector signed long long __bc = (vector signed long long)__b;
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector __bool long long __b) {
+ __vector signed long long __bc = (__vector signed long long)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector bool long long __a, vector signed long long __b) {
- vector signed long long __ac = (vector signed long long)__a;
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector __bool long long __a, __vector signed long long __b) {
+ __vector signed long long __ac = (__vector signed long long)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector unsigned long long __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector bool long long __b) {
- vector unsigned long long __bc = (vector unsigned long long)__b;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector __bool long long __b) {
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector bool long long __a, vector unsigned long long __b) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector __bool long long __a, __vector unsigned long long __b) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_max(vector float __a, vector float __b) {
+static inline __ATTRS_o_ai __vector float
+vec_max(__vector float __a, __vector float __b) {
return __builtin_s390_vfmaxsb(__a, __b, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_max(vector double __a, vector double __b) {
+static inline __ATTRS_o_ai __vector double
+vec_max(__vector double __a, __vector double __b) {
#if __ARCH__ >= 12
return __builtin_s390_vfmaxdb(__a, __b, 0);
#else
@@ -8020,167 +8092,167 @@ vec_max(vector double __a, vector double __b) {
/*-- vec_min ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector signed char __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector bool char __b) {
- vector signed char __bc = (vector signed char)__b;
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector __bool char __b) {
+ __vector signed char __bc = (__vector signed char)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector bool char __a, vector signed char __b) {
- vector signed char __ac = (vector signed char)__a;
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector __bool char __a, __vector signed char __b) {
+ __vector signed char __ac = (__vector signed char)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector unsigned char __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector bool char __b) {
- vector unsigned char __bc = (vector unsigned char)__b;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector __bool char __b) {
+ __vector unsigned char __bc = (__vector unsigned char)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector bool char __a, vector unsigned char __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector __bool char __a, __vector unsigned char __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector signed short __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector bool short __b) {
- vector signed short __bc = (vector signed short)__b;
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector __bool short __b) {
+ __vector signed short __bc = (__vector signed short)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector bool short __a, vector signed short __b) {
- vector signed short __ac = (vector signed short)__a;
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector __bool short __a, __vector signed short __b) {
+ __vector signed short __ac = (__vector signed short)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector unsigned short __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector bool short __b) {
- vector unsigned short __bc = (vector unsigned short)__b;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector __bool short __b) {
+ __vector unsigned short __bc = (__vector unsigned short)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector bool short __a, vector unsigned short __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector __bool short __a, __vector unsigned short __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector signed int __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector bool int __b) {
- vector signed int __bc = (vector signed int)__b;
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector __bool int __b) {
+ __vector signed int __bc = (__vector signed int)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector bool int __a, vector signed int __b) {
- vector signed int __ac = (vector signed int)__a;
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector __bool int __a, __vector signed int __b) {
+ __vector signed int __ac = (__vector signed int)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector unsigned int __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector bool int __b) {
- vector unsigned int __bc = (vector unsigned int)__b;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector __bool int __b) {
+ __vector unsigned int __bc = (__vector unsigned int)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector bool int __a, vector unsigned int __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector __bool int __a, __vector unsigned int __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector signed long long __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector bool long long __b) {
- vector signed long long __bc = (vector signed long long)__b;
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector __bool long long __b) {
+ __vector signed long long __bc = (__vector signed long long)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector bool long long __a, vector signed long long __b) {
- vector signed long long __ac = (vector signed long long)__a;
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector __bool long long __a, __vector signed long long __b) {
+ __vector signed long long __ac = (__vector signed long long)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector unsigned long long __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector bool long long __b) {
- vector unsigned long long __bc = (vector unsigned long long)__b;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector __bool long long __b) {
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector bool long long __a, vector unsigned long long __b) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector __bool long long __a, __vector unsigned long long __b) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_min(vector float __a, vector float __b) {
+static inline __ATTRS_o_ai __vector float
+vec_min(__vector float __a, __vector float __b) {
return __builtin_s390_vfminsb(__a, __b, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_min(vector double __a, vector double __b) {
+static inline __ATTRS_o_ai __vector double
+vec_min(__vector double __a, __vector double __b) {
#if __ARCH__ >= 12
return __builtin_s390_vfmindb(__a, __b, 0);
#else
@@ -8190,439 +8262,440 @@ vec_min(vector double __a, vector double __b) {
/*-- vec_add_u128 -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaq(__a, __b);
}
/*-- vec_addc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_addc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_addc(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaccb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_addc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_addc(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vacch(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_addc(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vaccf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vaccg(__a, __b);
}
/*-- vec_addc_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaccq(__a, __b);
}
/*-- vec_adde_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vacq(__a, __b, __c);
}
/*-- vec_addec_u128 ---------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vacccq(__a, __b, __c);
}
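Illustrative usage sketch for the 128-bit add helpers above (not part of the diff; assumes s390x with -mzvector and <vecintrin.h>; the 256-bit helper and its names are invented for the example):

#include <vecintrin.h>

/* 256-bit addition built from the u128 helpers: each __vector unsigned char
   holds one 128-bit limb. */
void add_u256(__vector unsigned char a_hi, __vector unsigned char a_lo,
              __vector unsigned char b_hi, __vector unsigned char b_lo,
              __vector unsigned char *r_hi, __vector unsigned char *r_lo) {
  __vector unsigned char carry = vec_addc_u128(a_lo, b_lo); /* carry out of low limb */
  *r_lo = vec_add_u128(a_lo, b_lo);                         /* low 128-bit sum */
  *r_hi = vec_adde_u128(a_hi, b_hi, carry);                 /* high limb + carry in */
}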
/*-- vec_avg ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_avg(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_avg(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vavgb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_avg(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_avg(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vavgh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_avg(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_avg(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vavgf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_avg(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_avg(__vector signed long long __a, __vector signed long long __b) {
return __builtin_s390_vavgg(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_avg(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vavglb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_avg(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vavglh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_avg(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vavglf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_avg(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vavglg(__a, __b);
}
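Illustrative usage sketch for vec_avg (not part of the diff; same assumptions as the earlier sketches):

#include <vecintrin.h>

__vector unsigned char blend(__vector unsigned char a, __vector unsigned char b) {
  /* Rounded element-wise average, i.e. (a + b + 1) >> 1 per element (VAVGLB). */
  return vec_avg(a, b);
}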
/*-- vec_checksum -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned int
-vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_ai __vector unsigned int
+vec_checksum(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vcksm(__a, __b);
}
/*-- vec_gfmsum -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vgfmb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vgfmh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vgfmf(__a, __b);
}
/*-- vec_gfmsum_128 ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_128(__vector unsigned long long __a,
+ __vector unsigned long long __b) {
return __builtin_s390_vgfmg(__a, __b);
}
/*-- vec_gfmsum_accum -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum_accum(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vgfmab(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum_accum(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vgfmah(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum_accum(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vgfmaf(__a, __b, __c);
}
/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_accum_128(vector unsigned long long __a,
- vector unsigned long long __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_accum_128(__vector unsigned long long __a,
+ __vector unsigned long long __b,
+ __vector unsigned char __c) {
return __builtin_s390_vgfmag(__a, __b, __c);
}
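Illustrative usage sketch for vec_gfmsum (not part of the diff; assumes s390x with -mzvector and <vecintrin.h>; the function name is invented):

#include <vecintrin.h>

__vector unsigned short gf_multiply_sum(__vector unsigned char a,
                                        __vector unsigned char b) {
  /* Galois-field (carry-less) multiply of corresponding byte elements, with
     the double-width products combined into the wider result (VGFMB). */
  return vec_gfmsum(a, b);
}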
/*-- vec_mladd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector signed char __b,
- vector signed char __c) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector signed char __b,
+ __vector signed char __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector unsigned char __a, vector signed char __b,
- vector signed char __c) {
- return (vector signed char)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector unsigned char __a, __vector signed char __b,
+ __vector signed char __c) {
+ return (__vector signed char)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return __a * (vector signed char)__b + (vector signed char)__c;
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return __a * (__vector signed char)__b + (__vector signed char)__c;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mladd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mladd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector signed short __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector signed short __b,
+ __vector signed short __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector unsigned short __a, vector signed short __b,
- vector signed short __c) {
- return (vector signed short)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector unsigned short __a, __vector signed short __b,
+ __vector signed short __c) {
+ return (__vector signed short)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return __a * (vector signed short)__b + (vector signed short)__c;
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return __a * (__vector signed short)__b + (__vector signed short)__c;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mladd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector signed int __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector signed int __b,
+ __vector signed int __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector unsigned int __a, vector signed int __b,
- vector signed int __c) {
- return (vector signed int)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector unsigned int __a, __vector signed int __b,
+ __vector signed int __c) {
+ return (__vector signed int)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return __a * (vector signed int)__b + (vector signed int)__c;
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return __a * (__vector signed int)__b + (__vector signed int)__c;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mladd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mladd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __a * __b + __c;
}
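Illustrative usage sketch for vec_mladd (not part of the diff; same assumptions as the earlier sketches):

#include <vecintrin.h>

__vector signed int mul_low_add(__vector signed int a, __vector signed int b,
                                __vector signed int c) {
  /* Element-wise a * b + c, keeping the low half of each product; the
     overloads above expand this to plain vector arithmetic. */
  return vec_mladd(a, b, c);
}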
/*-- vec_mhadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mhadd(vector signed char __a, vector signed char __b,
- vector signed char __c) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mhadd(__vector signed char __a, __vector signed char __b,
+ __vector signed char __c) {
return __builtin_s390_vmahb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mhadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mhadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vmalhb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mhadd(vector signed short __a, vector signed short __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mhadd(__vector signed short __a, __vector signed short __b,
+ __vector signed short __c) {
return __builtin_s390_vmahh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mhadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mhadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmalhh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mhadd(vector signed int __a, vector signed int __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mhadd(__vector signed int __a, __vector signed int __b,
+ __vector signed int __c) {
return __builtin_s390_vmahf(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mhadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mhadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmalhf(__a, __b, __c);
}
/*-- vec_meadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_meadd(vector signed char __a, vector signed char __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_meadd(__vector signed char __a, __vector signed char __b,
+ __vector signed short __c) {
return __builtin_s390_vmaeb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_meadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_meadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmaleb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_meadd(vector signed short __a, vector signed short __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_meadd(__vector signed short __a, __vector signed short __b,
+ __vector signed int __c) {
return __builtin_s390_vmaeh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_meadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_meadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmaleh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_meadd(vector signed int __a, vector signed int __b,
- vector signed long long __c) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_meadd(__vector signed int __a, __vector signed int __b,
+ __vector signed long long __c) {
return __builtin_s390_vmaef(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_meadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_meadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vmalef(__a, __b, __c);
}
/*-- vec_moadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_moadd(vector signed char __a, vector signed char __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_moadd(__vector signed char __a, __vector signed char __b,
+ __vector signed short __c) {
return __builtin_s390_vmaob(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_moadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_moadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmalob(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_moadd(vector signed short __a, vector signed short __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_moadd(__vector signed short __a, __vector signed short __b,
+ __vector signed int __c) {
return __builtin_s390_vmaoh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_moadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_moadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmaloh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_moadd(vector signed int __a, vector signed int __b,
- vector signed long long __c) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_moadd(__vector signed int __a, __vector signed int __b,
+ __vector signed long long __c) {
return __builtin_s390_vmaof(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_moadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_moadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vmalof(__a, __b, __c);
}
/*-- vec_mulh ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mulh(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mulh(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmhb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mulh(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmlhb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mulh(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mulh(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmhh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulh(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmlhh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mulh(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mulh(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmhf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulh(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlhf(__a, __b);
}
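Illustrative usage sketch for vec_mulh (not part of the diff; same assumptions as the earlier sketches):

#include <vecintrin.h>

__vector unsigned int high_products(__vector unsigned int a,
                                    __vector unsigned int b) {
  /* Element-wise multiply, returning the high half of each double-width
     product (VMLHF). */
  return vec_mulh(a, b);
}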
/*-- vec_mule ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_mule(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mule(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmeb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mule(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmleb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mule(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mule(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmeh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mule(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmleh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mule(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_mule(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmef(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mule(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlef(__a, __b);
}
/*-- vec_mulo ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_mulo(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mulo(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmob(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulo(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmlob(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mulo(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mulo(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmoh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulo(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmloh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mulo(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_mulo(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmof(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mulo(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlof(__a, __b);
}
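Illustrative usage sketch for vec_mule / vec_mulo (not part of the diff; assumes s390x with -mzvector and <vecintrin.h>; the helper name is invented):

#include <vecintrin.h>

/* Full widening multiply: even-indexed and odd-indexed halfword elements are
   multiplied into separate word-sized result vectors. */
void widening_mul(__vector signed short a, __vector signed short b,
                  __vector signed int *even, __vector signed int *odd) {
  *even = vec_mule(a, b);   /* products of elements 0, 2, 4, 6 */
  *odd  = vec_mulo(a, b);   /* products of elements 1, 3, 5, 7 */
}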
@@ -8630,198 +8703,199 @@ vec_mulo(vector unsigned int __a, vector unsigned int __b) {
#if __ARCH__ >= 12
#define vec_msum_u128(X, Y, Z, W) \
- ((vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)));
+ ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)));
#endif
/*-- vec_sub_u128 -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsq(__a, __b);
}
/*-- vec_subc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_subc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_subc(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vscbib(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_subc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_subc(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vscbih(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_subc(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vscbif(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vscbig(__a, __b);
}
/*-- vec_subc_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vscbiq(__a, __b);
}
/*-- vec_sube_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vsbiq(__a, __b, __c);
}
/*-- vec_subec_u128 ---------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vsbcbiq(__a, __b, __c);
}
/*-- vec_sum2 ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vsumgh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vsumgf(__a, __b);
}
/*-- vec_sum_u128 -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vsumqf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vsumqg(__a, __b);
}
/*-- vec_sum4 ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsumb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vsumh(__a, __b);
}
/*-- vec_test_mask ----------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed char __a, vector unsigned char __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed char __a, __vector unsigned char __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+vec_test_mask(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vtm(__a, __b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed short __a, vector unsigned short __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed short __a, __vector unsigned short __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned short __a, __vector unsigned short __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed int __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed int __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned int __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed long long __a, __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned long long __a,
+ __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_test_mask(vector float __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector float __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
#endif
static inline __ATTRS_o_ai int
-vec_test_mask(vector double __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector double __a, __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
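
Editorial sketch (not part of the diff): every vec_test_mask overload lowers to the same byte-wise test-under-mask builtin and returns its condition code as an int, where 0 means none of the bits selected by the mask are set. Same build assumptions as the earlier sketch; the helper name is illustrative only:

#include <vecintrin.h>

/* True if any bit selected by __mask is set in __v; vec_test_mask returns
   the condition code of the underlying test-under-mask operation, and 0
   means all selected bits are zero. */
static int
any_masked_bit_set(__vector unsigned int __v, __vector unsigned int __mask) {
  return vec_test_mask(__v, __mask) != 0;
}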
/*-- vec_madd ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_madd(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_madd(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfmasb(__a, __b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_madd(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_madd(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfmadb(__a, __b, __c);
}
/*-- vec_msub ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_msub(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_msub(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfmssb(__a, __b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_msub(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_msub(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfmsdb(__a, __b, __c);
}
/*-- vec_nmadd ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmadd(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_nmadd(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfnmasb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector double
-vec_nmadd(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_nmadd(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfnmadb(__a, __b, __c);
}
#endif
@@ -8829,13 +8903,13 @@ vec_nmadd(vector double __a, vector double __b, vector double __c) {
/*-- vec_nmsub ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmsub(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_nmsub(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfnmssb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector double
-vec_nmsub(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_nmsub(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfnmsdb(__a, __b, __c);
}
#endif
@@ -8843,31 +8917,31 @@ vec_nmsub(vector double __a, vector double __b, vector double __c) {
/*-- vec_sqrt ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sqrt(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_sqrt(__vector float __a) {
return __builtin_s390_vfsqsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_sqrt(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_sqrt(__vector double __a) {
return __builtin_s390_vfsqdb(__a);
}
/*-- vec_ld2f ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_ai vector double
+static inline __ATTRS_ai __vector double
vec_ld2f(const float *__ptr) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
- return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, __vector double);
}
/*-- vec_st2f ---------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_ai void
-vec_st2f(vector double __a, float *__ptr) {
+vec_st2f(__vector double __a, float *__ptr) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
*(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
}
@@ -8875,59 +8949,63 @@ vec_st2f(vector double __a, float *__ptr) {
/*-- vec_ctd ----------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector signed long long __a, int __b)
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector signed long long __a, int __b)
__constant_range(__b, 0, 31) {
- vector double __conv = __builtin_convertvector(__a, vector double);
- __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ __vector double __conv = __builtin_convertvector(__a, __vector double);
+ __conv *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL - __b) << 52));
return __conv;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector unsigned long long __a, int __b)
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector unsigned long long __a, int __b)
__constant_range(__b, 0, 31) {
- vector double __conv = __builtin_convertvector(__a, vector double);
- __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ __vector double __conv = __builtin_convertvector(__a, __vector double);
+ __conv *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL - __b) << 52));
return __conv;
}
/*-- vec_ctsl ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_ctsl(vector double __a, int __b)
+static inline __ATTRS_o_ai __vector signed long long
+vec_ctsl(__vector double __a, int __b)
__constant_range(__b, 0, 31) {
- __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
- return __builtin_convertvector(__a, vector signed long long);
+ __a *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL + __b) << 52));
+ return __builtin_convertvector(__a, __vector signed long long);
}
/*-- vec_ctul ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_ctul(vector double __a, int __b)
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_ctul(__vector double __a, int __b)
__constant_range(__b, 0, 31) {
- __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
- return __builtin_convertvector(__a, vector unsigned long long);
+ __a *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL + __b) << 52));
+ return __builtin_convertvector(__a, __vector unsigned long long);
}
/*-- vec_doublee ------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector double
-vec_doublee(vector float __a) {
+static inline __ATTRS_ai __vector double
+vec_doublee(__vector float __a) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
__v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
- return __builtin_convertvector(__pack, vector double);
+ return __builtin_convertvector(__pack, __vector double);
}
#endif
/*-- vec_floate -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector float
-vec_floate(vector double __a) {
+static inline __ATTRS_ai __vector float
+vec_floate(__vector double __a) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
__v2f32 __pack = __builtin_convertvector(__a, __v2f32);
return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
@@ -8936,86 +9014,86 @@ vec_floate(vector double __a) {
/*-- vec_double -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector double
-vec_double(vector signed long long __a) {
- return __builtin_convertvector(__a, vector double);
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector signed long long __a) {
+ return __builtin_convertvector(__a, __vector double);
}
-static inline __ATTRS_o_ai vector double
-vec_double(vector unsigned long long __a) {
- return __builtin_convertvector(__a, vector double);
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector unsigned long long __a) {
+ return __builtin_convertvector(__a, __vector double);
}
/*-- vec_float --------------------------------------------------------------*/
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector float
-vec_float(vector signed int __a) {
- return __builtin_convertvector(__a, vector float);
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector signed int __a) {
+ return __builtin_convertvector(__a, __vector float);
}
-static inline __ATTRS_o_ai vector float
-vec_float(vector unsigned int __a) {
- return __builtin_convertvector(__a, vector float);
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector unsigned int __a) {
+ return __builtin_convertvector(__a, __vector float);
}
#endif
/*-- vec_signed -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
-vec_signed(vector double __a) {
- return __builtin_convertvector(__a, vector signed long long);
+static inline __ATTRS_o_ai __vector signed long long
+vec_signed(__vector double __a) {
+ return __builtin_convertvector(__a, __vector signed long long);
}
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector signed int
-vec_signed(vector float __a) {
- return __builtin_convertvector(__a, vector signed int);
+static inline __ATTRS_o_ai __vector signed int
+vec_signed(__vector float __a) {
+ return __builtin_convertvector(__a, __vector signed int);
}
#endif
/*-- vec_unsigned -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unsigned(vector double __a) {
- return __builtin_convertvector(__a, vector unsigned long long);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unsigned(__vector double __a) {
+ return __builtin_convertvector(__a, __vector unsigned long long);
}
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned int
-vec_unsigned(vector float __a) {
- return __builtin_convertvector(__a, vector unsigned int);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unsigned(__vector float __a) {
+ return __builtin_convertvector(__a, __vector unsigned int);
}
#endif
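
Editorial sketch (not part of the diff): vec_double and vec_signed are element-wise conversions built on __builtin_convertvector, so the float-to-integer direction truncates toward zero like a per-element C cast. Illustrative helper, same build assumptions as above:

#include <vecintrin.h>

/* Round-trip two 64-bit lanes through double and back; vec_signed truncates
   each element toward zero. */
static __vector signed long long
round_trip(__vector signed long long __a) {
  __vector double __d = vec_double(__a);
  return vec_signed(__d);
}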
/*-- vec_roundp -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundp(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundp(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 6);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundp(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundp(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 6);
}
/*-- vec_ceil ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_ceil(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_ceil(__vector float __a) {
// On this platform, vec_ceil never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 6);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_ceil(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_ceil(__vector double __a) {
// On this platform, vec_ceil never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 6);
}
@@ -9023,29 +9101,29 @@ vec_ceil(vector double __a) {
/*-- vec_roundm -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundm(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundm(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 7);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundm(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundm(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 7);
}
/*-- vec_floor --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_floor(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_floor(__vector float __a) {
// On this platform, vec_floor never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 7);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_floor(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_floor(__vector double __a) {
// On this platform, vec_floor never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 7);
}
@@ -9053,29 +9131,29 @@ vec_floor(vector double __a) {
/*-- vec_roundz -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundz(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundz(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 5);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundz(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundz(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 5);
}
/*-- vec_trunc --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_trunc(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_trunc(__vector float __a) {
// On this platform, vec_trunc never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 5);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_trunc(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_trunc(__vector double __a) {
// On this platform, vec_trunc never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 5);
}
@@ -9083,29 +9161,29 @@ vec_trunc(vector double __a) {
/*-- vec_roundc -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundc(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundc(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundc(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundc(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 0);
}
/*-- vec_rint ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_rint(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_rint(__vector float __a) {
// vec_rint may trigger the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 0, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_rint(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_rint(__vector double __a) {
// vec_rint may trigger the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 0, 0);
}
@@ -9113,45 +9191,45 @@ vec_rint(vector double __a) {
/*-- vec_round --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_round(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_round(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 4);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_round(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_round(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 4);
}
/*-- vec_fp_test_data_class -------------------------------------------------*/
#if __ARCH__ >= 12
-extern __ATTRS_o vector bool int
-vec_fp_test_data_class(vector float __a, int __b, int *__c)
+extern __ATTRS_o __vector __bool int
+vec_fp_test_data_class(__vector float __a, int __b, int *__c)
__constant_range(__b, 0, 4095);
-extern __ATTRS_o vector bool long long
-vec_fp_test_data_class(vector double __a, int __b, int *__c)
+extern __ATTRS_o __vector __bool long long
+vec_fp_test_data_class(__vector double __a, int __b, int *__c)
__constant_range(__b, 0, 4095);
#define vec_fp_test_data_class(X, Y, Z) \
((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
__extension__ ({ \
- vector unsigned char __res; \
- vector unsigned char __x = (vector unsigned char)(X); \
+ __vector unsigned char __res; \
+ __vector unsigned char __x = (__vector unsigned char)(X); \
int *__z = (Z); \
switch (sizeof ((X)[0])) { \
- case 4: __res = (vector unsigned char) \
- __builtin_s390_vftcisb((vector float)__x, (Y), __z); \
+ case 4: __res = (__vector unsigned char) \
+ __builtin_s390_vftcisb((__vector float)__x, (Y), __z); \
break; \
- default: __res = (vector unsigned char) \
- __builtin_s390_vftcidb((vector double)__x, (Y), __z); \
+ default: __res = (__vector unsigned char) \
+ __builtin_s390_vftcidb((__vector double)__x, (Y), __z); \
break; \
} __res; }))
#else
#define vec_fp_test_data_class(X, Y, Z) \
- ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+ ((__vector __bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
#endif
#define __VEC_CLASS_FP_ZERO_P (1 << 11)
@@ -9183,1527 +9261,1585 @@ vec_fp_test_data_class(vector double __a, int __b, int *__c)
/*-- vec_cp_until_zero ------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero(vector signed char __a) {
- return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero(__vector signed char __a) {
+ return ((__vector signed char)
+ __builtin_s390_vistrb((__vector unsigned char)__a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero(vector bool char __a) {
- return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero(__vector __bool char __a) {
+ return ((__vector __bool char)
+ __builtin_s390_vistrb((__vector unsigned char)__a));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero(__vector unsigned char __a) {
return __builtin_s390_vistrb(__a);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero(vector signed short __a) {
- return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero(__vector signed short __a) {
+ return ((__vector signed short)
+ __builtin_s390_vistrh((__vector unsigned short)__a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero(vector bool short __a) {
- return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero(__vector __bool short __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vistrh((__vector unsigned short)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero(__vector unsigned short __a) {
return __builtin_s390_vistrh(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero(vector signed int __a) {
- return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero(__vector signed int __a) {
+ return ((__vector signed int)
+ __builtin_s390_vistrf((__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero(vector bool int __a) {
- return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero(__vector __bool int __a) {
+ return ((__vector __bool int)
+ __builtin_s390_vistrf((__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero(__vector unsigned int __a) {
return __builtin_s390_vistrf(__a);
}
/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
- return (vector signed char)
- __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero_cc(__vector signed char __a, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
- return (vector bool char)
- __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero_cc(__vector __bool char __a, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero_cc(__vector unsigned char __a, int *__cc) {
return __builtin_s390_vistrbs(__a, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
- return (vector signed short)
- __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero_cc(__vector signed short __a, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
- return (vector bool short)
- __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero_cc(__vector __bool short __a, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero_cc(__vector unsigned short __a, int *__cc) {
return __builtin_s390_vistrhs(__a, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
- return (vector signed int)
- __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero_cc(__vector signed int __a, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
- return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
- __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero_cc(__vector __bool int __a, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero_cc(__vector unsigned int __a, int *__cc) {
return __builtin_s390_vistrfs(__a, __cc);
}
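
Editorial sketch (not part of the diff): vec_cp_until_zero copies elements up to and including the first zero element and clears everything after it; the _cc variants additionally report through *__cc whether a zero element was present. Illustrative helper, same build assumptions as above:

#include <vecintrin.h>

/* Keep the bytes of a NUL-terminated fragment held in __s: bytes after the
   first zero byte are forced to zero, earlier bytes are copied unchanged. */
static __vector unsigned char
isolate_string(__vector unsigned char __s) {
  return vec_cp_until_zero(__s);
}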
/*-- vec_cmpeq_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeeb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeeb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeeb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeeh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeeh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeeh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfeef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfeef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfeef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfeef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfeef(__a, __b);
}
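
Editorial sketch (not part of the diff): the vec_cmpeq_idx family returns the byte index of the first equal element, delivered in byte element 7 of the result vector (16 when nothing matches), mirroring the underlying find-element-equal instruction. Illustrative helper, same build assumptions as above; element subscripting relies on Clang's vector-extension support:

#include <vecintrin.h>

/* Byte index of the leftmost position where __a and __b compare equal,
   or 16 if no byte matches; the index is held in byte 7 of the result. */
static int
first_eq_byte_index(__vector unsigned char __a, __vector unsigned char __b) {
  return vec_cmpeq_idx(__a, __b)[7];
}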
/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector signed char)
- __builtin_s390_vfeebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfeebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfeebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfeebs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
- return (vector signed short)
- __builtin_s390_vfeehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx_cc(__vector signed short __a, __vector signed short __b,
+ int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfeehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfeehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector __bool short __a, __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfeehs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfeefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfeefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfeefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ int *__cc) {
return __builtin_s390_vfeefs(__a, __b, __cc);
}
/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeezb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeezh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfeezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfeezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfeezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfeezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfeezf(__a, __b);
}
/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
int *__cc) {
- return (vector signed char)
- __builtin_s390_vfeezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+ return (__vector signed char)
+ __builtin_s390_vfeezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfeezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfeezbs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector signed short)
- __builtin_s390_vfeezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+ return (__vector signed short)
+ __builtin_s390_vfeezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfeezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfeezhs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfeezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfeezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfeezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
return __builtin_s390_vfeezfs(__a, __b, __cc);
}
/*-- vec_cmpne_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeneb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeneb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeneb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeneb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeneb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeneh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeneh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeneh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeneh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeneh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfenef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfenef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfenef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfenef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfenef(__a, __b);
}
/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector signed char)
- __builtin_s390_vfenebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfenebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfenebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfenebs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
- return (vector signed short)
- __builtin_s390_vfenehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx_cc(__vector signed short __a, __vector signed short __b,
+ int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfenehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfenehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfenehs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfenefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfenefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfenefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ int *__cc) {
return __builtin_s390_vfenefs(__a, __b, __cc);
}
/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfenezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfenezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfenezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfenezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfenezb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfenezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfenezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfenezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfenezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfenezh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfenezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfenezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfenezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfenezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfenezf(__a, __b);
}
/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
int *__cc) {
- return (vector signed char)
- __builtin_s390_vfenezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+ return (__vector signed char)
+ __builtin_s390_vfenezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfenezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfenezbs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector signed short)
- __builtin_s390_vfenezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+ return (__vector signed short)
+ __builtin_s390_vfenezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfenezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfenezhs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfenezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfenezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfenezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
return __builtin_s390_vfenezfs(__a, __b, __cc);
}
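A hedged usage sketch for the index/condition-code forms above, written with the new __vector spelling; the helper and variable names are hypothetical, and it assumes <vecintrin.h> is included and the vector facility is enabled. As documented for the underlying vector-string instructions, the element index is reported in byte element 7 of the returned vector and the condition code through the pointer argument.

static inline int
first_mismatch_or_zero(__vector unsigned char a, __vector unsigned char b,
                       int *cc) {
  /* Index of the first byte where a and b differ, or of the first zero
     byte, whichever comes first; the condition code is stored in *cc. */
  __vector unsigned char idx = vec_cmpne_or_0_idx_cc(a, b, cc);
  return idx[7];
}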
/*-- vec_cmprg --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
}
/*-- vec_cmprg_cc -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
- return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
+ return (__vector __bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
- return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
+ return (__vector __bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
- return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
+ return (__vector __bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
}
/*-- vec_cmprg_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrcb(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrch(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrcf(__a, __b, __c, 0);
}
/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
}
/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrczb(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrczh(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrczf(__a, __b, __c, 0);
}
/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
}
/*-- vec_cmpnrg -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
}
/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
- return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
- return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
- return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
}
/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrcb(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrch(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrcf(__a, __b, __c, 8);
}
/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
}
/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrczb(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrczh(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrczf(__a, __b, __c, 8);
}
/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
}
/*-- vec_find_any_eq --------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector signed char __a, vector signed char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector bool char __a, vector bool char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector signed short __a, vector signed short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector bool short __a, vector bool short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector signed int __a, vector signed int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector bool int __a, vector bool int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 4);
}
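As a brief, hedged illustration of the vec_find_any_eq overloads above (names hypothetical): the result is a boolean mask whose elements are all ones wherever the corresponding element of the first operand equals any element of the second.

static inline __vector __bool char
mark_delimiters(__vector unsigned char text, __vector unsigned char delims) {
  /* all-ones in every byte position of text that matches one of delims */
  return vec_find_any_eq(text, delims);
}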
/*-- vec_find_any_eq_cc -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector signed char __a, __vector signed char __b,
+ int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
- return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+ return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 4, __cc);
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
- return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+ return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
- return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+ return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
}
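The _cc variants follow the same pattern but also hand back the instruction's condition code through the int pointer; a minimal, hypothetical sketch of the calling convention (interpreting the individual condition-code values is left to the caller):

static inline int
match_condition_code(__vector unsigned char text, __vector unsigned char set) {
  int cc;
  __vector __bool char mask = vec_find_any_eq_cc(text, set, &cc);
  (void)mask;  /* the mask can be used exactly as with vec_find_any_eq */
  return cc;
}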
/*-- vec_find_any_eq_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfaeb(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfaeh(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfaef(__a, __b, 0);
}
/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaebs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaehs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaefs(__a, __b, 0, __cc);
}
/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx(__vector signed char __a,
+ __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector __bool char __a,
+ __vector __bool char __b) {
+ return __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector unsigned char __a,
+ __vector unsigned char __b) {
return __builtin_s390_vfaezb(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx(__vector signed short __a,
+ __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector __bool short __a,
+ __vector __bool short __b) {
+ return __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector unsigned short __a,
+ __vector unsigned short __b) {
return __builtin_s390_vfaezh(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx(__vector signed int __a,
+ __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector __bool int __a,
+ __vector __bool int __b) {
+ return __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector unsigned int __a,
+ __vector unsigned int __b) {
return __builtin_s390_vfaezf(__a, __b, 0);
}
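The *_or_0_idx forms additionally stop at a zero element, which makes them suitable for NUL-terminated data. A hedged sketch with hypothetical names, assuming (as with the other index variants) that byte element 7 of the result holds the index and that 16 is reported when nothing is found in the 16-byte chunk:

static inline int
find_delim_or_end(__vector unsigned char chunk, __vector unsigned char delims) {
  /* Index of the first delimiter byte or of the terminating zero byte;
     16 if the chunk contains neither. */
  __vector unsigned char idx = vec_find_any_eq_or_0_idx(chunk, delims);
  return idx[7];
}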
/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
- int *__cc) {
- return __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
- vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
- int *__cc) {
- return __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
}
/*-- vec_find_any_ne --------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector signed char __a, vector signed char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector bool char __a, vector bool char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector signed short __a, vector signed short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector bool short __a, vector bool short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector signed int __a, vector signed int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector bool int __a, vector bool int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 12);
}
/*-- vec_find_any_ne_cc -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
- return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
+ return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
- return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
+ return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
- return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
+ return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
}
/*-- vec_find_any_ne_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfaeb(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfaeh(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfaef(__a, __b, 8);
}
/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfaebs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaehs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaefs(__a, __b, 8, __cc);
}
/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx(__vector signed char __a,
+ __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector __bool char __a,
+ __vector __bool char __b) {
+ return __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector unsigned char __a,
+ __vector unsigned char __b) {
return __builtin_s390_vfaezb(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx(__vector signed short __a,
+ __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector __bool short __a,
+ __vector __bool short __b) {
+ return __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector unsigned short __a,
+ __vector unsigned short __b) {
return __builtin_s390_vfaezh(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx(__vector signed int __a,
+ __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector __bool int __a,
+ __vector __bool int __b) {
+ return __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector unsigned int __a,
+ __vector unsigned int __b) {
return __builtin_s390_vfaezf(__a, __b, 8);
}
/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
- int *__cc) {
- return __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
- vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
- int *__cc) {
- return __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
}
@@ -10711,63 +10847,63 @@ vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed char __a, vector signed char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool char __a, vector bool char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsb(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed short __a, vector signed short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed short __a, __vector signed short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool short __a, vector bool short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsh(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed int __a, vector signed int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed int __a, __vector signed int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool int __a, vector bool int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsf(__a, __b, __c, __cc);
}
@@ -10777,72 +10913,72 @@ vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed char __a,
- vector signed char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed char __a,
+ __vector signed char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool char __a,
- vector bool char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool char __a,
+ __vector __bool char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned char __a,
- vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszb(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed short __a,
- vector signed short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed short __a,
+ __vector signed short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool short __a,
- vector bool short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool short __a,
+ __vector __bool short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned short __a,
- vector unsigned short __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned short __a,
+ __vector unsigned short __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszh(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed int __a,
- vector signed int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed int __a,
+ __vector signed int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool int __a,
- vector bool int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool int __a,
+ __vector __bool int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned int __a,
- vector unsigned int __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned int __a,
+ __vector unsigned int __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszf(__a, __b, __c, __cc);
}
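/*-- usage sketch (illustrative) --------------------------------------------*/
/* A minimal sketch of calling the string-search intrinsic above; the helper
   name and the way the condition code is consumed are assumptions for
   illustration, not a restatement of the exact vstrs condition-code
   semantics. */
static inline int
__example_search_cc(__vector unsigned char __haystack,
                    __vector unsigned char __needle,
                    __vector unsigned char __len) {
  int __cc;
  (void)vec_search_string_cc(__haystack, __needle, __len, &__cc);
  return __cc;  /* condition code distinguishes the match / no-match cases */
}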
diff --git a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
new file mode 100644
index 000000000000..b78123834b64
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
@@ -0,0 +1,1133 @@
+/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WASM_SIMD128_H
+#define __WASM_SIMD128_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+// User-facing type
+typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+
+// Internal types determined by clang builtin definitions
+typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef signed char __s8x16
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned char __u8x16
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned short __u16x8
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned int __u32x4
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned long long __u64x2
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
+ __min_vector_width__(128)))
+
+#define __REQUIRE_CONSTANT(e) \
+ _Static_assert(__builtin_constant_p(e), "Expected constant")
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
+ // UB-free unaligned access copied from xmmintrin.h
+ struct __wasm_v128_load_struct {
+ __v128_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((const struct __wasm_v128_load_struct *)__mem)->__v;
+}
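+
+// Usage sketch (illustrative only): the packed, __may_alias__ struct above is
+// what makes this load well-defined even when the pointer is not 16-byte
+// aligned, so a caller may read from an arbitrary byte offset in a buffer.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_load_unaligned(const uint8_t *__buf, int __offset) {
+  return wasm_v128_load(__buf + __offset); // any alignment is accepted
+}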
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v8x16_load_splat(const void *__mem) {
+ struct __wasm_v8x16_load_splat_struct {
+ uint8_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
+ __v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v16x8_load_splat(const void *__mem) {
+ struct __wasm_v16x8_load_splat_struct {
+ uint16_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v32x4_load_splat(const void *__mem) {
+ struct __wasm_v32x4_load_splat_struct {
+ uint32_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u32x4){__v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v64x2_load_splat(const void *__mem) {
+ struct __wasm_v64x2_load_splat_struct {
+ uint64_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u64x2){__v, __v};
+}
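+
+// Sketch (illustrative): each load_splat reads one scalar and broadcasts it
+// to every lane; the helper below fills all sixteen i8 lanes from one byte.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_broadcast_byte(const uint8_t *__p) {
+  return wasm_v8x16_load_splat(__p); // all 16 lanes receive *__p
+}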
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_load_8x8(const void *__mem) {
+ typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i16x8_load_8x8_struct {
+ __i8x8 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_load_8x8(const void *__mem) {
+ typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u16x8_load_8x8_struct {
+ __u8x8 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_load_16x4(const void *__mem) {
+ typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i32x4_load_16x4_struct {
+ __i16x4 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_load_16x4(const void *__mem) {
+ typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u32x4_load_16x4_struct {
+ __u16x4 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_load_32x2(const void *__mem) {
+ typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i64x2_load_32x2_struct {
+ __i32x2 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_load_32x2(const void *__mem) {
+ typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u64x2_load_32x2_struct {
+ __u32x2 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u64x2);
+}
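+
+// Sketch (illustrative): the load_NxM functions read a 64-bit half-vector and
+// widen each element via __builtin_convertvector, e.g. eight int8_t values
+// become eight sign-extended i16 lanes.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_load_and_sign_extend(const int8_t *__p) {
+  return wasm_i16x8_load_8x8(__p); // 8 x i8 -> 8 x i16
+}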
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
+ v128_t __a) {
+ // UB-free unaligned access copied from xmmintrin.h
+ struct __wasm_v128_store_struct {
+ __v128_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
+}
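+
+// Sketch (illustrative): a load/store round trip; both directions tolerate
+// unaligned pointers for the same reason (packed, __may_alias__ wrappers).
+static __inline__ void __DEFAULT_FN_ATTRS
+__example_copy_v128(void *__dst, const void *__src) {
+  wasm_v128_store(__dst, wasm_v128_load(__src));
+}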
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
+ int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
+ int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
+ int8_t __c14, int8_t __c15) {
+ return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,
+ __c6, __c7, __c8, __c9, __c10, __c11,
+ __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
+ int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
+ return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
+ int32_t __c1,
+ int32_t __c2,
+ int32_t __c3) {
+ return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
+ float __c1,
+ float __c2,
+ float __c3) {
+ return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
+ int64_t __c1) {
+ return (v128_t)(__i64x2){__c0, __c1};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
+ double __c1) {
+ return (v128_t)(__f64x2){__c0, __c1};
+}
+
+#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
+ __c9, __c10, __c11, __c12, __c13, __c14, __c15) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ __REQUIRE_CONSTANT(__c4); \
+ __REQUIRE_CONSTANT(__c5); \
+ __REQUIRE_CONSTANT(__c6); \
+ __REQUIRE_CONSTANT(__c7); \
+ __REQUIRE_CONSTANT(__c8); \
+ __REQUIRE_CONSTANT(__c9); \
+ __REQUIRE_CONSTANT(__c10); \
+ __REQUIRE_CONSTANT(__c11); \
+ __REQUIRE_CONSTANT(__c12); \
+ __REQUIRE_CONSTANT(__c13); \
+ __REQUIRE_CONSTANT(__c14); \
+ __REQUIRE_CONSTANT(__c15); \
+ (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
+ __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15}; \
+ })
+
+#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ __REQUIRE_CONSTANT(__c4); \
+ __REQUIRE_CONSTANT(__c5); \
+ __REQUIRE_CONSTANT(__c6); \
+ __REQUIRE_CONSTANT(__c7); \
+ (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7}; \
+ })
+
+#define wasm_i32x4_const(__c0, __c1, __c2, __c3) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ (v128_t)(__i32x4){__c0, __c1, __c2, __c3}; \
+ })
+
+#define wasm_f32x4_const(__c0, __c1, __c2, __c3) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ (v128_t)(__f32x4){__c0, __c1, __c2, __c3}; \
+ })
+
+#define wasm_i64x2_const(__c0, __c1) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ (v128_t)(__i64x2){__c0, __c1}; \
+ })
+
+#define wasm_f64x2_const(__c0, __c1) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ (v128_t)(__f64x2){__c0, __c1}; \
+ })
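+
+// Sketch (illustrative): the _const macros accept only compile-time constants
+// (enforced by __REQUIRE_CONSTANT), while the _make functions also accept
+// runtime values; the helper name below is an assumption for illustration.
+static __inline__ v128_t __DEFAULT_FN_ATTRS __example_const_vs_make(int __x) {
+  return __x ? wasm_i32x4_const(1, 2, 3, 4)                     // constants required
+             : wasm_i32x4_make(__x, __x + 1, __x + 2, __x + 3); // runtime values OK
+}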
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
+ return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i8x16_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
+
+#define wasm_u8x16_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+
+#define wasm_i8x16_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
+ return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i16x8_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
+
+#define wasm_u16x8_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+
+#define wasm_i16x8_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
+ return (v128_t)(__i32x4){__a, __a, __a, __a};
+}
+
+#define wasm_i32x4_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
+
+#define wasm_i32x4_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
+ return (v128_t)(__i64x2){__a, __a};
+}
+
+#define wasm_i64x2_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
+
+#define wasm_i64x2_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
+ return (v128_t)(__f32x4){__a, __a, __a, __a};
+}
+
+#define wasm_f32x4_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
+
+#define wasm_f32x4_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
+ return (v128_t)(__f64x2){__a, __a};
+}
+
+#define wasm_f64x2_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
+
+#define wasm_f64x2_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
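+
+// Sketch (illustrative): lane access combines splat, extract_lane and
+// replace_lane; extract_lane yields a scalar and requires a constant index.
+static __inline__ float __DEFAULT_FN_ATTRS
+__example_sum_low_f32_lanes(v128_t __v) {
+  return wasm_f32x4_extract_lane(__v, 0) + wasm_f32x4_extract_lane(__v, 1);
+}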
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a == (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a != (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a < (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a < (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a > (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a > (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a <= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a >= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a == (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a != (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a < (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a < (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a > (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a > (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a <= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a <= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a >= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a >= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a == (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a != (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a < (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a < (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a > (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a > (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a <= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a <= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a >= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a >= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a == (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a != (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a < (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a > (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a <= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a >= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a == (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a != (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a < (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a > (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a <= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a >= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
+ return ~__a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
+ v128_t __b) {
+ return __a & __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
+ v128_t __b) {
+ return __a | __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
+ v128_t __b) {
+ return __a ^ __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
+ v128_t __b) {
+ return __a & ~__b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
+ v128_t __b,
+ v128_t __mask) {
+ return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
+ (__i32x4)__mask);
+}
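+
+// Sketch (illustrative): bitselect takes bits from __a where the mask bits
+// are set and from __b where they are clear, so a comparison result can act
+// as a per-lane select mask.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_select_greater_i32(v128_t __a, v128_t __b) {
+  v128_t __mask = wasm_i32x4_gt(__a, __b); // all-ones lanes where __a > __b
+  return wasm_v128_bitselect(__a, __b, __mask);
+}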
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
+ return (v128_t)(-(__u8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i8x16)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__s8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a + (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a - (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
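+
+// Sketch (illustrative): the saturating forms clamp instead of wrapping; for
+// a signed lane already at 0x7f, add_saturate leaves it at 0x7f whereas the
+// plain add would wrap to 0x80.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_saturating_increment_i8(v128_t __v) {
+  return wasm_i8x16_add_saturate(__v, wasm_i8x16_splat(1));
+}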
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
+ return (v128_t)(-(__u16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i16x8)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a + (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a - (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a * (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
+ return (v128_t)(-(__u32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i32x4)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a + (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a - (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a * (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
+ return (v128_t)(-(__u64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i64x2((__i64x2)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i64x2((__i64x2)__a);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i64x2)__a << (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a + (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a - (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a * (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
+ return (v128_t)(-(__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
+ return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a + (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a - (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a * (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a / (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
+ return (v128_t)(-(__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
+ return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a + (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a - (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a * (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a / (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_i32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_u32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
+}
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7, __c8, __c9, __c10, __c11, __c12, __c13, \
+ __c14, __c15) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \
+ __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \
+ (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \
+ (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \
+ (__c7)*2 + 1))
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \
+ (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \
+ (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \
+ (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \
+ (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \
+ (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \
+ (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
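+
+// Sketch (illustrative): shuffle indices are compile-time constants; for the
+// 32-bit variant, indices 0..3 pick lanes from __a and 4..7 from __b, so
+// passing the same vector twice with 3, 2, 1, 0 reverses its lanes.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+__example_reverse_i32_lanes(v128_t __v) {
+  return wasm_v32x4_shuffle(__v, __v, 3, 2, 1, 0);
+}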
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
+ (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
+ (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+}
+
+// Undefine helper macros
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __WASM_SIMD128_H
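
The v16x8/v32x4/v64x2 shuffle macros above only rescale lane indices into byte indices for the single 8x16 shuffle builtin (each lane index c becomes bytes c*2.., c*4.. or c*8..). A minimal usage sketch, assuming a wasm32 target built with -msimd128 (the function name is illustrative, not part of the header):

    #include <wasm_simd128.h>

    // Interleave the low halves of two i32x4 vectors: result lanes {a0, b0, a1, b1}.
    // Lane indices 0..3 select from the first operand, 4..7 from the second;
    // wasm_v32x4_shuffle expands each index c into bytes 4*c .. 4*c+3.
    v128_t interleave_low_i32x4(v128_t a, v128_t b) {
      return wasm_v32x4_shuffle(a, b, 0, 4, 1, 5);
    }
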
diff --git a/contrib/llvm-project/clang/lib/Headers/x86intrin.h b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
index a8b36622d410..768d0e56ab05 100644
--- a/contrib/llvm-project/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
@@ -14,39 +14,48 @@
#include <immintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__3dNOW__)
#include <mm3dnow.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE4A__)
#include <ammintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FMA4__)
#include <fma4intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XOP__)
#include <xopintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__TBM__)
#include <tbmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LWP__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__LWP__)
#include <lwpintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLZERO__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLZERO__)
#include <clzerointrin.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
index 9b8de63f04d5..f4686691c7ed 100644
--- a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
@@ -2931,31 +2931,31 @@ _mm_movemask_ps(__m128 __a)
#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
-#define _MM_EXCEPT_INVALID (0x0001)
-#define _MM_EXCEPT_DENORM (0x0002)
-#define _MM_EXCEPT_DIV_ZERO (0x0004)
-#define _MM_EXCEPT_OVERFLOW (0x0008)
-#define _MM_EXCEPT_UNDERFLOW (0x0010)
-#define _MM_EXCEPT_INEXACT (0x0020)
-#define _MM_EXCEPT_MASK (0x003f)
-
-#define _MM_MASK_INVALID (0x0080)
-#define _MM_MASK_DENORM (0x0100)
-#define _MM_MASK_DIV_ZERO (0x0200)
-#define _MM_MASK_OVERFLOW (0x0400)
-#define _MM_MASK_UNDERFLOW (0x0800)
-#define _MM_MASK_INEXACT (0x1000)
-#define _MM_MASK_MASK (0x1f80)
-
-#define _MM_ROUND_NEAREST (0x0000)
-#define _MM_ROUND_DOWN (0x2000)
-#define _MM_ROUND_UP (0x4000)
-#define _MM_ROUND_TOWARD_ZERO (0x6000)
-#define _MM_ROUND_MASK (0x6000)
-
-#define _MM_FLUSH_ZERO_MASK (0x8000)
-#define _MM_FLUSH_ZERO_ON (0x8000)
-#define _MM_FLUSH_ZERO_OFF (0x0000)
+#define _MM_EXCEPT_INVALID (0x0001U)
+#define _MM_EXCEPT_DENORM (0x0002U)
+#define _MM_EXCEPT_DIV_ZERO (0x0004U)
+#define _MM_EXCEPT_OVERFLOW (0x0008U)
+#define _MM_EXCEPT_UNDERFLOW (0x0010U)
+#define _MM_EXCEPT_INEXACT (0x0020U)
+#define _MM_EXCEPT_MASK (0x003fU)
+
+#define _MM_MASK_INVALID (0x0080U)
+#define _MM_MASK_DENORM (0x0100U)
+#define _MM_MASK_DIV_ZERO (0x0200U)
+#define _MM_MASK_OVERFLOW (0x0400U)
+#define _MM_MASK_UNDERFLOW (0x0800U)
+#define _MM_MASK_INEXACT (0x1000U)
+#define _MM_MASK_MASK (0x1f80U)
+
+#define _MM_ROUND_NEAREST (0x0000U)
+#define _MM_ROUND_DOWN (0x2000U)
+#define _MM_ROUND_UP (0x4000U)
+#define _MM_ROUND_TOWARD_ZERO (0x6000U)
+#define _MM_ROUND_MASK (0x6000U)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000U)
+#define _MM_FLUSH_ZERO_ON (0x8000U)
+#define _MM_FLUSH_ZERO_OFF (0x0000U)
#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
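
Adding the U suffix keeps these control/status masks unsigned, matching the unsigned int interface of _mm_getcsr()/_mm_setcsr() and avoiding surprises when a mask is complemented. A short sketch of the intended read-modify-write pattern, assuming SSE is available:

    #include <xmmintrin.h>

    // Switch MXCSR to round-toward-zero while preserving all other bits.
    // With the U-suffixed macros, ~_MM_ROUND_MASK and the intermediate results
    // stay in unsigned arithmetic throughout.
    static void set_round_toward_zero(void) {
      unsigned int csr = _mm_getcsr();
      csr = (csr & ~_MM_ROUND_MASK) | _MM_ROUND_TOWARD_ZERO;
      _mm_setcsr(csr);
    }
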
diff --git a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
index ce6f9e2b13bd..1cbd14cd326c 100644
--- a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
+++ b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
@@ -11,6 +11,8 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentVisitor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp b/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
index c9dcb0f5377d..753bdf2ce21d 100644
--- a/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
@@ -10,6 +10,7 @@
#include "FileIndexRecord.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
diff --git a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
index 07a94f30c883..01cf559d7057 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
@@ -414,7 +414,7 @@ public:
auto visitSyntacticDesignatedInitExpr = [&](DesignatedInitExpr *E) -> bool {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator())
+ if (D.isFieldDesignator() && D.getField())
return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
Parent, ParentDC, SymbolRoleSet(),
{}, E);
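
The added D.getField() check guards against designators that carry no resolved FieldDecl, which can happen for ill-formed initializers kept around for error recovery; that scenario is an assumption drawn from the null check itself, illustrated with C99/C++20 designated initializers:

    struct Point { int x, y; };

    // Well-formed: the designator resolves to Point::x and can be indexed.
    Point p1 = {.x = 1};

    // Ill-formed: there is no member named 'z'. A DesignatedInitExpr retained
    // during recovery may then have a field designator with a null FieldDecl,
    // which is what the extra D.getField() check skips over.
    // Point p2 = {.z = 3};
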
diff --git a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
index c59b1372e399..2ba323e63575 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
@@ -80,7 +80,7 @@ public:
!MD->isSynthesizedAccessorStub();
}
-
+
void handleDeclarator(const DeclaratorDecl *D,
const NamedDecl *Parent = nullptr,
bool isIBType = false) {
@@ -90,6 +90,12 @@ public:
Parent->getLexicalDeclContext(),
/*isBase=*/false, isIBType);
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent);
+ auto IndexDefaultParmeterArgument = [&](const ParmVarDecl *Parm,
+ const NamedDecl *Parent) {
+ if (Parm->hasDefaultArg() && !Parm->hasUninstantiatedDefaultArg() &&
+ !Parm->hasUnparsedDefaultArg())
+ IndexCtx.indexBody(Parm->getDefaultArg(), Parent);
+ };
if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
auto *DC = Parm->getDeclContext();
@@ -106,7 +112,8 @@ public:
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (IndexCtx.shouldIndexParametersInDeclarations() ||
FD->isThisDeclarationADefinition()) {
- for (auto PI : FD->parameters()) {
+ for (const auto *PI : FD->parameters()) {
+ IndexDefaultParmeterArgument(PI, D);
IndexCtx.handleDecl(PI);
}
}
@@ -116,9 +123,7 @@ public:
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isThisDeclarationADefinition()) {
for (const auto *PV : FD->parameters()) {
- if (PV->hasDefaultArg() && !PV->hasUninstantiatedDefaultArg() &&
- !PV->hasUnparsedDefaultArg())
- IndexCtx.indexBody(PV->getDefaultArg(), D);
+ IndexDefaultParmeterArgument(PV, D);
}
}
}
@@ -760,6 +765,9 @@ bool IndexingContext::indexTopLevelDecl(const Decl *D) {
if (isa<ObjCMethodDecl>(D))
return true; // Wait for the objc container.
+ if (IndexOpts.ShouldTraverseDecl && !IndexOpts.ShouldTraverseDecl(D))
+ return true; // skip
+
return indexDecl(D);
}
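
With the hoisted IndexDefaultParmeterArgument lambda, default arguments are now indexed for parameters of function declarations as well (when parameters in declarations are indexed at all), not only for definitions. For example:

    int defaultTimeout();   // referenced from the default argument below

    // The reference to defaultTimeout() inside this declaration's default
    // argument is now visited by the indexer, matching what was already done
    // for the definition.
    void connect(int timeoutMs = defaultTimeout());

    void connect(int timeoutMs) { /* ... */ }
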
diff --git a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
index ae9134bf1182..0d2e557cdd36 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
@@ -357,6 +357,15 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::VarTemplate:
llvm_unreachable("variables handled before");
break;
+ case Decl::TemplateTypeParm:
+ Info.Kind = SymbolKind::TemplateTypeParm;
+ break;
+ case Decl::TemplateTemplateParm:
+ Info.Kind = SymbolKind::TemplateTemplateParm;
+ break;
+ case Decl::NonTypeTemplateParm:
+ Info.Kind = SymbolKind::NonTypeTemplateParm;
+ break;
// Other decls get the 'unknown' kind.
default:
break;
@@ -517,6 +526,9 @@ StringRef index::getSymbolKindString(SymbolKind K) {
case SymbolKind::ConversionFunction: return "conversion-func";
case SymbolKind::Parameter: return "param";
case SymbolKind::Using: return "using";
+ case SymbolKind::TemplateTypeParm: return "template-type-param";
+ case SymbolKind::TemplateTemplateParm: return "template-template-param";
+ case SymbolKind::NonTypeTemplateParm: return "non-type-template-param";
}
llvm_unreachable("invalid symbol kind");
}
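
The three new symbol kinds map onto the three flavors of template parameters; using the strings added above:

    // T  -> template-type-param
    // N  -> non-type-template-param
    // TT -> template-template-param
    template <typename T, int N, template <typename> class TT>
    struct Buffer {};
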
diff --git a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
index 959d5f1197fe..b9fc90040cfc 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -170,6 +170,11 @@ public:
return true;
}
+ bool VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ return IndexCtx.handleReference(TL.getDecl(), TL.getNameLoc(), Parent,
+ ParentDC, SymbolRoleSet(), Relations);
+ }
+
bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
const DependentNameType *DNT = TL.getTypePtr();
const NestedNameSpecifier *NNS = DNT->getQualifier();
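
The new VisitInjectedClassNameTypeLoc covers uses of a class template's own name inside its definition, where the name denotes the current specialization without template arguments. For example:

    template <typename T>
    struct Node {
      // 'Node' here is the injected-class-name (equivalent to Node<T>).
      // Such occurrences are now reported as references via TL.getDecl().
      Node *next;
    };
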
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp b/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
index 4f402135672c..e698c07133a9 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
@@ -131,6 +131,21 @@ std::unique_ptr<ASTConsumer> index::createIndexingASTConsumer(
ShouldSkipFunctionBody);
}
+std::unique_ptr<ASTConsumer> clang::index::createIndexingASTConsumer(
+ std::shared_ptr<IndexDataConsumer> DataConsumer,
+ const IndexingOptions &Opts, std::shared_ptr<Preprocessor> PP) {
+ std::function<bool(const Decl *)> ShouldSkipFunctionBody = [](const Decl *) {
+ return false;
+ };
+ if (Opts.ShouldTraverseDecl)
+ ShouldSkipFunctionBody =
+ [ShouldTraverseDecl(Opts.ShouldTraverseDecl)](const Decl *D) {
+ return !ShouldTraverseDecl(D);
+ };
+ return createIndexingASTConsumer(std::move(DataConsumer), Opts, std::move(PP),
+ std::move(ShouldSkipFunctionBody));
+}
+
std::unique_ptr<FrontendAction>
index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
const IndexingOptions &Opts) {
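
The new overload derives the function-body filter from the same ShouldTraverseDecl predicate consulted by indexTopLevelDecl() above. A rough usage sketch; the predicate, the SM and DataConsumer variables, and the Preprocessor handle PP are assumptions, not part of this patch:

    // Only traverse (and keep function bodies of) declarations outside system headers.
    clang::index::IndexingOptions Opts;
    Opts.ShouldTraverseDecl = [&SM](const clang::Decl *D) {
      return !SM.isInSystemHeader(D->getLocation());
    };
    std::unique_ptr<clang::ASTConsumer> Consumer =
        clang::index::createIndexingASTConsumer(DataConsumer, Opts, PP);
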
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
index a7c37e8528d1..784a6008575b 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
@@ -169,6 +169,10 @@ bool IndexingContext::isTemplateImplicitInstantiation(const Decl *D) {
}
switch (TKind) {
case TSK_Undeclared:
+    // Instantiation may not have happened yet when we see a SpecializationDecl
+    // (e.g. when the type doesn't need to be complete); we still treat it as an
+    // instantiation, as we'd like to keep the canonicalized result consistent.

+ return isa<ClassTemplateSpecializationDecl>(D);
case TSK_ExplicitSpecialization:
return false;
case TSK_ImplicitInstantiation:
@@ -206,7 +210,12 @@ getDeclContextForTemplateInstationPattern(const Decl *D) {
static const Decl *adjustTemplateImplicitInstantiation(const Decl *D) {
if (const ClassTemplateSpecializationDecl *
SD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- return SD->getTemplateInstantiationPattern();
+ const auto *Template = SD->getTemplateInstantiationPattern();
+ if (Template)
+ return Template;
+    // Fall back to the primary template if no instantiation is available yet
+    // (e.g. the type doesn't need to be complete).
+ return SD->getSpecializedTemplate()->getTemplatedDecl();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
return FD->getTemplateInstantiationPattern();
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
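
The TSK_Undeclared case and the new fallback cover specializations that are named but never actually instantiated, e.g. when only an incomplete type is needed:

    template <typename T> struct Vec { T *data; };

    // Naming Vec<int> behind a pointer does not require a complete type, so the
    // specialization can remain uninstantiated (TSK_Undeclared) and may have no
    // instantiation pattern yet; the indexer now canonicalizes it to the primary
    // template instead.
    Vec<int> *makeLater();
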
diff --git a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
index 394daf94c4b2..0d1e81219823 100644
--- a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
+++ b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
@@ -11,6 +11,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -382,6 +383,14 @@ void USRGenerator::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
Out << "@NA@" << D->getName();
}
+static const ObjCCategoryDecl *getCategoryContext(const NamedDecl *D) {
+ if (auto *CD = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ return CD;
+ if (auto *ICD = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+ return ICD->getCategoryDecl();
+ return nullptr;
+}
+
void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
const DeclContext *container = D->getDeclContext();
if (const ObjCProtocolDecl *pd = dyn_cast<ObjCProtocolDecl>(container)) {
@@ -395,14 +404,6 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
IgnoreResults = true;
return;
}
- auto getCategoryContext = [](const ObjCMethodDecl *D) ->
- const ObjCCategoryDecl * {
- if (auto *CD = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
- return CD;
- if (auto *ICD = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- return ICD->getCategoryDecl();
- return nullptr;
- };
auto *CD = getCategoryContext(D);
VisitObjCContainerDecl(ID, CD);
}
@@ -475,7 +476,7 @@ void USRGenerator::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
// The USR for a property declared in a class extension or category is based
// on the ObjCInterfaceDecl, not the ObjCCategoryDecl.
if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
- Visit(ID);
+ VisitObjCContainerDecl(ID, getCategoryContext(D));
else
Visit(cast<Decl>(D->getDeclContext()));
GenObjCProperty(D->getName(), D->isClassProperty());
@@ -752,6 +753,7 @@ void USRGenerator::VisitType(QualType T) {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
IgnoreResults = true;
return;
case BuiltinType::ObjCId:
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
index 029bfe1cd600..cdb4a79fa11a 100644
--- a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
@@ -18,6 +18,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/MemoryBuffer.h"
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index f0c5900c8ce4..1df28cc07209 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -133,7 +133,7 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
void HeaderSearch::getHeaderMapFileNames(
SmallVectorImpl<std::string> &Names) const {
for (auto &HM : HeaderMaps)
- Names.push_back(HM.first->getName());
+ Names.push_back(std::string(HM.first->getName()));
}
std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
@@ -145,7 +145,7 @@ std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
bool FileMapOnly) {
// First check the module name to pcm file map.
- auto i (HSOpts->PrebuiltModuleFiles.find(ModuleName));
+ auto i(HSOpts->PrebuiltModuleFiles.find(ModuleName));
if (i != HSOpts->PrebuiltModuleFiles.end())
return i->second;
@@ -159,7 +159,7 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
llvm::sys::fs::make_absolute(Result);
llvm::sys::path::append(Result, ModuleName + ".pcm");
if (getFileMgr().getFile(Result.str()))
- return Result.str().str();
+ return std::string(Result);
}
return {};
}
@@ -184,7 +184,8 @@ std::string HeaderSearch::getCachedModuleFileName(StringRef ModuleName,
//
// To avoid false-negatives, we form as canonical a path as we can, and map
// to lower-case in case we're on a case-insensitive file system.
- std::string Parent = llvm::sys::path::parent_path(ModuleMapPath);
+ std::string Parent =
+ std::string(llvm::sys::path::parent_path(ModuleMapPath));
if (Parent.empty())
Parent = ".";
auto Dir = FileMgr.getDirectory(Parent);
@@ -468,7 +469,7 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
// If this is a framework directory, then we're a subframework of this
// framework.
if (llvm::sys::path::extension(DirName) == ".framework") {
- SubmodulePath.push_back(llvm::sys::path::stem(DirName));
+ SubmodulePath.push_back(std::string(llvm::sys::path::stem(DirName)));
TopFrameworkDir = *Dir;
}
} while (true);
@@ -1218,9 +1219,11 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
}
bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
- // Check if we've ever seen this file as a header.
+ // Check if we've entered this file and found an include guard or #pragma
+  // once. Note that we don't check for #import, because that's not a property
+ // of the file itself.
if (auto *HFI = getExistingFileInfo(File))
- return HFI->isPragmaOnce || HFI->isImport || HFI->ControllingMacro ||
+ return HFI->isPragmaOnce || HFI->ControllingMacro ||
HFI->ControllingMacroID;
return false;
}
@@ -1273,14 +1276,12 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
//
// It's common that libc++ and system modules will both define such
// submodules. Make sure cached results for a builtin header won't
- // prevent other builtin modules to potentially enter the builtin header.
- // Note that builtins are header guarded and the decision to actually
- // enter them is postponed to the controlling macros logic below.
+ // prevent other builtin modules from potentially entering the builtin
+ // header. Note that builtins are header guarded and the decision to
+ // actually enter them is postponed to the controlling macros logic below.
bool TryEnterHdr = false;
if (FileInfo.isCompilingModuleHeader && FileInfo.isModuleHeader)
- TryEnterHdr = File->getDir() == ModMap.getBuiltinDir() &&
- ModuleMap::isBuiltinHeader(
- llvm::sys::path::filename(File->getName()));
+ TryEnterHdr = ModMap.isBuiltinHeader(File);
// Textual headers can be #imported from different modules. Since ObjC
   // headers found in the wild might rely only on #import and do not contain
@@ -1398,25 +1399,46 @@ HeaderSearch::findModuleForHeader(const FileEntry *File,
return ModMap.findModuleForHeader(File, AllowTextual);
}
+ArrayRef<ModuleMap::KnownHeader>
+HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
+ if (ExternalSource) {
+ // Make sure the external source has handled header info about this file,
+ // which includes whether the file is part of a module.
+ (void)getExistingFileInfo(File);
+ }
+ return ModMap.findAllModulesForHeader(File);
+}
+
static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule) {
ModuleMap::KnownHeader Module =
HS.findModuleForHeader(File, /*AllowTextual*/true);
- if (SuggestedModule)
- *SuggestedModule = (Module.getRole() & ModuleMap::TextualHeader)
- ? ModuleMap::KnownHeader()
- : Module;
// If this module specifies [no_undeclared_includes], we cannot find any
// file that's in a non-dependency module.
if (RequestingModule && Module && RequestingModule->NoUndeclaredIncludes) {
- HS.getModuleMap().resolveUses(RequestingModule, /*Complain*/false);
+ HS.getModuleMap().resolveUses(RequestingModule, /*Complain*/ false);
if (!RequestingModule->directlyUses(Module.getModule())) {
+ // Builtin headers are a special case. Multiple modules can use the same
+ // builtin as a modular header (see also comment in
+ // ShouldEnterIncludeFile()), so the builtin header may have been
+ // "claimed" by an unrelated module. This shouldn't prevent us from
+ // including the builtin header textually in this module.
+ if (HS.getModuleMap().isBuiltinHeader(File)) {
+ if (SuggestedModule)
+ *SuggestedModule = ModuleMap::KnownHeader();
+ return true;
+ }
return false;
}
}
+ if (SuggestedModule)
+ *SuggestedModule = (Module.getRole() & ModuleMap::TextualHeader)
+ ? ModuleMap::KnownHeader()
+ : Module;
+
return true;
}
@@ -1567,6 +1589,16 @@ HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
llvm::sys::path::append(ModuleMapFileName, "module.map");
if (auto F = FileMgr.getFile(ModuleMapFileName))
return *F;
+
+  // For frameworks, allow a private module map with the preferred spelling to
+  // be used when a public module map is absent.
+ if (IsFramework) {
+ ModuleMapFileName = Dir->getName();
+ llvm::sys::path::append(ModuleMapFileName, "Modules",
+ "module.private.modulemap");
+ if (auto F = FileMgr.getFile(ModuleMapFileName))
+ return *F;
+ }
return nullptr;
}
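
After the change in isFileMultipleIncludeGuarded(), only properties of the header itself count: an include guard or #pragma once qualifies, while being reached through #import does not, since #import protects the include site rather than the file. For example:

    // guarded.h - reported as multiple-include guarded once it has been entered.
    #ifndef GUARDED_H
    #define GUARDED_H
    void guardedApi(void);
    #endif

    // unguarded.h - not reported as guarded, even if every client pulls it in
    // with #import "unguarded.h".
    void unguardedApi(void);
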
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index 981111d03744..a559ca3eac2b 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -29,6 +29,7 @@
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
@@ -253,7 +254,7 @@ template <typename T> static void StringifyImpl(T &Str, char Quote) {
}
std::string Lexer::Stringify(StringRef Str, bool Charify) {
- std::string Result = Str;
+ std::string Result = std::string(Str);
char Quote = Charify ? '\'' : '"';
StringifyImpl(Result, Quote);
return Result;
@@ -1861,7 +1862,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
getLangOpts());
if (!isIdentifierBody(Next)) {
- // End of suffix. Check whether this is on the whitelist.
+ // End of suffix. Check whether this is on the allowed list.
const StringRef CompleteSuffix(Buffer, Chars);
IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
CompleteSuffix);
@@ -2092,7 +2093,8 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
bool IsAngled) {
// Completion only applies to the filename, after the last slash.
StringRef PartialPath(PathStart, CompletionPoint - PathStart);
- auto Slash = PartialPath.find_last_of(LangOpts.MSVCCompat ? "/\\" : "/");
+ llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
+ auto Slash = PartialPath.find_last_of(SlashChars);
StringRef Dir =
(Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
const char *StartOfFilename =
@@ -2100,7 +2102,8 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
// Code completion filter range is the filename only, up to completion point.
PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
- // We should replace the characters up to the closing quote, if any.
+ // We should replace the characters up to the closing quote or closest slash,
+ // if any.
while (CompletionPoint < BufferEnd) {
char Next = *(CompletionPoint + 1);
if (Next == 0 || Next == '\r' || Next == '\n')
@@ -2108,7 +2111,10 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
++CompletionPoint;
if (Next == (IsAngled ? '>' : '"'))
break;
+ if (llvm::is_contained(SlashChars, Next))
+ break;
}
+
PP->setCodeCompletionTokenRange(
FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
@@ -3694,7 +3700,7 @@ LexNextToken:
} else if (Char == '=') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '>') {
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3705,7 +3711,7 @@ LexNextToken:
// Suggest adding a space between the '<=' and the '>' to avoid a
// change in semantics if this turns up in C++ <=17 mode.
if (getLangOpts().CPlusPlus && !isLexingRawMode()) {
- Diag(BufferPtr, diag::warn_cxx2a_compat_spaceship)
+ Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
<< FixItHint::CreateInsertion(
getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
}
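
The renamed CPlusPlus20 / warn_cxx20_compat_spaceship paths handle a case where the character sequence '<=>' is valid before C++20 but changes meaning once it becomes a single token:

    struct Y { bool operator<=(const Y &) const; };
    template <bool (Y::*)(const Y &) const> struct X {};

    // C++17: lexes as 'operator<=' followed by '>' closing the template
    // argument list. C++20: the same characters lex as the spaceship token
    // 'operator<=>', so pre-C++20 modes now warn and suggest inserting a
    // space ('operator<= >') to keep the old meaning.
    X<&Y::operator<=> x;
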
diff --git a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
index 9a852141c6ee..eb16bc8c7da2 100644
--- a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
@@ -25,6 +25,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
@@ -524,8 +525,12 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
///
NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
SourceLocation TokLoc,
- Preprocessor &PP)
- : PP(PP), ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const TargetInfo &Target,
+ DiagnosticsEngine &Diags)
+ : SM(SM), LangOpts(LangOpts), Diags(Diags),
+ ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
// This routine assumes that the range begin/end matches the regex for integer
// and FP constants (specifically, the 'pp-number' regex), and assumes that
@@ -571,7 +576,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
checkSeparator(TokLoc, s, CSK_AfterDigits);
// Initial scan to lookahead for fixed point suffix.
- if (PP.getLangOpts().FixedPoint) {
+ if (LangOpts.FixedPoint) {
for (const char *c = s; c != ThisTokEnd; ++c) {
if (*c == 'r' || *c == 'k' || *c == 'R' || *c == 'K') {
saw_fixed_point_suffix = true;
@@ -582,6 +587,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// Parse the suffix. At this point we can classify whether we have an FP or
// integer constant.
+ bool isFixedPointConstant = isFixedPointLiteral();
bool isFPConstant = isFloatingLiteral();
// Loop over all of the characters of the suffix. If we see something bad,
@@ -590,14 +596,16 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
switch (*s) {
case 'R':
case 'r':
- if (!PP.getLangOpts().FixedPoint) break;
+ if (!LangOpts.FixedPoint)
+ break;
if (isFract || isAccum) break;
if (!(saw_period || saw_exponent)) break;
isFract = true;
continue;
case 'K':
case 'k':
- if (!PP.getLangOpts().FixedPoint) break;
+ if (!LangOpts.FixedPoint)
+ break;
if (isFract || isAccum) break;
if (!(saw_period || saw_exponent)) break;
isAccum = true;
@@ -605,7 +613,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
case 'h': // FP Suffix for "half".
case 'H':
// OpenCL Extension v1.2 s9.5 - h or H suffix for half type.
- if (!(PP.getLangOpts().Half || PP.getLangOpts().FixedPoint)) break;
+ if (!(LangOpts.Half || LangOpts.FixedPoint))
+ break;
if (isIntegerLiteral()) break; // Error for integer constant.
if (isHalf || isFloat || isLong) break; // HH, FH, LH invalid.
isHalf = true;
@@ -619,8 +628,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// CUDA host and device may have different _Float16 support, therefore
// allows f16 literals to avoid false alarm.
// ToDo: more precise check for CUDA.
- if ((PP.getTargetInfo().hasFloat16Type() || PP.getLangOpts().CUDA) &&
- s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
+ if ((Target.hasFloat16Type() || LangOpts.CUDA) && s + 2 < ThisTokEnd &&
+ s[1] == '1' && s[2] == '6') {
s += 2; // success, eat up 2 characters.
isFloat16 = true;
continue;
@@ -655,10 +664,10 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
} else {
isLong = true;
}
- continue; // Success.
+ continue; // Success.
case 'i':
case 'I':
- if (PP.getLangOpts().MicrosoftExt) {
+ if (LangOpts.MicrosoftExt) {
if (isLong || isLongLong || MicrosoftInteger)
break;
@@ -711,7 +720,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (s != ThisTokEnd || isImaginary) {
// FIXME: Don't bother expanding UCNs if !tok.hasUCN().
expandUCNs(UDSuffixBuf, StringRef(SuffixBegin, ThisTokEnd - SuffixBegin));
- if (isValidUDSuffix(PP.getLangOpts(), UDSuffixBuf)) {
+ if (isValidUDSuffix(LangOpts, UDSuffixBuf)) {
if (!isImaginary) {
// Any suffix pieces we might have parsed are actually part of the
// ud-suffix.
@@ -734,9 +743,11 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (s != ThisTokEnd) {
// Report an error if there are any.
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin - ThisTokBegin),
- diag::err_invalid_suffix_constant)
- << StringRef(SuffixBegin, ThisTokEnd - SuffixBegin) << isFPConstant;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, SuffixBegin - ThisTokBegin, SM, LangOpts),
+ diag::err_invalid_suffix_constant)
+ << StringRef(SuffixBegin, ThisTokEnd - SuffixBegin)
+ << (isFixedPointConstant ? 2 : isFPConstant);
hadError = true;
}
}
@@ -755,9 +766,11 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
// If we have a hex digit other than 'e' (which denotes a FP exponent) then
// the code is using an incorrect base.
if (isHexDigit(*s) && *s != 'e' && *s != 'E' &&
- !isValidUDSuffix(PP.getLangOpts(), StringRef(s, ThisTokEnd - s))) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_digit) << StringRef(s, 1) << (radix == 8 ? 1 : 0);
+ !isValidUDSuffix(LangOpts, StringRef(s, ThisTokEnd - s))) {
+ Diags.Report(
+ Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM, LangOpts),
+ diag::err_invalid_digit)
+ << StringRef(s, 1) << (radix == 8 ? 1 : 0);
hadError = true;
return;
}
@@ -783,8 +796,9 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
s = first_non_digit;
} else {
if (!hadError) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, Exponent - ThisTokBegin, SM, LangOpts),
+ diag::err_exponent_has_no_digits);
hadError = true;
}
return;
@@ -815,7 +829,7 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
.Cases("h", "min", "s", true)
.Cases("ms", "us", "ns", true)
.Cases("il", "i", "if", true)
- .Cases("d", "y", LangOpts.CPlusPlus2a)
+ .Cases("d", "y", LangOpts.CPlusPlus20)
.Default(false);
}
@@ -830,9 +844,10 @@ void NumericLiteralParser::checkSeparator(SourceLocation TokLoc,
return;
if (isDigitSeparator(*Pos)) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin),
- diag::err_digit_separator_not_between_digits)
- << IsAfterDigits;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_digit_separator_not_between_digits)
+ << IsAfterDigits;
hadError = true;
}
}
@@ -870,9 +885,10 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
}
if (!HasSignificandDigits) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_hex_constant_requires)
- << PP.getLangOpts().CPlusPlus << 1;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_hex_constant_requires)
+ << LangOpts.CPlusPlus << 1;
hadError = true;
return;
}
@@ -888,8 +904,9 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
const char *first_non_digit = SkipDigits(s);
if (!containsDigits(s, first_non_digit)) {
if (!hadError) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, Exponent - ThisTokBegin, SM, LangOpts),
+ diag::err_exponent_has_no_digits);
hadError = true;
}
return;
@@ -897,16 +914,17 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
checkSeparator(TokLoc, s, CSK_BeforeDigits);
s = first_non_digit;
- if (!PP.getLangOpts().HexFloats)
- PP.Diag(TokLoc, PP.getLangOpts().CPlusPlus
- ? diag::ext_hex_literal_invalid
- : diag::ext_hex_constant_invalid);
- else if (PP.getLangOpts().CPlusPlus17)
- PP.Diag(TokLoc, diag::warn_cxx17_hex_literal);
+ if (!LangOpts.HexFloats)
+ Diags.Report(TokLoc, LangOpts.CPlusPlus
+ ? diag::ext_hex_literal_invalid
+ : diag::ext_hex_constant_invalid);
+ else if (LangOpts.CPlusPlus17)
+ Diags.Report(TokLoc, diag::warn_cxx17_hex_literal);
} else if (saw_period) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_hex_constant_requires)
- << PP.getLangOpts().CPlusPlus << 0;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_hex_constant_requires)
+ << LangOpts.CPlusPlus << 0;
hadError = true;
}
return;
@@ -915,12 +933,10 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Handle simple binary numbers 0b01010
if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) {
// 0b101010 is a C++1y / GCC extension.
- PP.Diag(TokLoc,
- PP.getLangOpts().CPlusPlus14
- ? diag::warn_cxx11_compat_binary_literal
- : PP.getLangOpts().CPlusPlus
- ? diag::ext_binary_literal_cxx14
- : diag::ext_binary_literal);
+ Diags.Report(TokLoc, LangOpts.CPlusPlus14
+ ? diag::warn_cxx11_compat_binary_literal
+ : LangOpts.CPlusPlus ? diag::ext_binary_literal_cxx14
+ : diag::ext_binary_literal);
++s;
assert(s < ThisTokEnd && "didn't maximally munch?");
radix = 2;
@@ -929,10 +945,11 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
if (s == ThisTokEnd) {
// Done.
} else if (isHexDigit(*s) &&
- !isValidUDSuffix(PP.getLangOpts(),
- StringRef(s, ThisTokEnd - s))) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_digit) << StringRef(s, 1) << 2;
+ !isValidUDSuffix(LangOpts, StringRef(s, ThisTokEnd - s))) {
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_invalid_digit)
+ << StringRef(s, 1) << 2;
hadError = true;
}
// Other suffixes will be diagnosed by the caller.
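
The constructor now takes only the pieces it actually uses (SourceManager, LangOptions, TargetInfo, DiagnosticsEngine) instead of a whole Preprocessor, so it can be driven without one. A sketch of an equivalent call site that still has a Preprocessor &PP, with Spelling and Tok assumed to be in scope:

    // Old: NumericLiteralParser Literal(Spelling, Tok.getLocation(), PP);
    clang::NumericLiteralParser Literal(Spelling, Tok.getLocation(),
                                        PP.getSourceManager(), PP.getLangOpts(),
                                        PP.getTargetInfo(), PP.getDiagnostics());
    if (Literal.hadError)
      return;
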
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index fe20a3507036..bcdc5b8062a0 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -262,7 +262,7 @@ void ModuleMap::resolveHeader(Module *Mod,
// Record this umbrella header.
setUmbrellaHeader(Mod, File, RelativePathName.str());
} else {
- Module::Header H = {RelativePathName.str(), File};
+ Module::Header H = {std::string(RelativePathName.str()), File};
if (Header.Kind == Module::HK_Excluded)
excludeHeader(Mod, H);
else
@@ -282,7 +282,7 @@ void ModuleMap::resolveHeader(Module *Mod,
// resolved. (Such a module still can't be built though, except from
// preprocessed source.)
if (!Header.Size && !Header.ModTime)
- Mod->markUnavailable();
+ Mod->markUnavailable(/*Unimportable=*/false);
}
}
@@ -305,7 +305,7 @@ bool ModuleMap::resolveAsBuiltinHeader(
return false;
auto Role = headerKindToRole(Header.Kind);
- Module::Header H = {Path.str(), *File};
+ Module::Header H = {std::string(Path.str()), *File};
addHeader(Mod, H, Role);
return true;
}
@@ -387,13 +387,17 @@ bool ModuleMap::isBuiltinHeader(StringRef FileName) {
.Default(false);
}
+bool ModuleMap::isBuiltinHeader(const FileEntry *File) {
+ return File->getDir() == BuiltinIncludeDir &&
+ ModuleMap::isBuiltinHeader(llvm::sys::path::filename(File->getName()));
+}
+
ModuleMap::HeadersMap::iterator
ModuleMap::findKnownHeader(const FileEntry *File) {
resolveHeaderDirectives(File);
HeadersMap::iterator Known = Headers.find(File);
if (HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
- Known == Headers.end() && File->getDir() == BuiltinIncludeDir &&
- ModuleMap::isBuiltinHeader(llvm::sys::path::filename(File->getName()))) {
+ Known == Headers.end() && ModuleMap::isBuiltinHeader(File)) {
HeaderInfo.loadTopLevelSystemModules();
return Headers.find(File);
}
@@ -544,6 +548,9 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
const ModuleMap::KnownHeader &Old) {
// Prefer available modules.
+ // FIXME: Considering whether the module is available rather than merely
+ // importable is non-hermetic and can result in surprising behavior for
+ // prebuilt modules. Consider only checking for importability here.
if (New.getModule()->isAvailable() && !Old.getModule()->isAvailable())
return true;
@@ -659,7 +666,20 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
}
ArrayRef<ModuleMap::KnownHeader>
-ModuleMap::findAllModulesForHeader(const FileEntry *File) const {
+ModuleMap::findAllModulesForHeader(const FileEntry *File) {
+ HeadersMap::iterator Known = findKnownHeader(File);
+ if (Known != Headers.end())
+ return Known->second;
+
+ if (findOrCreateModuleForHeaderInUmbrellaDir(File))
+ return Headers.find(File)->second;
+
+ return None;
+}
+
+ArrayRef<ModuleMap::KnownHeader>
+ModuleMap::findResolvedModulesForHeader(const FileEntry *File) const {
+ // FIXME: Is this necessary?
resolveHeaderDirectives(File);
auto It = Headers.find(File);
if (It == Headers.end())
@@ -1094,7 +1114,7 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
new Module(Name, SourceLocation(), /*Parent=*/nullptr, IsFramework,
/*IsExplicit=*/false, NumCreatedModules++);
Result->ShadowingModule = ShadowingModule;
- Result->IsAvailable = false;
+ Result->markUnavailable(/*Unimportable*/true);
ModuleScopeIDs[Result] = CurrentModuleScopeID;
ShadowModules.push_back(Result);
@@ -1105,6 +1125,7 @@ void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
Twine NameAsWritten) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
Mod->Umbrella = UmbrellaHeader;
+ Mod->HasUmbrellaDir = false;
Mod->UmbrellaAsWritten = NameAsWritten.str();
UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
@@ -1116,6 +1137,7 @@ void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
Twine NameAsWritten) {
Mod->Umbrella = UmbrellaDir;
+ Mod->HasUmbrellaDir = true;
Mod->UmbrellaAsWritten = NameAsWritten.str();
UmbrellaDirs[UmbrellaDir] = Mod;
}
@@ -1242,6 +1264,11 @@ void ModuleMap::setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap) {
InferredModuleAllowedBy[M] = ModMap;
}
+void ModuleMap::addAdditionalModuleMapFile(const Module *M,
+ const FileEntry *ModuleMap) {
+ AdditionalModMaps[M].insert(ModuleMap);
+}
+
LLVM_DUMP_METHOD void ModuleMap::dump() {
llvm::errs() << "Modules:";
for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
@@ -1681,7 +1708,8 @@ bool ModuleMapParser::parseModuleId(ModuleId &Id) {
Id.clear();
do {
if (Tok.is(MMToken::Identifier) || Tok.is(MMToken::StringLiteral)) {
- Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
+ Id.push_back(
+ std::make_pair(std::string(Tok.getString()), Tok.getLocation()));
consumeToken();
} else {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module_name);
@@ -2088,9 +2116,9 @@ void ModuleMapParser::parseModuleDecl() {
// If the module meets all requirements but is still unavailable, mark the
// whole tree as unavailable to prevent it from building.
- if (!ActiveModule->IsAvailable && !ActiveModule->IsMissingRequirement &&
+ if (!ActiveModule->IsAvailable && !ActiveModule->IsUnimportable &&
ActiveModule->Parent) {
- ActiveModule->getTopLevelModule()->markUnavailable();
+ ActiveModule->getTopLevelModule()->markUnavailable(/*Unimportable=*/false);
ActiveModule->getTopLevelModule()->MissingHeaders.append(
ActiveModule->MissingHeaders.begin(), ActiveModule->MissingHeaders.end());
}
@@ -2129,7 +2157,7 @@ void ModuleMapParser::parseExternModuleDecl() {
HadError = true;
return;
}
- std::string FileName = Tok.getString();
+ std::string FileName = std::string(Tok.getString());
consumeToken(); // filename
StringRef FileNameRef = FileName;
@@ -2209,7 +2237,7 @@ void ModuleMapParser::parseRequiresDecl() {
}
// Consume the feature name.
- std::string Feature = Tok.getString();
+ std::string Feature = std::string(Tok.getString());
consumeToken();
bool IsRequiresExcludedHack = false;
@@ -2283,7 +2311,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
return;
}
Module::UnresolvedHeaderDirective Header;
- Header.FileName = Tok.getString();
+ Header.FileName = std::string(Tok.getString());
Header.FileNameLoc = consumeToken();
Header.IsUmbrella = LeadingToken == MMToken::UmbrellaKeyword;
Header.Kind =
@@ -2380,7 +2408,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
return;
}
- std::string DirName = Tok.getString();
+ std::string DirName = std::string(Tok.getString());
SourceLocation DirNameLoc = consumeToken();
// Check whether we already have an umbrella.
@@ -2422,8 +2450,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
if (auto FE = SourceMgr.getFileManager().getFile(I->path())) {
-
- Module::Header Header = {I->path(), *FE};
+ Module::Header Header = {std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
}
@@ -2466,8 +2493,8 @@ void ModuleMapParser::parseExportDecl() {
do {
// FIXME: Support string-literal module names here.
if (Tok.is(MMToken::Identifier)) {
- ParsedModuleId.push_back(std::make_pair(Tok.getString(),
- Tok.getLocation()));
+ ParsedModuleId.push_back(
+ std::make_pair(std::string(Tok.getString()), Tok.getLocation()));
consumeToken();
if (Tok.is(MMToken::Period)) {
@@ -2526,7 +2553,7 @@ void ModuleMapParser::parseExportAsDecl() {
}
}
- ActiveModule->ExportAsModule = Tok.getString();
+ ActiveModule->ExportAsModule = std::string(Tok.getString());
Map.addLinkAsDependency(ActiveModule);
consumeToken();
@@ -2572,7 +2599,7 @@ void ModuleMapParser::parseLinkDecl() {
return;
}
- std::string LibraryName = Tok.getString();
+ std::string LibraryName = std::string(Tok.getString());
consumeToken();
ActiveModule->LinkLibraries.push_back(Module::LinkLibrary(LibraryName,
IsFramework));
@@ -2794,8 +2821,8 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
break;
}
- Map.InferredDirectories[Directory].ExcludedModules
- .push_back(Tok.getString());
+ Map.InferredDirectories[Directory].ExcludedModules.push_back(
+ std::string(Tok.getString()));
consumeToken();
break;
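
The many std::string(Tok.getString()) / std::string(I->path()) changes in this file (and the similar ones in HeaderSearch.cpp and Lexer.cpp above) all follow one pattern: construct the std::string explicitly rather than relying on an implicit conversion from StringRef or Twine, consistent with llvm::StringRef's std::string conversion no longer being implicit. A minimal illustration, not tied to any particular call site:

    #include "llvm/ADT/StringRef.h"
    #include <string>

    std::string copyName(llvm::StringRef Name) {
      // std::string S = Name;        // relies on the implicit conversion
      return std::string(Name);       // explicit construction keeps working
    }
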
diff --git a/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp b/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
index cd8b04b20d24..b618071590ba 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
@@ -7,7 +7,24 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Basic/FileManager.h"
using namespace clang;
-void PPChainedCallbacks::anchor() { }
+// Out of line key method.
+PPCallbacks::~PPCallbacks() = default;
+
+void PPCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
+ bool IsAngled, Optional<FileEntryRef> File,
+ SrcMgr::CharacteristicKind FileType) {}
+
+// Out of line key method.
+PPChainedCallbacks::~PPChainedCallbacks() = default;
+
+void PPChainedCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
+ bool IsAngled, Optional<FileEntryRef> File,
+ SrcMgr::CharacteristicKind FileType) {
+ First->HasInclude(Loc, FileName, IsAngled, File, FileType);
+ Second->HasInclude(Loc, FileName, IsAngled, File, FileType);
+}
+
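
With HasInclude given an out-of-line default and chained through PPChainedCallbacks, an observer can watch __has_include queries by overriding the signature shown above. A minimal subclass sketch (headers assumed to be available on the include path):

    #include "clang/Basic/FileManager.h"
    #include "clang/Lex/PPCallbacks.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace clang;

    class HasIncludeLogger : public PPCallbacks {
      void HasInclude(SourceLocation Loc, StringRef FileName, bool IsAngled,
                      Optional<FileEntryRef> File,
                      SrcMgr::CharacteristicKind FileType) override {
        // File is None when the queried header was not found.
        llvm::errs() << "__has_include(" << FileName << ") -> "
                     << (File ? "found" : "not found") << "\n";
      }
    };
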
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index e433b2cf1b95..053ef1d2dd18 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -432,6 +432,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Skip to the next '#endif' / '#else' / '#elif'.
CurLexer->skipOver(*SkipLength);
}
+ SourceLocation endLoc;
while (true) {
CurLexer->Lex(Tok);
@@ -538,7 +539,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly, if we've reached the outermost block.
CurPPLexer->LexingRawMode = false;
- CheckEndOfDirective("endif");
+ endLoc = CheckEndOfDirective("endif");
CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
@@ -565,7 +566,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly.
CurPPLexer->LexingRawMode = false;
- CheckEndOfDirective("else");
+ endLoc = CheckEndOfDirective("else");
CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
@@ -621,7 +622,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// by the end of the preamble; we'll resume parsing after the preamble.
if (Callbacks && (Tok.isNot(tok::eof) || !isRecordingPreamble()))
Callbacks->SourceRangeSkipped(
- SourceRange(HashTokenLoc, CurPPLexer->getSourceLocation()),
+ SourceRange(HashTokenLoc, endLoc.isValid()
+ ? endLoc
+ : CurPPLexer->getSourceLocation()),
Tok.getLocation());
}
@@ -646,24 +649,8 @@ Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
}
const FileEntry *
-Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
- Module *M,
- SourceLocation Loc) {
- assert(M && "no module to include");
-
- // If the context is the global module fragment of some module, we never
- // want to return that file; instead, we want the innermost include-guarded
- // header that it included.
- bool InGlobalModuleFragment = M->Kind == Module::GlobalModuleFragment;
-
- // If we have a module import syntax, we shouldn't include a header to
- // make a particular module visible.
- if ((getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
- getLangOpts().ModulesTS) &&
- !InGlobalModuleFragment)
- return nullptr;
-
- Module *TopM = M->getTopLevelModule();
+Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
+ SourceLocation Loc) {
Module *IncM = getModuleForLocation(IncLoc);
// Walk up through the include stack, looking through textual headers of M
@@ -677,37 +664,50 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
if (!FE)
break;
- if (InGlobalModuleFragment) {
- if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
- return FE;
- Loc = SM.getIncludeLoc(ID);
- continue;
- }
-
- bool InTextualHeader = false;
- for (auto Header : HeaderInfo.getModuleMap().findAllModulesForHeader(FE)) {
- if (!Header.getModule()->isSubModuleOf(TopM))
- continue;
-
- if (!(Header.getRole() & ModuleMap::TextualHeader)) {
- // If this is an accessible, non-textual header of M's top-level module
- // that transitively includes the given location and makes the
- // corresponding module visible, this is the thing to #include.
- if (Header.isAccessibleFrom(IncM))
- return FE;
+ // We want to find all possible modules that might contain this header, so
+ // search all enclosing directories for module maps and load them.
+ HeaderInfo.hasModuleMap(FE->getName(), /*Root*/ nullptr,
+ SourceMgr.isInSystemHeader(Loc));
+ bool InPrivateHeader = false;
+ for (auto Header : HeaderInfo.findAllModulesForHeader(FE)) {
+ if (!Header.isAccessibleFrom(IncM)) {
// It's in a private header; we can't #include it.
// FIXME: If there's a public header in some module that re-exports it,
// then we could suggest including that, but it's not clear that's the
// expected way to make this entity visible.
+ InPrivateHeader = true;
continue;
}
- InTextualHeader = true;
+ // We'll suggest including textual headers below if they're
+ // include-guarded.
+ if (Header.getRole() & ModuleMap::TextualHeader)
+ continue;
+
+ // If we have a module import syntax, we shouldn't include a header to
+ // make a particular module visible. Let the caller know they should
+ // suggest an import instead.
+ if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
+ getLangOpts().ModulesTS)
+ return nullptr;
+
+ // If this is an accessible, non-textual header of M's top-level module
+ // that transitively includes the given location and makes the
+ // corresponding module visible, this is the thing to #include.
+ return FE;
}
- if (!InTextualHeader)
- break;
+ // FIXME: If we're bailing out due to a private header, we shouldn't suggest
+ // an import either.
+ if (InPrivateHeader)
+ return nullptr;
+
+ // If the header is includable and has an include guard, assume the
+ // intended way to expose its contents is by #include, not by importing a
+ // module that transitively includes it.
+ if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
+ return FE;
Loc = SM.getIncludeLoc(ID);
}
@@ -1708,15 +1708,22 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
tok::annot_module_include, Action.ModuleForHeader);
break;
+ case ImportAction::Failure:
+ assert(TheModuleLoader.HadFatalFailure &&
+ "This should be an early exit only to a fatal error");
+ TheModuleLoader.HadFatalFailure = true;
+ IncludeTok.setKind(tok::eof);
+ CurLexer->cutOffLexing();
+ return;
}
}
Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef Filename,
+ const DirectoryLookup *&CurDir, StringRef& Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
bool &IsMapped, const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile, StringRef LookupFilename,
+ const FileEntry *LookupFromFile, StringRef& LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled) {
Optional<FileEntryRef> File = LookupFile(
@@ -1785,21 +1792,10 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
return Filename;
};
StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
-
-#ifndef _WIN32
- // Normalize slashes when compiling with -fms-extensions on non-Windows.
- // This is unnecessary on Windows since the filesystem there handles
- // backslashes.
- SmallString<128> NormalizedTypoCorrectionPath;
- if (LangOpts.MicrosoftExt) {
- NormalizedTypoCorrectionPath = TypoCorrectionName;
- llvm::sys::path::native(NormalizedTypoCorrectionPath);
- TypoCorrectionName = NormalizedTypoCorrectionPath;
- }
-#endif
+ StringRef TypoCorrectionLookupName = CorrectTypoFilename(LookupFilename);
Optional<FileEntryRef> File = LookupFile(
- FilenameLoc, TypoCorrectionName, isAngled, LookupFrom, LookupFromFile,
+ FilenameLoc, TypoCorrectionLookupName, isAngled, LookupFrom, LookupFromFile,
CurDir, Callbacks ? &SearchPath : nullptr,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped,
/*IsFrameworkFound=*/nullptr);
@@ -1814,6 +1810,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
// We found the file, so set the Filename to the name after typo
// correction.
Filename = TypoCorrectionName;
+ LookupFilename = TypoCorrectionLookupName;
return File;
}
}
@@ -1911,14 +1908,18 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SourceLocation FilenameLoc = FilenameTok.getLocation();
StringRef LookupFilename = Filename;
-#ifndef _WIN32
+#ifdef _WIN32
+ llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::windows;
+#else
// Normalize slashes when compiling with -fms-extensions on non-Windows. This
// is unnecessary on Windows since the filesystem there handles backslashes.
SmallString<128> NormalizedPath;
+ llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::posix;
if (LangOpts.MicrosoftExt) {
NormalizedPath = Filename.str();
llvm::sys::path::native(NormalizedPath);
LookupFilename = NormalizedPath;
+ BackslashStyle = llvm::sys::path::Style::windows;
}
#endif
@@ -1933,19 +1934,6 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
return {ImportAction::None};
}
- // Check for circular inclusion of the main file.
- // We can't generate a consistent preamble with regard to the conditional
- // stack if the main file is included again as due to the preamble bounds
- // some directives (e.g. #endif of a header guard) will never be seen.
- // Since this will lead to confusing errors, avoid the inclusion.
- if (File && PreambleConditionalStack.isRecording() &&
- SourceMgr.translateFile(&File->getFileEntry()) ==
- SourceMgr.getMainFileID()) {
- Diag(FilenameTok.getLocation(),
- diag::err_pp_including_mainfile_in_preamble);
- return {ImportAction::None};
- }
-
// Should we enter the source file? Set to Skip if either the source file is
// known to have no effect beyond its effect on module visibility -- that is,
// if it's got an include guard that is already defined, set to Import if it
@@ -2063,6 +2051,18 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
}
+ // Check for circular inclusion of the main file.
+ // We can't generate a consistent preamble with regard to the conditional
+ // stack if the main file is included again as due to the preamble bounds
+  // stack if the main file is included again, as due to the preamble bounds
+ // Since this will lead to confusing errors, avoid the inclusion.
+ if (Action == Enter && File && PreambleConditionalStack.isRecording() &&
+ SourceMgr.isMainFile(*File)) {
+ Diag(FilenameTok.getLocation(),
+ diag::err_pp_including_mainfile_in_preamble);
+ return {ImportAction::None};
+ }
+
if (Callbacks && !IsImportDecl) {
// Notify the callback object that we've seen an inclusion directive.
// FIXME: Use a different callback for a pp-import?
@@ -2093,29 +2093,90 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (CheckIncludePathPortability) {
StringRef Name = LookupFilename;
+ StringRef NameWithoriginalSlashes = Filename;
+#if defined(_WIN32)
+ // Skip UNC prefix if present. (tryGetRealPathName() always
+ // returns a path with the prefix skipped.)
+ bool NameWasUNC = Name.consume_front("\\\\?\\");
+ NameWithoriginalSlashes.consume_front("\\\\?\\");
+#endif
StringRef RealPathName = File->getFileEntry().tryGetRealPathName();
SmallVector<StringRef, 16> Components(llvm::sys::path::begin(Name),
llvm::sys::path::end(Name));
+#if defined(_WIN32)
+ // -Wnonportable-include-path is designed to diagnose includes using
+ // case even on systems with a case-insensitive file system.
+ // On Windows, RealPathName always starts with an upper-case drive
+ // letter for absolute paths, but Name might start with either
+ // case depending on if `cd c:\foo` or `cd C:\foo` was used in the shell.
+ // ("foo" will always have on-disk case, no matter which case was
+ // used in the cd command). To not emit this warning solely for
+ // the drive letter, whose case is dependent on if `cd` is used
+ // with upper- or lower-case drive letters, always consider the
+ // given drive letter case as correct for the purpose of this warning.
+ SmallString<128> FixedDriveRealPath;
+ if (llvm::sys::path::is_absolute(Name) &&
+ llvm::sys::path::is_absolute(RealPathName) &&
+ toLowercase(Name[0]) == toLowercase(RealPathName[0]) &&
+ isLowercase(Name[0]) != isLowercase(RealPathName[0])) {
+ assert(Components.size() >= 3 && "should have drive, backslash, name");
+ assert(Components[0].size() == 2 && "should start with drive");
+ assert(Components[0][1] == ':' && "should have colon");
+ FixedDriveRealPath = (Name.substr(0, 1) + RealPathName.substr(1)).str();
+ RealPathName = FixedDriveRealPath;
+ }
+#endif
if (trySimplifyPath(Components, RealPathName)) {
SmallString<128> Path;
Path.reserve(Name.size()+2);
Path.push_back(isAngled ? '<' : '"');
- bool isLeadingSeparator = llvm::sys::path::is_absolute(Name);
+
+ const auto IsSep = [BackslashStyle](char c) {
+ return llvm::sys::path::is_separator(c, BackslashStyle);
+ };
+
for (auto Component : Components) {
- if (isLeadingSeparator)
- isLeadingSeparator = false;
- else
+ // On POSIX, Components will contain a single '/' as first element
+ // exactly if Name is an absolute path.
+ // On Windows, it will contain "C:" followed by '\' for absolute paths.
+ // The drive letter is optional for absolute paths on Windows, but
+ // clang currently cannot process absolute paths in #include lines that
+ // don't have a drive.
+ // If the first entry in Components is a directory separator,
+ // then the code at the bottom of this loop that keeps the original
+ // directory separator style copies it. If the second entry is
+ // a directory separator (the C:\ case), then that separator already
+ // got copied when the C: was processed and we want to skip that entry.
+ if (!(Component.size() == 1 && IsSep(Component[0])))
Path.append(Component);
- // Append the separator the user used, or the close quote
- Path.push_back(
- Path.size() <= Filename.size() ? Filename[Path.size()-1] :
- (isAngled ? '>' : '"'));
+ else if (!Path.empty())
+ continue;
+
+ // Append the separator(s) the user used, or the close quote
+ if (Path.size() > NameWithoriginalSlashes.size()) {
+ Path.push_back(isAngled ? '>' : '"');
+ continue;
+ }
+ assert(IsSep(NameWithoriginalSlashes[Path.size()-1]));
+ do
+ Path.push_back(NameWithoriginalSlashes[Path.size()-1]);
+ while (Path.size() <= NameWithoriginalSlashes.size() &&
+ IsSep(NameWithoriginalSlashes[Path.size()-1]));
}
- // For user files and known standard headers, by default we issue a diagnostic.
- // For other system headers, we don't. They can be controlled separately.
- auto DiagId = (FileCharacter == SrcMgr::C_User || warnByDefaultOnWrongCase(Name)) ?
- diag::pp_nonportable_path : diag::pp_nonportable_system_path;
+
+#if defined(_WIN32)
+ // Restore UNC prefix if it was there.
+ if (NameWasUNC)
+ Path = (Path.substr(0, 1) + "\\\\?\\" + Path.substr(1)).str();
+#endif
+
+ // For user files and known standard headers, issue a diagnostic.
+ // For other system headers, don't. They can be controlled separately.
+ auto DiagId =
+ (FileCharacter == SrcMgr::C_User || warnByDefaultOnWrongCase(Name))
+ ? diag::pp_nonportable_path
+ : diag::pp_nonportable_system_path;
Diag(FilenameTok, DiagId) << Path <<
FixItHint::CreateReplacement(FilenameRange, Path);
}
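
A minimal, std-only sketch of the drive-letter rule described in the comment above: keep the user's drive-letter case and take the on-disk case for everything else. The function and variable names are hypothetical, not Clang's.

    #include <cassert>
    #include <cctype>
    #include <string>

    // Assumes both inputs are absolute Windows paths such as "c:\foo\Bar.h".
    static std::string fixDriveCase(const std::string &UserName,
                                    const std::string &RealPath) {
      if (UserName.size() >= 2 && RealPath.size() >= 2 &&
          UserName[1] == ':' && RealPath[1] == ':' &&
          std::tolower((unsigned char)UserName[0]) ==
              std::tolower((unsigned char)RealPath[0]))
        return UserName.substr(0, 1) + RealPath.substr(1);
      return RealPath;
    }

    int main() {
      // Only "foo" and "bar.h" get the on-disk spelling; the drive keeps 'c'.
      assert(fixDriveCase("c:\\foo\\bar.h", "C:\\Foo\\Bar.h") == "c:\\Foo\\Bar.h");
    }
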
@@ -2165,7 +2226,10 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (IncludePos.isMacroID())
IncludePos = SourceMgr.getExpansionRange(IncludePos).getEnd();
FileID FID = SourceMgr.createFileID(*File, IncludePos, FileCharacter);
- assert(FID.isValid() && "Expected valid file ID");
+ if (!FID.isValid()) {
+ TheModuleLoader.HadFatalFailure = true;
+ return ImportAction::Failure;
+ }
// If all is good, enter the new file!
if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
@@ -2792,7 +2856,9 @@ void Preprocessor::HandleDefineDirective(
// warn-because-unused-macro set. If it gets used it will be removed from set.
if (getSourceManager().isInMainFile(MI->getDefinitionLoc()) &&
!Diags->isIgnored(diag::pp_macro_not_used, MI->getDefinitionLoc()) &&
- !MacroExpansionInDirectivesOverride) {
+ !MacroExpansionInDirectivesOverride &&
+ getSourceManager().getFileID(MI->getDefinitionLoc()) !=
+ getPredefinesFileID()) {
MI->setIsWarnIfUnused(true);
WarnUnusedMacroLocs.insert(MI->getDefinitionLoc());
}
diff --git a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
index e5ec2b99f507..8c120c13d7d2 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -26,9 +25,12 @@
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -251,8 +253,24 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// If this identifier isn't 'defined' or one of the special
// preprocessor keywords and it wasn't macro expanded, it turns
// into a simple 0
- if (ValueLive)
+ if (ValueLive) {
PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
+
+ const DiagnosticsEngine &DiagEngine = PP.getDiagnostics();
+ // If 'Wundef' is enabled, do not emit 'undef-prefix' diagnostics.
+ if (DiagEngine.isIgnored(diag::warn_pp_undef_identifier,
+ PeekTok.getLocation())) {
+ const std::vector<std::string> UndefPrefixes =
+ DiagEngine.getDiagnosticOptions().UndefPrefixes;
+ const StringRef IdentifierName = II->getName();
+ if (llvm::any_of(UndefPrefixes,
+ [&IdentifierName](const std::string &Prefix) {
+ return IdentifierName.startswith(Prefix);
+ }))
+ PP.Diag(PeekTok, diag::warn_pp_undef_prefix)
+ << AddFlagValue{llvm::join(UndefPrefixes, ",")} << II;
+ }
+ }
Result.Val = 0;
Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
Result.setIdentifier(II);
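
A minimal sketch of the prefix test behind the new -Wundef-prefix style diagnostic above, assuming LLVM's StringRef and STLExtras; the helper name is made up.

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/StringRef.h"
    #include <string>
    #include <vector>

    // True if the undefined identifier starts with any configured prefix.
    static bool matchesUndefPrefix(llvm::StringRef Name,
                                   const std::vector<std::string> &Prefixes) {
      return llvm::any_of(Prefixes, [&](const std::string &Prefix) {
        return Name.startswith(Prefix);
      });
    }
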
@@ -277,7 +295,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
if (NumberInvalid)
return true; // a diagnostic was already reported
- NumericLiteralParser Literal(Spelling, PeekTok.getLocation(), PP);
+ NumericLiteralParser Literal(Spelling, PeekTok.getLocation(),
+ PP.getSourceManager(), PP.getLangOpts(),
+ PP.getTargetInfo(), PP.getDiagnostics());
if (Literal.hadError)
return true; // a diagnostic was already reported.
diff --git a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
index 802172693960..b7c7e2693ef1 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
@@ -24,8 +24,6 @@
#include "llvm/Support/Path.h"
using namespace clang;
-PPCallbacks::~PPCallbacks() {}
-
//===----------------------------------------------------------------------===//
// Miscellaneous Methods.
//===----------------------------------------------------------------------===//
@@ -81,7 +79,7 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
if (Invalid) {
SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
Diag(Loc, diag::err_pp_error_opening_file)
- << std::string(SourceMgr.getBufferName(FileStart)) << "";
+ << std::string(SourceMgr.getBufferName(FileStart)) << "";
return true;
}
@@ -417,7 +415,10 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
if (!isEndOfMacro && CurPPLexer &&
- SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid()) {
+ (SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid() ||
+ // Predefines file doesn't have a valid include location.
+ (PredefinesFileID.isValid() &&
+ CurPPLexer->getFileID() == PredefinesFileID))) {
// Notify SourceManager to record the number of FileIDs that were created
// during lexing of the #include'd file.
unsigned NumFIDs =
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index cf8bb2fbab99..4908594d6081 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1456,10 +1456,8 @@ static void remapMacroPath(
const std::map<std::string, std::string, std::greater<std::string>>
&MacroPrefixMap) {
for (const auto &Entry : MacroPrefixMap)
- if (Path.startswith(Entry.first)) {
- Path = (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
+ if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
break;
- }
}
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
@@ -1543,8 +1541,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
} else {
FN += PLoc.getFilename();
}
- Lexer::Stringify(FN);
remapMacroPath(FN, PPOpts->MacroPrefixMap);
+ Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
Tok.setKind(tok::string_literal);
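
A std-only sketch of the prefix remapping performed by remapMacroPath above; the helper name is made up. The std::greater ordering means lexicographically greater keys are tried first, so "/usr/local" is attempted before "/usr".

    #include <functional>
    #include <map>
    #include <string>

    // Rewrite the first (greatest) matching old prefix, mirroring the
    // std::greater-ordered map used by remapMacroPath.
    static std::string remapPrefix(
        std::string Path,
        const std::map<std::string, std::string, std::greater<std::string>> &Map) {
      for (const auto &Entry : Map)
        if (Path.compare(0, Entry.first.size(), Entry.first) == 0) {
          Path = Entry.second + Path.substr(Entry.first.size());
          break;
        }
      return Path;
    }

With a map entry such as {"/home/user/src": "."}, a __FILE__ of /home/user/src/a.c becomes ./a.c; the reordering in the hunk above runs the remapping first so that any separators introduced by the replacement are then escaped by Lexer::Stringify.
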
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index 57a95815488e..b512a547de7d 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -71,31 +71,36 @@ void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
// PragmaNamespace Implementation.
//===----------------------------------------------------------------------===//
-PragmaNamespace::~PragmaNamespace() {
- llvm::DeleteContainerSeconds(Handlers);
-}
-
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null identifier if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
PragmaHandler *PragmaNamespace::FindHandler(StringRef Name,
bool IgnoreNull) const {
- if (PragmaHandler *Handler = Handlers.lookup(Name))
- return Handler;
- return IgnoreNull ? nullptr : Handlers.lookup(StringRef());
+ auto I = Handlers.find(Name);
+ if (I != Handlers.end())
+ return I->getValue().get();
+ if (IgnoreNull)
+ return nullptr;
+ I = Handlers.find(StringRef());
+ if (I != Handlers.end())
+ return I->getValue().get();
+ return nullptr;
}
void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
- assert(!Handlers.lookup(Handler->getName()) &&
+ assert(!Handlers.count(Handler->getName()) &&
"A handler with this name is already registered in this namespace");
- Handlers[Handler->getName()] = Handler;
+ Handlers[Handler->getName()].reset(Handler);
}
void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
- assert(Handlers.lookup(Handler->getName()) &&
+ auto I = Handlers.find(Handler->getName());
+ assert(I != Handlers.end() &&
"Handler not registered in this namespace");
- Handlers.erase(Handler->getName());
+ // Release ownership back to the caller.
+ I->getValue().release();
+ Handlers.erase(I);
}
void PragmaNamespace::HandlePragma(Preprocessor &PP,
@@ -1902,10 +1907,9 @@ void Preprocessor::RegisterBuiltinPragmas() {
}
// Pragmas added by plugins
- for (PragmaHandlerRegistry::iterator it = PragmaHandlerRegistry::begin(),
- ie = PragmaHandlerRegistry::end();
- it != ie; ++it) {
- AddPragmaHandler(it->instantiate().release());
+ for (const PragmaHandlerRegistry::entry &handler :
+ PragmaHandlerRegistry::entries()) {
+ AddPragmaHandler(handler.instantiate().release());
}
}
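
A reduced sketch of the ownership model the pragma-handler changes above move to: handlers are stored by name as unique_ptr (a StringMap of unique_ptr in this sketch), and removal hands ownership back to the caller. The Handler type here is a stand-in, not Clang's PragmaHandler.

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"
    #include <memory>
    #include <string>

    struct Handler { std::string Name; };

    struct HandlerMap {
      llvm::StringMap<std::unique_ptr<Handler>> Handlers;

      void add(Handler *H) { Handlers[H->Name].reset(H); } // takes ownership
      Handler *find(llvm::StringRef Name) const {
        auto I = Handlers.find(Name);
        return I != Handlers.end() ? I->getValue().get() : nullptr;
      }
      void remove(Handler *H) { // releases ownership back to the caller
        auto I = Handlers.find(H->Name);
        I->getValue().release();
        Handlers.erase(I);
      }
    };
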
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index 0e9be3923630..160e2b6ed884 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -119,7 +119,7 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
// a macro. They get unpoisoned where it is allowed.
(Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
SetPoisonReason(Ident__VA_ARGS__,diag::ext_pp_bad_vaargs_use);
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
(Ident__VA_OPT__ = getIdentifierInfo("__VA_OPT__"))->setIsPoisoned();
SetPoisonReason(Ident__VA_OPT__,diag::ext_pp_bad_vaopt_use);
} else {
@@ -166,6 +166,8 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
this->PPOpts->ExcludedConditionalDirectiveSkipMappings;
if (ExcludedConditionalDirectiveSkipMappings)
ExcludedConditionalDirectiveSkipMappings->clear();
+
+ MaxTokens = LangOpts.MaxTokens;
}
Preprocessor::~Preprocessor() {
@@ -769,9 +771,13 @@ static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
return llvm::StringSwitch<diag::kind>(II.getName())
#define CXX11_KEYWORD(NAME, FLAGS) \
.Case(#NAME, diag::warn_cxx11_keyword)
-#define CXX2A_KEYWORD(NAME, FLAGS) \
- .Case(#NAME, diag::warn_cxx2a_keyword)
+#define CXX20_KEYWORD(NAME, FLAGS) \
+ .Case(#NAME, diag::warn_cxx20_keyword)
#include "clang/Basic/TokenKinds.def"
+ // char8_t is not modeled as a CXX20_KEYWORD because it's not
+ // unconditionally enabled in C++20 mode. (It can be disabled
+ // by -fno-char8_t.)
+ .Case("char8_t", diag::warn_cxx20_keyword)
;
llvm_unreachable(
@@ -906,6 +912,9 @@ void Preprocessor::Lex(Token &Result) {
}
} while (!ReturnedToken);
+ if (Result.is(tok::unknown) && TheModuleLoader.HadFatalFailure)
+ return;
+
if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
// Remember the identifier before code completion token.
setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
@@ -959,8 +968,12 @@ void Preprocessor::Lex(Token &Result) {
LastTokenWasAt = Result.is(tok::at);
--LexLevel;
- if (OnToken && LexLevel == 0 && !Result.getFlag(Token::IsReinjected))
- OnToken(Result);
+
+ if (LexLevel == 0 && !Result.getFlag(Token::IsReinjected)) {
+ ++TokenCount;
+ if (OnToken)
+ OnToken(Result);
+ }
}
/// Lex a header-name token (including one formed from header-name-tokens if
@@ -1200,6 +1213,13 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
Suffix[0].setAnnotationValue(Action.ModuleForHeader);
// FIXME: Call the moduleImport callback?
break;
+ case ImportAction::Failure:
+ assert(TheModuleLoader.HadFatalFailure &&
+ "This should be an early exit only to a fatal error");
+ Result.setKind(tok::eof);
+ CurLexer->cutOffLexing();
+ EnterTokens(Suffix);
+ return true;
}
EnterTokens(Suffix);
@@ -1339,7 +1359,7 @@ bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
return false;
}
- String = Literal.GetString();
+ String = std::string(Literal.GetString());
return true;
}
@@ -1350,7 +1370,9 @@ bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
StringRef Spelling = getSpelling(Tok, IntegerBuffer, &NumberInvalid);
if (NumberInvalid)
return false;
- NumericLiteralParser Literal(Spelling, Tok.getLocation(), *this);
+ NumericLiteralParser Literal(Spelling, Tok.getLocation(), getSourceManager(),
+ getLangOpts(), getTargetInfo(),
+ getDiagnostics());
if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
return false;
llvm::APInt APVal(64, 0);
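
A minimal illustration of the StringSwitch pattern used for the future-keyword compatibility warnings touched above; the enum here is a placeholder rather than Clang's diag:: IDs.

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum class FutureKeyword { None, Cxx11, Cxx20 };

    static FutureKeyword classifyFutureKeyword(llvm::StringRef Name) {
      return llvm::StringSwitch<FutureKeyword>(Name)
          .Case("constexpr", FutureKeyword::Cxx11)
          .Case("consteval", FutureKeyword::Cxx20)
          // char8_t is listed explicitly because it is not unconditionally
          // enabled in C++20 mode (it can be turned off with -fno-char8_t).
          .Case("char8_t", FutureKeyword::Cxx20)
          .Default(FutureKeyword::None);
    }
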
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
index e626cfcc927f..f6b005d9e19c 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
@@ -103,7 +103,7 @@ TokenConcatenation::TokenConcatenation(const Preprocessor &pp) : PP(pp) {
TokenInfo[tok::utf8_char_constant] |= aci_custom;
// These tokens have custom code in C++2a mode.
- if (PP.getLangOpts().CPlusPlus2a)
+ if (PP.getLangOpts().CPlusPlus20)
TokenInfo[tok::lessequal ] |= aci_custom_firstchar;
// These tokens change behavior if followed by an '='.
@@ -292,6 +292,6 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
case tok::arrow: // ->*
return PP.getLangOpts().CPlusPlus && FirstChar == '*';
case tok::lessequal: // <=> (C++2a)
- return PP.getLangOpts().CPlusPlus2a && FirstChar == '>';
+ return PP.getLangOpts().CPlusPlus20 && FirstChar == '>';
}
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index a75965784168..d05332b5ac5a 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -133,9 +133,6 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
LexedMethod* LM = new LexedMethod(this, FnD);
getCurrentClass().LateParsedDeclarations.push_back(LM);
- LM->TemplateScope = getCurScope()->isTemplateParamScope() ||
- (FnD && isa<FunctionTemplateDecl>(FnD) &&
- cast<FunctionTemplateDecl>(FnD)->isAbbreviated());
CachedTokens &Toks = LM->Toks;
tok::TokenKind kind = Tok.getKind();
@@ -225,6 +222,7 @@ Parser::LateParsedDeclaration::~LateParsedDeclaration() {}
void Parser::LateParsedDeclaration::ParseLexedMethodDeclarations() {}
void Parser::LateParsedDeclaration::ParseLexedMemberInitializers() {}
void Parser::LateParsedDeclaration::ParseLexedMethodDefs() {}
+void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
void Parser::LateParsedDeclaration::ParseLexedPragmas() {}
Parser::LateParsedClass::LateParsedClass(Parser *P, ParsingClass *C)
@@ -246,6 +244,10 @@ void Parser::LateParsedClass::ParseLexedMethodDefs() {
Self->ParseLexedMethodDefs(*Class);
}
+void Parser::LateParsedClass::ParseLexedAttributes() {
+ Self->ParseLexedAttributes(*Class);
+}
+
void Parser::LateParsedClass::ParseLexedPragmas() {
Self->ParseLexedPragmas(*Class);
}
@@ -262,57 +264,79 @@ void Parser::LateParsedMemberInitializer::ParseLexedMemberInitializers() {
Self->ParseLexedMemberInitializer(*this);
}
+void Parser::LateParsedAttribute::ParseLexedAttributes() {
+ Self->ParseLexedAttribute(*this, true, false);
+}
+
void Parser::LateParsedPragma::ParseLexedPragmas() {
Self->ParseLexedPragma(*this);
}
+/// Utility to re-enter a possibly-templated scope while parsing its
+/// late-parsed components.
+struct Parser::ReenterTemplateScopeRAII {
+ Parser &P;
+ MultiParseScope Scopes;
+ TemplateParameterDepthRAII CurTemplateDepthTracker;
+
+ ReenterTemplateScopeRAII(Parser &P, Decl *MaybeTemplated, bool Enter = true)
+ : P(P), Scopes(P), CurTemplateDepthTracker(P.TemplateParameterDepth) {
+ if (Enter) {
+ CurTemplateDepthTracker.addDepth(
+ P.ReenterTemplateScopes(Scopes, MaybeTemplated));
+ }
+ }
+};
+
+/// Utility to re-enter a class scope while parsing its late-parsed components.
+struct Parser::ReenterClassScopeRAII : ReenterTemplateScopeRAII {
+ ParsingClass &Class;
+
+ ReenterClassScopeRAII(Parser &P, ParsingClass &Class)
+ : ReenterTemplateScopeRAII(P, Class.TagOrTemplate,
+ /*Enter=*/!Class.TopLevelClass),
+ Class(Class) {
+ // If this is the top-level class, we're still within its scope.
+ if (Class.TopLevelClass)
+ return;
+
+ // Re-enter the class scope itself.
+ Scopes.Enter(Scope::ClassScope|Scope::DeclScope);
+ P.Actions.ActOnStartDelayedMemberDeclarations(P.getCurScope(),
+ Class.TagOrTemplate);
+ }
+ ~ReenterClassScopeRAII() {
+ if (Class.TopLevelClass)
+ return;
+
+ P.Actions.ActOnFinishDelayedMemberDeclarations(P.getCurScope(),
+ Class.TagOrTemplate);
+ }
+};
+
/// ParseLexedMethodDeclarations - We finished parsing the member
/// specification of a top (non-nested) C++ class. Now go over the
/// stack of method declarations with some parts for which parsing was
/// delayed (such as default arguments) and parse them.
void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
-
- // The current scope is still active if we're the top-level class.
- // Otherwise we'll need to push and enter a new scope.
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
- HasClassScope);
- if (HasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMethodDeclarations();
- }
+ ReenterClassScopeRAII InClassScope(*this, Class);
- if (HasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
+ for (LateParsedDeclaration *LateD : Class.LateParsedDeclarations)
+ LateD->ParseLexedMethodDeclarations();
}
void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (LM.TemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
- ++CurTemplateDepthTracker;
- }
+ ReenterTemplateScopeRAII InFunctionTemplateScope(*this, LM.Method);
+
// Start the delayed C++ method declaration
Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
// Introduce the parameters into scope and parse their default
// arguments.
- ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope | Scope::DeclScope);
+ InFunctionTemplateScope.Scopes.Enter(Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
auto Param = cast<ParmVarDecl>(LM.DefaultArgs[I].Param);
// Introduce the parameter into scope.
@@ -466,7 +490,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
LM.ExceptionSpecTokens = nullptr;
}
- PrototypeScope.Exit();
+ InFunctionTemplateScope.Scopes.Exit();
// Finish the delayed C++ method declaration.
Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
@@ -476,30 +500,15 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
/// (non-nested) C++ class. Now go over the stack of lexed methods that were
/// collected during its parsing and parse them all.
void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
- HasClassScope);
+ ReenterClassScopeRAII InClassScope(*this, Class);
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMethodDefs();
- }
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedMethodDefs();
}
void Parser::ParseLexedMethodDef(LexedMethod &LM) {
// If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (LM.TemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
- ++CurTemplateDepthTracker;
- }
+ ReenterTemplateScopeRAII InFunctionTemplateScope(*this, LM.D);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -580,23 +589,7 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
/// of a top (non-nested) C++ class. Now go over the stack of lexed data member
/// initializers that were collected during its parsing and parse them all.
void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
- // Set or update the scope flags.
- bool AlreadyHasClassScope = Class.TopLevelClass;
- unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
- ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
- ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
-
- if (!AlreadyHasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
+ ReenterClassScopeRAII InClassScope(*this, Class);
if (!Class.LateParsedDeclarations.empty()) {
// C++11 [expr.prim.general]p4:
@@ -604,18 +597,14 @@ void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
// (9.2) of a class X, the expression this is a prvalue of type "pointer
// to X" within the optional brace-or-equal-initializer. It shall not
// appear elsewhere in the member-declarator.
+ // FIXME: This should be done in ParseLexedMemberInitializer, not here.
Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
Qualifiers());
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
- }
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedMemberInitializers();
}
- if (!AlreadyHasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-
Actions.ActOnFinishDelayedMemberInitializers(Class.TagOrTemplate);
}
@@ -662,21 +651,115 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
ConsumeAnyToken();
}
-void Parser::ParseLexedPragmas(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
+/// Wrapper which calls ParseLexedAttribute for each late-parsed attribute in
+/// the class, after setting up the scope appropriately.
+void Parser::ParseLexedAttributes(ParsingClass &Class) {
+ ReenterClassScopeRAII InClassScope(*this, Class);
+
+ for (LateParsedDeclaration *LateD : Class.LateParsedDeclarations)
+ LateD->ParseLexedAttributes();
+}
+
+/// Parse all attributes in LAs, and attach them to Decl D.
+void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
+ for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
+ if (D)
+ LAs[i]->addDecl(D);
+ ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
+ delete LAs[i];
+ }
+ LAs.clear();
+}
+
+/// Finish parsing an attribute for which parsing was delayed.
+/// This will be called at the end of parsing a class declaration
+/// for each LateParsedAttribute. We consume the saved tokens and
+/// create an attribute with the arguments filled in. We add this
+/// to the Attribute list for the decl.
+void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition) {
+ // Create a fake EOF so that attribute parsing won't go off the end of the
+ // attribute.
+ Token AttrEnd;
+ AttrEnd.startToken();
+ AttrEnd.setKind(tok::eof);
+ AttrEnd.setLocation(Tok.getLocation());
+ AttrEnd.setEofData(LA.Toks.data());
+ LA.Toks.push_back(AttrEnd);
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LA.Toks.push_back(Tok);
+ PP.EnterTokenStream(LA.Toks, true, /*IsReinject=*/true);
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ ParsedAttributes Attrs(AttrFactory);
+ SourceLocation endLoc;
+
+ if (LA.Decls.size() > 0) {
+ Decl *D = LA.Decls[0];
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
+
+ // Allow 'this' within late-parsed attributes.
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
+ ND && ND->isCXXInstanceMember());
+
+ if (LA.Decls.size() == 1) {
+ // If the Decl is templatized, add template parameters to scope.
+ ReenterTemplateScopeRAII InDeclScope(*this, D, EnterScope);
+
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ if (HasFunScope) {
+ InDeclScope.Scopes.Enter(Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope);
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
+ }
+
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr);
+
+ if (HasFunScope)
+ Actions.ActOnExitFunctionContext();
+ } else {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr);
+ }
+ } else {
+ Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
}
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope,
- HasClassScope);
- for (LateParsedDeclaration *LPD : Class.LateParsedDeclarations)
- LPD->ParseLexedPragmas();
+ if (OnDefinition && !Attrs.empty() && !Attrs.begin()->isCXX11Attribute() &&
+ Attrs.begin()->isKnownToGCC())
+ Diag(Tok, diag::warn_attribute_on_function_definition)
+ << &LA.AttrName;
+
+ for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
+
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
+ ConsumeAnyToken();
+}
+
+void Parser::ParseLexedPragmas(ParsingClass &Class) {
+ ReenterClassScopeRAII InClassScope(*this, Class);
+
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedPragmas();
}
void Parser::ParseLexedPragma(LateParsedPragma &LP) {
@@ -1114,17 +1197,14 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
break;
}
+ // Put the token stream back and undo any annotations we performed
+ // after the comma. They may reflect a different parse than the one
+ // we will actually perform at the end of the class.
+ PA.RevertAnnotations();
+
// If what follows could be a declaration, it is a declaration.
- if (Result != TPResult::False && Result != TPResult::Error) {
- PA.Revert();
+ if (Result != TPResult::False && Result != TPResult::Error)
return true;
- }
-
- // In the uncommon case that we decide the following tokens are part
- // of a template argument, revert any annotations we've performed in
- // those tokens. We're not going to look them up until we've parsed
- // the rest of the class, and that might add more declarations.
- PA.RevertAnnotations();
}
// Keep going. We know we're inside a template argument list now.
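
The Reenter*ScopeRAII helpers introduced above follow the usual RAII shape: enter scopes in the constructor (or via an explicit Enter call), then unwind everything in reverse order in the destructor. A generic, Clang-free sketch of that shape, loosely mirroring how MultiParseScope pops every scope entered through it:

    #include <functional>
    #include <utility>
    #include <vector>

    class ScopeStack {
      std::vector<std::function<void()>> Exits;

    public:
      // Run the enter action now; remember the matching exit action.
      void enter(std::function<void()> OnEnter, std::function<void()> OnExit) {
        OnEnter();
        Exits.push_back(std::move(OnExit));
      }

      // Unwind in reverse order of entry.
      ~ScopeStack() {
        for (auto It = Exits.rbegin(); It != Exits.rend(); ++It)
          (*It)();
      }
    };
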
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 6353e14bc41a..c87d240a8206 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -1409,154 +1409,6 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
Syntax);
}
-// Late Parsed Attributes:
-// See other examples of late parsing in lib/Parse/ParseCXXInlineMethods
-
-void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
-
-void Parser::LateParsedClass::ParseLexedAttributes() {
- Self->ParseLexedAttributes(*Class);
-}
-
-void Parser::LateParsedAttribute::ParseLexedAttributes() {
- Self->ParseLexedAttribute(*this, true, false);
-}
-
-/// Wrapper class which calls ParseLexedAttribute, after setting up the
-/// scope appropriately.
-void Parser::ParseLexedAttributes(ParsingClass &Class) {
- // Deal with templates
- // FIXME: Test cases to make sure this does the right thing for templates.
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
-
- // Set or update the scope flags.
- bool AlreadyHasClassScope = Class.TopLevelClass;
- unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
- ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
- ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
-
- // Enter the scope of nested classes
- if (!AlreadyHasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
- if (!Class.LateParsedDeclarations.empty()) {
- for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i){
- Class.LateParsedDeclarations[i]->ParseLexedAttributes();
- }
- }
-
- if (!AlreadyHasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-}
-
-/// Parse all attributes in LAs, and attach them to Decl D.
-void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
- bool EnterScope, bool OnDefinition) {
- assert(LAs.parseSoon() &&
- "Attribute list should be marked for immediate parsing.");
- for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
- if (D)
- LAs[i]->addDecl(D);
- ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
- delete LAs[i];
- }
- LAs.clear();
-}
-
-/// Finish parsing an attribute for which parsing was delayed.
-/// This will be called at the end of parsing a class declaration
-/// for each LateParsedAttribute. We consume the saved tokens and
-/// create an attribute with the arguments filled in. We add this
-/// to the Attribute list for the decl.
-void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
- bool EnterScope, bool OnDefinition) {
- // Create a fake EOF so that attribute parsing won't go off the end of the
- // attribute.
- Token AttrEnd;
- AttrEnd.startToken();
- AttrEnd.setKind(tok::eof);
- AttrEnd.setLocation(Tok.getLocation());
- AttrEnd.setEofData(LA.Toks.data());
- LA.Toks.push_back(AttrEnd);
-
- // Append the current token at the end of the new token stream so that it
- // doesn't get lost.
- LA.Toks.push_back(Tok);
- PP.EnterTokenStream(LA.Toks, true, /*IsReinject=*/true);
- // Consume the previously pushed token.
- ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
-
- ParsedAttributes Attrs(AttrFactory);
- SourceLocation endLoc;
-
- if (LA.Decls.size() > 0) {
- Decl *D = LA.Decls[0];
- NamedDecl *ND = dyn_cast<NamedDecl>(D);
- RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
-
- // Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
- ND && ND->isCXXInstanceMember());
-
- if (LA.Decls.size() == 1) {
- // If the Decl is templatized, add template parameters to scope.
- bool HasTemplateScope = EnterScope && D->isTemplateDecl();
- ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
-
- // If the Decl is on a function, add function parameters to the scope.
- bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
- ParseScope FnScope(
- this, Scope::FnScope | Scope::DeclScope | Scope::CompoundStmtScope,
- HasFunScope);
- if (HasFunScope)
- Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
-
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
- nullptr);
-
- if (HasFunScope) {
- Actions.ActOnExitFunctionContext();
- FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope) {
- TempScope.Exit();
- }
- } else {
- // If there are multiple decls, then the decl cannot be within the
- // function scope.
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
- nullptr);
- }
- } else {
- Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
- }
-
- if (OnDefinition && !Attrs.empty() && !Attrs.begin()->isCXX11Attribute() &&
- Attrs.begin()->isKnownToGCC())
- Diag(Tok, diag::warn_attribute_on_function_definition)
- << &LA.AttrName;
-
- for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
- Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
-
- // Due to a parsing error, we either went over the cached tokens or
- // there are still cached tokens left, so we skip the leftover tokens.
- while (Tok.isNot(tok::eof))
- ConsumeAnyToken();
-
- if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
- ConsumeAnyToken();
-}
-
void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
@@ -2046,46 +1898,52 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
// Check to see if we have a function *definition* which must have a body.
- if (D.isFunctionDeclarator() &&
- // Look at the next token to make sure that this isn't a function
- // declaration. We have to check this because __attribute__ might be the
- // start of a function definition in GCC-extended K&R C.
- !isDeclarationAfterDeclarator()) {
-
- // Function definitions are only allowed at file scope and in C++ classes.
- // The C++ inline method definition case is handled elsewhere, so we only
- // need to handle the file scope definition case.
- if (Context == DeclaratorContext::FileContext) {
- if (isStartOfFunctionDefinition(D)) {
- if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- Diag(Tok, diag::err_function_declared_typedef);
-
- // Recover by treating the 'typedef' as spurious.
- DS.ClearStorageClassSpecs();
- }
+ if (D.isFunctionDeclarator()) {
+ if (Tok.is(tok::equal) && NextToken().is(tok::code_completion)) {
+ Actions.CodeCompleteAfterFunctionEquals(D);
+ cutOffParsing();
+ return nullptr;
+ }
+ // Look at the next token to make sure that this isn't a function
+ // declaration. We have to check this because __attribute__ might be the
+ // start of a function definition in GCC-extended K&R C.
+ if (!isDeclarationAfterDeclarator()) {
+
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context == DeclaratorContext::FileContext) {
+ if (isStartOfFunctionDefinition(D)) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ // Recover by treating the 'typedef' as spurious.
+ DS.ClearStorageClassSpecs();
+ }
- Decl *TheDecl =
- ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
- return Actions.ConvertDeclToDeclGroup(TheDecl);
- }
+ Decl *TheDecl = ParseFunctionDefinition(D, ParsedTemplateInfo(),
+ &LateParsedAttrs);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
- if (isDeclarationSpecifier()) {
- // If there is an invalid declaration specifier right after the
- // function prototype, then we must be in a missing semicolon case
- // where this isn't actually a body. Just fall through into the code
- // that handles it as a prototype, and let the top-level code handle
- // the erroneous declspec where it would otherwise expect a comma or
- // semicolon.
+ if (isDeclarationSpecifier()) {
+ // If there is an invalid declaration specifier right after the
+ // function prototype, then we must be in a missing semicolon case
+ // where this isn't actually a body. Just fall through into the code
+ // that handles it as a prototype, and let the top-level code handle
+ // the erroneous declspec where it would otherwise expect a comma or
+ // semicolon.
+ } else {
+ Diag(Tok, diag::err_expected_fn_body);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
} else {
- Diag(Tok, diag::err_expected_fn_body);
- SkipUntil(tok::semi);
- return nullptr;
- }
- } else {
- if (Tok.is(tok::l_brace)) {
- Diag(Tok, diag::err_function_definition_not_allowed);
- SkipMalformedDecl();
- return nullptr;
+ if (Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_function_definition_not_allowed);
+ SkipMalformedDecl();
+ return nullptr;
+ }
}
}
}
@@ -2359,7 +2217,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
<< 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
} else {
InitializerScopeRAII InitScope(*this, D, ThisDecl);
@@ -2460,6 +2318,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
InitializerScopeRAII InitScope(*this, D, ThisDecl);
+ PreferredType.enterVariableInit(Tok.getLocation(), ThisDecl);
ExprResult Init(ParseBraceInitializer());
InitScope.pop();
@@ -2741,7 +2600,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
default:
// This is probably supposed to be a type. This includes cases like:
// int f(itn);
- // struct S { unsinged : 4; };
+ // struct S { unsigned : 4; };
break;
}
}
@@ -2879,6 +2738,25 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
ParsedAttr::AS_Keyword, EllipsisLoc);
}
+ExprResult Parser::ParseExtIntegerArgument() {
+ assert(Tok.is(tok::kw__ExtInt) && "Not an extended int type");
+ ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ ExprResult ER = ParseConstantExpression();
+ if (ER.isInvalid()) {
+ T.skipToEnd();
+ return ExprError();
+ }
+
+ if (T.consumeClose())
+ return ExprError();
+ return ER;
+}
+
/// Determine whether we're looking at something that might be a declarator
/// in a simple-declaration. If it can't possibly be a declarator, maybe
/// diagnose a missing semicolon after a prior tag definition in the decl
@@ -3161,9 +3039,19 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// We are looking for a qualified typename.
Token Next = NextToken();
- if (Next.is(tok::annot_template_id) &&
- static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
- ->Kind == TNK_Type_template) {
+
+ TemplateIdAnnotation *TemplateId = Next.is(tok::annot_template_id)
+ ? takeTemplateIdAnnotation(Next)
+ : nullptr;
+ if (TemplateId && TemplateId->hasInvalidName()) {
+ // We found something like 'T::U<Args> x', but U is not a template.
+ // Assume it was supposed to be a type.
+ DS.SetTypeSpecError();
+ ConsumeAnnotationToken();
+ break;
+ }
+
+ if (TemplateId && TemplateId->Kind == TNK_Type_template) {
// We have a qualified template-id, e.g., N::A<int>
// If this would be a valid constructor declaration with template
@@ -3173,7 +3061,6 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
//
// To improve diagnostics for this case, parse the declaration as a
// constructor (and reject the extra template arguments later).
- TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
if ((DSContext == DeclSpecContext::DSC_top_level ||
DSContext == DeclSpecContext::DSC_class) &&
TemplateId->Name &&
@@ -3194,9 +3081,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
}
- if (Next.is(tok::annot_template_id) &&
- static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
- ->Kind == TNK_Concept_template &&
+ if (TemplateId && TemplateId->Kind == TNK_Concept_template &&
GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype)) {
DS.getTypeSpecScope() = SS;
// This is a qualified placeholder-specifier, e.g., ::C<int> auto ...
@@ -3209,16 +3094,12 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (Next.is(tok::annot_typename)) {
DS.getTypeSpecScope() = SS;
ConsumeAnnotationToken(); // The C++ scope.
- if (Tok.getAnnotationValue()) {
- ParsedType T = getTypeAnnotation(Tok);
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
- Tok.getAnnotationEndLoc(),
- PrevSpec, DiagID, T, Policy);
- if (isInvalid)
- break;
- }
- else
- DS.SetTypeSpecError();
+ TypeResult T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, T, Policy);
+ if (isInvalid)
+ break;
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeAnnotationToken(); // The typename
}
@@ -3250,10 +3131,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (!TypeRep) {
if (TryAnnotateTypeConstraint())
goto DoneWithDeclSpec;
- if (isTypeConstraintAnnotation())
- continue;
- if (NextToken().is(tok::annot_template_id))
- // Might have been annotated by TryAnnotateTypeConstraint.
+ if (Tok.isNot(tok::annot_cxxscope) ||
+ NextToken().isNot(tok::identifier))
continue;
// Eat the scope spec so the identifier is current.
ConsumeAnnotationToken();
@@ -3288,13 +3167,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (DS.hasTypeSpecifier() && DS.hasTagDefinition())
goto DoneWithDeclSpec;
- if (Tok.getAnnotationValue()) {
- ParsedType T = getTypeAnnotation(Tok);
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
- DiagID, T, Policy);
- } else
- DS.SetTypeSpecError();
-
+ TypeResult T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, T, Policy);
if (isInvalid)
break;
@@ -3406,10 +3281,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (!TypeRep) {
if (TryAnnotateTypeConstraint())
goto DoneWithDeclSpec;
- if (isTypeConstraintAnnotation())
- continue;
- if (Tok.is(tok::annot_template_id))
- // Might have been annotated by TryAnnotateTypeConstraint.
+ if (Tok.isNot(tok::identifier))
continue;
ParsedAttributesWithRange Attrs(AttrFactory);
if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) {
@@ -3463,7 +3335,18 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// type-name or placeholder-specifier
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+
+ if (TemplateId->hasInvalidName()) {
+ DS.SetTypeSpecError();
+ break;
+ }
+
if (TemplateId->Kind == TNK_Concept_template) {
+ // If we've already diagnosed that this type-constraint has invalid
+ // arguments, drop it and just form 'auto' or 'decltype(auto)'.
+ if (TemplateId->hasInvalidArgs())
+ TemplateId = nullptr;
+
if (NextToken().is(tok::identifier)) {
Diag(Loc, diag::err_placeholder_expected_auto_or_decltype_auto)
<< FixItHint::CreateInsertion(NextToken().getLocation(), "auto");
@@ -3687,8 +3570,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumedEnd = ExplicitLoc;
ConsumeToken(); // kw_explicit
if (Tok.is(tok::l_paren)) {
- if (getLangOpts().CPlusPlus2a || isExplicitBool() == TPResult::True) {
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus2a
+ if (getLangOpts().CPlusPlus20 || isExplicitBool() == TPResult::True) {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_explicit_bool
: diag::ext_explicit_bool);
@@ -3705,7 +3588,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
} else
Tracker.skipToEnd();
} else {
- Diag(Tok.getLocation(), diag::warn_cxx2a_compat_explicit_bool);
+ Diag(Tok.getLocation(), diag::warn_cxx20_compat_explicit_bool);
}
}
isInvalid = DS.setFunctionSpecExplicit(ExplicitLoc, PrevSpec, DiagID,
@@ -3801,6 +3684,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw__ExtInt: {
+ ExprResult ER = ParseExtIntegerArgument();
+ if (ER.isInvalid())
+ continue;
+ isInvalid = DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ ConsumedEnd = PrevTokLocation;
+ break;
+ }
case tok::kw___int128:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
DiagID, Policy);
@@ -3809,6 +3700,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw___bf16:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_BFloat16, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
case tok::kw_float:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
DiagID, Policy);
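
A short usage sketch of the two new type specifiers the hunks above teach the parser. Both are Clang extensions, and exact availability depends on the Clang version and target.

    // _ExtInt takes a constant expression giving the bit width, as parsed by
    // ParseExtIntegerArgument above.
    _ExtInt(13) Tiny;
    unsigned _ExtInt(2 * 64) Wide;

    // __bf16 is the bfloat16 type; it is only accepted on targets that
    // provide it, so it is left commented out here.
    // __bf16 H;
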
@@ -4212,7 +4107,7 @@ void Parser::ParseStructDeclaration(
/// [OBC] '@' 'defs' '(' class-name ')'
///
void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
- DeclSpec::TST TagType, Decl *TagDecl) {
+ DeclSpec::TST TagType, RecordDecl *TagDecl) {
PrettyDeclStackTraceEntry CrashInfo(Actions.Context, TagDecl, RecordLoc,
"parsing struct/union body");
assert(!getLangOpts().CPlusPlus && "C++ declarations not supported");
@@ -4224,8 +4119,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
- SmallVector<Decl *, 32> FieldDecls;
-
// While we still have something to read, read the declarations in the struct.
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
@@ -4277,7 +4170,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
Actions.ActOnField(getCurScope(), TagDecl,
FD.D.getDeclSpec().getSourceRange().getBegin(),
FD.D, FD.BitfieldSize);
- FieldDecls.push_back(Field);
FD.complete(Field);
};
@@ -4301,7 +4193,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SmallVector<Decl *, 16> Fields;
Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
Tok.getIdentifierInfo(), Fields);
- FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
ConsumeToken();
ExpectAndConsume(tok::r_paren);
}
@@ -4327,6 +4218,9 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// If attributes exist after struct contents, parse them.
MaybeParseGNUAttributes(attrs);
+ SmallVector<Decl *, 32> FieldDecls(TagDecl->field_begin(),
+ TagDecl->field_end());
+
Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls,
T.getOpenLocation(), T.getCloseLocation(), attrs);
StructScope.Exit();
@@ -4361,7 +4255,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
/// ':' type-specifier-seq
///
/// [C++] elaborated-type-specifier:
-/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
+/// [C++] 'enum' nested-name-specifier[opt] identifier
///
void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
@@ -4410,17 +4304,24 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
- // Enum definitions should not be parsed in a trailing-return-type.
- bool AllowDeclaration = DSC != DeclSpecContext::DSC_trailing;
+ // Determine whether this declaration is permitted to have an enum-base.
+ AllowDefiningTypeSpec AllowEnumSpecifier =
+ isDefiningTypeSpecifierContext(DSC);
+ bool CanBeOpaqueEnumDeclaration =
+ DS.isEmpty() && isOpaqueEnumDeclarationContext(DSC);
+ bool CanHaveEnumBase = (getLangOpts().CPlusPlus11 || getLangOpts().ObjC ||
+ getLangOpts().MicrosoftExt) &&
+ (AllowEnumSpecifier == AllowDefiningTypeSpec::Yes ||
+ CanBeOpaqueEnumDeclaration);
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLangOpts().CPlusPlus) {
- // "enum foo : bar;" is not a potential typo for "enum foo::bar;"
- // if a fixed underlying type is allowed.
- ColonProtectionRAIIObject X(*this, AllowDeclaration);
+ // "enum foo : bar;" is not a potential typo for "enum foo::bar;".
+ ColonProtectionRAIIObject X(*this);
CXXScopeSpec Spec;
- if (ParseOptionalCXXScopeSpecifier(Spec, nullptr,
+ if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true))
return;
@@ -4437,9 +4338,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SS = Spec;
}
- // Must have either 'enum name' or 'enum {...}'.
+ // Must have either 'enum name' or 'enum {...}' or (rarely) 'enum : T { ... }'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
- !(AllowDeclaration && Tok.is(tok::colon))) {
+ Tok.isNot(tok::colon)) {
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
// Skip the rest of this declarator, up until the comma or semicolon.
@@ -4469,78 +4370,69 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
diagsFromTag.done();
TypeResult BaseType;
+ SourceRange BaseRange;
- // Parse the fixed underlying type.
- bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
- if (AllowDeclaration && Tok.is(tok::colon)) {
- bool PossibleBitfield = false;
- if (CanBeBitfield) {
- // If we're in class scope, this can either be an enum declaration with
- // an underlying type, or a declaration of a bitfield member. We try to
- // use a simple disambiguation scheme first to catch the common cases
- // (integer literal, sizeof); if it's still ambiguous, we then consider
- // anything that's a simple-type-specifier followed by '(' as an
- // expression. This suffices because function types are not valid
- // underlying types anyway.
- EnterExpressionEvaluationContext Unevaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
- // If the next token starts an expression, we know we're parsing a
- // bit-field. This is the common case.
- if (TPR == TPResult::True)
- PossibleBitfield = true;
- // If the next token starts a type-specifier-seq, it may be either a
- // a fixed underlying type or the start of a function-style cast in C++;
- // lookahead one more token to see if it's obvious that we have a
- // fixed underlying type.
- else if (TPR == TPResult::False &&
- GetLookAheadToken(2).getKind() == tok::semi) {
- // Consume the ':'.
- ConsumeToken();
- } else {
- // We have the start of a type-specifier-seq, so we have to perform
- // tentative parsing to determine whether we have an expression or a
- // type.
- TentativeParsingAction TPA(*this);
-
- // Consume the ':'.
- ConsumeToken();
+ bool CanBeBitfield = (getCurScope()->getFlags() & Scope::ClassScope) &&
+ ScopedEnumKWLoc.isInvalid() && Name;
- // If we see a type specifier followed by an open-brace, we have an
- // ambiguity between an underlying type and a C++11 braced
- // function-style cast. Resolve this by always treating it as an
- // underlying type.
- // FIXME: The standard is not entirely clear on how to disambiguate in
- // this case.
- if ((getLangOpts().CPlusPlus &&
- isCXXDeclarationSpecifier(TPResult::True) != TPResult::True) ||
- (!getLangOpts().CPlusPlus && !isDeclarationSpecifier(true))) {
- // We'll parse this as a bitfield later.
- PossibleBitfield = true;
- TPA.Revert();
- } else {
- // We have a type-specifier-seq.
- TPA.Commit();
- }
- }
- } else {
- // Consume the ':'.
- ConsumeToken();
- }
+ // Parse the fixed underlying type.
+ if (Tok.is(tok::colon)) {
+ // This might be an enum-base or part of some unrelated enclosing context.
+ //
+ // 'enum E : base' is permitted in two circumstances:
+ //
+ // 1) As a defining-type-specifier, when followed by '{'.
+ // 2) As the sole constituent of a complete declaration -- when DS is empty
+ // and the next token is ';'.
+ //
+ // The restriction to defining-type-specifiers is important to allow parsing
+ //   a ? new enum E : int{}
+ //   _Generic(a, enum E : int{})
+ // properly.
+ //
+ // One additional consideration applies:
+ //
+ // C++ [dcl.enum]p1:
+ // A ':' following "enum nested-name-specifier[opt] identifier" within
+ // the decl-specifier-seq of a member-declaration is parsed as part of
+ // an enum-base.
+ //
+ // Other language modes supporting enumerations with fixed underlying types
+ // do not have clear rules on this, so we disambiguate to determine whether
+ // the tokens form a bit-field width or an enum-base.
+
+ if (CanBeBitfield && !isEnumBase(CanBeOpaqueEnumDeclaration)) {
+ // Outside C++11, do not interpret the tokens as an enum-base if they do
+ // not make sense as one. In C++11, it's an error if this happens.
+ if (getLangOpts().CPlusPlus11)
+ Diag(Tok.getLocation(), diag::err_anonymous_enum_bitfield);
+ } else if (CanHaveEnumBase || !ColonIsSacred) {
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Parse a type-specifier-seq as a type. We can't just ParseTypeName here,
+ // because under -fms-extensions,
+ // enum E : int *p;
+ // declares 'enum E : int; E *p;' not 'enum E : int*; E p;'.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS, AS, DeclSpecContext::DSC_type_specifier);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ BaseType = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
- if (!PossibleBitfield) {
- SourceRange Range;
- BaseType = ParseTypeName(&Range);
+ BaseRange = SourceRange(ColonLoc, DeclaratorInfo.getSourceRange().getEnd());
if (!getLangOpts().ObjC) {
if (getLangOpts().CPlusPlus11)
- Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type)
+ << BaseRange;
else if (getLangOpts().CPlusPlus)
- Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_cxx11_enum_fixed_underlying_type)
+ << BaseRange;
else if (getLangOpts().MicrosoftExt)
- Diag(StartLoc, diag::ext_ms_c_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_ms_c_enum_fixed_underlying_type)
+ << BaseRange;
else
- Diag(StartLoc, diag::ext_clang_c_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_clang_c_enum_fixed_underlying_type)
+ << BaseRange;
}
}
}
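As a rough illustration of the disambiguation described in the comment block above (invented names, not part of the patch), these are the source forms the parser has to tell apart:

    enum E1 : int { A };   // enum-base inside a defining-type-specifier
    enum E2 : long;        // enum-base in a standalone opaque-enum-declaration
    struct S {
      enum E1 e : 3;       // here ':' introduces a bit-field width, not an enum-base
    };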
@@ -4556,14 +4448,19 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
Sema::TagUseKind TUK;
- if (!AllowDeclaration) {
+ if (AllowEnumSpecifier == AllowDefiningTypeSpec::No)
TUK = Sema::TUK_Reference;
- } else if (Tok.is(tok::l_brace)) {
+ else if (Tok.is(tok::l_brace)) {
if (DS.isFriendSpecified()) {
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
<< SourceRange(DS.getFriendSpecLoc());
ConsumeBrace();
SkipUntil(tok::r_brace, StopAtSemi);
+ // Discard any other definition-only pieces.
+ attrs.clear();
+ ScopedEnumKWLoc = SourceLocation();
+ IsScopedUsingClassTag = false;
+ BaseType = TypeResult();
TUK = Sema::TUK_Friend;
} else {
TUK = Sema::TUK_Definition;
@@ -4572,6 +4469,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
(Tok.is(tok::semi) ||
(Tok.isAtStartOfLine() &&
!isValidAfterTypeSpecifier(CanBeBitfield)))) {
+ // An opaque-enum-declaration is required to be standalone (no preceding or
+ // following tokens in the declaration). Sema enforces this separately by
+ // diagnosing anything else in the DeclSpec.
TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
@@ -4583,8 +4483,11 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TUK = Sema::TUK_Reference;
}
- // If this is an elaborated type specifier, and we delayed
- // diagnostics before, just merge them into the current pool.
+ bool IsElaboratedTypeSpecifier =
+ TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend;
+
+ // If this is an elaborated type specifier nested in a larger declaration,
+ // and we delayed diagnostics before, just merge them into the current pool.
if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
diagsFromTag.redelay();
}
@@ -4611,9 +4514,6 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TemplateInfo.TemplateParams->size());
}
- if (TUK == Sema::TUK_Reference)
- ProhibitAttributes(attrs);
-
if (!Name && TUK != Sema::TUK_Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
@@ -4622,6 +4522,25 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
+ // An elaborated-type-specifier has a much more constrained grammar:
+ //
+ // 'enum' nested-name-specifier[opt] identifier
+ //
+ // If we parsed any other bits, reject them now.
+ //
+ // MSVC and (for now at least) Objective-C permit a full enum-specifier
+ // or opaque-enum-declaration anywhere.
+ if (IsElaboratedTypeSpecifier && !getLangOpts().MicrosoftExt &&
+ !getLangOpts().ObjC) {
+ ProhibitAttributes(attrs);
+ if (BaseType.isUsable())
+ Diag(BaseRange.getBegin(), diag::ext_enum_base_in_type_specifier)
+ << (AllowEnumSpecifier == AllowDefiningTypeSpec::Yes) << BaseRange;
+ else if (ScopedEnumKWLoc.isValid())
+ Diag(ScopedEnumKWLoc, diag::ext_elaborated_enum_class)
+ << FixItHint::CreateRemoval(ScopedEnumKWLoc) << IsScopedUsingClassTag;
+ }
+
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
Sema::SkipBodyInfo SkipBody;
@@ -4696,7 +4615,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (Tok.is(tok::l_brace) && TUK == Sema::TUK_Definition) {
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
@@ -4883,6 +4802,8 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
+ case tok::kw___bf16:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
@@ -4962,7 +4883,9 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_half:
+ case tok::kw___bf16:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Accum:
@@ -5128,7 +5051,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_half:
+ case tok::kw___bf16:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Accum:
@@ -5200,14 +5125,30 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// placeholder-type-specifier
case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->hasInvalidName())
+ return true;
+ // FIXME: What about type templates that have only been annotated as
+ // annot_template_id, not as annot_typename?
return isTypeConstraintAnnotation() &&
- (NextToken().is(tok::kw_auto) || NextToken().is(tok::kw_decltype));
+ (NextToken().is(tok::kw_auto) || NextToken().is(tok::kw_decltype));
}
- case tok::annot_cxxscope:
+
+ case tok::annot_cxxscope: {
+ TemplateIdAnnotation *TemplateId =
+ NextToken().is(tok::annot_template_id)
+ ? takeTemplateIdAnnotation(NextToken())
+ : nullptr;
+ if (TemplateId && TemplateId->hasInvalidName())
+ return true;
+ // FIXME: What about type templates that have only been annotated as
+ // annot_template_id, not as annot_typename?
if (NextToken().is(tok::identifier) && TryAnnotateTypeConstraint())
return true;
return isTypeConstraintAnnotation() &&
GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype);
+ }
+
case tok::kw___declspec:
case tok::kw___cdecl:
case tok::kw___stdcall:
@@ -5253,7 +5194,8 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true)) {
TPA.Revert();
return false;
@@ -5633,7 +5575,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.getContext() == DeclaratorContext::FileContext ||
D.getContext() == DeclaratorContext::MemberContext;
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext);
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, EnteringContext);
if (SS.isNotEmpty()) {
if (Tok.isNot(tok::star)) {
@@ -5648,8 +5591,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
return;
}
- SourceLocation Loc = ConsumeToken();
- D.SetRangeEnd(Loc);
+ SourceLocation StarLoc = ConsumeToken();
+ D.SetRangeEnd(StarLoc);
DeclSpec DS(AttrFactory);
ParseTypeQualifierListOpt(DS);
D.ExtendWithDeclSpec(DS);
@@ -5660,7 +5603,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
D.AddTypeInfo(DeclaratorChunk::getMemberPointer(
- SS, DS.getTypeQualifiers(), DS.getEndLoc()),
+ SS, DS.getTypeQualifiers(), StarLoc, DS.getEndLoc()),
std::move(DS.getAttributes()),
/* Don't replace range end. */ SourceLocation());
return;
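A minimal sketch of the declarator shape this branch handles (made-up names); the change above only threads the '*' location (StarLoc) through to the member-pointer chunk:

    struct Cls { int field; };
    int Cls::*pm = &Cls::field;   // nested-name-specifier followed by '*'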
@@ -5856,8 +5799,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
bool EnteringContext =
D.getContext() == DeclaratorContext::FileContext ||
D.getContext() == DeclaratorContext::MemberContext;
- ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), nullptr,
- EnteringContext);
+ ParseOptionalCXXScopeSpecifier(
+ D.getCXXScopeSpec(), /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, EnteringContext);
}
if (D.getCXXScopeSpec().isValid()) {
@@ -5931,10 +5875,11 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
bool HadScope = D.getCXXScopeSpec().isValid();
if (ParseUnqualifiedId(D.getCXXScopeSpec(),
+ /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true,
/*AllowDestructorName=*/true, AllowConstructorName,
- AllowDeductionGuide, nullptr, nullptr,
- D.getName()) ||
+ AllowDeductionGuide, nullptr, D.getName()) ||
// Once we're past the identifier, if the scope was bad, mark the
// whole declarator bad.
D.getCXXScopeSpec().isInvalid()) {
@@ -6788,6 +6733,31 @@ void Parser::ParseParameterDeclarationClause(
Actions.containsUnexpandedParameterPacks(ParmDeclarator))
DiagnoseMisplacedEllipsisInDeclarator(ConsumeToken(), ParmDeclarator);
+ // Now we are at the point where declarator parsing is finished.
+ //
+ // Try to catch keywords in place of the identifier in a declarator, and
+ // in particular the common case where:
+  //  1) identifier comes at the end of the declarator
+  //  2) if the identifier is dropped, the declarator is valid but anonymous
+  //     (no identifier)
+  //  3) declarator parsing succeeds, and then we have a trailing keyword,

+ // which is never valid in a param list (e.g. missing a ',')
+ // And we can't handle this in ParseDeclarator because in general keywords
+ // may be allowed to follow the declarator. (And in some cases there'd be
+ // better recovery like inserting punctuation). ParseDeclarator is just
+ // treating this as an anonymous parameter, and fortunately at this point
+ // we've already almost done that.
+ //
+ // We care about case 1) where the declarator type should be known, and
+ // the identifier should be null.
+ if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName()) {
+ if (Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
+ Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
+ // Consume the keyword.
+ ConsumeToken();
+ }
+ }
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
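A hypothetical, intentionally ill-formed input that reaches the recovery added above: the declarator for the second parameter parses as anonymous, and the trailing keyword is then diagnosed and consumed:

    void f(int x, int this);   // C++: 'this' is a keyword in parameter position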
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index 09e5c7996fcd..ddcbb5615fee 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -155,7 +155,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// Normal namespace definition, not a nested-namespace-definition.
} else if (InlineLoc.isValid()) {
Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
- } else if (getLangOpts().CPlusPlus2a) {
+ } else if (getLangOpts().CPlusPlus20) {
Diag(ExtraNSs[0].NamespaceLoc,
diag::warn_cxx14_compat_nested_namespace_definition);
if (FirstNestedInlineLoc.isValid())
@@ -290,7 +290,9 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/nullptr,
@@ -530,7 +532,9 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/nullptr,
@@ -597,7 +601,9 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
- if (ParseOptionalCXXScopeSpecifier(D.SS, nullptr, /*EnteringContext=*/false,
+ if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDtor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/&LastII,
@@ -632,12 +638,12 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
D.Name.setConstructorName(Type, IdLoc, IdLoc);
} else {
if (ParseUnqualifiedId(
- D.SS, /*EnteringContext=*/false,
+ D.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
/*AllowDestructorName=*/true,
- /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
- NextToken().is(tok::equal)),
- /*AllowDeductionGuide=*/false,
- nullptr, nullptr, D.Name))
+ /*AllowConstructorName=*/
+ !(Tok.is(tok::identifier) && NextToken().is(tok::equal)),
+ /*AllowDeductionGuide=*/false, nullptr, D.Name))
return true;
}
@@ -978,10 +984,10 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated, nullptr,
Sema::ExpressionEvaluationContextRecord::EK_Decltype);
- Result =
- Actions.CorrectDelayedTyposInExpr(ParseExpression(), [](Expr *E) {
- return E->hasPlaceholderType() ? ExprError() : E;
- });
+ Result = Actions.CorrectDelayedTyposInExpr(
+ ParseExpression(), /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/false,
+ [](Expr *E) { return E->hasPlaceholderType() ? ExprError() : E; });
if (Result.isInvalid()) {
DS.SetTypeSpecError();
if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
@@ -1115,7 +1121,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Parse optional nested-name-specifier
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false))
return true;
BaseLoc = Tok.getLocation();
@@ -1139,19 +1147,14 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- if (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template) {
+ if (TemplateId->mightBeType()) {
AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
EndLocation = Tok.getAnnotationEndLoc();
ConsumeAnnotationToken();
-
- if (Type)
- return Type;
- return true;
+ return Type;
}
// Fall through to produce an error below.
@@ -1168,7 +1171,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.is(tok::less)) {
// It looks the user intended to write a template-id here, but the
// template-name was wrong. Try to fix that.
- TemplateNameKind TNK = TNK_Type_template;
+ // FIXME: Invoke ParseOptionalCXXScopeSpecifier in a "'template' is neither
+ // required nor permitted" mode, and do this there.
+ TemplateNameKind TNK = TNK_Non_template;
TemplateTy Template;
if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
&SS, Template, TNK)) {
@@ -1176,14 +1181,6 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
<< Id;
}
- if (!Template) {
- TemplateArgList TemplateArgs;
- SourceLocation LAngleLoc, RAngleLoc;
- ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs,
- RAngleLoc);
- return true;
- }
-
// Form the template name
UnqualifiedId TemplateName;
TemplateName.setIdentifier(Id, IdLoc);
@@ -1192,7 +1189,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
TemplateName))
return true;
- if (TNK == TNK_Type_template || TNK == TNK_Dependent_template_name)
+ if (Tok.is(tok::annot_template_id) &&
+ takeTemplateIdAnnotation(Tok)->mightBeType())
AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
// If we didn't end up with a typename token, there's nothing more we
@@ -1203,7 +1201,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Retrieve the type from the annotation token, consume that token, and
// return.
EndLocation = Tok.getAnnotationEndLoc();
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
return Type;
}
@@ -1285,7 +1283,8 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::annot_pragma_ms_pointers_to_members:
return true;
case tok::colon:
- return CouldBeBitfield; // enum E { ... } : 2;
+ return CouldBeBitfield || // enum E { ... } : 2;
+ ColonIsSacred; // _Generic(..., enum E : 2);
// Microsoft compatibility
case tok::kw___cdecl: // struct foo {...} __cdecl x;
case tok::kw___fastcall: // struct foo {...} __fastcall x;
@@ -1547,7 +1546,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
CXXScopeSpec Spec;
bool HasValidSpec = true;
- if (ParseOptionalCXXScopeSpecifier(Spec, nullptr, EnteringContext)) {
+ if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext)) {
DS.SetTypeSpecError();
HasValidSpec = false;
}
@@ -1620,9 +1621,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
NameLoc = ConsumeAnnotationToken();
if (TemplateId->Kind == TNK_Undeclared_template) {
- // Try to resolve the template name to a type template.
- Actions.ActOnUndeclaredTypeTemplateName(getCurScope(), TemplateId->Template,
- TemplateId->Kind, NameLoc, Name);
+ // Try to resolve the template name to a type template. May update Kind.
+ Actions.ActOnUndeclaredTypeTemplateName(
+ getCurScope(), TemplateId->Template, TemplateId->Kind, NameLoc, Name);
if (TemplateId->Kind == TNK_Undeclared_template) {
RecoverFromUndeclaredTemplateName(
Name, NameLoc,
@@ -1631,10 +1632,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
- if (TemplateId && TemplateId->Kind != TNK_Type_template &&
- TemplateId->Kind != TNK_Dependent_template_name) {
+ if (TemplateId && !TemplateId->mightBeType()) {
// The template-name in the simple-template-id refers to
- // something other than a class template. Give an appropriate
+ // something other than a type template. Give an appropriate
// error message and skip to the ';'.
SourceRange Range(NameLoc);
if (SS.isNotEmpty())
@@ -1681,7 +1681,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
Sema::TagUseKind TUK;
- if (DSC == DeclSpecContext::DSC_trailing)
+ if (isDefiningTypeSpecifierContext(DSC) == AllowDefiningTypeSpec::No ||
+ (getLangOpts().OpenMP && OpenMPDirectiveParsing))
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1806,7 +1807,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// or explicit instantiation.
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ if (TemplateId->isInvalid()) {
+ // Can't build the declaration.
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
ProhibitAttributes(attrs);
@@ -1962,7 +1965,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Decl *D =
SkipBody.CheckSameAsPrevious ? SkipBody.New : TagOrTempResult.get();
// Parse the definition body.
- ParseStructUnionBody(StartLoc, TagType, D);
+ ParseStructUnionBody(StartLoc, TagType, cast<RecordDecl>(D));
if (SkipBody.CheckSameAsPrevious &&
!Actions.ActOnDuplicateDefinition(DS, TagOrTempResult.get(),
SkipBody)) {
@@ -2182,7 +2185,6 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
// declarations.
auto LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
- LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
// Stash the exception-specification tokens in the late-pased method.
LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
@@ -2501,7 +2503,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (isAccessDecl) {
// Collect the scope specifier token we annotated earlier.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
if (SS.isInvalid()) {
@@ -2512,8 +2515,9 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Try to parse an unqualified-id.
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS, false, true, true, false, nullptr,
- &TemplateKWLoc, Name)) {
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false, true, true,
+ false, &TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
return nullptr;
}
@@ -2658,7 +2662,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
auto &Zero = NextToken();
SmallString<8> Buffer;
- if (Zero.isNot(tok::numeric_constant) || Zero.getLength() != 1 ||
+ if (Zero.isNot(tok::numeric_constant) ||
PP.getSpelling(Zero, Buffer) != "0")
return false;
@@ -2709,6 +2713,11 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DefinitionKind = FDK_Defaulted;
else if (KW.is(tok::kw_delete))
DefinitionKind = FDK_Deleted;
+ else if (KW.is(tok::code_completion)) {
+ Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
+ cutOffParsing();
+ return nullptr;
+ }
}
}
DeclaratorInfo.setFunctionDefinitionKind(DefinitionKind);
@@ -2784,7 +2793,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
!DS.isFriendSpecified()) {
// It's a default member initializer.
if (BitfieldSize.get())
- Diag(Tok, getLangOpts().CPlusPlus2a
+ Diag(Tok, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_bitfield_member_init
: diag::ext_bitfield_member_init);
HasInClassInit = Tok.is(tok::equal) ? ICIS_CopyInit : ICIS_ListInit;
@@ -2989,7 +2998,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
<< 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return ExprError();
}
}
@@ -3336,6 +3345,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// Each iteration of this loop reads one member-declaration.
ParseCXXClassMemberDeclarationWithPragmas(
CurAS, AccessAttrs, static_cast<DeclSpec::TST>(TagType), TagDecl);
+ MaybeDestroyTemplateIds();
}
T.consumeClose();
} else {
@@ -3361,6 +3371,16 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// are complete and we can parse the delayed portions of method
// declarations and the lexed inline method definitions, along with any
// delayed attributes.
+
+ // Save the state of Sema.FPFeatures, and change the setting
+ // to the levels specified on the command line. Previous level
+ // will be restored when the RAII object is destroyed.
+ Sema::FPFeaturesStateRAII SaveFPFeaturesState(Actions);
+ FPOptionsOverride NewOverrides;
+ Actions.CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ Actions.FpPragmaStack.Act(Tok.getLocation(), Sema::PSK_Reset, StringRef(),
+ 0 /*unused*/);
+
SourceLocation SavedPrevTokLocation = PrevTokLocation;
ParseLexedPragmas(getCurrentClass());
ParseLexedAttributes(getCurrentClass());
@@ -3493,7 +3513,9 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false))
return true;
// : identifier
@@ -3502,7 +3524,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
  // : decltype(...)
DeclSpec DS(AttrFactory);
// : template_name<...>
- ParsedType TemplateTypeTy;
+ TypeResult TemplateTypeTy;
if (Tok.is(tok::identifier)) {
// Get the identifier. This may be a member name or a class name,
@@ -3519,15 +3541,11 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
TemplateIdAnnotation *TemplateId = Tok.is(tok::annot_template_id)
? takeTemplateIdAnnotation(Tok)
: nullptr;
- if (TemplateId && (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template)) {
+ if (TemplateId && TemplateId->mightBeType()) {
AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TemplateTypeTy = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
- if (!TemplateTypeTy)
- return true;
} else {
Diag(Tok, diag::err_expected_member_or_base_name);
return true;
@@ -3546,8 +3564,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
SourceLocation EllipsisLoc;
TryConsumeToken(tok::ellipsis, EllipsisLoc);
+ if (TemplateTypeTy.isInvalid())
+ return true;
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, DS, IdLoc,
+ TemplateTypeTy.get(), DS, IdLoc,
InitList.get(), EllipsisLoc);
} else if(Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3557,8 +3577,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&] {
+ if (TemplateTypeTy.isInvalid())
+ return QualType();
QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
+ getCurScope(), ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
T.getOpenLocation());
CalledSignatureHelp = true;
return PreferredType;
@@ -3579,12 +3601,17 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
SourceLocation EllipsisLoc;
TryConsumeToken(tok::ellipsis, EllipsisLoc);
+ if (TemplateTypeTy.isInvalid())
+ return true;
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, DS, IdLoc,
+ TemplateTypeTy.get(), DS, IdLoc,
T.getOpenLocation(), ArgExprs,
T.getCloseLocation(), EllipsisLoc);
}
+ if (TemplateTypeTy.isInvalid())
+ return true;
+
if (getLangOpts().CPlusPlus11)
return Diag(Tok, diag::err_expected_either) << tok::l_paren << tok::l_brace;
else
@@ -3910,8 +3937,8 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
- ClassStack.top()->LateParsedDeclarations.push_back(new LateParsedClass(this, Victim));
- Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
+ ClassStack.top()->LateParsedDeclarations.push_back(
+ new LateParsedClass(this, Victim));
}
/// Try to parse an 'identifier' which appears within an attribute-token.
@@ -4366,7 +4393,7 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
- // Skip most ms attributes except for a whitelist.
+ // Skip most ms attributes except for a specific list.
while (true) {
SkipUntil(tok::r_square, tok::identifier, StopAtSemi | StopBeforeMatch);
if (Tok.isNot(tok::identifier)) // ']', but also eof
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index ad9627a2425c..81e87582c6ad 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -625,13 +625,31 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
Actions.getExprRange(RHS.get()).getEnd()));
- LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
- OpToken.getKind(), LHS.get(), RHS.get());
-
+ ExprResult BinOp =
+ Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
+ OpToken.getKind(), LHS.get(), RHS.get());
+ if (BinOp.isInvalid())
+ BinOp = Actions.CreateRecoveryExpr(LHS.get()->getBeginLoc(),
+ RHS.get()->getEndLoc(),
+ {LHS.get(), RHS.get()});
+
+ LHS = BinOp;
} else {
- LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
- LHS.get(), TernaryMiddle.get(),
- RHS.get());
+ ExprResult CondOp = Actions.ActOnConditionalOp(
+ OpToken.getLocation(), ColonLoc, LHS.get(), TernaryMiddle.get(),
+ RHS.get());
+ if (CondOp.isInvalid()) {
+ std::vector<clang::Expr *> Args;
+ // TernaryMiddle can be null for the GNU conditional expr extension.
+ if (TernaryMiddle.get())
+ Args = {LHS.get(), TernaryMiddle.get(), RHS.get()};
+ else
+ Args = {LHS.get(), RHS.get()};
+ CondOp = Actions.CreateRecoveryExpr(LHS.get()->getBeginLoc(),
+ RHS.get()->getEndLoc(), Args);
+ }
+
+ LHS = CondOp;
}
// In this case, ActOnBinOp or ActOnConditionalOp performed the
// CorrectDelayedTyposInExpr check.
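A hedged example of the kind of ill-formed source this recovery targets (names invented): even though no '+' can be formed, the operands can be preserved in a recovery expression rather than dropping the subtree:

    struct S {} s;
    int n = 1 + s;   // no viable operator+; a RecoveryExpr keeps {1, s}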
@@ -902,6 +920,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
auto SavedType = PreferredType;
NotCastExpr = false;
+ // Are postfix-expression suffix operators permitted after this
+ // cast-expression? If not, and we find some, we'll parse them anyway and
+ // diagnose them.
+ bool AllowSuffix = true;
+
// This handles all of cast-expression, unary-expression, postfix-expression,
// and primary-expression. We handle them together like this for efficiency
// and to simplify handling of an expression starting with a '(' token: which
@@ -911,8 +934,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// If the parsed tokens consist of a primary-expression, the cases below
// break out of the switch; at the end we call ParsePostfixExpressionSuffix
// to handle the postfix expression suffixes. Cases that cannot be followed
- // by postfix exprs should return without invoking
- // ParsePostfixExpressionSuffix.
+ // by postfix exprs should set AllowSuffix to false.
switch (SavedKind) {
case tok::l_paren: {
// If this expression is limited to being a unary-expression, the paren can
@@ -935,8 +957,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
    Res = ParseParenExpression(ParenExprType, false/*stopIfCastExpr*/,
isTypeCast == IsTypeCast, CastTy, RParenLoc);
+ // FIXME: What should we do if a vector literal is followed by a
+ // postfix-expression suffix? Usually postfix operators are permitted on
+ // literals.
if (isVectorLiteral)
- return Res;
+ return Res;
switch (ParenExprType) {
case SimpleExpr: break; // Nothing else to do.
@@ -974,14 +999,31 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___objc_yes:
case tok::kw___objc_no:
- return ParseObjCBoolLiteral();
+ Res = ParseObjCBoolLiteral();
+ break;
case tok::kw_nullptr:
Diag(Tok, diag::warn_cxx98_compat_nullptr);
- return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+ Res = Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+ break;
+ case tok::annot_uneval_primary_expr:
case tok::annot_primary_expr:
Res = getExprAnnotation(Tok);
+ if (SavedKind == tok::annot_uneval_primary_expr) {
+ if (Expr *E = Res.get()) {
+ if (!E->isTypeDependent() && !E->containsErrors()) {
+ // TransformToPotentiallyEvaluated expects that it will still be in a
+ // (temporary) unevaluated context and then looks through that context
+ // to build it in the surrounding context. So we need to push an
+ // unevaluated context to balance things out.
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+ Res = Actions.TransformToPotentiallyEvaluated(Res.get());
+ }
+ }
+ }
ConsumeAnnotationToken();
if (!Res.isInvalid() && Tok.is(tok::less))
checkPotentialAngleBracket(Res);
@@ -1006,7 +1048,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
assert(Tok.isNot(tok::kw_decltype) && Tok.isNot(tok::kw___super));
return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast,
isVectorLiteral, NotPrimaryExpression);
-
+
case tok::identifier: { // primary-expression: identifier
// unqualified-id: identifier
// constant: enumeration-constant
@@ -1262,7 +1304,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseGenericSelectionExpression();
break;
case tok::kw___builtin_available:
- return ParseAvailabilityCheckExpr(Tok.getLocation());
+ Res = ParseAvailabilityCheckExpr(Tok.getLocation());
+ break;
case tok::kw___builtin_va_arg:
case tok::kw___builtin_offsetof:
case tok::kw___builtin_choose_expr:
@@ -1274,9 +1317,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_LINE:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
+ // This parses the complete suffix; we can return early.
return ParseBuiltinPrimaryExpression();
case tok::kw___null:
- return Actions.ActOnGNUNullExpr(ConsumeToken());
+ Res = Actions.ActOnGNUNullExpr(ConsumeToken());
+ break;
case tok::plusplus: // unary-expression: '++' unary-expression [C99]
case tok::minusminus: { // unary-expression: '--' unary-expression [C99]
@@ -1305,9 +1350,14 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
UnconsumeToken(SavedTok);
return ExprError();
}
- if (!Res.isInvalid())
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
Res = Actions.ActOnUnaryOp(getCurScope(), SavedTok.getLocation(),
- SavedKind, Res.get());
+ SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(SavedTok.getLocation(),
+ Arg->getEndLoc(), Arg);
+ }
return Res;
}
case tok::amp: { // unary-expression: '&' cast-expression
@@ -1317,8 +1367,13 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
SourceLocation SavedLoc = ConsumeToken();
PreferredType.enterUnary(Actions, Tok.getLocation(), tok::amp, SavedLoc);
Res = ParseCastExpression(AnyCastExpr, true);
- if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(Tok.getLocation(), Arg->getEndLoc(),
+ Arg);
+ }
return Res;
}
@@ -1334,8 +1389,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
SourceLocation SavedLoc = ConsumeToken();
PreferredType.enterUnary(Actions, Tok.getLocation(), SavedKind, SavedLoc);
Res = ParseCastExpression(AnyCastExpr);
- if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(SavedLoc, Arg->getEndLoc(), Arg);
+ }
return Res;
}
@@ -1374,7 +1433,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_omp_required_simd_align:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseUnaryExprOrTypeTraitExpression();
+ AllowSuffix = false;
+ Res = ParseUnaryExprOrTypeTraitExpression();
+ break;
case tok::ampamp: { // unary-expression: '&&' identifier
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
@@ -1390,12 +1451,14 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Tok.getLocation());
Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
ConsumeToken();
- return Res;
+ AllowSuffix = false;
+ break;
}
case tok::kw_const_cast:
case tok::kw_dynamic_cast:
case tok::kw_reinterpret_cast:
case tok::kw_static_cast:
+ case tok::kw_addrspace_cast:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
Res = ParseCXXCasts();
@@ -1418,10 +1481,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_this:
Res = ParseCXXThis();
break;
-
+ case tok::kw___builtin_unique_stable_name:
+ Res = ParseUniqueStableNameExpression();
+ break;
case tok::annot_typename:
if (isStartOfObjCClassMessageMissingOpenBracket()) {
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
// Fake up a Declarator to use with ActOnTypeName.
DeclSpec DS(AttrFactory);
@@ -1458,11 +1523,13 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
+ case tok::kw__ExtInt:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1529,7 +1596,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// type, translate it into a type and continue parsing as a
// cast expression.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
AnnotateTemplateIdTokenAsType(SS);
return ParseCastExpression(ParseKind, isAddressOfOperand, NotCastExpr,
@@ -1579,12 +1647,16 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (Tok.is(tok::kw_new)) {
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXNewExpression(true, CCLoc);
+ Res = ParseCXXNewExpression(true, CCLoc);
+ AllowSuffix = false;
+ break;
}
if (Tok.is(tok::kw_delete)) {
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXDeleteExpression(true, CCLoc);
+ Res = ParseCXXDeleteExpression(true, CCLoc);
+ AllowSuffix = false;
+ break;
}
// This is not a type name or scope specifier, it is an invalid expression.
@@ -1595,15 +1667,21 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_new: // [C++] new-expression
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXNewExpression(false, Tok.getLocation());
+ Res = ParseCXXNewExpression(false, Tok.getLocation());
+ AllowSuffix = false;
+ break;
case tok::kw_delete: // [C++] delete-expression
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXDeleteExpression(false, Tok.getLocation());
+ Res = ParseCXXDeleteExpression(false, Tok.getLocation());
+ AllowSuffix = false;
+ break;
case tok::kw_requires: // [C++2a] requires-expression
- return ParseRequiresExpression();
+ Res = ParseRequiresExpression();
+ AllowSuffix = false;
+ break;
case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
if (NotPrimaryExpression)
@@ -1619,32 +1697,36 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// which is an unevaluated operand, can throw an exception.
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult Result = ParseExpression();
+ Res = ParseExpression();
T.consumeClose();
- if (!Result.isInvalid())
- Result = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(),
- Result.get(), T.getCloseLocation());
- return Result;
+ if (!Res.isInvalid())
+ Res = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(), Res.get(),
+ T.getCloseLocation());
+ AllowSuffix = false;
+ break;
}
#define TYPE_TRAIT(N,Spelling,K) \
case tok::kw_##Spelling:
#include "clang/Basic/TokenKinds.def"
- return ParseTypeTrait();
+ Res = ParseTypeTrait();
+ break;
case tok::kw___array_rank:
case tok::kw___array_extent:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseArrayTypeTrait();
+ Res = ParseArrayTypeTrait();
+ break;
case tok::kw___is_lvalue_expr:
case tok::kw___is_rvalue_expr:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseExpressionTrait();
+ Res = ParseExpressionTrait();
+ break;
case tok::at: {
if (NotPrimaryExpression)
@@ -1701,6 +1783,41 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// parsed.
return Res;
+ if (!AllowSuffix) {
+ // FIXME: Don't parse a primary-expression suffix if we encountered a parse
+ // error already.
+ if (Res.isInvalid())
+ return Res;
+
+ switch (Tok.getKind()) {
+ case tok::l_square:
+ case tok::l_paren:
+ case tok::plusplus:
+ case tok::minusminus:
+ // "expected ';'" or similar is probably the right diagnostic here. Let
+ // the caller decide what to do.
+ if (Tok.isAtStartOfLine())
+ return Res;
+
+ LLVM_FALLTHROUGH;
+ case tok::period:
+ case tok::arrow:
+ break;
+
+ default:
+ return Res;
+ }
+
+ // This was a unary-expression for which a postfix-expression suffix is
+    // not permitted by the grammar (e.g., a sizeof expression or
+ // new-expression or similar). Diagnose but parse the suffix anyway.
+ Diag(Tok.getLocation(), diag::err_postfix_after_unary_requires_parens)
+ << Tok.getKind() << Res.get()->getSourceRange()
+ << FixItHint::CreateInsertion(Res.get()->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(PrevTokLocation),
+ ")");
+ }
+
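Hypothetical inputs for the new diagnostic (both intentionally ill-formed); the fix-it suggests wrapping the unary expression in parentheses:

    void probe() {
      int a = sizeof(int)[0];   // subscript after a sizeof expression
      int b = sizeof(int)++;    // postfix '++' after a sizeof expression
    }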
// These can be followed by postfix-expr pieces.
PreferredType = SavedType;
Res = ParsePostfixExpressionSuffix(Res);
@@ -1792,8 +1909,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
- ExprResult Idx, Length;
- SourceLocation ColonLoc;
+ ExprResult Idx, Length, Stride;
+ SourceLocation ColonLocFirst, ColonLocSecond;
PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -1807,10 +1924,22 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
if (Tok.is(tok::colon)) {
// Consume ':'
- ColonLoc = ConsumeToken();
- if (Tok.isNot(tok::r_square))
+ ColonLocFirst = ConsumeToken();
+ if (Tok.isNot(tok::r_square) &&
+ (getLangOpts().OpenMP < 50 ||
+ ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50))))
Length = ParseExpression();
}
+ if (getLangOpts().OpenMP >= 50 &&
+ (OMPClauseKind == llvm::omp::Clause::OMPC_to ||
+ OMPClauseKind == llvm::omp::Clause::OMPC_from) &&
+ Tok.is(tok::colon)) {
+ // Consume ':'
+ ColonLocSecond = ConsumeToken();
+ if (Tok.isNot(tok::r_square)) {
+ Stride = ParseExpression();
+ }
+ }
} else
Idx = ParseExpression();
@@ -1820,10 +1949,11 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Idx = Actions.CorrectDelayedTyposInExpr(Idx);
Length = Actions.CorrectDelayedTyposInExpr(Length);
if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
- Tok.is(tok::r_square)) {
- if (ColonLoc.isValid()) {
- LHS = Actions.ActOnOMPArraySectionExpr(LHS.get(), Loc, Idx.get(),
- ColonLoc, Length.get(), RLoc);
+ !Stride.isInvalid() && Tok.is(tok::r_square)) {
+ if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
+ LHS = Actions.ActOnOMPArraySectionExpr(
+ LHS.get(), Loc, Idx.get(), ColonLocFirst, ColonLocSecond,
+ Length.get(), Stride.get(), RLoc);
} else {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
Idx.get(), RLoc);
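A sketch of the OpenMP 5.0 syntax this enables (assumed usage, invented names): a second ':' in a to/from motion clause supplies a stride for the array section:

    void push(int *a, int count) {
      // 'count' elements starting at index 0, taking every second element
      #pragma omp target update to(a[0:count:2])
    }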
@@ -1940,12 +2070,18 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
PT.consumeClose();
LHS = ExprError();
} else {
- assert((ArgExprs.size() == 0 ||
- ArgExprs.size()-1 == CommaLocs.size())&&
- "Unexpected number of commas!");
- LHS = Actions.ActOnCallExpr(getCurScope(), LHS.get(), Loc,
- ArgExprs, Tok.getLocation(),
+ assert(
+ (ArgExprs.size() == 0 || ArgExprs.size() - 1 == CommaLocs.size()) &&
+ "Unexpected number of commas!");
+ Expr *Fn = LHS.get();
+ SourceLocation RParLoc = Tok.getLocation();
+ LHS = Actions.ActOnCallExpr(getCurScope(), Fn, Loc, ArgExprs, RParLoc,
ExecConfig);
+ if (LHS.isInvalid()) {
+ ArgExprs.insert(ArgExprs.begin(), Fn);
+ LHS =
+ Actions.CreateRecoveryExpr(Fn->getBeginLoc(), RParLoc, ArgExprs);
+ }
PT.consumeClose();
}
@@ -1977,15 +2113,22 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
return ParsePostfixExpressionSuffix(Base);
}
- LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), Base,
- OpLoc, OpKind, ObjectType,
+ LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), Base, OpLoc,
+ OpKind, ObjectType,
MayBePseudoDestructor);
- if (LHS.isInvalid())
+ if (LHS.isInvalid()) {
+        // Clang will try to perform expression-based completion as a
+        // fallback, which is confusing in the case of member references. So we
+ // stop here without any completions.
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ return ExprError();
+ }
break;
-
- ParseOptionalCXXScopeSpecifier(SS, ObjectType,
- /*EnteringContext=*/false,
- &MayBePseudoDestructor);
+ }
+ ParseOptionalCXXScopeSpecifier(
+ SS, ObjectType, LHS.get() && LHS.get()->containsErrors(),
+ /*EnteringContext=*/false, &MayBePseudoDestructor);
if (SS.isNotEmpty())
ObjectType = nullptr;
}
@@ -2045,14 +2188,13 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
IdentifierInfo *Id = Tok.getIdentifierInfo();
SourceLocation Loc = ConsumeToken();
Name.setIdentifier(Id, Loc);
- } else if (ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/true,
- /*AllowConstructorName=*/
- getLangOpts().MicrosoftExt &&
- SS.isNotEmpty(),
- /*AllowDeductionGuide=*/false,
- ObjectType, &TemplateKWLoc, Name)) {
+ } else if (ParseUnqualifiedId(
+ SS, ObjectType, LHS.get() && LHS.get()->containsErrors(),
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/
+ getLangOpts().MicrosoftExt && SS.isNotEmpty(),
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Name)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
@@ -2062,15 +2204,25 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
OpKind, SS, TemplateKWLoc, Name,
CurParsedObjCImpl ? CurParsedObjCImpl->Dcl
: nullptr);
- if (!LHS.isInvalid() && Tok.is(tok::less))
- checkPotentialAngleBracket(LHS);
+ if (!LHS.isInvalid()) {
+ if (Tok.is(tok::less))
+ checkPotentialAngleBracket(LHS);
+ } else if (OrigLHS && Name.isValid()) {
+ // Preserve the LHS if the RHS is an invalid member.
+ LHS = Actions.CreateRecoveryExpr(OrigLHS->getBeginLoc(),
+ Name.getEndLoc(), {OrigLHS});
+ }
break;
}
case tok::plusplus: // postfix-expression: postfix-expression '++'
case tok::minusminus: // postfix-expression: postfix-expression '--'
if (!LHS.isInvalid()) {
+ Expr *Arg = LHS.get();
LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(),
- Tok.getKind(), LHS.get());
+ Tok.getKind(), Arg);
+ if (LHS.isInvalid())
+ LHS = Actions.CreateRecoveryExpr(Arg->getBeginLoc(),
+ Tok.getLocation(), Arg);
}
ConsumeToken();
break;
@@ -2180,6 +2332,43 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
+ExprResult Parser::ParseUniqueStableNameExpression() {
+ assert(Tok.is(tok::kw___builtin_unique_stable_name) &&
+ "Not __bulitin_unique_stable_name");
+
+ SourceLocation OpLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // typeid expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "__builtin_unique_stable_name"))
+ return ExprError();
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
+ T.getCloseLocation(), Ty.get());
+ }
+
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult Result = ParseExpression();
+
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Result;
+ }
+
+ T.consumeClose();
+ return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
+ T.getCloseLocation(), Result.get());
+}
+
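A hedged sketch of the two forms the new function accepts, a parenthesized type-id or an expression parsed in an unevaluated context (names invented; the builtin's exact semantics and result type are defined elsewhere, this only illustrates the two parse paths):

    template <typename T> auto stable_name() {
      return __builtin_unique_stable_name(T);      // type-id form
    }
    void use() {
      auto l = [] {};
      auto n = __builtin_unique_stable_name(l);    // expression form
    }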
/// Parse a sizeof or alignof expression.
///
/// \verbatim
@@ -2561,6 +2750,33 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
return ParsePostfixExpressionSuffix(Res.get());
}
+bool Parser::tryParseOpenMPArrayShapingCastPart() {
+ assert(Tok.is(tok::l_square) && "Expected open bracket");
+ bool ErrorFound = true;
+ TentativeParsingAction TPA(*this);
+ do {
+ if (Tok.isNot(tok::l_square))
+ break;
+ // Consume '['
+ ConsumeBracket();
+ // Skip inner expression.
+ while (!SkipUntil(tok::r_square, tok::annot_pragma_openmp_end,
+ StopAtSemi | StopBeforeMatch))
+ ;
+ if (Tok.isNot(tok::r_square))
+ break;
+ // Consume ']'
+ ConsumeBracket();
+ // Found ')' - done.
+ if (Tok.is(tok::r_paren)) {
+ ErrorFound = false;
+ break;
+ }
+ } while (Tok.isNot(tok::annot_pragma_openmp_end));
+ TPA.Revert();
+ return !ErrorFound;
+}
+
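A rough sketch (assumed OpenMP 5.0 usage, invented names) of the array-shaping form the helper above probes for, a parenthesized bracket list followed by a cast-expression; in practice an array section is typically taken over the shaped base:

    void push_rows(int *p, int rows, int cols) {
      // shape the flat pointer as a rows x cols array for the motion clause
      #pragma omp target update to( ([rows][cols])p )
    }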
/// ParseParenExpression - This parses the unit that starts with a '(' token,
/// based on what is allowed by ExprType. The actual thing parsed is returned
/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
@@ -2585,6 +2801,8 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// '(' '...' fold-operator cast-expression ')'
/// '(' cast-expression fold-operator '...'
/// fold-operator cast-expression ')'
+/// [OPENMP] Array shaping operation
+/// '(' '[' expression ']' { '[' expression ']' } cast-expression
/// \endverbatim
ExprResult
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
@@ -2691,7 +2909,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
PreferredType.enterTypeCast(Tok.getLocation(), Ty.get().get());
ExprResult SubExpr = ParseCastExpression(AnyCastExpr);
-
+
if (Ty.isInvalid() || SubExpr.isInvalid())
return ExprError();
@@ -2861,6 +3079,38 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
ArgExprs);
}
+ } else if (getLangOpts().OpenMP >= 50 && OpenMPDirectiveParsing &&
+ ExprType == CastExpr && Tok.is(tok::l_square) &&
+ tryParseOpenMPArrayShapingCastPart()) {
+ bool ErrorFound = false;
+ SmallVector<Expr *, 4> OMPDimensions;
+ SmallVector<SourceRange, 4> OMPBracketsRanges;
+ do {
+ BalancedDelimiterTracker TS(*this, tok::l_square);
+ TS.consumeOpen();
+ ExprResult NumElements =
+ Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (!NumElements.isUsable()) {
+ ErrorFound = true;
+ while (!SkipUntil(tok::r_square, tok::r_paren,
+ StopAtSemi | StopBeforeMatch))
+ ;
+ }
+ TS.consumeClose();
+ OMPDimensions.push_back(NumElements.get());
+ OMPBracketsRanges.push_back(TS.getRange());
+ } while (Tok.isNot(tok::r_paren));
+ // Match the ')'.
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ Result = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (ErrorFound) {
+ Result = ExprError();
+ } else if (!Result.isInvalid()) {
+ Result = Actions.ActOnOMPArrayShapingExpr(
+ Result.get(), OpenLoc, RParenLoc, OMPDimensions, OMPBracketsRanges);
+ }
+ return Result;
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
@@ -3129,6 +3379,16 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
if (Tok.is(tok::ellipsis))
Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
+ else if (Tok.is(tok::code_completion)) {
+ // There's nothing to suggest in here as we parsed a full expression.
+    // Instead, fail and propagate the error, since the caller might have
+    // something to suggest, e.g. signature help in a function call. Note that
+    // this is performed before pushing the \p Expr, so that signature help can
+    // report the current argument correctly.
+ SawError = true;
+ cutOffParsing();
+ break;
+ }
if (Expr.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
SawError = true;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index 17f81ec96c1f..aa35200c33b6 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -31,10 +31,11 @@ static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
// template name
case tok::unknown: return 0;
// casts
- case tok::kw_const_cast: return 1;
- case tok::kw_dynamic_cast: return 2;
- case tok::kw_reinterpret_cast: return 3;
- case tok::kw_static_cast: return 4;
+ case tok::kw_addrspace_cast: return 1;
+ case tok::kw_const_cast: return 2;
+ case tok::kw_dynamic_cast: return 3;
+ case tok::kw_reinterpret_cast: return 4;
+ case tok::kw_static_cast: return 5;
default:
llvm_unreachable("Unknown type for digraph error message.");
}
@@ -124,13 +125,17 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
/// the "." or "->" of a member access expression, this parameter provides the
/// type of the object whose members are being accessed.
///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+/// When true, diagnostics for missing 'template' keyword will be supressed.
+///
/// \param EnteringContext whether we will be entering into the context of
/// the nested-name-specifier after parsing it.
///
/// \param MayBePseudoDestructor When non-NULL, points to a flag that
/// indicates whether this nested-name-specifier may be part of a
/// pseudo-destructor name. In this case, the flag will be set false
-/// if we don't actually end up parsing a destructor name. Moreorover,
+/// if we don't actually end up parsing a destructor name. Moreover,
/// if we do end up determining that we are parsing a destructor name,
/// the last component of the nested-name-specifier is not parsed as
/// part of the scope specifier.
@@ -146,14 +151,10 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
///
///
/// \returns true if there was an error parsing a scope specifier
-bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
- ParsedType ObjectType,
- bool EnteringContext,
- bool *MayBePseudoDestructor,
- bool IsTypename,
- IdentifierInfo **LastII,
- bool OnlyNamespace,
- bool InUsingDeclaration) {
+bool Parser::ParseOptionalCXXScopeSpecifier(
+ CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors,
+ bool EnteringContext, bool *MayBePseudoDestructor, bool IsTypename,
+ IdentifierInfo **LastII, bool OnlyNamespace, bool InUsingDeclaration) {
assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
@@ -316,13 +317,11 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Commit to parsing the template-id.
TPA.Commit();
TemplateTy Template;
- if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
- EnteringContext, Template, /*AllowInjectedClassName*/ true)) {
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
- TemplateName, false))
- return true;
- } else
+ TemplateNameKind TNK = Actions.ActOnTemplateName(
+ getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
+ EnteringContext, Template, /*AllowInjectedClassName*/ true);
+ if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
+ TemplateName, false))
return true;
continue;
@@ -356,7 +355,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
+ if (TemplateId->isInvalid() ||
+ Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
@@ -418,8 +418,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
}
if (Next.is(tok::coloncolon)) {
- if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
- !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, IdInfo)) {
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
*MayBePseudoDestructor = true;
return false;
}
@@ -512,28 +511,29 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (MemberOfUnknownSpecialization && (ObjectType || SS.isSet()) &&
(IsTypename || isTemplateArgumentList(1) == TPResult::True)) {
- // We have something like t::getAs<T>, where getAs is a
- // member of an unknown specialization. However, this will only
- // parse correctly as a template, so suggest the keyword 'template'
- // before 'getAs' and treat this as a dependent template name.
- unsigned DiagID = diag::err_missing_dependent_template_keyword;
- if (getLangOpts().MicrosoftExt)
- DiagID = diag::warn_missing_dependent_template_keyword;
-
- Diag(Tok.getLocation(), DiagID)
- << II.getName()
- << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
-
- if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, Tok.getLocation(), TemplateName, ObjectType,
- EnteringContext, Template, /*AllowInjectedClassName*/ true)) {
- // Consume the identifier.
- ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
- TemplateName, false))
- return true;
+ // If we had errors before, ObjectType can be dependent even without any
+ // templates. Do not report missing template keyword in that case.
+ if (!ObjectHadErrors) {
+ // We have something like t::getAs<T>, where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ unsigned DiagID = diag::err_missing_dependent_template_keyword;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_missing_dependent_template_keyword;
+
+ Diag(Tok.getLocation(), DiagID)
+ << II.getName()
+ << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
}
- else
+
+ SourceLocation TemplateNameLoc = ConsumeToken();
+
+ TemplateNameKind TNK = Actions.ActOnTemplateName(
+ getCurScope(), SS, TemplateNameLoc, TemplateName, ObjectType,
+ EnteringContext, Template, /*AllowInjectedClassName*/ true);
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
return true;
continue;
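For reference, roughly the construct this diagnostic and fix-it target (assuming a dependent scope; 'getAs' is just an illustrative name):

    template <typename T> int g() {
      return T::template getAs<int>();   // well-formed
      // 'T::getAs<int>()' would instead draw err_missing_dependent_template_keyword
      // (a warning under Microsoft extensions) plus a fix-it inserting 'template '.
    }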
@@ -548,7 +548,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Even if we didn't see any pieces of a nested-name-specifier, we
// still check whether there is a tilde in this position, which
// indicates a potential pseudo-destructor.
- if (CheckForDestructor && Tok.is(tok::tilde))
+ if (CheckForDestructor && !HasScopeSpecifier && Tok.is(tok::tilde))
*MayBePseudoDestructor = true;
return false;
@@ -594,12 +594,12 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS,
default:
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS,
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false,
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
- /*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, &TemplateKWLoc, Name))
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Name))
return ExprError();
// This is only the direct operand of an & operator if it is not
@@ -667,7 +667,9 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
// '::' unqualified-id
//
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false);
Token Replacement;
ExprResult Result =
@@ -1256,17 +1258,16 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
};
// FIXME: Consider allowing this as an extension for GCC compatibility.
- const bool HasExplicitTemplateParams = Tok.is(tok::less);
- ParseScope TemplateParamScope(this, Scope::TemplateParamScope,
- /*EnteredScope=*/HasExplicitTemplateParams);
- if (HasExplicitTemplateParams) {
- Diag(Tok, getLangOpts().CPlusPlus2a
+ MultiParseScope TemplateParamScope(*this);
+ if (Tok.is(tok::less)) {
+ Diag(Tok, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_lambda_template_parameter_list
: diag::ext_lambda_template_parameter_list);
SmallVector<NamedDecl*, 4> TemplateParams;
SourceLocation LAngleLoc, RAngleLoc;
- if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ if (ParseTemplateParameters(TemplateParamScope,
+ CurTemplateDepthTracker.getDepth(),
TemplateParams, LAngleLoc, RAngleLoc)) {
Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
return ExprError();
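The template parameter list parsed here is the C++20 explicit lambda form; a minimal usage sketch:

    // Accepted as an extension before C++20, per ext_lambda_template_parameter_list.
    auto add = []<typename T>(T a, T b) { return a + b; };
    int five = add(2, 3);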
@@ -1303,7 +1304,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
ParseParameterDeclarationClause(D.getContext(), Attr, ParamInfo,
EllipsisLoc);
- // For a generic lambda, each 'auto' within the parameter declaration
+ // For a generic lambda, each 'auto' within the parameter declaration
// clause creates a template type parameter, so increment the depth.
// If we've parsed any explicit template parameters, then the depth will
// have already been incremented. So we make sure that at most a single
@@ -1511,12 +1512,15 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/// 'reinterpret_cast' '<' type-name '>' '(' expression ')'
/// 'const_cast' '<' type-name '>' '(' expression ')'
///
+/// C++ for OpenCL s2.3.1 adds:
+/// 'addrspace_cast' '<' type-name '>' '(' expression ')'
ExprResult Parser::ParseCXXCasts() {
tok::TokenKind Kind = Tok.getKind();
const char *CastName = nullptr; // For error messages
switch (Kind) {
default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_addrspace_cast: CastName = "addrspace_cast"; break;
case tok::kw_const_cast: CastName = "const_cast"; break;
case tok::kw_dynamic_cast: CastName = "dynamic_cast"; break;
case tok::kw_reinterpret_cast: CastName = "reinterpret_cast"; break;
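A hedged C++-for-OpenCL sketch of the new cast (the address-space qualifiers shown are OpenCL's, not part of this patch):

    // Compile as C++ for OpenCL, where addrspace_cast and the __global/__generic
    // qualifiers are available.
    __kernel void k(__global int *g) {
      __generic int *p = g;                                 // implicit conversion
      __global int *q = addrspace_cast<__global int *>(p);  // explicit cast back
    }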
@@ -1689,31 +1693,42 @@ ExprResult Parser::ParseCXXUuidof() {
/// Parse a C++ pseudo-destructor expression after the base,
/// . or -> operator, and nested-name-specifier have already been
-/// parsed.
+/// parsed. We're handling this fragment of the grammar:
+///
+/// postfix-expression: [C++2a expr.post]
+/// postfix-expression . template[opt] id-expression
+/// postfix-expression -> template[opt] id-expression
///
-/// postfix-expression: [C++ 5.2]
-/// postfix-expression . pseudo-destructor-name
-/// postfix-expression -> pseudo-destructor-name
+/// id-expression:
+/// qualified-id
+/// unqualified-id
///
-/// pseudo-destructor-name:
-/// ::[opt] nested-name-specifier[opt] type-name :: ~type-name
-/// ::[opt] nested-name-specifier template simple-template-id ::
-/// ~type-name
-/// ::[opt] nested-name-specifier[opt] ~type-name
+/// qualified-id:
+/// nested-name-specifier template[opt] unqualified-id
///
+/// nested-name-specifier:
+/// type-name ::
+/// decltype-specifier :: FIXME: not implemented, but probably only
+/// allowed in C++ grammar by accident
+/// nested-name-specifier identifier ::
+/// nested-name-specifier template[opt] simple-template-id ::
+/// [...]
+///
+/// unqualified-id:
+/// ~ type-name
+/// ~ decltype-specifier
+/// [...]
+///
+/// ... where all but the last component of the nested-name-specifier
+/// has already been parsed, and the base expression is not of a non-dependent
+/// class type.
ExprResult
Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType) {
- // We're parsing either a pseudo-destructor-name or a dependent
- // member access that has the same form as a
- // pseudo-destructor-name. We parse both in the same way and let
- // the action model sort them out.
- //
- // Note that the ::[opt] nested-name-specifier[opt] has already
- // been parsed, and if there was a simple-template-id, it has
- // been coalesced into a template-id annotation token.
+ // If the last component of the (optional) nested-name-specifier is
+ // template[opt] simple-template-id, it has already been annotated.
UnqualifiedId FirstTypeName;
SourceLocation CCLoc;
if (Tok.is(tok::identifier)) {
@@ -1722,14 +1737,16 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
CCLoc = ConsumeToken();
} else if (Tok.is(tok::annot_template_id)) {
- // FIXME: retrieve TemplateKWLoc from template-id annotation and
- // store it in the pseudo-dtor node (to be used when instantiating it).
- FirstTypeName.setTemplateId(
- (TemplateIdAnnotation *)Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ // FIXME: Carry on and build an AST representation for tooling.
+ if (TemplateId->isInvalid())
+ return ExprError();
+ FirstTypeName.setTemplateId(TemplateId);
ConsumeAnnotationToken();
assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
CCLoc = ConsumeToken();
} else {
+ assert(SS.isEmpty() && "missing last component of nested name specifier");
FirstTypeName.setIdentifier(nullptr, SourceLocation());
}
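For orientation, the kind of source the rewritten grammar comment covers (illustrative only):

    typedef int I;
    void destroy(I *p) {
      p->~I();      // pseudo-destructor call on a scalar type
      p->I::~I();   // qualified form: type-name :: ~ type-name
    }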
@@ -1737,7 +1754,7 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
SourceLocation TildeLoc = ConsumeToken();
- if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid() && SS.isEmpty()) {
+ if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid()) {
DeclSpec DS(AttrFactory);
ParseDecltypeSpecifier(DS);
if (DS.getTypeSpecType() == TST_error)
@@ -1759,11 +1776,17 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
// If there is a '<', the second type name is a template-id. Parse
// it as such.
+ //
+ // FIXME: This is not a context in which a '<' is assumed to start a template
+ // argument list. This affects examples such as
+ // void f(auto *p) { p->~X<int>(); }
+ // ... but there's no ambiguity, and nowhere to write 'template' in such an
+ // example, so we accept it anyway.
if (Tok.is(tok::less) &&
- ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
- Name, NameLoc,
- false, ObjectType, SecondTypeName,
- /*AssumeTemplateId=*/true))
+ ParseUnqualifiedIdTemplateId(
+ SS, ObjectType, Base && Base->containsErrors(), SourceLocation(),
+ Name, NameLoc, false, SecondTypeName,
+ /*AssumeTemplateId=*/true))
return ExprError();
return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
@@ -1857,6 +1880,7 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
&& "Expected '(' or '{'!");
if (Tok.is(tok::l_brace)) {
+ PreferredType.enterTypeCast(Tok.getLocation(), TypeRep.get());
ExprResult Init = ParseBraceInitializer();
if (Init.isInvalid())
return Init;
@@ -2126,12 +2150,8 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
// type-name
case tok::annot_typename: {
- if (getTypeAnnotation(Tok))
- DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
- getTypeAnnotation(Tok), Policy);
- else
- DS.SetTypeSpecError();
-
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
+ getTypeAnnotation(Tok), Policy);
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeAnnotationToken();
@@ -2139,6 +2159,19 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
return;
}
+ case tok::kw__ExtInt: {
+ ExprResult ER = ParseExtIntegerArgument();
+ if (ER.isInvalid())
+ DS.SetTypeSpecError();
+ else
+ DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+
+ // Do this here because we have already consumed the close paren.
+ DS.SetRangeEnd(PrevTokLocation);
+ DS.Finish(Actions, Policy);
+ return;
+ }
+
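The new _ExtInt type-specifier in source form (Clang's extended-integer extension; a brief sketch):

    _ExtInt(13) narrow = -1;          // 13-bit signed integer
    unsigned _ExtInt(128) wide = 0;   // 128-bit unsigned integer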
// builtin types
case tok::kw_short:
DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID, Policy);
@@ -2167,6 +2200,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw___int128:
DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw___bf16:
+ DS.SetTypeSpecType(DeclSpec::TST_BFloat16, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_half:
DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID, Policy);
break;
@@ -2249,6 +2285,12 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// \param SS the nested-name-specifier that precedes this template-id, if
/// we're actually parsing a qualified-id.
///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+///
/// \param Name for constructor and destructor names, this is the actual
/// identifier that may be a template-name.
///
@@ -2258,9 +2300,6 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// \param EnteringContext whether we're entering the scope of the
/// nested-name-specifier.
///
-/// \param ObjectType if this unqualified-id occurs within a member access
-/// expression, the type of the base object whose member is being accessed.
-///
/// \param Id as input, describes the template-name or operator-function-id
/// that precedes the '<'. If template arguments were parsed successfully,
/// will be updated with the template-id.
@@ -2269,14 +2308,10 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// refers to a template without performing name lookup to verify.
///
/// \returns true if a parse error occurred, false otherwise.
-bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- IdentifierInfo *Name,
- SourceLocation NameLoc,
- bool EnteringContext,
- ParsedType ObjectType,
- UnqualifiedId &Id,
- bool AssumeTemplateId) {
+bool Parser::ParseUnqualifiedIdTemplateId(
+ CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors,
+ SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc,
+ bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId) {
assert(Tok.is(tok::less) && "Expected '<' to finish parsing a template-id");
TemplateTy Template;
@@ -2288,11 +2323,9 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (AssumeTemplateId) {
// We defer the injected-class-name checks until we've found whether
// this template-id is used to form a nested-name-specifier or not.
- TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, Id, ObjectType, EnteringContext,
- Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
+ TNK = Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Id,
+ ObjectType, EnteringContext, Template,
+ /*AllowInjectedClassName*/ true);
} else {
bool MemberOfUnknownSpecialization;
TNK = Actions.isTemplateName(getCurScope(), SS,
@@ -2308,28 +2341,32 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (TNK == TNK_Non_template && MemberOfUnknownSpecialization &&
ObjectType && isTemplateArgumentList(0) == TPResult::True) {
- // We have something like t->getAs<T>(), where getAs is a
- // member of an unknown specialization. However, this will only
- // parse correctly as a template, so suggest the keyword 'template'
- // before 'getAs' and treat this as a dependent template name.
- std::string Name;
- if (Id.getKind() == UnqualifiedIdKind::IK_Identifier)
- Name = Id.Identifier->getName();
- else {
- Name = "operator ";
- if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId)
- Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
- else
- Name += Id.Identifier->getName();
+ // If we had errors before, ObjectType can be dependent even without any
+ // templates. Do not report missing template keyword in that case.
+ if (!ObjectHadErrors) {
+ // We have something like t->getAs<T>(), where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ std::string Name;
+ if (Id.getKind() == UnqualifiedIdKind::IK_Identifier)
+ Name = std::string(Id.Identifier->getName());
+ else {
+ Name = "operator ";
+ if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId)
+ Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
+ else
+ Name += Id.Identifier->getName();
+ }
+ Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
+ << Name
+ << FixItHint::CreateInsertion(Id.StartLocation, "template ");
}
- Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
- << Name
- << FixItHint::CreateInsertion(Id.StartLocation, "template ");
- TNK = Actions.ActOnDependentTemplateName(
+ TNK = Actions.ActOnTemplateName(
getCurScope(), SS, TemplateKWLoc, Id, ObjectType, EnteringContext,
Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
+ } else if (TNK == TNK_Non_template) {
+ return false;
}
}
break;
@@ -2342,6 +2379,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
TemplateName, ObjectType,
EnteringContext, Template,
MemberOfUnknownSpecialization);
+ if (TNK == TNK_Non_template)
+ return false;
break;
}
@@ -2350,11 +2389,9 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
if (ObjectType) {
- TNK = Actions.ActOnDependentTemplateName(
+ TNK = Actions.ActOnTemplateName(
getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
EnteringContext, Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
} else {
TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
TemplateName, ObjectType,
@@ -2364,7 +2401,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (TNK == TNK_Non_template && !Id.DestructorName.get()) {
Diag(NameLoc, diag::err_destructor_template_id)
<< Name << SS.getRange();
- return true;
+ // Carry on to parse the template arguments before bailing out.
}
}
break;
@@ -2374,9 +2411,6 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
return false;
}
- if (TNK == TNK_Non_template)
- return false;
-
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
@@ -2384,6 +2418,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
RAngleLoc))
return true;
+ // If this is a non-template, we already issued a diagnostic.
+ if (TNK == TNK_Non_template)
+ return true;
+
if (Id.getKind() == UnqualifiedIdKind::IK_Identifier ||
Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId ||
Id.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) {
@@ -2401,7 +2439,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
TemplateIdAnnotation *TemplateId = TemplateIdAnnotation::Create(
TemplateKWLoc, Id.StartLocation, TemplateII, OpKind, Template, TNK,
- LAngleLoc, RAngleLoc, TemplateArgs, TemplateIds);
+ LAngleLoc, RAngleLoc, TemplateArgs, /*ArgsInvalid*/false, TemplateIds);
Id.setTemplateId(TemplateId);
return false;
@@ -2681,6 +2719,13 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
/// \param SS The nested-name-specifier that preceded this unqualified-id. If
/// non-empty, then we are parsing the unqualified-id of a qualified-id.
///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+/// When true, diagnostics for missing 'template' keyword will be suppressed.
+///
/// \param EnteringContext whether we are entering the scope of the
/// nested-name-specifier.
///
@@ -2690,17 +2735,14 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
///
/// \param AllowDeductionGuide whether we allow parsing a deduction guide name.
///
-/// \param ObjectType if this unqualified-id occurs within a member access
-/// expression, the type of the base object whose member is being accessed.
-///
/// \param Result on a successful parse, contains the parsed unqualified-id.
///
/// \returns true if parsing fails, false otherwise.
-bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
+bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
+ bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
- ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result) {
if (TemplateKWLoc)
@@ -2759,10 +2801,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
TemplateTy Template;
if (Tok.is(tok::less))
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
- EnteringContext, ObjectType, Result, TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
+ EnteringContext, Result, TemplateSpecified);
else if (TemplateSpecified &&
- Actions.ActOnDependentTemplateName(
+ Actions.ActOnTemplateName(
getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
EnteringContext, Template,
/*AllowInjectedClassName*/ true) == TNK_Non_template)
@@ -2776,6 +2819,13 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ // FIXME: Consider passing invalid template-ids on to callers; they may
+ // be able to recover better than we can.
+ if (TemplateId->isInvalid()) {
+ ConsumeAnnotationToken();
+ return true;
+ }
+
// If the template-name names the current class, then this is a constructor
if (AllowConstructorName && TemplateId->Name &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
@@ -2837,11 +2887,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
Result.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) &&
Tok.is(tok::less))
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), nullptr,
- SourceLocation(), EnteringContext, ObjectType, Result,
- TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), nullptr,
+ SourceLocation(), EnteringContext, Result, TemplateSpecified);
else if (TemplateSpecified &&
- Actions.ActOnDependentTemplateName(
+ Actions.ActOnTemplateName(
getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
EnteringContext, Template,
/*AllowInjectedClassName*/ true) == TNK_Non_template)
@@ -2860,6 +2910,22 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// Parse the '~'.
SourceLocation TildeLoc = ConsumeToken();
+ if (TemplateSpecified) {
+ // C++ [temp.names]p3:
+ // A name prefixed by the keyword template shall be a template-id [...]
+ //
+ // A template-id cannot begin with a '~' token. This would never work
+ // anyway: x.~A<int>() would specify that the destructor is a template,
+ // not that 'A' is a template.
+ //
+ // FIXME: Suggest replacing the attempted destructor name with a correct
+ // destructor name and recover. (This is not trivial if this would become
+ // a pseudo-destructor name).
+ Diag(*TemplateKWLoc, diag::err_unexpected_template_in_destructor_name)
+ << Tok.getLocation();
+ return true;
+ }
+
if (SS.isEmpty() && Tok.is(tok::kw_decltype)) {
DeclSpec DS(AttrFactory);
SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
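A minimal example of the construct the new diagnostic rejects (type name invented):

    struct A {};
    void f(A a) {
      a.template ~A();   // error: 'template' cannot introduce a destructor name
    }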
@@ -2879,7 +2945,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// If the user wrote ~T::T, correct it to T::~T.
DeclaratorScopeObj DeclScopeObj(*this, SS);
- if (!TemplateSpecified && NextToken().is(tok::coloncolon)) {
+ if (NextToken().is(tok::coloncolon)) {
// Don't let ParseOptionalCXXScopeSpecifier() "correct"
// `int A; struct { ~A::A(); };` to `int A; struct { ~A:A(); };`,
// it will confuse this recovery logic.
@@ -2889,7 +2955,8 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
AnnotateScopeToken(SS, /*NewAnnotation*/true);
SS.clear();
}
- if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, EnteringContext))
+ if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, ObjectHadErrors,
+ EnteringContext))
return true;
if (SS.isNotEmpty())
ObjectType = nullptr;
@@ -2916,8 +2983,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (Tok.is(tok::less)) {
Result.setDestructorName(TildeLoc, nullptr, ClassNameLoc);
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), ClassName,
- ClassNameLoc, EnteringContext, ObjectType, Result, TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), ClassName,
+ ClassNameLoc, EnteringContext, Result, TemplateSpecified);
}
// Note that this is a destructor name.
@@ -3052,10 +3120,14 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
auto RunSignatureHelp = [&]() {
ParsedType TypeRep =
Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
- assert(TypeRep && "invalid types should be handled before");
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ QualType PreferredType;
+ // ActOnTypeName might adjust DeclaratorInfo and return a null type even when
+ // the passed DeclaratorInfo is valid, e.g. when running SignatureHelp on
+ // `new decltype(invalid) (^)`.
+ if (TypeRep)
+ PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -3296,7 +3368,6 @@ ExprResult Parser::ParseRequiresExpression() {
ParsedAttributes FirstArgAttrs(getAttrFactory());
SourceLocation EllipsisLoc;
llvm::SmallVector<DeclaratorChunk::ParamInfo, 2> LocalParameters;
- DiagnosticErrorTrap Trap(Diags);
ParseParameterDeclarationClause(DeclaratorContext::RequiresExprContext,
FirstArgAttrs, LocalParameters,
EllipsisLoc);
@@ -3304,8 +3375,6 @@ ExprResult Parser::ParseRequiresExpression() {
Diag(EllipsisLoc, diag::err_requires_expr_parameter_list_ellipsis);
for (auto &ParamInfo : LocalParameters)
LocalParameterDecls.push_back(cast<ParmVarDecl>(ParamInfo.Param));
- if (Trap.hasErrorOccurred())
- SkipUntil(tok::r_paren, StopBeforeMatch);
}
Parens.consumeClose();
}
@@ -3499,6 +3568,8 @@ ExprResult Parser::ParseRequiresExpression() {
} else {
TemplateId = takeTemplateIdAnnotation(Tok);
ConsumeAnnotationToken();
+ if (TemplateId->isInvalid())
+ break;
}
if (auto *Req = Actions.ActOnTypeRequirement(TypenameKWLoc, SS,
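The type-requirements being handled here, in source form (a C++20 sketch; std::vector is only an example):

    #include <vector>

    template <typename T>
    concept UsableElement = requires {
      typename T::value_type;    // type-requirement naming a member type
      typename std::vector<T>;   // type-requirement naming a template-id
    };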
@@ -3577,18 +3648,24 @@ case tok::kw_ ## Spelling: return BTT_ ## Name;
}
static ArrayTypeTrait ArrayTypeTraitFromTokKind(tok::TokenKind kind) {
- switch(kind) {
- default: llvm_unreachable("Not a known binary type trait");
- case tok::kw___array_rank: return ATT_ArrayRank;
- case tok::kw___array_extent: return ATT_ArrayExtent;
+ switch (kind) {
+ default:
+ llvm_unreachable("Not a known array type trait");
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return ATT_##Name;
+#include "clang/Basic/TokenKinds.def"
}
}
static ExpressionTrait ExpressionTraitFromTokKind(tok::TokenKind kind) {
- switch(kind) {
- default: llvm_unreachable("Not a known unary expression trait.");
- case tok::kw___is_lvalue_expr: return ET_IsLValueExpr;
- case tok::kw___is_rvalue_expr: return ET_IsRValueExpr;
+ switch (kind) {
+ default:
+ llvm_unreachable("Not a known unary expression trait.");
+#define EXPRESSION_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return ET_##Name;
+#include "clang/Basic/TokenKinds.def"
}
}
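The traits the two helpers now enumerate via TokenKinds.def, as written in source (Clang builtins; a hedged sketch):

    static_assert(__array_rank(int[2][3]) == 2, "rank counts dimensions");
    static_assert(__array_extent(int[2][3], 1) == 3, "extent of dimension 1");

    int n = 0;
    bool lv = __is_lvalue_expr(n);      // expression trait
    bool rv = __is_rvalue_expr(n + 1);  // expression trait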
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
index 5ab055130dc2..9ac2b2e6f79b 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
@@ -10,11 +10,14 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -154,7 +157,9 @@ static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
/// initializer (because it is an expression). We need to consider this case
/// when parsing array designators.
///
-ExprResult Parser::ParseInitializerWithPotentialDesignator() {
+/// \p CodeCompleteCB is called with Designation parsed so far.
+ExprResult Parser::ParseInitializerWithPotentialDesignator(
+ llvm::function_ref<void(const Designation &)> CodeCompleteCB) {
// If this is the old-style GNU extension:
// designation ::= identifier ':'
@@ -193,6 +198,11 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// designator: '.' identifier
SourceLocation DotLoc = ConsumeToken();
+ if (Tok.is(tok::code_completion)) {
+ CodeCompleteCB(Desig);
+ cutOffParsing();
+ return ExprError();
+ }
if (Tok.isNot(tok::identifier)) {
Diag(Tok.getLocation(), diag::err_expected_field_designator);
return ExprError();
@@ -407,7 +417,6 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
return ExprError();
}
-
/// ParseBraceInitializer - Called when parsing an initializer that has a
/// leading open brace.
///
@@ -444,6 +453,10 @@ ExprResult Parser::ParseBraceInitializer() {
Actions, EnterExpressionEvaluationContext::InitList);
bool InitExprsOk = true;
+ auto CodeCompleteDesignation = [&](const Designation &D) {
+ Actions.CodeCompleteDesignator(PreferredType.get(T.getOpenLocation()),
+ InitExprs, D);
+ };
while (1) {
// Handle Microsoft __if_exists/if_not_exists if necessary.
@@ -463,7 +476,7 @@ ExprResult Parser::ParseBraceInitializer() {
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator();
+ SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
else
SubElt = ParseInitializer();
@@ -543,13 +556,17 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
return false;
}
+ auto CodeCompleteDesignation = [&](const Designation &D) {
+ Actions.CodeCompleteDesignator(PreferredType.get(Braces.getOpenLocation()),
+ InitExprs, D);
+ };
while (!isEofOrEom()) {
trailingComma = false;
// If we know that this cannot be a designation, just parse the nested
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator();
+ SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
else
SubElt = ParseInitializer();
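The designators handed to the new CodeCompleteCB, in plain source form (illustrative):

    struct Point { int x, y; };
    Point p = {.x = 1, .y = 2};   // after '.', completion can now offer x / y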
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index efcef6d3b123..eaea8666bc10 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -10,11 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
@@ -522,10 +523,9 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
SkipUntil(tok::greater, tok::at, StopBeforeMatch);
if (Tok.is(tok::greater))
ConsumeToken();
- } else if (ParseGreaterThanInTemplateList(rAngleLoc,
+ } else if (ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc,
/*ConsumeLastToken=*/true,
/*ObjCGenericList=*/true)) {
- Diag(lAngleLoc, diag::note_matching) << "'<'";
SkipUntil({tok::greater, tok::greaterequal, tok::at, tok::minus,
tok::minus, tok::plus, tok::colon, tok::l_paren, tok::l_brace,
tok::comma, tok::semi },
@@ -740,7 +740,8 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Map a nullability property attribute to a context-sensitive keyword
// attribute.
- if (OCDS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (OCDS.getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nullability)
addContextSensitiveTypeNullability(*this, FD.D, OCDS.getNullability(),
OCDS.getNullabilityLoc(),
addedToDeclSpec);
@@ -860,25 +861,25 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
SourceLocation AttrName = ConsumeToken(); // consume last attribute name
if (II->isStr("readonly"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
else if (II->isStr("assign"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
else if (II->isStr("unsafe_unretained"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_unsafe_unretained);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
else if (II->isStr("readwrite"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_readwrite);
else if (II->isStr("retain"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_retain);
else if (II->isStr("strong"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_strong);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
else if (II->isStr("copy"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_copy);
else if (II->isStr("nonatomic"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nonatomic);
else if (II->isStr("atomic"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_atomic);
else if (II->isStr("weak"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_weak);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
else if (II->isStr("getter") || II->isStr("setter")) {
bool IsSetter = II->getNameStart()[0] == 's';
@@ -910,7 +911,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
}
if (IsSetter) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_setter);
DS.setSetterName(SelIdent, SelLoc);
if (ExpectAndConsume(tok::colon,
@@ -919,44 +920,44 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
return;
}
} else {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_getter);
DS.setGetterName(SelIdent, SelLoc);
}
} else if (II->isStr("nonnull")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::NonNull,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::NonNull);
} else if (II->isStr("nullable")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Nullable,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Nullable);
} else if (II->isStr("null_unspecified")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Unspecified,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
} else if (II->isStr("null_resettable")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Unspecified,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
// Also set the null_resettable bit.
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_null_resettable);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_null_resettable);
} else if (II->isStr("class")) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_class);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_class);
} else if (II->isStr("direct")) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_direct);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_direct);
} else {
Diag(AttrName, diag::err_objc_expected_property_attr) << II;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1550,7 +1551,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
}
// Consume the '>'.
- if (ParseGreaterThanInTemplateList(EndLoc, consumeLastToken,
+ if (ParseGreaterThanInTemplateList(LAngleLoc, EndLoc, consumeLastToken,
/*ObjCGenericList=*/false))
return true;
@@ -1648,7 +1649,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
if (allSingleIdentifiers) {
// Parse the closing '>'.
SourceLocation rAngleLoc;
- (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ (void)ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc, consumeLastToken,
/*ObjCGenericList=*/true);
// Let Sema figure out what we parsed.
@@ -1754,7 +1755,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
// Parse the closing '>'.
SourceLocation rAngleLoc;
- (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ (void)ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc, consumeLastToken,
/*ObjCGenericList=*/true);
if (invalid) {
@@ -2978,7 +2979,7 @@ bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
InMessageExpression)
return false;
- ParsedType Type;
+ TypeResult Type;
if (Tok.is(tok::annot_typename))
Type = getTypeAnnotation(Tok);
@@ -2988,7 +2989,8 @@ bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
else
return false;
- if (!Type.get().isNull() && Type.get()->isObjCObjectOrInterfaceType()) {
+ // FIXME: Should not be querying properties of types from the parser.
+ if (Type.isUsable() && Type.get().get()->isObjCObjectOrInterfaceType()) {
const Token &AfterNext = GetLookAheadToken(2);
if (AfterNext.isOneOf(tok::colon, tok::r_square)) {
if (Tok.is(tok::identifier))
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index 1095919baa7d..5223755c8fdf 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -11,14 +11,18 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Frontend/OpenMP/OMPContext.h"
using namespace clang;
using namespace llvm::omp;
@@ -29,7 +33,7 @@ using namespace llvm::omp;
namespace {
enum OpenMPDirectiveKindEx {
- OMPD_cancellation = unsigned(OMPD_unknown) + 1,
+ OMPD_cancellation = llvm::omp::Directive_enumSize + 1,
OMPD_data,
OMPD_declare,
OMPD_end,
@@ -46,6 +50,8 @@ enum OpenMPDirectiveKindEx {
OMPD_target_teams_distribute_parallel,
OMPD_mapper,
OMPD_variant,
+ OMPD_begin,
+ OMPD_begin_declare,
};
// Helper to unify the enum class OpenMPDirectiveKind with its extension
@@ -99,6 +105,7 @@ static unsigned getOpenMPDirectiveKindEx(StringRef S) {
.Case("update", OMPD_update)
.Case("mapper", OMPD_mapper)
.Case("variant", OMPD_variant)
+ .Case("begin", OMPD_begin)
.Default(OMPD_unknown);
}
@@ -107,18 +114,21 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
// E.g.: OMPD_for OMPD_simd ===> OMPD_for_simd
// TODO: add other combined directives in topological order.
static const OpenMPDirectiveKindExWrapper F[][3] = {
+ {OMPD_begin, OMPD_declare, OMPD_begin_declare},
+ {OMPD_end, OMPD_declare, OMPD_end_declare},
{OMPD_cancellation, OMPD_point, OMPD_cancellation_point},
{OMPD_declare, OMPD_reduction, OMPD_declare_reduction},
{OMPD_declare, OMPD_mapper, OMPD_declare_mapper},
{OMPD_declare, OMPD_simd, OMPD_declare_simd},
{OMPD_declare, OMPD_target, OMPD_declare_target},
{OMPD_declare, OMPD_variant, OMPD_declare_variant},
+ {OMPD_begin_declare, OMPD_variant, OMPD_begin_declare_variant},
+ {OMPD_end_declare, OMPD_variant, OMPD_end_declare_variant},
{OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel},
{OMPD_distribute_parallel, OMPD_for, OMPD_distribute_parallel_for},
{OMPD_distribute_parallel_for, OMPD_simd,
OMPD_distribute_parallel_for_simd},
{OMPD_distribute, OMPD_simd, OMPD_distribute_simd},
- {OMPD_end, OMPD_declare, OMPD_end_declare},
{OMPD_end_declare, OMPD_target, OMPD_end_declare_target},
{OMPD_target, OMPD_data, OMPD_target_data},
{OMPD_target, OMPD_enter, OMPD_target_enter},
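The new 'begin'/'end declare variant' combination in source form (OpenMP sketch; function names are made up):

    int work() { return 0; }                 // base definition

    #pragma omp begin declare variant match(device = {kind(gpu)})
    int work() { return 1; }                 // variant used when the context matches
    #pragma omp end declare variant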
@@ -184,8 +194,9 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
DKind = F[I][2];
}
}
- return DKind < OMPD_unknown ? static_cast<OpenMPDirectiveKind>(DKind)
- : OMPD_unknown;
+ return unsigned(DKind) < llvm::omp::Directive_enumSize
+ ? static_cast<OpenMPDirectiveKind>(DKind)
+ : OMPD_unknown;
}
static DeclarationName parseOpenMPReductionId(Parser &P) {
@@ -637,16 +648,14 @@ namespace {
class FNContextRAII final {
Parser &P;
Sema::CXXThisScopeRAII *ThisScope;
- Parser::ParseScope *TempScope;
- Parser::ParseScope *FnScope;
- bool HasTemplateScope = false;
+ Parser::MultiParseScope Scopes;
bool HasFunScope = false;
FNContextRAII() = delete;
FNContextRAII(const FNContextRAII &) = delete;
FNContextRAII &operator=(const FNContextRAII &) = delete;
public:
- FNContextRAII(Parser &P, Parser::DeclGroupPtrTy Ptr) : P(P) {
+ FNContextRAII(Parser &P, Parser::DeclGroupPtrTy Ptr) : P(P), Scopes(P) {
Decl *D = *Ptr.get().begin();
NamedDecl *ND = dyn_cast<NamedDecl>(D);
RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
@@ -657,29 +666,20 @@ public:
ND && ND->isCXXInstanceMember());
// If the Decl is templatized, add template parameters to scope.
- HasTemplateScope = D->isTemplateDecl();
- TempScope =
- new Parser::ParseScope(&P, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.getCurScope(), D);
+ // FIXME: Track CurTemplateDepth?
+ P.ReenterTemplateScopes(Scopes, D);
// If the Decl is on a function, add function parameters to the scope.
- HasFunScope = D->isFunctionOrFunctionTemplate();
- FnScope = new Parser::ParseScope(
- &P, Scope::FnScope | Scope::DeclScope | Scope::CompoundStmtScope,
- HasFunScope);
- if (HasFunScope)
+ if (D->isFunctionOrFunctionTemplate()) {
+ HasFunScope = true;
+ Scopes.Enter(Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope);
Actions.ActOnReenterFunctionContext(Actions.getCurScope(), D);
+ }
}
~FNContextRAII() {
- if (HasFunScope) {
+ if (HasFunScope)
P.getActions().ActOnExitFunctionContext();
- FnScope->Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope)
- TempScope->Exit();
- delete FnScope;
- delete TempScope;
delete ThisScope;
}
};
@@ -746,18 +746,19 @@ static bool parseDeclareSimdClauses(
getOpenMPClauseKind(ClauseName), *Vars, Data))
IsError = true;
if (CKind == OMPC_aligned) {
- Alignments.append(Aligneds.size() - Alignments.size(), Data.TailExpr);
+ Alignments.append(Aligneds.size() - Alignments.size(),
+ Data.DepModOrTailExpr);
} else if (CKind == OMPC_linear) {
assert(0 <= Data.ExtraModifier &&
Data.ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
if (P.getActions().CheckOpenMPLinearModifier(
static_cast<OpenMPLinearClauseKind>(Data.ExtraModifier),
- Data.DepLinMapLastLoc))
+ Data.ExtraModifierLoc))
Data.ExtraModifier = OMPC_LINEAR_val;
LinModifiers.append(Linears.size() - LinModifiers.size(),
Data.ExtraModifier);
- Steps.append(Linears.size() - Steps.size(), Data.TailExpr);
+ Steps.append(Linears.size() - Steps.size(), Data.DepModOrTailExpr);
}
} else
// TODO: add parsing of other clauses.
@@ -794,13 +795,7 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
bool IsError =
parseDeclareSimdClauses(*this, BS, Simdlen, Uniforms, Aligneds,
Alignments, Linears, LinModifiers, Steps);
- // Need to check for extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_simd);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_simd);
// Skip the last annot_pragma_openmp_end.
SourceLocation EndLoc = ConsumeAnnotationToken();
if (IsError)
@@ -810,10 +805,268 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
+namespace {
+/// Constant used in the diagnostics to distinguish the levels in an OpenMP
+/// contexts: selector-set={selector(trait, ...), ...}, ....
+enum OMPContextLvl {
+ CONTEXT_SELECTOR_SET_LVL = 0,
+ CONTEXT_SELECTOR_LVL = 1,
+ CONTEXT_TRAIT_LVL = 2,
+};
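How the three levels nest in a declare variant context selector (hypothetical function names):

    void foo_llvm();
    // selector-set = { selector(score: trait) }, selector-set = { selector(trait) }
    #pragma omp declare variant(foo_llvm) \
        match(implementation = {vendor(score(100): llvm)}, device = {kind(cpu)})
    void foo();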
+
+static StringRef stringLiteralParser(Parser &P) {
+ ExprResult Res = P.ParseStringLiteralExpression(true);
+ return Res.isUsable() ? Res.getAs<StringLiteral>()->getString() : "";
+}
+
+static StringRef getNameFromIdOrString(Parser &P, Token &Tok,
+ OMPContextLvl Lvl) {
+ if (Tok.is(tok::identifier)) {
+ llvm::SmallString<16> Buffer;
+ StringRef Name = P.getPreprocessor().getSpelling(Tok, Buffer);
+ (void)P.ConsumeToken();
+ return Name;
+ }
+
+ if (tok::isStringLiteral(Tok.getKind()))
+ return stringLiteralParser(P);
+
+ P.Diag(Tok.getLocation(),
+ diag::warn_omp_declare_variant_string_literal_or_identifier)
+ << Lvl;
+ return "";
+}
+
+static bool checkForDuplicates(Parser &P, StringRef Name,
+ SourceLocation NameLoc,
+ llvm::StringMap<SourceLocation> &Seen,
+ OMPContextLvl Lvl) {
+ auto Res = Seen.try_emplace(Name, NameLoc);
+ if (Res.second)
+ return false;
+
+ // Each trait-set-selector-name, trait-selector-name and trait-name can
+ // only be specified once.
+ P.Diag(NameLoc, diag::warn_omp_declare_variant_ctx_mutiple_use)
+ << Lvl << Name;
+ P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
+ << Lvl << Name;
+ return true;
+}
+} // namespace
+
+void Parser::parseOMPTraitPropertyKind(
+ OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set,
+ llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen) {
+ TIProperty.Kind = TraitProperty::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+ StringRef Name =
+ getNameFromIdOrString(*this, Tok, CONTEXT_TRAIT_LVL);
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_TRAIT_LVL << listOpenMPContextTraitProperties(Set, Selector);
+ return;
+ }
+
+ TIProperty.Kind = getOpenMPContextTraitPropertyKind(Set, Name);
+ if (TIProperty.Kind != TraitProperty::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen, CONTEXT_TRAIT_LVL))
+ TIProperty.Kind = TraitProperty::invalid;
+ return;
+ }
+
+ // What follows are diagnostics and helpful notes.
+ // FIXME: We should move the diagnosis string generation into libFrontend.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_property)
+ << Name << getOpenMPContextTraitSelectorName(Selector)
+ << getOpenMPContextTraitSetName(Set);
+
+ TraitSet SetForName = getOpenMPContextTraitSetKind(Name);
+ if (SetForName != TraitSet::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_SET_LVL << CONTEXT_TRAIT_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << Name << "<selector-name>"
+ << "(<property-name>)";
+ return;
+ }
+ TraitSelector SelectorForName = getOpenMPContextTraitSelectorKind(Name);
+ if (SelectorForName != TraitSelector::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_LVL << CONTEXT_TRAIT_LVL;
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ SelectorForName, getOpenMPContextTraitSetForSelector(SelectorForName),
+ AllowsTraitScore, RequiresProperty);
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(SelectorForName))
+ << Name << (RequiresProperty ? "(<property-name>)" : "");
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_TRAIT_LVL << listOpenMPContextTraitProperties(Set, Selector);
+}
+
+static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
+ OMPTraitProperty &TIProperty,
+ OMPTraitSelector &TISelector,
+ llvm::StringMap<SourceLocation> &Seen) {
+ assert(TISelector.Kind ==
+ llvm::omp::TraitSelector::implementation_extension &&
+ "Only for extension properties, e.g., "
+ "`implementation={extension(PROPERTY)}`");
+ if (TIProperty.Kind == TraitProperty::invalid)
+ return false;
+
+ auto IsMatchExtension = [](OMPTraitProperty &TP) {
+ return (TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_all ||
+ TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_any ||
+ TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_none);
+ };
+
+ if (IsMatchExtension(TIProperty)) {
+ for (OMPTraitProperty &SeenProp : TISelector.Properties)
+ if (IsMatchExtension(SeenProp)) {
+ P.Diag(Loc, diag::err_omp_variant_ctx_second_match_extension);
+ StringRef SeenName =
+ llvm::omp::getOpenMPContextTraitPropertyName(SeenProp.Kind);
+ SourceLocation SeenLoc = Seen[SeenName];
+ P.Diag(SeenLoc, diag::note_omp_declare_variant_ctx_used_here)
+ << CONTEXT_TRAIT_LVL << SeenName;
+ return false;
+ }
+ return true;
+ }
+
+ llvm_unreachable("Unknown extension property!");
+}
+
+void Parser::parseOMPContextProperty(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen) {
+ assert(TISelector.Kind != TraitSelector::user_condition &&
+ "User conditions are special properties not handled here!");
+
+ SourceLocation PropertyLoc = Tok.getLocation();
+ OMPTraitProperty TIProperty;
+ parseOMPTraitPropertyKind(TIProperty, Set, TISelector.Kind, Seen);
+
+ if (TISelector.Kind == llvm::omp::TraitSelector::implementation_extension)
+ if (!checkExtensionProperty(*this, Tok.getLocation(), TIProperty,
+ TISelector, Seen))
+ TIProperty.Kind = TraitProperty::invalid;
+
+ // If we have an invalid property here we already issued a warning.
+ if (TIProperty.Kind == TraitProperty::invalid) {
+ if (PropertyLoc != Tok.getLocation())
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_TRAIT_LVL;
+ return;
+ }
+
+ if (isValidTraitPropertyForTraitSetAndSelector(TIProperty.Kind,
+ TISelector.Kind, Set)) {
+
+ // If we make it here, the property, selector, set, score, condition, ... are
+ // all valid (or have been corrected). Thus we can record the property.
+ TISelector.Properties.push_back(TIProperty);
+ return;
+ }
+
+ Diag(PropertyLoc, diag::warn_omp_ctx_incompatible_property_for_selector)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ Diag(PropertyLoc, diag::note_omp_ctx_compatible_set_and_selector_for_property)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(TIProperty.Kind))
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(TIProperty.Kind));
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_TRAIT_LVL;
+}
+
+void Parser::parseOMPTraitSelectorKind(
+ OMPTraitSelector &TISelector, llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen) {
+ TISelector.Kind = TraitSelector::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_LVL);
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_LVL << listOpenMPContextTraitSelectors(Set);
+ return;
+ }
+
+ TISelector.Kind = getOpenMPContextTraitSelectorKind(Name);
+ if (TISelector.Kind != TraitSelector::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen, CONTEXT_SELECTOR_LVL))
+ TISelector.Kind = TraitSelector::invalid;
+ return;
+ }
+
+ // What follows are diagnostics and helpful notes.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_selector)
+ << Name << getOpenMPContextTraitSetName(Set);
+
+ TraitSet SetForName = getOpenMPContextTraitSetKind(Name);
+ if (SetForName != TraitSet::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_SET_LVL << CONTEXT_SELECTOR_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << Name << "<selector-name>"
+ << "<property-name>";
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_TRAIT_LVL << CONTEXT_SELECTOR_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_LVL << listOpenMPContextTraitSelectors(Set);
+}
+
/// Parse optional 'score' '(' <expr> ')' ':'.
static ExprResult parseContextScore(Parser &P) {
ExprResult ScoreExpr;
- Sema::OMPCtxStringType Buffer;
+ llvm::SmallString<16> Buffer;
StringRef SelectorName =
P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
if (!SelectorName.equals("score"))
@@ -825,246 +1078,272 @@ static ExprResult parseContextScore(Parser &P) {
if (P.getCurToken().is(tok::colon))
(void)P.ConsumeAnyToken();
else
- P.Diag(P.getCurToken(), diag::warn_pragma_expected_colon)
- << "context selector score clause";
+ P.Diag(P.getCurToken(), diag::warn_omp_declare_variant_expected)
+ << "':'"
+ << "score expression";
return ScoreExpr;
}
-/// Parse context selector for 'implementation' selector set:
-/// 'vendor' '(' [ 'score' '(' <score _expr> ')' ':' ] <vendor> { ',' <vendor> }
-/// ')'
-static void
-parseImplementationSelector(Parser &P, SourceLocation Loc,
- llvm::StringMap<SourceLocation> &UsedCtx,
- SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- const Token &Tok = P.getCurToken();
- // Parse inner context selector set name, if any.
- if (!Tok.is(tok::identifier)) {
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "implementation";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
- return;
- }
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorName = P.getPreprocessor().getSpelling(Tok, Buffer);
- auto Res = UsedCtx.try_emplace(CtxSelectorName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-selector-name can only be specified once.
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_mutiple_use)
- << CtxSelectorName << "implementation";
- P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
- << CtxSelectorName;
- }
- OpenMPContextSelectorKind CSKind = getOpenMPContextSelector(CtxSelectorName);
- (void)P.ConsumeToken();
- switch (CSKind) {
- case OMP_CTX_vendor: {
- // Parse '('.
- BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
- (void)T.expectAndConsume(diag::err_expected_lparen_after,
- CtxSelectorName.data());
- ExprResult Score = parseContextScore(P);
- llvm::UniqueVector<Sema::OMPCtxStringType> Vendors;
- do {
- // Parse <vendor>.
- StringRef VendorName;
- if (Tok.is(tok::identifier)) {
- Buffer.clear();
- VendorName = P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
- (void)P.ConsumeToken();
- if (!VendorName.empty())
- Vendors.insert(VendorName);
- } else {
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_item_expected)
- << "vendor identifier"
- << "vendor"
- << "implementation";
+/// Parses an OpenMP context selector.
+///
+/// <trait-selector-name> ['('[<trait-score>] <trait-property> [, <t-p>]* ')']
+void Parser::parseOMPContextSelector(
+ OMPTraitSelector &TISelector, llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &SeenSelectors) {
+ unsigned short OuterPC = ParenCount;
+
+  // If anything went wrong, an error or warning has already been issued, so we
+  // skip the rest of the selector. However, commas are ambiguous, so we also
+  // track the nesting of parentheses here.
+ auto FinishSelector = [OuterPC, this]() -> void {
+ bool Done = false;
+ while (!Done) {
+ while (!SkipUntil({tok::r_brace, tok::r_paren, tok::comma,
+ tok::annot_pragma_openmp_end},
+ StopBeforeMatch))
+ ;
+ if (Tok.is(tok::r_paren) && OuterPC > ParenCount)
+ (void)ConsumeParen();
+ if (OuterPC <= ParenCount) {
+ Done = true;
+ break;
}
- if (!P.TryConsumeToken(tok::comma) && Tok.isNot(tok::r_paren)) {
- P.Diag(Tok, diag::err_expected_punc)
- << (VendorName.empty() ? "vendor name" : VendorName);
+ if (!Tok.is(tok::comma) && !Tok.is(tok::r_paren)) {
+ Done = true;
+ break;
}
- } while (Tok.is(tok::identifier));
- // Parse ')'.
- (void)T.consumeClose();
- if (!Vendors.empty())
- Data.emplace_back(OMP_CTX_SET_implementation, CSKind, Score, Vendors);
- break;
+ (void)ConsumeAnyToken();
+ }
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_SELECTOR_LVL;
+ };
+
+ SourceLocation SelectorLoc = Tok.getLocation();
+ parseOMPTraitSelectorKind(TISelector, Set, SeenSelectors);
+ if (TISelector.Kind == TraitSelector::invalid)
+ return FinishSelector();
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ if (!isValidTraitSelectorForTraitSet(TISelector.Kind, Set, AllowsTraitScore,
+ RequiresProperty)) {
+ Diag(SelectorLoc, diag::warn_omp_ctx_incompatible_selector_for_set)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ Diag(SelectorLoc, diag::note_omp_ctx_compatible_set_for_selector)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(TISelector.Kind))
+ << RequiresProperty;
+ return FinishSelector();
+ }
+
+ if (!RequiresProperty) {
+ TISelector.Properties.push_back(
+ {getOpenMPContextTraitPropertyForSelector(TISelector.Kind)});
+ return;
}
- case OMP_CTX_kind:
- case OMP_CTX_unknown:
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "implementation";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
+
+ if (!Tok.is(tok::l_paren)) {
+ Diag(SelectorLoc, diag::warn_omp_ctx_selector_without_properties)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ return FinishSelector();
+ }
+
+ if (TISelector.Kind == TraitSelector::user_condition) {
+ SourceLocation RLoc;
+ ExprResult Condition = ParseOpenMPParensExpr("user condition", RLoc);
+ if (!Condition.isUsable())
+ return FinishSelector();
+ TISelector.ScoreOrCondition = Condition.get();
+ TISelector.Properties.push_back({TraitProperty::user_condition_unknown});
return;
}
+
+ BalancedDelimiterTracker BDT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ // Parse '('.
+ (void)BDT.consumeOpen();
+
+ SourceLocation ScoreLoc = Tok.getLocation();
+ ExprResult Score = parseContextScore(*this);
+
+ if (!AllowsTraitScore && !Score.isUnset()) {
+ if (Score.isUsable()) {
+ Diag(ScoreLoc, diag::warn_omp_ctx_incompatible_score_for_property)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set) << Score.get();
+ } else {
+ Diag(ScoreLoc, diag::warn_omp_ctx_incompatible_score_for_property)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set) << "<invalid>";
+ }
+ Score = ExprResult();
+ }
+
+ if (Score.isUsable())
+ TISelector.ScoreOrCondition = Score.get();
+
+ llvm::StringMap<SourceLocation> SeenProperties;
+ do {
+ parseOMPContextProperty(TISelector, Set, SeenProperties);
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse ')'.
+ BDT.consumeClose();
}
-/// Parse context selector for 'device' selector set:
-/// 'kind' '(' <kind> { ',' <kind> } ')'
-static void
-parseDeviceSelector(Parser &P, SourceLocation Loc,
- llvm::StringMap<SourceLocation> &UsedCtx,
- SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- const Token &Tok = P.getCurToken();
- // Parse inner context selector set name, if any.
- if (!Tok.is(tok::identifier)) {
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "device";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
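+/// Parses a context trait-set kind, e.g. 'device' or 'implementation', and
+/// diagnoses unknown names as well as names that are really selectors or
+/// properties.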
+void Parser::parseOMPTraitSetKind(OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &Seen) {
+ TISet.Kind = TraitSet::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+  StringRef Name =
+      getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_SET_LVL);
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_SET_LVL << listOpenMPContextTraitSets();
return;
}
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorName = P.getPreprocessor().getSpelling(Tok, Buffer);
- auto Res = UsedCtx.try_emplace(CtxSelectorName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-selector-name can only be specified once.
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_mutiple_use)
- << CtxSelectorName << "device";
- P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
- << CtxSelectorName;
- }
- OpenMPContextSelectorKind CSKind = getOpenMPContextSelector(CtxSelectorName);
- (void)P.ConsumeToken();
- switch (CSKind) {
- case OMP_CTX_kind: {
- // Parse '('.
- BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
- (void)T.expectAndConsume(diag::err_expected_lparen_after,
- CtxSelectorName.data());
- llvm::UniqueVector<Sema::OMPCtxStringType> Kinds;
- do {
- // Parse <kind>.
- StringRef KindName;
- if (Tok.is(tok::identifier)) {
- Buffer.clear();
- KindName = P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
- SourceLocation SLoc = P.getCurToken().getLocation();
- (void)P.ConsumeToken();
- if (llvm::StringSwitch<bool>(KindName)
- .Case("host", false)
- .Case("nohost", false)
- .Case("cpu", false)
- .Case("gpu", false)
- .Case("fpga", false)
- .Default(true)) {
- P.Diag(SLoc, diag::err_omp_wrong_device_kind_trait) << KindName;
- } else {
- Kinds.insert(KindName);
- }
- } else {
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_item_expected)
- << "'host', 'nohost', 'cpu', 'gpu', or 'fpga'"
- << "kind"
- << "device";
+
+ TISet.Kind = getOpenMPContextTraitSetKind(Name);
+ if (TISet.Kind != TraitSet::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen,
+ CONTEXT_SELECTOR_SET_LVL))
+ TISet.Kind = TraitSet::invalid;
+ return;
+ }
+
+  // The name is not a valid trait set; emit a diagnostic and helpful notes.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_set) << Name;
+
+ TraitSelector SelectorForName = getOpenMPContextTraitSelectorKind(Name);
+ if (SelectorForName != TraitSelector::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_LVL << CONTEXT_SELECTOR_SET_LVL;
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ SelectorForName, getOpenMPContextTraitSetForSelector(SelectorForName),
+ AllowsTraitScore, RequiresProperty);
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(SelectorForName))
+ << Name << (RequiresProperty ? "(<property-name>)" : "");
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_TRAIT_LVL << CONTEXT_SELECTOR_SET_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_SET_LVL << listOpenMPContextTraitSets();
+}
+
+/// Parses an OpenMP context selector set.
+///
+/// <trait-set-selector-name> '=' '{' <trait-selector> [, <trait-selector>]* '}'
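+///
+/// For example, 'device = {kind(gpu)}' is one selector set (the property shown
+/// is illustrative).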
+void Parser::parseOMPContextSelectorSet(
+ OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &SeenSets) {
+ auto OuterBC = BraceCount;
+
+  // If anything went wrong, an error or warning has already been issued, so we
+  // skip the rest of the set. However, commas are ambiguous, so we also track
+  // the nesting of braces here.
+ auto FinishSelectorSet = [this, OuterBC]() -> void {
+ bool Done = false;
+ while (!Done) {
+ while (!SkipUntil({tok::comma, tok::r_brace, tok::r_paren,
+ tok::annot_pragma_openmp_end},
+ StopBeforeMatch))
+ ;
+ if (Tok.is(tok::r_brace) && OuterBC > BraceCount)
+ (void)ConsumeBrace();
+ if (OuterBC <= BraceCount) {
+ Done = true;
+ break;
}
- if (!P.TryConsumeToken(tok::comma) && Tok.isNot(tok::r_paren)) {
- P.Diag(Tok, diag::err_expected_punc)
- << (KindName.empty() ? "kind of device" : KindName);
+ if (!Tok.is(tok::comma) && !Tok.is(tok::r_brace)) {
+ Done = true;
+ break;
}
- } while (Tok.is(tok::identifier));
- // Parse ')'.
- (void)T.consumeClose();
- if (!Kinds.empty())
- Data.emplace_back(OMP_CTX_SET_device, CSKind, ExprResult(), Kinds);
- break;
+ (void)ConsumeAnyToken();
+ }
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_SELECTOR_SET_LVL;
+ };
+
+ parseOMPTraitSetKind(TISet, SeenSets);
+ if (TISet.Kind == TraitSet::invalid)
+ return FinishSelectorSet();
+
+ // Parse '='.
+ if (!TryConsumeToken(tok::equal))
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "="
+ << ("context set name \"" + getOpenMPContextTraitSetName(TISet.Kind) +
+ "\"")
+ .str();
+
+ // Parse '{'.
+ if (Tok.is(tok::l_brace)) {
+ (void)ConsumeBrace();
+ } else {
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "{"
+ << ("'=' that follows the context set name \"" +
+ getOpenMPContextTraitSetName(TISet.Kind) + "\"")
+ .str();
}
- case OMP_CTX_vendor:
- case OMP_CTX_unknown:
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "device";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
- return;
+
+ llvm::StringMap<SourceLocation> SeenSelectors;
+ do {
+ OMPTraitSelector TISelector;
+ parseOMPContextSelector(TISelector, TISet.Kind, SeenSelectors);
+ if (TISelector.Kind != TraitSelector::invalid &&
+ !TISelector.Properties.empty())
+ TISet.Selectors.push_back(TISelector);
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse '}'.
+ if (Tok.is(tok::r_brace)) {
+ (void)ConsumeBrace();
+ } else {
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "}"
+ << ("context selectors for the context set \"" +
+ getOpenMPContextTraitSetName(TISet.Kind) + "\"")
+ .str();
}
}
-/// Parses clauses for 'declare variant' directive.
-/// clause:
-/// <selector_set_name> '=' '{' <context_selectors> '}'
-/// [ ',' <selector_set_name> '=' '{' <context_selectors> '}' ]
-bool Parser::parseOpenMPContextSelectors(
- SourceLocation Loc, SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- llvm::StringMap<SourceLocation> UsedCtxSets;
+/// Parse OpenMP context selectors:
+///
+/// <trait-set-selector> [, <trait-set-selector>]*
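+///
+/// For example, the argument of a 'match' clause might read (illustratively)
+///   device = {kind(cpu)}, implementation = {vendor(llvm)}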
+bool Parser::parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo& TI) {
+ llvm::StringMap<SourceLocation> SeenSets;
do {
- // Parse inner context selector set name.
- if (!Tok.is(tok::identifier)) {
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_no_ctx_selector)
- << getOpenMPClauseName(OMPC_match);
- return true;
- }
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorSetName = PP.getSpelling(Tok, Buffer);
- auto Res = UsedCtxSets.try_emplace(CtxSelectorSetName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-set-selector-name can only be specified once.
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_set_mutiple_use)
- << CtxSelectorSetName;
- Diag(Res.first->getValue(),
- diag::note_omp_declare_variant_ctx_set_used_here)
- << CtxSelectorSetName;
- }
- // Parse '='.
- (void)ConsumeToken();
- if (Tok.isNot(tok::equal)) {
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_equal_expected)
- << CtxSelectorSetName;
- return true;
- }
- (void)ConsumeToken();
- // TBD: add parsing of known context selectors.
- // Unknown selector - just ignore it completely.
- {
- // Parse '{'.
- BalancedDelimiterTracker TBr(*this, tok::l_brace,
- tok::annot_pragma_openmp_end);
- if (TBr.expectAndConsume(diag::err_expected_lbrace_after, "="))
- return true;
- OpenMPContextSelectorSetKind CSSKind =
- getOpenMPContextSelectorSet(CtxSelectorSetName);
- llvm::StringMap<SourceLocation> UsedCtx;
- do {
- switch (CSSKind) {
- case OMP_CTX_SET_implementation:
- parseImplementationSelector(*this, Loc, UsedCtx, Data);
- break;
- case OMP_CTX_SET_device:
- parseDeviceSelector(*this, Loc, UsedCtx, Data);
- break;
- case OMP_CTX_SET_unknown:
- // Skip until either '}', ')', or end of directive.
- while (!SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, StopBeforeMatch))
- ;
- break;
- }
- const Token PrevTok = Tok;
- if (!TryConsumeToken(tok::comma) && Tok.isNot(tok::r_brace))
- Diag(Tok, diag::err_omp_expected_comma_brace)
- << (PrevTok.isAnnotation() ? "context selector trait"
- : PP.getSpelling(PrevTok));
- } while (Tok.is(tok::identifier));
- // Parse '}'.
- (void)TBr.consumeClose();
- }
- // Consume ','
- if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end))
- (void)ExpectAndConsume(tok::comma);
- } while (Tok.isAnyIdentifier());
+ OMPTraitSet TISet;
+ parseOMPContextSelectorSet(TISet, SeenSets);
+ if (TISet.Kind != TraitSet::invalid && !TISet.Selectors.empty())
+ TI.Sets.push_back(TISet);
+ } while (TryConsumeToken(tok::comma));
+
return false;
}
@@ -1102,10 +1381,30 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
(void)ConsumeAnnotationToken();
return;
}
+
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ return;
+
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
Actions.checkOpenMPDeclareVariantFunction(
- Ptr, AssociatedFunction.get(), SourceRange(Loc, Tok.getLocation()));
+ Ptr, AssociatedFunction.get(), TI,
+ SourceRange(Loc, Tok.getLocation()));
+ // Skip last tokens.
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if (DeclVarData && !TI.Sets.empty())
+ Actions.ActOnOpenMPDeclareVariantDirective(
+ DeclVarData->first, DeclVarData->second, TI,
+ SourceRange(Loc, Tok.getLocation()));
+
+ // Skip the last annot_pragma_openmp_end.
+ (void)ConsumeAnnotationToken();
+}
+
+bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
+ OMPTraitInfo &TI) {
// Parse 'match'.
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
@@ -1117,47 +1416,32 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
;
// Skip the last annot_pragma_openmp_end.
(void)ConsumeAnnotationToken();
- return;
+ return true;
}
(void)ConsumeToken();
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(OMPC_match))) {
+ getOpenMPClauseName(OMPC_match).data())) {
while (!SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch))
;
// Skip the last annot_pragma_openmp_end.
(void)ConsumeAnnotationToken();
- return;
+ return true;
}
// Parse inner context selectors.
- SmallVector<Sema::OMPCtxSelectorData, 4> Data;
- if (!parseOpenMPContextSelectors(Loc, Data)) {
- // Parse ')'.
- (void)T.consumeClose();
- // Need to check for extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_variant);
- }
- }
+ parseOMPContextSelectors(Loc, TI);
- // Skip last tokens.
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- if (DeclVarData.hasValue())
- Actions.ActOnOpenMPDeclareVariantDirective(
- DeclVarData.getValue().first, DeclVarData.getValue().second,
- SourceRange(Loc, Tok.getLocation()), Data);
- // Skip the last annot_pragma_openmp_end.
- (void)ConsumeAnnotationToken();
+ // Parse ')'
+ (void)T.consumeClose();
+ return false;
}
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' ')
+/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')'
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
@@ -1185,7 +1469,7 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
// Parse '('.
BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return llvm::None;
unsigned Type = getOpenMPSimpleClauseType(
@@ -1289,21 +1573,48 @@ Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
return Actions.BuildDeclaratorGroup(Decls);
}
-void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
- SourceLocation DTLoc) {
- if (DKind != OMPD_end_declare_target) {
- Diag(Tok, diag::err_expected_end_declare_target);
- Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
+void Parser::skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind) {
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ return;
+
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(DKind);
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+}
+
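+/// Parses the matching '#pragma omp end ...' directive of a begin/end pair.
+/// If \p FoundKind matches \p ExpectedKind, the token is consumed and any
+/// trailing tokens before the pragma end are skipped; otherwise the mismatch
+/// is diagnosed against the directive that started at \p BeginLoc.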
+void Parser::parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
+ OpenMPDirectiveKind ExpectedKind,
+ OpenMPDirectiveKind FoundKind,
+ SourceLocation BeginLoc,
+ SourceLocation FoundLoc,
+ bool SkipUntilOpenMPEnd) {
+ int DiagSelection = ExpectedKind == OMPD_end_declare_target ? 0 : 1;
+
+ if (FoundKind == ExpectedKind) {
+ ConsumeAnyToken();
+ skipUntilPragmaOpenMPEnd(ExpectedKind);
return;
}
- ConsumeAnyToken();
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_end_declare_target);
+
+ Diag(FoundLoc, diag::err_expected_end_declare_target_or_variant)
+ << DiagSelection;
+ Diag(BeginLoc, diag::note_matching)
+ << ("'#pragma omp " + getOpenMPDirectiveName(BeginKind) + "'").str();
+ if (SkipUntilOpenMPEnd)
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+}
+
+void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+ SourceLocation DKLoc) {
+ parseOMPEndDirective(OMPD_declare_target, OMPD_end_declare_target, DKind,
+ DKLoc, Tok.getLocation(),
+ /* SkipUntilOpenMPEnd */ false);
// Skip the last annot_pragma_openmp_end.
- ConsumeAnyToken();
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ ConsumeAnnotationToken();
}
/// Parsing of declarative OpenMP directives.
@@ -1381,13 +1692,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
DeclDirectiveListParserHelper Helper(this, DKind);
if (!ParseOpenMPSimpleVarList(DKind, Helper,
/*AllowScopeSpecifier=*/true)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
return Actions.ActOnOpenMPThreadprivateDirective(Loc,
@@ -1403,18 +1708,18 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1426,13 +1731,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeToken();
Actions.EndOpenMPClause();
}
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
}
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
@@ -1444,8 +1743,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_requires: {
SourceLocation StartLoc = ConsumeToken();
SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
if (Tok.is(tok::annot_pragma_openmp_end)) {
Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
@@ -1456,11 +1756,11 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_requires, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_requires, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1473,7 +1773,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Actions.EndOpenMPClause();
}
// Consume final annot_pragma_openmp_end
- if (Clauses.size() == 0) {
+ if (Clauses.empty()) {
Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
ConsumeAnnotationToken();
@@ -1485,14 +1785,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_reduction:
ConsumeToken();
if (DeclGroupPtrTy Res = ParseOpenMPDeclareReductionDirective(AS)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_reduction);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_reduction);
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
return Res;
@@ -1507,6 +1800,63 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
break;
}
+ case OMPD_begin_declare_variant: {
+ // The syntax is:
+ // { #pragma omp begin declare variant clause }
+ // <function-declaration-or-definition-sequence>
+ // { #pragma omp end declare variant }
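+  //
+  // For example (function name illustrative):
+  //   #pragma omp begin declare variant match(device = {kind(gpu)})
+  //   void foo() { /* GPU-specific definition */ }
+  //   #pragma omp end declare variant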
+ //
+ ConsumeToken();
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ break;
+
+ // Skip last tokens.
+ skipUntilPragmaOpenMPEnd(OMPD_begin_declare_variant);
+
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
+
+ VariantMatchInfo VMI;
+ ASTContext &ASTCtx = Actions.getASTContext();
+ TI.getAsVariantMatchInfo(ASTCtx, VMI);
+ OMPContext OMPCtx(ASTCtx.getLangOpts().OpenMPIsDevice,
+ ASTCtx.getTargetInfo().getTriple());
+
+ if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) {
+ Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI);
+ break;
+ }
+
+  // Elide all the code until the matching 'end declare variant' is found.
+ unsigned Nesting = 1;
+ SourceLocation DKLoc;
+ OpenMPDirectiveKind DK = OMPD_unknown;
+ do {
+ DKLoc = Tok.getLocation();
+ DK = parseOpenMPDirectiveKind(*this);
+ if (DK == OMPD_end_declare_variant)
+ --Nesting;
+ else if (DK == OMPD_begin_declare_variant)
+ ++Nesting;
+ if (!Nesting || isEofOrEom())
+ break;
+ ConsumeAnyToken();
+ } while (true);
+
+ parseOMPEndDirective(OMPD_begin_declare_variant, OMPD_end_declare_variant,
+ DK, Loc, DKLoc, /* SkipUntilOpenMPEnd */ true);
+ if (isEofOrEom())
+ return nullptr;
+ break;
+ }
+ case OMPD_end_declare_variant: {
+ if (Actions.isInOpenMPDeclareVariantScope())
+ Actions.ActOnOpenMPEndDeclareVariant();
+ else
+ Diag(Loc, diag::err_expected_begin_declare_variant);
+ ConsumeToken();
+ break;
+ }
case OMPD_declare_variant:
case OMPD_declare_simd: {
// The syntax is:
@@ -1563,6 +1913,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
if (!Actions.ActOnStartOpenMPDeclareTargetDirective(DTLoc))
return DeclGroupPtrTy();
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
llvm::SmallVector<Decl *, 4> Decls;
DKind = parseOpenMPDirectiveKind(*this);
while (DKind != OMPD_end_declare_target && Tok.isNot(tok::eof) &&
@@ -1608,6 +1959,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
@@ -1656,6 +2009,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
+ default:
+ break;
}
while (Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeAnyToken();
@@ -1709,8 +2064,9 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
SourceLocation Loc = ConsumeAnnotationToken(), EndLoc;
@@ -1720,7 +2076,6 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
DeclarationNameInfo DirName;
StmtResult Directive = StmtError();
bool HasAssociatedStatement = true;
- bool FlushHasClause = false;
switch (DKind) {
case OMPD_threadprivate: {
@@ -1734,13 +2089,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
DeclDirectiveListParserHelper Helper(this, DKind);
if (!ParseOpenMPSimpleVarList(DKind, Helper,
/*AllowScopeSpecifier=*/false)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
DeclGroupPtrTy Res = Actions.ActOnOpenMPThreadprivateDirective(
Loc, Helper.getIdentifiers());
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
@@ -1762,18 +2111,18 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1785,13 +2134,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ConsumeToken();
Actions.EndOpenMPClause();
}
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
}
DeclGroupPtrTy Res = Actions.ActOnOpenMPAllocateDirective(
Loc, Helper.getIdentifiers(), Clauses);
@@ -1804,14 +2147,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ConsumeToken();
if (DeclGroupPtrTy Res =
ParseOpenMPDeclareReductionDirective(/*AS=*/AS_none)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_reduction);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_reduction);
ConsumeAnyToken();
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
} else {
@@ -1831,13 +2167,8 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
break;
}
case OMPD_flush:
- if (PP.LookAhead(0).is(tok::l_paren)) {
- FlushHasClause = true;
- // Push copy of the current token back to stream to properly parse
- // pseudo-clause OMPFlushClause.
- PP.EnterToken(Tok, /*IsReinject*/ true);
- }
- LLVM_FALLTHROUGH;
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
@@ -1897,6 +2228,13 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams_distribute_simd: {
+ // Special processing for flush and depobj clauses.
+ Token ImplicitTok;
+ bool ImplicitClauseAllowed = false;
+ if (DKind == OMPD_flush || DKind == OMPD_depobj) {
+ ImplicitTok = Tok;
+ ImplicitClauseAllowed = true;
+ }
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
@@ -1926,18 +2264,37 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
- OpenMPClauseKind CKind =
- Tok.isAnnotation()
- ? OMPC_unknown
- : FlushHasClause ? OMPC_flush
- : getOpenMPClauseKind(PP.getSpelling(Tok));
+ bool HasImplicitClause = false;
+ if (ImplicitClauseAllowed && Tok.is(tok::l_paren)) {
+ HasImplicitClause = true;
+        // Push a copy of the current token back to the stream to properly
+        // parse the pseudo-clause OMPFlushClause or OMPDepobjClause.
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterToken(ImplicitTok, /*IsReinject*/ true);
+ ConsumeAnyToken();
+ }
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ if (HasImplicitClause) {
+ assert(CKind == OMPC_unknown && "Must be unknown implicit clause.");
+ if (DKind == OMPD_flush) {
+ CKind = OMPC_flush;
+ } else {
+ assert(DKind == OMPD_depobj &&
+ "Expected flush or depobj directives.");
+ CKind = OMPC_depobj;
+ }
+ }
+ // No more implicit clauses allowed.
+ ImplicitClauseAllowed = false;
Actions.StartOpenMPClause(CKind);
- FlushHasClause = false;
- OMPClause *Clause =
- ParseOpenMPClause(DKind, CKind, !FirstClauses[CKind].getInt());
- FirstClauses[CKind].setInt(true);
+ HasImplicitClause = false;
+ OMPClause *Clause = ParseOpenMPClause(
+ DKind, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause) {
- FirstClauses[CKind].setPointer(Clause);
+ FirstClauses[unsigned(CKind)].setPointer(Clause);
Clauses.push_back(Clause);
}
@@ -1954,7 +2311,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// OpenMP [2.13.8, ordered Construct, Syntax]
// If the depend clause is specified, the ordered construct is a stand-alone
// directive.
- if (DKind == OMPD_ordered && FirstClauses[OMPC_depend].getInt()) {
+ if (DKind == OMPD_ordered && FirstClauses[unsigned(OMPC_depend)].getInt()) {
if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
ParsedStmtContext()) {
Diag(Loc, diag::err_omp_immediate_directive)
@@ -1971,6 +2328,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// FIXME: We create a bogus CompoundStmt scope to hold the contents of
// the captured region. Code elsewhere assumes that any FunctionScopeInfo
// should have at least one compound statement scope within it.
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
AssociatedStmt = (Sema::CompoundScopeRAII(Actions), ParseStatement());
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
} else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
@@ -1994,12 +2352,15 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_requires:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_variant:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end);
break;
case OMPD_unknown:
+ default:
Diag(Tok, diag::err_omp_unknown_directive);
SkipUntil(tok::annot_pragma_openmp_end);
break;
@@ -2033,12 +2394,14 @@ bool Parser::ParseOpenMPSimpleVarList(
NoIdentIsFound = false;
if (AllowScopeSpecifier && getLangOpts().CPlusPlus &&
- ParseOptionalCXXScopeSpecifier(SS, nullptr, false)) {
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- } else if (ParseUnqualifiedId(SS, false, false, false, false, nullptr,
- nullptr, Name)) {
+ } else if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false, false,
+ false, false, nullptr, Name)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2070,6 +2433,50 @@ bool Parser::ParseOpenMPSimpleVarList(
return !IsCorrect;
}
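+/// Parses the 'uses_allocators' clause (OpenMP 5.0):
+///
+///   'uses_allocators' '(' allocator [ '(' allocator-traits-array ')' ]
+///       [ ',' allocator [ '(' allocator-traits-array ')' ] ]* ')'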
+OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
+ SourceLocation Loc = Tok.getLocation();
+ ConsumeAnyToken();
+
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+  if (T.expectAndConsume(diag::err_expected_lparen_after, "uses_allocators"))
+ return nullptr;
+ SmallVector<Sema::UsesAllocatorsData, 4> Data;
+ do {
+ ExprResult Allocator = ParseCXXIdExpression();
+ if (Allocator.isInvalid()) {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ break;
+ }
+ Sema::UsesAllocatorsData &D = Data.emplace_back();
+ D.Allocator = Allocator.get();
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ T.consumeOpen();
+ ExprResult AllocatorTraits = ParseCXXIdExpression();
+ T.consumeClose();
+ if (AllocatorTraits.isInvalid()) {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ break;
+ }
+ D.AllocatorTraits = AllocatorTraits.get();
+ D.LParenLoc = T.getOpenLocation();
+ D.RParenLoc = T.getCloseLocation();
+ }
+ if (Tok.isNot(tok::comma) && Tok.isNot(tok::r_paren))
+ Diag(Tok, diag::err_omp_expected_punc) << "uses_allocators" << 0;
+ // Parse ','
+ if (Tok.is(tok::comma))
+ ConsumeAnyToken();
+ } while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end));
+ T.consumeClose();
+ return Actions.ActOnOpenMPUsesAllocatorClause(Loc, T.getOpenLocation(),
+ T.getCloseLocation(), Data);
+}
+
/// Parsing of OpenMP clauses.
///
/// clause:
@@ -2084,10 +2491,14 @@ bool Parser::ParseOpenMPSimpleVarList(
/// thread_limit-clause | priority-clause | grainsize-clause |
/// nogroup-clause | num_tasks-clause | hint-clause | to-clause |
/// from-clause | is_device_ptr-clause | task_reduction-clause |
-/// in_reduction-clause | allocator-clause | allocate-clause
+/// in_reduction-clause | allocator-clause | allocate-clause |
+/// acq_rel-clause | acquire-clause | release-clause | relaxed-clause |
+/// depobj-clause | destroy-clause | detach-clause | inclusive-clause |
+/// exclusive-clause | uses_allocators-clause | use_device_addr-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
+ OMPClauseKind = CKind;
OMPClause *Clause = nullptr;
bool ErrorFound = false;
bool WrongDirective = false;
@@ -2107,7 +2518,6 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_simdlen:
case OMPC_collapse:
case OMPC_ordered:
- case OMPC_device:
case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
@@ -2115,14 +2525,14 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_num_tasks:
case OMPC_hint:
case OMPC_allocator:
+ case OMPC_depobj:
+ case OMPC_detach:
// OpenMP [2.5, Restrictions]
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
// Only one safelen clause can appear on a simd directive.
// Only one simdlen clause can appear on a simd directive.
// Only one collapse clause can appear on a simd directive.
- // OpenMP [2.9.1, target data construct, Restrictions]
- // At most one device clause can appear on the directive.
// OpenMP [2.11.1, task Construct, Restrictions]
// At most one if clause can appear on the directive.
// At most one final clause can appear on the directive.
@@ -2137,6 +2547,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// At most one num_tasks clause can appear on the directive.
// OpenMP [2.11.3, allocate Directive, Restrictions]
// At most one allocator clause can appear on the directive.
+ // OpenMP 5.0, 2.10.1 task Construct, Restrictions.
+ // At most one detach clause can appear on the directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -2151,6 +2563,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_default:
case OMPC_proc_bind:
case OMPC_atomic_default_mem_order:
+ case OMPC_order:
// OpenMP [2.14.3.1, Restrictions]
// Only a single default clause may be specified on a parallel, task or
// teams directive.
@@ -2159,7 +2572,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP [5.0, Requires directive, Restrictions]
// At most one atomic_default_mem_order clause can appear
// on the directive
- if (!FirstClause) {
+ if (!FirstClause && CKind != OMPC_order) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
@@ -2167,6 +2580,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
Clause = ParseOpenMPSimpleClause(CKind, WrongDirective);
break;
+ case OMPC_device:
case OMPC_schedule:
case OMPC_dist_schedule:
case OMPC_defaultmap:
@@ -2174,6 +2588,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// Only one schedule clause can appear on a loop directive.
// OpenMP 4.5 [2.10.4, Restrictions, p. 106]
// At most one defaultmap clause can appear on the directive.
+ // OpenMP 5.0 [2.12.5, target construct, Restrictions]
+ // At most one device clause can appear on the directive.
if ((getLangOpts().OpenMP < 50 || CKind != OMPC_defaultmap) &&
!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
@@ -2182,16 +2598,19 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
}
LLVM_FALLTHROUGH;
case OMPC_if:
- Clause = ParseOpenMPSingleExprWithArgClause(CKind, WrongDirective);
+ Clause = ParseOpenMPSingleExprWithArgClause(DKind, CKind, WrongDirective);
break;
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_nogroup:
@@ -2199,6 +2618,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
case OMPC_dynamic_allocators:
+ case OMPC_destroy:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
@@ -2213,6 +2633,17 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
Clause = ParseOpenMPClause(CKind, WrongDirective);
break;
+ case OMPC_update:
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ Clause = (DKind == OMPD_depobj)
+ ? ParseOpenMPSimpleClause(CKind, WrongDirective)
+ : ParseOpenMPClause(CKind, WrongDirective);
+ break;
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
@@ -2230,16 +2661,21 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_allocate:
case OMPC_nontemporal:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_affinity:
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
+ case OMPC_uses_allocators:
+ Clause = ParseOpenMPUsesAllocatorClause(DKind);
+ break;
case OMPC_device_type:
case OMPC_unknown:
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ skipUntilPragmaOpenMPEnd(DKind);
break;
case OMPC_threadprivate:
case OMPC_uniform:
@@ -2249,6 +2685,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
<< getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
SkipUntil(tok::comma, tok::annot_pragma_openmp_end, StopBeforeMatch);
break;
+ default:
+ break;
}
return ErrorFound ? nullptr : Clause;
}
@@ -2279,7 +2717,8 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// Parsing of OpenMP clauses with single expressions like 'final',
/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams',
-/// 'thread_limit', 'simdlen', 'priority', 'grainsize', 'num_tasks' or 'hint'.
+/// 'thread_limit', 'priority', 'grainsize', 'num_tasks', 'hint' or
+/// 'detach'.
///
/// final-clause:
/// 'final' '(' expression ')'
@@ -2311,6 +2750,9 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// allocator-clause:
/// 'allocator' '(' expression ')'
///
+/// detach-clause:
+/// 'detach' '(' event-handler-expression ')'
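+///   (e.g. '#pragma omp task detach(evt)', where 'evt' is a variable of type
+///   omp_event_handle_t; the name is illustrative)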
+///
OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
@@ -2330,16 +2772,27 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' ')
+/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')'
///
/// proc_bind-clause:
-/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
+/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')'
+///
+/// update-clause:
+/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' ')'
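+///   (e.g. '#pragma omp depobj(o) update(inout)', where 'o' is an
+///   omp_depend_t object; the name is illustrative)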
///
OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
bool ParseOnly) {
llvm::Optional<SimpleClauseData> Val = parseOpenMPSimpleClause(*this, Kind);
if (!Val || ParseOnly)
return nullptr;
+ if (getLangOpts().OpenMP < 51 && Kind == OMPC_default &&
+ static_cast<DefaultKind>(Val.getValue().Type) ==
+ OMP_DEFAULT_firstprivate) {
+ Diag(Val.getValue().LOpen, diag::err_omp_invalid_dsa)
+ << getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(OMPC_default) << "5.1";
+ return nullptr;
+ }
return Actions.ActOnOpenMPSimpleClause(
Kind, Val.getValue().Type, Val.getValue().TypeLoc, Val.getValue().LOpen,
Val.getValue().Loc, Val.getValue().RLoc);
@@ -2380,7 +2833,6 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
}
-
/// Parsing of OpenMP clauses with single expressions and some additional
/// argument like 'schedule' or 'dist_schedule'.
///
@@ -2392,16 +2844,20 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
/// 'if' '(' [ directive-name-modifier ':' ] expression ')'
///
/// defaultmap:
-/// 'defaultmap' '(' modifier ':' kind ')'
+/// 'defaultmap' '(' modifier [ ':' kind ] ')'
+///
+/// device-clause:
+/// 'device' '(' [ device-modifier ':' ] expression ')'
///
-OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
+OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
SourceLocation DelimLoc;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return nullptr;
ExprResult Val;
@@ -2477,17 +2933,37 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeAnyToken();
// Parse ':'
- if (Tok.is(tok::colon))
+ if (Tok.is(tok::colon) || getLangOpts().OpenMP < 50) {
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else if (Arg.back() != OMPC_DEFAULTMAP_MODIFIER_unknown)
+ Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
+ // Get a defaultmap kind
+ Arg.push_back(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ KLoc.push_back(Tok.getLocation());
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ } else {
+ Arg.push_back(OMPC_DEFAULTMAP_unknown);
+ KLoc.push_back(SourceLocation());
+ }
+ } else if (Kind == OMPC_device) {
+    // Only target executable directives support the extended device construct.
+ if (isOpenMPTargetExecutionDirective(DKind) && getLangOpts().OpenMP >= 50 &&
+ NextToken().is(tok::colon)) {
+ // Parse optional <device modifier> ':'
+ Arg.push_back(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ KLoc.push_back(Tok.getLocation());
ConsumeAnyToken();
- else if (Arg.back() != OMPC_DEFAULTMAP_MODIFIER_unknown)
- Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
- // Get a defaultmap kind
- Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
- KLoc.push_back(Tok.getLocation());
- if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
- Tok.isNot(tok::annot_pragma_openmp_end))
+ // Parse ':'
ConsumeAnyToken();
+ } else {
+ Arg.push_back(OMPC_DEVICE_unknown);
+ KLoc.emplace_back();
+ }
} else {
assert(Kind == OMPC_if);
KLoc.push_back(Tok.getLocation());
@@ -2510,7 +2986,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool NeedAnExpression = (Kind == OMPC_schedule && DelimLoc.isValid()) ||
(Kind == OMPC_dist_schedule && DelimLoc.isValid()) ||
- Kind == OMPC_if;
+ Kind == OMPC_if || Kind == OMPC_device;
if (NeedAnExpression) {
SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(AnyCastExpr, false, NotTypeCast));
@@ -2572,11 +3048,12 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
return false;
}
}
- return P.ParseUnqualifiedId(ReductionIdScopeSpec, /*EnteringContext*/ false,
- /*AllowDestructorName*/ false,
- /*AllowConstructorName*/ false,
- /*AllowDeductionGuide*/ false,
- nullptr, nullptr, ReductionId);
+ return P.ParseUnqualifiedId(
+ ReductionIdScopeSpec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext*/ false,
+ /*AllowDestructorName*/ false,
+ /*AllowConstructorName*/ false,
+ /*AllowDeductionGuide*/ false, nullptr, ReductionId);
}
/// Checks if the token is a valid map-type-modifier.
@@ -2604,6 +3081,7 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
@@ -2684,6 +3162,114 @@ static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
P.ConsumeToken();
}
+/// Parses the 'iterator' modifier used by some OpenMP clauses (e.g. 'depend',
+/// 'affinity'):
+///
+///   'iterator' '(' iterators-definition ')'
+///
+/// where iterators-definition is iterator-specifier [, iterators-definition ].
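+///
+/// For example (names illustrative):
+///   depend(iterator(i = 0:N), in: a[i])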
+ExprResult Parser::ParseOpenMPIteratorsExpr() {
+ assert(Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator" &&
+ "Expected 'iterator' token.");
+ SourceLocation IteratorKwLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "iterator"))
+ return ExprError();
+
+ SourceLocation LLoc = T.getOpenLocation();
+ SmallVector<Sema::OMPIteratorData, 4> Data;
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) {
+ // Check if the type parsing is required.
+ ParsedType IteratorType;
+ if (Tok.isNot(tok::identifier) || NextToken().isNot(tok::equal)) {
+ // identifier '=' is not found - parse type.
+ TypeResult TR = ParseTypeName();
+ if (TR.isInvalid()) {
+ T.skipToEnd();
+ return ExprError();
+ }
+ IteratorType = TR.get();
+ }
+
+ // Parse identifier.
+ IdentifierInfo *II = nullptr;
+ SourceLocation IdLoc;
+ if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ IdLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_unqualified_id) << 0;
+ }
+
+ // Parse '='.
+ SourceLocation AssignLoc;
+ if (Tok.is(tok::equal))
+ AssignLoc = ConsumeToken();
+ else
+ Diag(Tok, diag::err_omp_expected_equal_in_iterator);
+
+ // Parse range-specification - <begin> ':' <end> [ ':' <step> ]
+ ColonProtectionRAIIObject ColonRAII(*this);
+ // Parse <begin>
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult Begin = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Begin = Actions.ActOnFinishFullExpr(Begin.get(), Loc,
+ /*DiscardedValue=*/false);
+ // Parse ':'.
+ SourceLocation ColonLoc;
+ if (Tok.is(tok::colon))
+ ColonLoc = ConsumeToken();
+
+ // Parse <end>
+ Loc = Tok.getLocation();
+ LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult End = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ End = Actions.ActOnFinishFullExpr(End.get(), Loc,
+ /*DiscardedValue=*/false);
+
+ SourceLocation SecColonLoc;
+ ExprResult Step;
+ // Parse optional step.
+ if (Tok.is(tok::colon)) {
+ // Parse ':'
+ SecColonLoc = ConsumeToken();
+ // Parse <step>
+ Loc = Tok.getLocation();
+ LHS = ParseCastExpression(AnyCastExpr);
+ Step = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Step = Actions.ActOnFinishFullExpr(Step.get(), Loc,
+ /*DiscardedValue=*/false);
+ }
+
+ // Parse ',' or ')'
+ if (Tok.isNot(tok::comma) && Tok.isNot(tok::r_paren))
+ Diag(Tok, diag::err_omp_expected_punc_after_iterator);
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+
+ Sema::OMPIteratorData &D = Data.emplace_back();
+ D.DeclIdent = II;
+ D.DeclIdentLoc = IdLoc;
+ D.Type = IteratorType;
+ D.AssignLoc = AssignLoc;
+ D.ColonLoc = ColonLoc;
+ D.SecColonLoc = SecColonLoc;
+ D.Range.Begin = Begin.get();
+ D.Range.End = End.get();
+ D.Range.Step = Step.get();
+ }
+
+ // Parse ')'.
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
+
+ return Actions.ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc, LLoc, RLoc,
+ Data);
+}
+
/// Parses clauses with list.
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
@@ -2696,19 +3282,32 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return true;
+ bool HasIterator = false;
bool NeedRParenForLinear = false;
BalancedDelimiterTracker LinearT(*this, tok::l_paren,
tok::annot_pragma_openmp_end);
// Handle reduction-identifier for reduction clause.
if (Kind == OMPC_reduction || Kind == OMPC_task_reduction ||
Kind == OMPC_in_reduction) {
+ Data.ExtraModifier = OMPC_REDUCTION_unknown;
+ if (Kind == OMPC_reduction && getLangOpts().OpenMP >= 50 &&
+ (Tok.is(tok::identifier) || Tok.is(tok::kw_default)) &&
+ NextToken().is(tok::comma)) {
+ // Parse optional reduction modifier.
+ Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
+ Data.ExtraModifierLoc = Tok.getLocation();
+ ConsumeToken();
+ assert(Tok.is(tok::comma) && "Expected comma.");
+ (void)ConsumeToken();
+ }
ColonProtectionRAIIObject ColonRAII(*this);
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
InvalidReductionId = ParseReductionId(
*this, Data.ReductionOrMapperIdScopeSpec, UnqualifiedReductionId);
@@ -2724,11 +3323,27 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ReductionOrMapperId =
Actions.GetNameFromUnqualifiedId(UnqualifiedReductionId);
} else if (Kind == OMPC_depend) {
+ if (getLangOpts().OpenMP >= 50) {
+ if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator") {
+ // Handle optional dependence modifier.
+ // iterator(iterators-definition)
+ // where iterators-definition is iterator-specifier [,
+ // iterators-definition ]
+ // where iterator-specifier is [ iterator-type ] identifier =
+ // range-specification
+ HasIterator = true;
+ EnterScope(Scope::OpenMPDirectiveScope | Scope::DeclScope);
+ ExprResult IteratorRes = ParseOpenMPIteratorsExpr();
+ Data.DepModOrTailExpr = IteratorRes.get();
+ // Parse ','
+ ExpectAndConsume(tok::comma);
+ }
+ }
// Handle dependency type for depend clause.
ColonProtectionRAIIObject ColonRAII(*this);
Data.ExtraModifier = getOpenMPSimpleClauseType(
Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "");
- Data.DepLinMapLastLoc = Tok.getLocation();
+ Data.ExtraModifierLoc = Tok.getLocation();
if (Data.ExtraModifier == OMPC_DEPEND_unknown) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2753,7 +3368,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ExtraModifier = OMPC_LINEAR_val;
if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
- Data.DepLinMapLastLoc = ConsumeToken();
+ Data.ExtraModifierLoc = ConsumeToken();
LinearT.consumeOpen();
NeedRParenForLinear = true;
}
@@ -2766,13 +3381,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
!isOpenMPTaskLoopDirective(DKind)) &&
Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::colon)) {
Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
- Data.DepLinMapLastLoc = Tok.getLocation();
- if (Data.ExtraModifier == OMPC_LASTPRIVATE_unknown) {
- SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
- } else {
- ConsumeToken();
- }
+ Data.ExtraModifierLoc = Tok.getLocation();
+ ConsumeToken();
assert(Tok.is(tok::colon) && "Expected colon.");
Data.ColonLoc = ConsumeToken();
}
@@ -2784,7 +3394,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// map-type-modifier. The map-type can also be delete which has the same
// spelling of the C++ delete keyword.
Data.ExtraModifier = OMPC_MAP_unknown;
- Data.DepLinMapLastLoc = Tok.getLocation();
+ Data.ExtraModifierLoc = Tok.getLocation();
// Check for presence of a colon in the map clause.
TentativeParsingAction TPA(*this);
@@ -2840,22 +3450,33 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
ConsumeToken();
}
}
- } else if (Kind == OMPC_allocate) {
+ } else if (Kind == OMPC_allocate ||
+ (Kind == OMPC_affinity && Tok.is(tok::identifier) &&
+ PP.getSpelling(Tok) == "iterator")) {
// Handle optional allocator expression followed by colon delimiter.
ColonProtectionRAIIObject ColonRAII(*this);
TentativeParsingAction TPA(*this);
- ExprResult Tail =
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ // OpenMP 5.0, 2.10.1, task Construct.
+ // where aff-modifier is one of the following:
+ // iterator(iterators-definition)
+ ExprResult Tail;
+ if (Kind == OMPC_allocate) {
+ Tail = ParseAssignmentExpression();
+ } else {
+ HasIterator = true;
+ EnterScope(Scope::OpenMPDirectiveScope | Scope::DeclScope);
+ Tail = ParseOpenMPIteratorsExpr();
+ }
+ Tail = Actions.CorrectDelayedTyposInExpr(Tail);
Tail = Actions.ActOnFinishFullExpr(Tail.get(), T.getOpenLocation(),
/*DiscardedValue=*/false);
if (Tail.isUsable()) {
if (Tok.is(tok::colon)) {
- Data.TailExpr = Tail.get();
+ Data.DepModOrTailExpr = Tail.get();
Data.ColonLoc = ConsumeToken();
TPA.Commit();
} else {
- // colon not found, no allocator specified, parse only list of
- // variables.
+ // Colon not found, parse only list of variables.
TPA.Revert();
}
} else {
@@ -2876,6 +3497,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
Tok.isNot(tok::annot_pragma_openmp_end))) {
+ ParseScope OMPListScope(this, Scope::OpenMPDirectiveScope);
ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
// Parse variable
ExprResult VarExpr =
@@ -2912,7 +3534,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Tail =
Actions.ActOnFinishFullExpr(Tail.get(), ELoc, /*DiscardedValue*/ false);
if (Tail.isUsable())
- Data.TailExpr = Tail.get();
+ Data.DepModOrTailExpr = Tail.get();
else
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2922,16 +3544,17 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.RLoc = Tok.getLocation();
if (!T.consumeClose())
Data.RLoc = T.getCloseLocation();
- return (Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown &&
- Vars.empty()) ||
- (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
- (MustHaveTail && !Data.TailExpr) || InvalidReductionId ||
+  // Exit the scope entered for an iterator modifier (depend or affinity clause).
+ if (HasIterator)
+ ExitScope();
+ return (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
+ (MustHaveTail && !Data.DepModOrTailExpr) || InvalidReductionId ||
IsInvalidMapperModifier;
}
/// Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
-/// 'shared', 'copyin', 'copyprivate', 'flush', 'reduction', 'task_reduction' or
-/// 'in_reduction'.
+/// 'shared', 'copyin', 'copyprivate', 'flush', 'reduction', 'task_reduction',
+/// 'in_reduction', 'nontemporal', 'exclusive' or 'inclusive'.
///
/// private-clause:
/// 'private' '(' list ')'
@@ -2946,7 +3569,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// aligned-clause:
/// 'aligned' '(' list [ ':' alignment ] ')'
/// reduction-clause:
-/// 'reduction' '(' reduction-identifier ':' list ')'
+/// 'reduction' '(' [ modifier ',' ] reduction-identifier ':' list ')'
/// task_reduction-clause:
/// 'task_reduction' '(' reduction-identifier ':' list ')'
/// in_reduction-clause:
@@ -2967,10 +3590,18 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// 'from' '(' [ mapper '(' mapper-identifier ')' ':' ] list ')'
/// use_device_ptr-clause:
/// 'use_device_ptr' '(' list ')'
+/// use_device_addr-clause:
+/// 'use_device_addr' '(' list ')'
/// is_device_ptr-clause:
/// 'is_device_ptr' '(' list ')'
/// allocate-clause:
/// 'allocate' '(' [ allocator ':' ] list ')'
+/// nontemporal-clause:
+/// 'nontemporal' '(' list ')'
+/// inclusive-clause:
+/// 'inclusive' '(' list ')'
+/// exclusive-clause:
+/// 'exclusive' '(' list ')'
///
/// For 'linear' clause linear-list may have the following forms:
/// list
@@ -2991,9 +3622,9 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
return nullptr;
OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.TailExpr, Locs, Data.ColonLoc,
+ Kind, Vars, Data.DepModOrTailExpr, Locs, Data.ColonLoc,
Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
Data.ExtraModifier, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
- Data.IsMapTypeImplicit, Data.DepLinMapLastLoc);
+ Data.IsMapTypeImplicit, Data.ExtraModifierLoc);
}
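As a sketch of what the extended grammar above admits, the following OpenMP 5.0 fragment uses a reduction modifier together with the new inclusive clause; the prefix-sum kernel itself is illustrative and not part of the patch.

    // Hypothetical user code: 'inscan' is one of the optional reduction
    // modifiers parsed above, and 'inclusive' is one of the new list clauses.
    void prefix_sum(const int *a, int *out, int n) {
      int s = 0;
    #pragma omp simd reduction(inscan, + : s)
      for (int i = 0; i < n; ++i) {
        s += a[i];
    #pragma omp scan inclusive(s)
        out[i] = s;
      }
    }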
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index df411e1928d6..6402b31d00b2 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -108,6 +108,7 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
return;
if (OOS == tok::OOS_ON) {
PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ return;
}
MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
@@ -184,6 +185,13 @@ private:
Sema &Actions;
};
+struct PragmaFloatControlHandler : public PragmaHandler {
+ PragmaFloatControlHandler(Sema &Actions)
+ : PragmaHandler("float_control") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
struct PragmaMSPointersToMembers : public PragmaHandler {
explicit PragmaMSPointersToMembers() : PragmaHandler("pointers_to_members") {}
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
@@ -262,6 +270,18 @@ struct PragmaAttributeHandler : public PragmaHandler {
ParsedAttributes AttributesForPragmaAttribute;
};
+struct PragmaMaxTokensHereHandler : public PragmaHandler {
+ PragmaMaxTokensHereHandler() : PragmaHandler("max_tokens_here") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMaxTokensTotalHandler : public PragmaHandler {
+ PragmaMaxTokensTotalHandler() : PragmaHandler("max_tokens_total") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
} // end namespace
void Parser::initializePragmaHandlers() {
@@ -322,6 +342,8 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSCommentHandler.get());
}
+ FloatControlHandler = std::make_unique<PragmaFloatControlHandler>(Actions);
+ PP.AddPragmaHandler(FloatControlHandler.get());
if (getLangOpts().MicrosoftExt) {
MSDetectMismatchHandler =
std::make_unique<PragmaDetectMismatchHandler>(Actions);
@@ -382,6 +404,12 @@ void Parser::initializePragmaHandlers() {
AttributePragmaHandler =
std::make_unique<PragmaAttributeHandler>(AttrFactory);
PP.AddPragmaHandler("clang", AttributePragmaHandler.get());
+
+ MaxTokensHerePragmaHandler = std::make_unique<PragmaMaxTokensHereHandler>();
+ PP.AddPragmaHandler("clang", MaxTokensHerePragmaHandler.get());
+
+ MaxTokensTotalPragmaHandler = std::make_unique<PragmaMaxTokensTotalHandler>();
+ PP.AddPragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
}
void Parser::resetPragmaHandlers() {
@@ -420,6 +448,8 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", PCSectionHandler.get());
PCSectionHandler.reset();
+ PP.RemovePragmaHandler(FloatControlHandler.get());
+ FloatControlHandler.reset();
if (getLangOpts().MicrosoftExt) {
PP.RemovePragmaHandler(MSDetectMismatchHandler.get());
MSDetectMismatchHandler.reset();
@@ -487,6 +517,12 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", AttributePragmaHandler.get());
AttributePragmaHandler.reset();
+
+ PP.RemovePragmaHandler("clang", MaxTokensHerePragmaHandler.get());
+ MaxTokensHerePragmaHandler.reset();
+
+ PP.RemovePragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
+ MaxTokensTotalPragmaHandler.reset();
}
/// Handle the annotation token produced for #pragma unused(...)
@@ -605,21 +641,37 @@ void Parser::HandlePragmaFPContract() {
static_cast<tok::OnOffSwitch>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
- LangOptions::FPContractModeKind FPC;
+ LangOptions::FPModeKind FPC;
switch (OOS) {
case tok::OOS_ON:
- FPC = LangOptions::FPC_On;
+ FPC = LangOptions::FPM_On;
break;
case tok::OOS_OFF:
- FPC = LangOptions::FPC_Off;
+ FPC = LangOptions::FPM_Off;
break;
case tok::OOS_DEFAULT:
FPC = getLangOpts().getDefaultFPContractMode();
break;
}
- Actions.ActOnPragmaFPContract(FPC);
- ConsumeAnnotationToken();
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFPContract(PragmaLoc, FPC);
+}
+
+void Parser::HandlePragmaFloatControl() {
+ assert(Tok.is(tok::annot_pragma_float_control));
+
+  // The annotation value encodes both the PragmaFloatControlKind and the
+  // Sema::PragmaMsStackAction in a single 32-bit word: the stack action is
+  // the high 16 bits and the float-control kind is the low 16 bits. Use
+  // shift and bit-and to decode the two parts.
+ uintptr_t Value = reinterpret_cast<uintptr_t>(Tok.getAnnotationValue());
+ Sema::PragmaMsStackAction Action =
+ static_cast<Sema::PragmaMsStackAction>((Value >> 16) & 0xFFFF);
+ PragmaFloatControlKind Kind = PragmaFloatControlKind(Value & 0xFFFF);
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFloatControl(PragmaLoc, Action, Kind);
}
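A rough standalone illustration of the packing scheme described in the comment above, using enumerators that appear elsewhere in this patch (Sema::PSK_Push_Set, PFC_Precise); the particular combination is arbitrary.

    // Encode: Action in the high 16 bits, Kind in the low 16 bits.
    uintptr_t Packed = (uintptr_t(Sema::PSK_Push_Set) << 16) | (PFC_Precise & 0xFFFF);
    // Decode: the same shift-and-mask the handler performs.
    auto Action = static_cast<Sema::PragmaMsStackAction>((Packed >> 16) & 0xFFFF);
    auto Kind = PragmaFloatControlKind(Packed & 0xFFFF);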
void Parser::HandlePragmaFEnvAccess() {
@@ -628,21 +680,21 @@ void Parser::HandlePragmaFEnvAccess() {
static_cast<tok::OnOffSwitch>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
- LangOptions::FEnvAccessModeKind FPC;
+ bool IsEnabled;
switch (OOS) {
case tok::OOS_ON:
- FPC = LangOptions::FEA_On;
+ IsEnabled = true;
break;
case tok::OOS_OFF:
- FPC = LangOptions::FEA_Off;
+ IsEnabled = false;
break;
case tok::OOS_DEFAULT: // FIXME: Add this cli option when it makes sense.
- FPC = LangOptions::FEA_Off;
+ IsEnabled = false;
break;
}
- Actions.ActOnPragmaFEnvAccess(FPC);
- ConsumeAnnotationToken();
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFEnvAccess(PragmaLoc, IsEnabled);
}
@@ -1008,11 +1060,11 @@ struct PragmaLoopHintInfo {
static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
StringRef Str = PragmaName.getIdentifierInfo()->getName();
std::string ClangLoopStr = (llvm::Twine("clang loop ") + Str).str();
- return llvm::StringSwitch<StringRef>(Str)
- .Case("loop", ClangLoopStr)
- .Case("unroll_and_jam", Str)
- .Case("unroll", Str)
- .Default("");
+ return std::string(llvm::StringSwitch<StringRef>(Str)
+ .Case("loop", ClangLoopStr)
+ .Case("unroll_and_jam", Str)
+ .Case("unroll", Str)
+ .Default(""));
}
bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
@@ -1821,6 +1873,7 @@ void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
return;
}
+ SourceLocation PragmaLocation = Tok.getLocation();
PP.Lex(Tok); // eat ['bss'|'data'|'rodata'|'text']
if (Tok.isNot(tok::equal)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_clang_section_expected_equal) << SecKind;
@@ -1831,10 +1884,11 @@ void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
if (!PP.LexStringLiteral(Tok, SecName, "pragma clang section", false))
return;
- Actions.ActOnPragmaClangSection(Tok.getLocation(),
- (SecName.size()? Sema::PragmaClangSectionAction::PCSA_Set :
- Sema::PragmaClangSectionAction::PCSA_Clear),
- SecKind, SecName);
+ Actions.ActOnPragmaClangSection(
+ PragmaLocation,
+ (SecName.size() ? Sema::PragmaClangSectionAction::PCSA_Set
+ : Sema::PragmaClangSectionAction::PCSA_Clear),
+ SecKind, SecName);
}
}
@@ -2465,6 +2519,129 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
PP.EnterToken(AnnotTok, /*IsReinject*/ false);
}
+/// Handle the \#pragma float_control extension.
+///
+/// The syntax is:
+/// \code
+/// #pragma float_control(keyword[, setting] [,push])
+/// \endcode
+/// Where 'keyword' and 'setting' are identifiers.
+/// 'keyword' can be: precise, except, push, pop
+/// 'setting' can be: on, off
+/// The optional arguments 'setting' and 'push' are supported only
+/// when the keyword is 'precise' or 'except'.
+void PragmaFloatControlHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ Sema::PragmaMsStackAction Action = Sema::PSK_Set;
+ SourceLocation FloatControlLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(FloatControlLoc, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ // Read the identifier.
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+
+ // Verify that this is one of the float control options.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ PragmaFloatControlKind Kind =
+ llvm::StringSwitch<PragmaFloatControlKind>(II->getName())
+ .Case("precise", PFC_Precise)
+ .Case("except", PFC_Except)
+ .Case("push", PFC_Push)
+ .Case("pop", PFC_Pop)
+ .Default(PFC_Unknown);
+ PP.Lex(Tok); // the identifier
+ if (Kind == PFC_Unknown) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ } else if (Kind == PFC_Push || Kind == PFC_Pop) {
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // Eat the r_paren
+ Action = (Kind == PFC_Pop) ? Sema::PSK_Pop : Sema::PSK_Push;
+ } else {
+ if (Tok.is(tok::r_paren))
+ // Selecting Precise or Except
+ PP.Lex(Tok); // the r_paren
+ else if (Tok.isNot(tok::comma)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ } else {
+ PP.Lex(Tok); // ,
+ if (!Tok.isAnyIdentifier()) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ StringRef PushOnOff = Tok.getIdentifierInfo()->getName();
+ if (PushOnOff == "on")
+ // Kind is set correctly
+ ;
+ else if (PushOnOff == "off") {
+ if (Kind == PFC_Precise)
+ Kind = PFC_NoPrecise;
+ if (Kind == PFC_Except)
+ Kind = PFC_NoExcept;
+ } else if (PushOnOff == "push") {
+ Action = Sema::PSK_Push_Set;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the identifier
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok); // ,
+ if (!Tok.isAnyIdentifier()) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ StringRef ExpectedPush = Tok.getIdentifierInfo()->getName();
+ if (ExpectedPush == "push") {
+ Action = Sema::PSK_Push_Set;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the push identifier
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the r_paren
+ }
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "float_control";
+ return;
+ }
+
+  // Note: there is no accommodation for PP callback for this pragma.
+
+ // Enter the annotation.
+ auto TokenArray = std::make_unique<Token[]>(1);
+ TokenArray[0].startToken();
+ TokenArray[0].setKind(tok::annot_pragma_float_control);
+ TokenArray[0].setLocation(FloatControlLoc);
+ TokenArray[0].setAnnotationEndLoc(EndLoc);
+  // Encode the Action and the Kind into a single annotation value: the
+  // Action occupies the high 16 bits and the Kind the low 16 bits.
+ TokenArray[0].setAnnotationValue(reinterpret_cast<void *>(
+ static_cast<uintptr_t>((Action << 16) | (Kind & 0xFFFF))));
+ PP.EnterTokenStream(std::move(TokenArray), 1,
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
+}
+
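For reference, a few source-level forms the handler above accepts syntactically, taken directly from the documented syntax (keyword, optional setting, optional push); whether a given combination is honoured is decided later in Sema, and the mix shown here is purely illustrative.

    #pragma float_control(push)
    #pragma float_control(precise, on)
    #pragma float_control(except, off)
    #pragma float_control(precise, off, push)
    #pragma float_control(pop)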
/// Handle the Microsoft \#pragma detect_mismatch extension.
///
/// The syntax is:
@@ -2548,7 +2725,7 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
return;
}
- // Verify that this is one of the 5 whitelisted options.
+ // Verify that this is one of the 5 explicitly listed options.
IdentifierInfo *II = Tok.getIdentifierInfo();
PragmaMSCommentKind Kind =
llvm::StringSwitch<PragmaMSCommentKind>(II->getName())
@@ -2589,7 +2766,7 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
// FIXME: If the kind is "compiler" warn if the string is present (it is
// ignored).
// The MSDN docs say that "lib" and "linker" require a string and have a short
- // whitelist of linker options they support, but in practice MSVC doesn't
+ // list of linker options they support, but in practice MSVC doesn't
// issue a diagnostic. Therefore neither does clang.
if (Tok.isNot(tok::r_paren)) {
@@ -2651,7 +2828,7 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
namespace {
/// Used as the annotation value for tok::annot_pragma_fp.
struct TokFPAnnotValue {
- enum FlagKinds { Contract };
+ enum FlagKinds { Contract, Reassociate };
enum FlagValues { On, Off, Fast };
FlagKinds FlagKind;
@@ -2679,6 +2856,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
llvm::StringSwitch<llvm::Optional<TokFPAnnotValue::FlagKinds>>(
OptionInfo->getName())
.Case("contract", TokFPAnnotValue::Contract)
+ .Case("reassociate", TokFPAnnotValue::Reassociate)
.Default(None);
if (!FlagKind) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_option)
@@ -2696,7 +2874,8 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
- << PP.getSpelling(Tok) << OptionInfo->getName();
+ << PP.getSpelling(Tok) << OptionInfo->getName()
+ << (FlagKind == TokFPAnnotValue::Reassociate);
return;
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -2709,9 +2888,11 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
.Case("fast", TokFPAnnotValue::Fast)
.Default(llvm::None);
- if (!FlagValue) {
+ if (!FlagValue || (FlagKind == TokFPAnnotValue::Reassociate &&
+ FlagValue == TokFPAnnotValue::Fast)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
- << PP.getSpelling(Tok) << OptionInfo->getName();
+ << PP.getSpelling(Tok) << OptionInfo->getName()
+ << (FlagKind == TokFPAnnotValue::Reassociate);
return;
}
PP.Lex(Tok);
@@ -2725,7 +2906,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
auto *AnnotValue = new (PP.getPreprocessorAllocator())
TokFPAnnotValue{*FlagKind, *FlagValue};
- // Generate the loop hint token.
+ // Generate the fp annotation token.
Token FPTok;
FPTok.startToken();
FPTok.setKind(tok::annot_pragma_fp);
@@ -2753,20 +2934,24 @@ void Parser::HandlePragmaFP() {
auto *AnnotValue =
reinterpret_cast<TokFPAnnotValue *>(Tok.getAnnotationValue());
- LangOptions::FPContractModeKind FPC;
- switch (AnnotValue->FlagValue) {
- case TokFPAnnotValue::On:
- FPC = LangOptions::FPC_On;
- break;
- case TokFPAnnotValue::Fast:
- FPC = LangOptions::FPC_Fast;
- break;
- case TokFPAnnotValue::Off:
- FPC = LangOptions::FPC_Off;
- break;
+ if (AnnotValue->FlagKind == TokFPAnnotValue::Reassociate)
+ Actions.ActOnPragmaFPReassociate(
+ Tok.getLocation(), AnnotValue->FlagValue == TokFPAnnotValue::On);
+ else {
+ LangOptions::FPModeKind FPC;
+ switch (AnnotValue->FlagValue) {
+ case TokFPAnnotValue::Off:
+ FPC = LangOptions::FPM_Off;
+ break;
+ case TokFPAnnotValue::On:
+ FPC = LangOptions::FPM_On;
+ break;
+ case TokFPAnnotValue::Fast:
+ FPC = LangOptions::FPM_Fast;
+ break;
+ }
+ Actions.ActOnPragmaFPContract(Tok.getLocation(), FPC);
}
-
- Actions.ActOnPragmaFPContract(FPC);
ConsumeAnnotationToken();
}
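A usage sketch for the extended pragma handled above: 'reassociate' takes only on/off (the 'fast' spelling is rejected earlier in the handler), while 'contract' still accepts on, off, and fast. The function body is illustrative only.

    float sum4(float a, float b, float c, float d) {
    #pragma clang fp reassociate(on)
    #pragma clang fp contract(fast)
      return (a + b) + (c + d);
    }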
@@ -2914,7 +3099,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
Token LoopHintTok;
LoopHintTok.startToken();
LoopHintTok.setKind(tok::annot_pragma_loop_hint);
- LoopHintTok.setLocation(PragmaName.getLocation());
+ LoopHintTok.setLocation(Introducer.Loc);
LoopHintTok.setAnnotationEndLoc(PragmaName.getLocation());
LoopHintTok.setAnnotationValue(static_cast<void *>(Info));
TokenList.push_back(LoopHintTok);
@@ -3001,7 +3186,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
auto TokenArray = std::make_unique<Token[]>(1);
TokenArray[0].startToken();
TokenArray[0].setKind(tok::annot_pragma_loop_hint);
- TokenArray[0].setLocation(PragmaName.getLocation());
+ TokenArray[0].setLocation(Introducer.Loc);
TokenArray[0].setAnnotationEndLoc(PragmaName.getLocation());
TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
PP.EnterTokenStream(std::move(TokenArray), 1,
@@ -3279,3 +3464,64 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
PP.EnterTokenStream(std::move(TokenArray), 1,
/*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
+
+// Handle '#pragma clang max_tokens_here 12345'.
+void PragmaMaxTokensHereHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
+ << "clang max_tokens_here" << /*Expected=*/true << "integer";
+ return;
+ }
+
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t MaxTokens;
+ if (Tok.isNot(tok::numeric_constant) ||
+ !PP.parseSimpleIntegerLiteral(Tok, MaxTokens)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_expected_integer)
+ << "clang max_tokens_here";
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang max_tokens_here";
+ return;
+ }
+
+ if (PP.getTokenCount() > MaxTokens) {
+ PP.Diag(Loc, diag::warn_max_tokens)
+ << PP.getTokenCount() << (unsigned)MaxTokens;
+ }
+}
+
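A usage sketch for the pragma above; the token budget is an arbitrary illustrative number.

    // Warn if more than 50000 tokens have been lexed when this line is reached.
    #pragma clang max_tokens_here 50000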
+// Handle '#pragma clang max_tokens_total 12345'.
+void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
+ << "clang max_tokens_total" << /*Expected=*/true << "integer";
+ return;
+ }
+
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t MaxTokens;
+ if (Tok.isNot(tok::numeric_constant) ||
+ !PP.parseSimpleIntegerLiteral(Tok, MaxTokens)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_expected_integer)
+ << "clang max_tokens_total";
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang max_tokens_total";
+ return;
+ }
+
+ PP.overrideMaxTokens(MaxTokens, Loc);
+}
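And the corresponding sketch for the translation-unit-wide variant; again, the limit shown is illustrative.

    // Override the compiler's total token limit for this translation unit.
    #pragma clang max_tokens_total 1000000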
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index 0339328ca513..89a6a2b829ae 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -105,6 +105,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ MaybeDestroyTemplateIds();
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -353,13 +354,13 @@ Retry:
case tok::annot_pragma_fp_contract:
ProhibitAttributes(Attrs);
- Diag(Tok, diag::err_pragma_fp_contract_scope);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "fp_contract";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fp:
ProhibitAttributes(Attrs);
- Diag(Tok, diag::err_pragma_fp_scope);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "clang fp";
ConsumeAnnotationToken();
return StmtError();
@@ -368,6 +369,12 @@ Retry:
HandlePragmaFEnvAccess();
return StmtEmpty();
+ case tok::annot_pragma_float_control:
+ ProhibitAttributes(Attrs);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "float_control";
+ ConsumeAnnotationToken();
+ return StmtError();
+
case tok::annot_pragma_opencl_extension:
ProhibitAttributes(Attrs);
HandlePragmaOpenCLExtension();
@@ -936,6 +943,9 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
break;
+ case tok::annot_pragma_float_control:
+ HandlePragmaFloatControl();
+ break;
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
break;
@@ -1014,9 +1024,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Tok.getLocation(),
"in compound statement ('{}')");
- // Record the state of the FP_CONTRACT pragma, restore on leaving the
+ // Record the state of the FPFeatures, restore on leaving the
// compound statement.
- Sema::FPContractStateRAII SaveFPContractState(Actions);
+ Sema::FPFeaturesStateRAII SaveFPContractState(Actions);
InMessageExpressionRAIIObject InMessage(*this, false);
BalancedDelimiterTracker T(*this, tok::l_brace);
@@ -1146,10 +1156,14 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
/// should try to recover harder. It returns false if the condition is
/// successfully parsed. Note that a successful parse can still have semantic
/// errors in the condition.
+/// Additionally, if LParenLoc and RParenLoc are non-null, it will assign
+/// the location of the outer-most '(' and ')', respectively, to them.
bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &Cond,
SourceLocation Loc,
- Sema::ConditionKind CK) {
+ Sema::ConditionKind CK,
+ SourceLocation *LParenLoc,
+ SourceLocation *RParenLoc) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1179,6 +1193,13 @@ bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
// Otherwise the condition is valid or the rparen is present.
T.consumeClose();
+ if (LParenLoc != nullptr) {
+ *LParenLoc = T.getOpenLocation();
+ }
+ if (RParenLoc != nullptr) {
+ *RParenLoc = T.getCloseLocation();
+ }
+
// Check for extraneous ')'s to catch things like "if (foo())) {". We know
// that all callers are looking for a statement after the condition, so ")"
// isn't valid.
@@ -1338,6 +1359,8 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (IsConstexpr)
ConstexprCondition = Cond.getKnownValue();
+ bool IsBracedThen = Tok.is(tok::l_brace);
+
// C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
// if the body isn't a compound statement to avoid push/pop in common cases.
@@ -1356,7 +1379,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// would have to notify ParseStatement not to create a new scope. It's
// simpler to let it create a new scope.
//
- ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, IsBracedThen);
MisleadingIndentationChecker MIChecker(*this, MSK_if, IfLoc);
@@ -1417,7 +1440,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// Pop the 'else' scope if needed.
InnerScope.Exit();
} else if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteAfterIf(getCurScope());
+ Actions.CodeCompleteAfterIf(getCurScope(), IsBracedThen);
cutOffParsing();
return StmtError();
} else if (InnerStatementTrailingElseLoc.isValid()) {
@@ -1570,8 +1593,10 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
// Parse the condition.
Sema::ConditionResult Cond;
+ SourceLocation LParen;
+ SourceLocation RParen;
if (ParseParenExprOrCondition(nullptr, Cond, WhileLoc,
- Sema::ConditionKind::Boolean))
+ Sema::ConditionKind::Boolean, &LParen, &RParen))
return StmtError();
// C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
@@ -1601,7 +1626,7 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
if (Cond.isInvalid() || Body.isInvalid())
return StmtError();
- return Actions.ActOnWhileStmt(WhileLoc, Cond, Body.get());
+ return Actions.ActOnWhileStmt(WhileLoc, LParen, Cond, RParen, Body.get());
}
/// ParseDoStatement
@@ -1921,7 +1946,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (ForRangeInfo.ParsedForRangeDecl()) {
Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
: ForRangeInfo.ColonLoc,
- getLangOpts().CPlusPlus2a
+ getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_for_range_init_stmt
: diag::ext_for_range_init_stmt)
<< (FirstPart.get() ? FirstPart.get()->getSourceRange()
@@ -2162,6 +2187,8 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
// Create temporary attribute list.
ParsedAttributesWithRange TempAttrs(AttrFactory);
+ SourceLocation StartLoc = Tok.getLocation();
+
// Get loop hints and consume annotated token.
while (Tok.is(tok::annot_pragma_loop_hint)) {
LoopHint Hint;
@@ -2182,6 +2209,12 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
Stmts, StmtCtx, TrailingElseLoc, Attrs);
Attrs.takeAllFrom(TempAttrs);
+
+ // Start of attribute range may already be set for some invalid input.
+ // See PR46336.
+ if (Attrs.Range.getBegin().isInvalid())
+ Attrs.Range.setBegin(StartLoc);
+
return S;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
index ea2c871d6a82..7d0818840a4f 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
@@ -220,9 +220,10 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
// Parse an optional scope-specifier if we're in C++.
CXXScopeSpec SS;
- if (getLangOpts().CPlusPlus) {
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
- }
+ if (getLangOpts().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false);
// Require an identifier here.
SourceLocation TemplateKWLoc;
@@ -233,12 +234,13 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
Result = ParseCXXThis();
Invalid = false;
} else {
- Invalid = ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/false,
- /*AllowConstructorName=*/false,
- /*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, &TemplateKWLoc, Id);
+ Invalid =
+ ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Id);
// Perform the lookup.
Result = Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id,
IsUnevaluatedContext);
@@ -349,31 +351,13 @@ static bool buildMSAsmString(Preprocessor &PP, SourceLocation AsmLoc,
return false;
}
-/// isTypeQualifier - Return true if the current token could be the
-/// start of a type-qualifier-list.
-static bool isTypeQualifier(const Token &Tok) {
- switch (Tok.getKind()) {
- default: return false;
- // type-qualifier
- case tok::kw_const:
- case tok::kw_volatile:
- case tok::kw_restrict:
- case tok::kw___private:
- case tok::kw___local:
- case tok::kw___global:
- case tok::kw___constant:
- case tok::kw___generic:
- case tok::kw___read_only:
- case tok::kw___read_write:
- case tok::kw___write_only:
- return true;
- }
+// Determine if this is a GCC-style asm statement.
+bool Parser::isGCCAsmStatement(const Token &TokAfterAsm) const {
+ return TokAfterAsm.is(tok::l_paren) || isGNUAsmQualifier(TokAfterAsm);
}
-// Determine if this is a GCC-style asm statement.
-static bool isGCCAsmStatement(const Token &TokAfterAsm) {
- return TokAfterAsm.is(tok::l_paren) || TokAfterAsm.is(tok::kw_goto) ||
- isTypeQualifier(TokAfterAsm);
+bool Parser::isGNUAsmQualifier(const Token &TokAfterAsm) const {
+ return getGNUAsmQualifier(TokAfterAsm) != GNUAsmQualifiers::AQ_unspecified;
}
/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
@@ -631,8 +615,8 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
// Change to the Intel dialect.
Parser->setAssemblerDialect(1);
Parser->setTargetParser(*TargetParser.get());
- Parser->setParsingInlineAsm(true);
- TargetParser->setParsingInlineAsm(true);
+ Parser->setParsingMSInlineAsm(true);
+ TargetParser->setParsingMSInlineAsm(true);
ClangAsmParserCallback Callback(*this, AsmLoc, AsmString, AsmToks,
TokOffsets);
@@ -684,13 +668,41 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
ClobberRefs, Exprs, EndLoc);
}
+/// parseGNUAsmQualifierListOpt - Parse a GNU extended asm qualifier list.
+/// asm-qualifier:
+/// volatile
+/// inline
+/// goto
+///
+/// asm-qualifier-list:
+/// asm-qualifier
+/// asm-qualifier-list asm-qualifier
+bool Parser::parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ) {
+  while (true) {
+ const GNUAsmQualifiers::AQ A = getGNUAsmQualifier(Tok);
+ if (A == GNUAsmQualifiers::AQ_unspecified) {
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::err_asm_qualifier_ignored);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+ return false;
+ }
+ if (AQ.setAsmQualifier(A))
+ Diag(Tok.getLocation(), diag::err_asm_duplicate_qual)
+ << GNUAsmQualifiers::getQualifierName(A);
+ ConsumeToken();
+ }
+ return false;
+}
+
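A sketch of source that exercises the qualifier list parsed above (x86-flavoured and illustrative only); qualifiers may appear in any order, and a repeated qualifier is diagnosed rather than silently dropped.

    void spin(void) {
      asm inline volatile("pause" ::: "memory");
    }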
/// ParseAsmStatement - Parse a GNU extended asm statement.
/// asm-statement:
/// gnu-asm-statement
/// ms-asm-statement
///
/// [GNU] gnu-asm-statement:
-/// 'asm' type-qualifier[opt] '(' asm-argument ')' ';'
+/// 'asm' asm-qualifier-list[opt] '(' asm-argument ')' ';'
///
/// [GNU] asm-argument:
/// asm-string-literal
@@ -712,34 +724,14 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
return ParseMicrosoftAsmStatement(AsmLoc);
}
- DeclSpec DS(AttrFactory);
SourceLocation Loc = Tok.getLocation();
- ParseTypeQualifierListOpt(DS, AR_VendorAttributesParsed);
-
- // GNU asms accept, but warn, about type-qualifiers other than volatile.
- if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "const";
- if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "restrict";
- // FIXME: Once GCC supports _Atomic, check whether it permits it here.
- if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "_Atomic";
-
- // Remember if this was a volatile asm.
- bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
- // Remember if this was a goto asm.
- bool isGotoAsm = false;
-
- if (Tok.is(tok::kw_goto)) {
- isGotoAsm = true;
- ConsumeToken();
- }
-
- if (Tok.isNot(tok::l_paren)) {
- Diag(Tok, diag::err_expected_lparen_after) << "asm";
- SkipUntil(tok::r_paren, StopAtSemi);
+ GNUAsmQualifiers GAQ;
+ if (parseGNUAsmQualifierListOpt(GAQ))
return StmtError();
- }
+
+ if (GAQ.isGoto() && getLangOpts().SpeculativeLoadHardening)
+ Diag(Loc, diag::warn_slh_does_not_support_asm_goto);
+
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -767,11 +759,10 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
if (Tok.is(tok::r_paren)) {
// We have a simple asm expression like 'asm("foo")'.
T.consumeClose();
- return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
- /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr,
- Constraints, Exprs, AsmString.get(),
- Clobbers, /*NumLabels*/ 0,
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(
+ AsmLoc, /*isSimple*/ true, GAQ.isVolatile(),
+ /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr, Constraints, Exprs,
+ AsmString.get(), Clobbers, /*NumLabels*/ 0, T.getCloseLocation());
}
// Parse Outputs, if present.
@@ -781,12 +772,6 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
AteExtraColon = Tok.is(tok::coloncolon);
ConsumeToken();
- if (!AteExtraColon && isGotoAsm && Tok.isNot(tok::colon)) {
- Diag(Tok, diag::err_asm_goto_cannot_have_output);
- SkipUntil(tok::r_paren, StopAtSemi);
- return StmtError();
- }
-
if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
return StmtError();
}
@@ -835,7 +820,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
}
}
- if (!isGotoAsm && (Tok.isNot(tok::r_paren) || AteExtraColon)) {
+ if (!GAQ.isGoto() && (Tok.isNot(tok::r_paren) || AteExtraColon)) {
Diag(Tok, diag::err_expected) << tok::r_paren;
SkipUntil(tok::r_paren, StopAtSemi);
return StmtError();
@@ -868,16 +853,16 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
if (!TryConsumeToken(tok::comma))
break;
}
- } else if (isGotoAsm) {
+ } else if (GAQ.isGoto()) {
Diag(Tok, diag::err_expected) << tok::colon;
SkipUntil(tok::r_paren, StopAtSemi);
return StmtError();
}
T.consumeClose();
- return Actions.ActOnGCCAsmStmt(
- AsmLoc, false, isVolatile, NumOutputs, NumInputs, Names.data(),
- Constraints, Exprs, AsmString.get(), Clobbers, NumLabels,
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(AsmLoc, false, GAQ.isVolatile(), NumOutputs,
+ NumInputs, Names.data(), Constraints, Exprs,
+ AsmString.get(), Clobbers, NumLabels,
+ T.getCloseLocation());
}
/// ParseAsmOperands - Parse the asm-operands production as used by
@@ -948,3 +933,28 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
return false;
}
}
+
+const char *Parser::GNUAsmQualifiers::getQualifierName(AQ Qualifier) {
+ switch (Qualifier) {
+ case AQ_volatile: return "volatile";
+ case AQ_inline: return "inline";
+ case AQ_goto: return "goto";
+ case AQ_unspecified: return "unspecified";
+ }
+ llvm_unreachable("Unknown GNUAsmQualifier");
+}
+
+Parser::GNUAsmQualifiers::AQ
+Parser::getGNUAsmQualifier(const Token &Tok) const {
+ switch (Tok.getKind()) {
+ case tok::kw_volatile: return GNUAsmQualifiers::AQ_volatile;
+ case tok::kw_inline: return GNUAsmQualifiers::AQ_inline;
+ case tok::kw_goto: return GNUAsmQualifiers::AQ_goto;
+ default: return GNUAsmQualifiers::AQ_unspecified;
+ }
+}
+bool Parser::GNUAsmQualifiers::setAsmQualifier(AQ Qualifier) {
+ bool IsDuplicate = Qualifiers & Qualifier;
+ Qualifiers |= Qualifier;
+ return IsDuplicate;
+}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index 609640576e9e..3ef73f579123 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -22,6 +22,16 @@
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
+/// Re-enter a possible template scope, creating as many template parameter
+/// scopes as necessary.
+/// \return The number of template parameter scopes entered.
+unsigned Parser::ReenterTemplateScopes(MultiParseScope &S, Decl *D) {
+ return Actions.ActOnReenterTemplateScope(D, [&] {
+ S.Enter(Scope::TemplateParamScope);
+ return Actions.getCurScope();
+ });
+}
+
/// Parse a template declaration, explicit instantiation, or
/// explicit specialization.
Decl *Parser::ParseDeclarationStartingWithTemplate(
@@ -67,8 +77,7 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
assert(Tok.isOneOf(tok::kw_export, tok::kw_template) &&
"Token does not start a template declaration.");
- // Enter template-parameter scope.
- ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+ MultiParseScope TemplateParamScopes(*this);
// Tell the action that names should be checked in the context of
// the declaration to come.
@@ -116,7 +125,8 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
// Parse the '<' template-parameter-list '>'
SourceLocation LAngleLoc, RAngleLoc;
SmallVector<NamedDecl*, 4> TemplateParams;
- if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ if (ParseTemplateParameters(TemplateParamScopes,
+ CurTemplateDepthTracker.getDepth(),
TemplateParams, LAngleLoc, RAngleLoc)) {
// Skip until the semi-colon or a '}'.
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
@@ -150,9 +160,6 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
TemplateParams, RAngleLoc, OptionalRequiresClauseConstraintER.get()));
} while (Tok.isOneOf(tok::kw_export, tok::kw_template));
- unsigned NewFlags = getCurScope()->getFlags() & ~Scope::TemplateParamScope;
- ParseScopeFlags TemplateScopeFlags(this, NewFlags, isSpecialization);
-
// Parse the actual template declaration.
if (Tok.is(tok::kw_concept))
return ParseConceptDefinition(
@@ -253,9 +260,9 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
}
llvm::TimeTraceScope TimeScope("ParseTemplate", [&]() {
- return DeclaratorInfo.getIdentifier() != nullptr
- ? DeclaratorInfo.getIdentifier()->getName()
- : "<unknown>";
+ return std::string(DeclaratorInfo.getIdentifier() != nullptr
+ ? DeclaratorInfo.getIdentifier()->getName()
+ : "<unknown>");
});
LateParsedAttrList LateParsedAttrs(true);
@@ -363,9 +370,11 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
DiagnoseAndSkipCXX11Attributes();
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
- /*EnteringContext=*/false, /*MayBePseudoDestructor=*/nullptr,
- /*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
+ if (ParseOptionalCXXScopeSpecifier(
+ SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
+ /*MayBePseudoDestructor=*/nullptr,
+ /*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
SS.isInvalid()) {
SkipUntil(tok::semi);
return nullptr;
@@ -376,12 +385,12 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
diag::err_concept_definition_not_identifier);
UnqualifiedId Result;
- if (ParseUnqualifiedId(SS, /*EnteringContext=*/false,
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
/*AllowDeductionGuide=*/false,
- /*ObjectType=*/ParsedType(), /*TemplateKWLoc=*/nullptr,
- Result)) {
+ /*TemplateKWLoc=*/nullptr, Result)) {
SkipUntil(tok::semi);
return nullptr;
}
@@ -428,8 +437,9 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
///
/// \returns true if an error occurred, false otherwise.
bool Parser::ParseTemplateParameters(
- unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams,
- SourceLocation &LAngleLoc, SourceLocation &RAngleLoc) {
+ MultiParseScope &TemplateScopes, unsigned Depth,
+ SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc) {
// Get the template parameter list.
if (!TryConsumeToken(tok::less, LAngleLoc)) {
Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
@@ -438,8 +448,11 @@ bool Parser::ParseTemplateParameters(
// Try to parse the template parameter list.
bool Failed = false;
- if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater))
+ // FIXME: Missing greatergreatergreater support.
+ if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater)) {
+ TemplateScopes.Enter(Scope::TemplateParamScope);
Failed = ParseTemplateParameterList(Depth, TemplateParams);
+ }
if (Tok.is(tok::greatergreater)) {
// No diagnostic required here: a template-parameter-list can only be
@@ -678,23 +691,23 @@ bool Parser::isTypeConstraintAnnotation() {
///
/// \returns true if an error occurred, and false otherwise.
bool Parser::TryAnnotateTypeConstraint() {
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
return false;
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
- if (ParseOptionalCXXScopeSpecifier(
- SS, ParsedType(),
- /*EnteringContext=*/false,
- /*MayBePseudoDestructor=*/nullptr,
- // If this is not a type-constraint, then
- // this scope-spec is part of the typename
- // of a non-type template parameter
- /*IsTypename=*/true, /*LastII=*/nullptr,
- // We won't find concepts in
- // non-namespaces anyway, so might as well
- // parse this correctly for possible type
- // names.
- /*OnlyNamespace=*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
+ /*MayBePseudoDestructor=*/nullptr,
+ // If this is not a type-constraint, then
+ // this scope-spec is part of the typename
+ // of a non-type template parameter
+ /*IsTypename=*/true, /*LastII=*/nullptr,
+ // We won't find concepts in
+ // non-namespaces anyway, so might as well
+ // parse this correctly for possible type
+ // names.
+ /*OnlyNamespace=*/false))
return true;
if (Tok.is(tok::identifier)) {
@@ -754,7 +767,8 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
TemplateIdAnnotation *TypeConstraint = nullptr;
bool TypenameKeyword = false;
SourceLocation KeyLoc;
- ParseOptionalCXXScopeSpecifier(TypeConstraintSS, nullptr,
+ ParseOptionalCXXScopeSpecifier(TypeConstraintSS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext*/ false);
if (Tok.is(tok::annot_template_id)) {
// Consume the 'type-constraint'.
@@ -847,9 +861,9 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
SmallVector<NamedDecl*,8> TemplateParams;
SourceLocation LAngleLoc, RAngleLoc;
{
- ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
- if (ParseTemplateParameters(Depth + 1, TemplateParams, LAngleLoc,
- RAngleLoc)) {
+ MultiParseScope TemplateParmScope(*this);
+ if (ParseTemplateParameters(TemplateParmScope, Depth + 1, TemplateParams,
+ LAngleLoc, RAngleLoc)) {
return nullptr;
}
}
@@ -987,7 +1001,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// Create the parameter.
return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl,
- Depth, Position, EqualLoc,
+ Depth, Position, EqualLoc,
DefaultArg.get());
}
@@ -1028,7 +1042,8 @@ void Parser::DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
/// or argument list.
///
/// \returns true, if current token does not start with '>', false otherwise.
-bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
+bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
+ SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList) {
// What will be left once we've consumed the '>'.
@@ -1038,7 +1053,8 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
switch (Tok.getKind()) {
default:
- Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
+ Diag(getEndOfPreviousToken(), diag::err_expected) << tok::greater;
+ Diag(LAngleLoc, diag::note_matching) << tok::less;
return true;
case tok::greater:
@@ -1217,16 +1233,17 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
if (Invalid) {
// Try to find the closing '>'.
- if (ConsumeLastToken)
- SkipUntil(tok::greater, StopAtSemi);
+ if (getLangOpts().CPlusPlus11)
+ SkipUntil(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater, StopAtSemi | StopBeforeMatch);
else
SkipUntil(tok::greater, StopAtSemi | StopBeforeMatch);
- return true;
}
}
- return ParseGreaterThanInTemplateList(RAngleLoc, ConsumeLastToken,
- /*ObjCGenericList=*/false);
+ return ParseGreaterThanInTemplateList(LAngleLoc, RAngleLoc, ConsumeLastToken,
+ /*ObjCGenericList=*/false) ||
+ Invalid;
}
/// Replace the tokens that form a simple-template-id with an
@@ -1277,12 +1294,13 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
bool AllowTypeAnnotation,
bool TypeConstraint) {
assert(getLangOpts().CPlusPlus && "Can only annotate template-ids in C++");
- assert(Template && (Tok.is(tok::less) || TypeConstraint) &&
+ assert((Tok.is(tok::less) || TypeConstraint) &&
"Parser isn't at the beginning of a template-id");
assert(!(TypeConstraint && AllowTypeAnnotation) && "type-constraint can't be "
"a type annotation");
assert((!TypeConstraint || TNK == TNK_Concept_template) && "type-constraint "
"must accompany a concept name");
+ assert((Template || TNK == TNK_Non_template) && "missing template name");
// Consume the template-name.
SourceLocation TemplateNameLoc = TemplateName.getSourceRange().getBegin();
@@ -1290,40 +1308,31 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
+ bool ArgsInvalid = false;
if (!TypeConstraint || Tok.is(tok::less)) {
- bool Invalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
- TemplateArgs,
- RAngleLoc);
-
- if (Invalid) {
- // If we failed to parse the template ID but skipped ahead to a >, we're not
- // going to be able to form a token annotation. Eat the '>' if present.
- TryConsumeToken(tok::greater);
- // FIXME: Annotate the token stream so we don't produce the same errors
- // again if we're doing this annotation as part of a tentative parse.
+ ArgsInvalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
+ TemplateArgs, RAngleLoc);
+ // If we couldn't recover from invalid arguments, don't form an annotation
+ // token -- we don't know how much to annotate.
+ // FIXME: This can lead to duplicate diagnostics if we retry parsing this
+ // template-id in another context. Try to annotate anyway?
+ if (RAngleLoc.isInvalid())
return true;
- }
}
ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
- TypeResult Type = Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template, TemplateName.Identifier,
- TemplateNameLoc, LAngleLoc, TemplateArgsPtr, RAngleLoc);
- if (Type.isInvalid()) {
- // If we failed to parse the template ID but skipped ahead to a >, we're
- // not going to be able to form a token annotation. Eat the '>' if
- // present.
- TryConsumeToken(tok::greater);
- // FIXME: Annotate the token stream so we don't produce the same errors
- // again if we're doing this annotation as part of a tentative parse.
- return true;
- }
+ TypeResult Type = ArgsInvalid
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), SS, TemplateKWLoc, Template,
+ TemplateName.Identifier, TemplateNameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc);
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Type.get());
+ setTypeAnnotation(Tok, Type);
if (SS.isNotEmpty())
Tok.setLocation(SS.getBeginLoc());
else if (TemplateKWLoc.isValid())
@@ -1347,7 +1356,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
TemplateIdAnnotation *TemplateId = TemplateIdAnnotation::Create(
TemplateKWLoc, TemplateNameLoc, TemplateII, OpKind, Template, TNK,
- LAngleLoc, RAngleLoc, TemplateArgs, TemplateIds);
+ LAngleLoc, RAngleLoc, TemplateArgs, ArgsInvalid, TemplateIds);
Tok.setAnnotationValue(TemplateId);
if (TemplateKWLoc.isValid())
@@ -1383,29 +1392,24 @@ void Parser::AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- assert((TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template) &&
+ assert(TemplateId->mightBeType() &&
"Only works for type and dependent templates");
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult Type
- = Actions.ActOnTemplateIdType(getCurScope(),
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/false,
- IsClassName);
+ TypeResult Type =
+ TemplateId->isInvalid()
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
+ TemplateArgsPtr, TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ false, IsClassName);
// Create the new "type" annotation token.
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Type.isInvalid() ? nullptr : Type.get());
+ setTypeAnnotation(Tok, Type);
if (SS.isNotEmpty()) // it was a C++ qualified type name.
Tok.setLocation(SS.getBeginLoc());
// End location stays the same
@@ -1417,7 +1421,9 @@ void Parser::AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
/// Determine whether the given token can end a template argument.
static bool isEndOfTemplateArgument(Token Tok) {
- return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater);
+ // FIXME: Handle '>>>'.
+ return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater,
+ tok::greatergreatergreater);
}
/// Parse a C++ template template argument.
@@ -1438,7 +1444,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// followed by a token that terminates a template argument, such as ',',
// '>', or (in some cases) '>>'.
CXXScopeSpec SS; // nested-name-specifier, if present
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
ParsedTemplateArgument Result;
@@ -1456,15 +1463,14 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
TryConsumeToken(tok::ellipsis, EllipsisLoc);
- // If the next token signals the end of a template argument,
- // then we have a dependent template name that could be a template
- // template argument.
+ // If the next token signals the end of a template argument, then we have
+ // a (possibly-dependent) template name that could be a template template
+ // argument.
TemplateTy Template;
if (isEndOfTemplateArgument(Tok) &&
- Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, Name,
- /*ObjectType=*/nullptr,
- /*EnteringContext=*/false, Template))
+ Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Name,
+ /*ObjectType=*/nullptr,
+ /*EnteringContext=*/false, Template))
Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
}
} else if (Tok.is(tok::identifier)) {
@@ -1568,10 +1574,8 @@ Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
- if (Arg.isInvalid()) {
- SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ if (Arg.isInvalid())
return true;
- }
// Save this template argument.
TemplateArgs.push_back(Arg);
@@ -1625,6 +1629,9 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
if (!LPT.D)
return;
+ // Destroy TemplateIdAnnotations when we're done, if possible.
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
+
// Get the FunctionDecl.
FunctionDecl *FunD = LPT.D->getAsFunction();
// Track template parameter depth.
@@ -1634,40 +1641,22 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Sema::ContextRAII GlobalSavedContext(
Actions, Actions.Context.getTranslationUnitDecl());
- SmallVector<ParseScope*, 4> TemplateParamScopeStack;
-
- // Get the list of DeclContexts to reenter. For inline methods, we only want
- // to push the DeclContext of the outermost class. This matches the way the
- // parser normally parses bodies of inline methods when the outermost class is
- // complete.
- struct ContainingDC {
- ContainingDC(DeclContext *DC, bool ShouldPush) : Pair(DC, ShouldPush) {}
- llvm::PointerIntPair<DeclContext *, 1, bool> Pair;
- DeclContext *getDC() { return Pair.getPointer(); }
- bool shouldPushDC() { return Pair.getInt(); }
- };
- SmallVector<ContainingDC, 4> DeclContextsToReenter;
- DeclContext *DD = FunD;
- DeclContext *NextContaining = Actions.getContainingDC(DD);
- while (DD && !DD->isTranslationUnit()) {
- bool ShouldPush = DD == NextContaining;
- DeclContextsToReenter.push_back({DD, ShouldPush});
- if (ShouldPush)
- NextContaining = Actions.getContainingDC(DD);
- DD = DD->getLexicalParent();
- }
-
- // Reenter template scopes from outermost to innermost.
- for (ContainingDC CDC : reverse(DeclContextsToReenter)) {
- TemplateParamScopeStack.push_back(
- new ParseScope(this, Scope::TemplateParamScope));
- unsigned NumParamLists = Actions.ActOnReenterTemplateScope(
- getCurScope(), cast<Decl>(CDC.getDC()));
- CurTemplateDepthTracker.addDepth(NumParamLists);
- if (CDC.shouldPushDC()) {
- TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
- Actions.PushDeclContext(Actions.getCurScope(), CDC.getDC());
- }
+ MultiParseScope Scopes(*this);
+
+ // Get the list of DeclContexts to reenter.
+ SmallVector<DeclContext*, 4> DeclContextsToReenter;
+ for (DeclContext *DC = FunD; DC && !DC->isTranslationUnit();
+ DC = DC->getLexicalParent())
+ DeclContextsToReenter.push_back(DC);
+
+ // Reenter scopes from outermost to innermost.
+ for (DeclContext *DC : reverse(DeclContextsToReenter)) {
+ CurTemplateDepthTracker.addDepth(
+ ReenterTemplateScopes(Scopes, cast<Decl>(DC)));
+ Scopes.Enter(Scope::DeclScope);
+ // We'll reenter the function context itself below.
+ if (DC != FunD)
+ Actions.PushDeclContext(Actions.getCurScope(), DC);
}
assert(!LPT.Toks.empty() && "Empty body!");
@@ -1688,8 +1677,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Scope::CompoundStmtScope);
// Recreate the containing function DeclContext.
- Sema::ContextRAII FunctionSavedContext(Actions,
- Actions.getContainingDC(FunD));
+ Sema::ContextRAII FunctionSavedContext(Actions, FunD->getLexicalParent());
Actions.ActOnStartOfFunctionDef(getCurScope(), FunD);
@@ -1713,13 +1701,6 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
} else
Actions.ActOnFinishFunctionBody(LPT.D, nullptr);
}
-
- // Exit scopes.
- FnScope.Exit();
- SmallVectorImpl<ParseScope *>::reverse_iterator I =
- TemplateParamScopeStack.rbegin();
- for (; I != TemplateParamScopeStack.rend(); ++I)
- delete *I;
}
/// Lex a delayed template function for late parsing.
@@ -1751,7 +1732,7 @@ bool Parser::diagnoseUnknownTemplateId(ExprResult LHS, SourceLocation Less) {
TPA.Commit();
SourceLocation Greater;
- ParseGreaterThanInTemplateList(Greater, true, false);
+ ParseGreaterThanInTemplateList(Less, Greater, true, false);
Actions.diagnoseExprIntendedAsTemplateName(getCurScope(), LHS,
Less, Greater);
return true;
@@ -1780,7 +1761,7 @@ void Parser::checkPotentialAngleBracket(ExprResult &PotentialTemplateName) {
NextToken().isOneOf(tok::greatergreater, tok::greatergreatergreater))) {
SourceLocation Less = ConsumeToken();
SourceLocation Greater;
- ParseGreaterThanInTemplateList(Greater, true, false);
+ ParseGreaterThanInTemplateList(Less, Greater, true, false);
Actions.diagnoseExprIntendedAsTemplateName(
getCurScope(), PotentialTemplateName, Less, Greater);
// FIXME: Perform error recovery.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index ad0a15b0c8a6..f026f3a1bfb2 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -186,21 +186,8 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
ConsumeToken();
// Skip attributes.
- while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas)) {
- if (Tok.is(tok::l_square)) {
- ConsumeBracket();
- if (!SkipUntil(tok::r_square))
- return TPResult::Error;
- } else {
- ConsumeToken();
- if (Tok.isNot(tok::l_paren))
- return TPResult::Error;
- ConsumeParen();
- if (!SkipUntil(tok::r_paren))
- return TPResult::Error;
- }
- }
+ if (!TrySkipAttributes())
+ return TPResult::Error;
if (TryAnnotateOptionalCXXScopeToken())
return TPResult::Error;
@@ -441,6 +428,38 @@ struct Parser::ConditionDeclarationOrInitStatementState {
}
};
+bool Parser::isEnumBase(bool AllowSemi) {
+ assert(Tok.is(tok::colon) && "should be looking at the ':'");
+
+ RevertingTentativeParsingAction PA(*this);
+ // ':'
+ ConsumeToken();
+
+ // type-specifier-seq
+ bool InvalidAsDeclSpec = false;
+ // FIXME: We could disallow non-type decl-specifiers here, but it makes no
+ // difference: those specifiers are ill-formed regardless of the
+ // interpretation.
+ TPResult R = isCXXDeclarationSpecifier(/*BracedCastResult*/ TPResult::True,
+ &InvalidAsDeclSpec);
+ if (R == TPResult::Ambiguous) {
+ // We either have a decl-specifier followed by '(' or an undeclared
+ // identifier.
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return true;
+
+ // If we get to the end of the enum-base, we hit either a '{' or a ';'.
+ // Don't bother checking the enumerator-list.
+ if (Tok.is(tok::l_brace) || (AllowSemi && Tok.is(tok::semi)))
+ return true;
+
+ // A second decl-specifier unambiguously indicates an enum-base.
+ R = isCXXDeclarationSpecifier(TPResult::True, &InvalidAsDeclSpec);
+ }
+
+ return R != TPResult::False;
+}
+
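[Illustrative aside, not part of the patch] A minimal sketch of the enum-base forms that isEnumBase() above disambiguates; the names are hypothetical:

    enum Color : unsigned short { Red, Green, Blue };  // ':' starts an enum-base, the '{' confirms it
    enum Status : int;                                  // opaque-enum-declaration; the AllowSemi case stops at ';'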
/// Disambiguates between a declaration in a condition, a
/// simple-declaration in an init-statement, and an expression for
/// a condition of a if/switch statement.
@@ -781,6 +800,32 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
return CAK_NotAttributeSpecifier;
}
+bool Parser::TrySkipAttributes() {
+ while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
+ tok::kw_alignas)) {
+ if (Tok.is(tok::l_square)) {
+ ConsumeBracket();
+ if (Tok.isNot(tok::l_square))
+ return false;
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square) || Tok.isNot(tok::r_square))
+ return false;
+ // Note that explicitly checking for `[[` and `]]` allows this to fail as
+ // expected in the case of the Objective-C message send syntax.
+ ConsumeBracket();
+ } else {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return false;
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return false;
+ }
+ }
+
+ return true;
+}
+
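[Illustrative aside, not part of the patch] The attribute spellings TrySkipAttributes() skips during tentative parsing; the explicit `[[`/`]]` check is what keeps an Objective-C message send such as `[obj msg]` from being mistaken for a C++11 attribute. The declarations below are hypothetical (the __declspec form assumes -fms-extensions or -fdeclspec):

    [[nodiscard]] int parse();                  // C++11 attribute: '[[' ... ']]'
    __attribute__((noinline)) void slowPath();  // GNU attribute: keyword followed by '(' ... ')'
    __declspec(noinline) void msPath();         // Microsoft attribute
    alignas(16) extern int scratch[4];          // alignas specifier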
Parser::TPResult Parser::TryParsePtrOperatorSeq() {
while (true) {
if (TryAnnotateOptionalCXXScopeToken(true))
@@ -790,9 +835,14 @@ Parser::TPResult Parser::TryParsePtrOperatorSeq() {
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
// ptr-operator
ConsumeAnyToken();
+
+ // Skip attributes.
+ if (!TrySkipAttributes())
+ return TPResult::Error;
+
while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict,
tok::kw__Nonnull, tok::kw__Nullable,
- tok::kw__Null_unspecified))
+ tok::kw__Null_unspecified, tok::kw__Atomic))
ConsumeToken();
} else {
return TPResult::True;
@@ -966,10 +1016,16 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
NextToken().is(tok::kw_operator)))) &&
mayHaveIdentifier) {
// declarator-id
- if (Tok.is(tok::annot_cxxscope))
+ if (Tok.is(tok::annot_cxxscope)) {
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(
+ Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS);
+ if (SS.isInvalid())
+ return TPResult::Error;
ConsumeAnnotationToken();
- else if (Tok.is(tok::identifier))
+ } else if (Tok.is(tok::identifier)) {
TentativelyDeclaredIdentifiers.push_back(Tok.getIdentifierInfo());
+ }
if (Tok.is(tok::kw_operator)) {
if (TryParseOperatorId() == TPResult::Error)
return TPResult::Error;
@@ -1043,130 +1099,6 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
return TPResult::Ambiguous;
}
-Parser::TPResult
-Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
- switch (Kind) {
- // Obviously starts an expression.
- case tok::numeric_constant:
- case tok::char_constant:
- case tok::wide_char_constant:
- case tok::utf8_char_constant:
- case tok::utf16_char_constant:
- case tok::utf32_char_constant:
- case tok::string_literal:
- case tok::wide_string_literal:
- case tok::utf8_string_literal:
- case tok::utf16_string_literal:
- case tok::utf32_string_literal:
- case tok::l_square:
- case tok::l_paren:
- case tok::amp:
- case tok::ampamp:
- case tok::star:
- case tok::plus:
- case tok::plusplus:
- case tok::minus:
- case tok::minusminus:
- case tok::tilde:
- case tok::exclaim:
- case tok::kw_sizeof:
- case tok::kw___func__:
- case tok::kw_const_cast:
- case tok::kw_delete:
- case tok::kw_dynamic_cast:
- case tok::kw_false:
- case tok::kw_new:
- case tok::kw_operator:
- case tok::kw_reinterpret_cast:
- case tok::kw_static_cast:
- case tok::kw_this:
- case tok::kw_throw:
- case tok::kw_true:
- case tok::kw_typeid:
- case tok::kw_alignof:
- case tok::kw_noexcept:
- case tok::kw_nullptr:
- case tok::kw__Alignof:
- case tok::kw___null:
- case tok::kw___alignof:
- case tok::kw___builtin_choose_expr:
- case tok::kw___builtin_offsetof:
- case tok::kw___builtin_va_arg:
- case tok::kw___imag:
- case tok::kw___real:
- case tok::kw___FUNCTION__:
- case tok::kw___FUNCDNAME__:
- case tok::kw___FUNCSIG__:
- case tok::kw_L__FUNCTION__:
- case tok::kw_L__FUNCSIG__:
- case tok::kw___PRETTY_FUNCTION__:
- case tok::kw___uuidof:
-#define TYPE_TRAIT(N,Spelling,K) \
- case tok::kw_##Spelling:
-#include "clang/Basic/TokenKinds.def"
- return TPResult::True;
-
- // Obviously starts a type-specifier-seq:
- case tok::kw_char:
- case tok::kw_const:
- case tok::kw_double:
- case tok::kw__Float16:
- case tok::kw___float128:
- case tok::kw_enum:
- case tok::kw_half:
- case tok::kw_float:
- case tok::kw_int:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw___int128:
- case tok::kw_restrict:
- case tok::kw_short:
- case tok::kw_signed:
- case tok::kw_struct:
- case tok::kw_union:
- case tok::kw_unsigned:
- case tok::kw_void:
- case tok::kw_volatile:
- case tok::kw__Bool:
- case tok::kw__Complex:
- case tok::kw_class:
- case tok::kw_typename:
- case tok::kw_wchar_t:
- case tok::kw_char8_t:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw__Decimal32:
- case tok::kw__Decimal64:
- case tok::kw__Decimal128:
- case tok::kw___interface:
- case tok::kw___thread:
- case tok::kw_thread_local:
- case tok::kw__Thread_local:
- case tok::kw_typeof:
- case tok::kw___underlying_type:
- case tok::kw___cdecl:
- case tok::kw___stdcall:
- case tok::kw___fastcall:
- case tok::kw___thiscall:
- case tok::kw___regcall:
- case tok::kw___vectorcall:
- case tok::kw___unaligned:
- case tok::kw___vector:
- case tok::kw___pixel:
- case tok::kw___bool:
- case tok::kw__Atomic:
-#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
-#include "clang/Basic/OpenCLImageTypes.def"
- case tok::kw___unknown_anytype:
- return TPResult::False;
-
- default:
- break;
- }
-
- return TPResult::Ambiguous;
-}
-
bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
return std::find(TentativelyDeclaredIdentifiers.begin(),
TentativelyDeclaredIdentifiers.end(), II)
@@ -1178,8 +1110,9 @@ class TentativeParseCCC final : public CorrectionCandidateCallback {
public:
TentativeParseCCC(const Token &Next) {
WantRemainingKeywords = false;
- WantTypeSpecifiers = Next.isOneOf(tok::l_paren, tok::r_paren, tok::greater,
- tok::l_brace, tok::identifier);
+ WantTypeSpecifiers =
+ Next.isOneOf(tok::l_paren, tok::r_paren, tok::greater, tok::l_brace,
+ tok::identifier, tok::comma);
}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
@@ -1343,6 +1276,15 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// this is ambiguous. Typo-correct to type and expression keywords and
// to types and identifiers, in order to try to recover from errors.
TentativeParseCCC CCC(Next);
+ // Tentative parsing may not be done in the right evaluation context
+ // for the ultimate expression. Enter an unevaluated context to prevent
+ // Sema from immediately e.g. treating this lookup as a potential ODR-use.
+ // If we generate an expression annotation token and the parser actually
+ // claims it as an expression, we'll transform the expression to a
+ // potentially-evaluated one at that point.
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
switch (TryAnnotateName(&CCC)) {
case ANK_Error:
return TPResult::Error;
@@ -1520,7 +1462,9 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
// If lookup for the template-name found nothing, don't assume we have a
// definitive disambiguation result yet.
- if (TemplateId->Kind == TNK_Undeclared_template && InvalidAsDeclSpec) {
+ if ((TemplateId->hasInvalidName() ||
+ TemplateId->Kind == TNK_Undeclared_template) &&
+ InvalidAsDeclSpec) {
// 'template-id(' can be a valid expression but not a valid decl spec if
// the template-name is not declared, but we don't consider this to be a
// definitive disambiguation. In any other context, it's an error either
@@ -1528,6 +1472,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
*InvalidAsDeclSpec = NextToken().is(tok::l_paren);
return TPResult::Ambiguous;
}
+ if (TemplateId->hasInvalidName())
+ return TPResult::Error;
if (IsPlaceholderSpecifier(TemplateId, /*Lookahead=*/0))
return TPResult::True;
if (TemplateId->Kind != TNK_Type_template)
@@ -1547,6 +1493,13 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
NextToken().is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId =
takeTemplateIdAnnotation(NextToken());
+ if (TemplateId->hasInvalidName()) {
+ if (InvalidAsDeclSpec) {
+ *InvalidAsDeclSpec = NextToken().is(tok::l_paren);
+ return TPResult::Ambiguous;
+ }
+ return TPResult::Error;
+ }
if (IsPlaceholderSpecifier(TemplateId, /*Lookahead=*/1))
return TPResult::True;
}
@@ -1688,6 +1641,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1742,6 +1696,24 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw__Atomic:
return TPResult::True;
+ case tok::kw__ExtInt: {
+ if (NextToken().isNot(tok::l_paren))
+ return TPResult::Error;
+ RevertingTentativeParsingAction PA(*this);
+ ConsumeToken();
+ ConsumeParen();
+
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+
+ if (Tok.is(tok::l_paren))
+ return TPResult::Ambiguous;
+
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace))
+ return BracedCastResult;
+
+ return TPResult::True;
+ }
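[Illustrative aside, not part of the patch] Why the _ExtInt case above returns Ambiguous when another '(' follows the bit-width; names are hypothetical:

    _ExtInt(32) counter = 0;   // plainly a declaration using an extended integer type
    // _ExtInt(32)(x);         // ambiguous: a declaration of 'x' or a function-style cast of 'x'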
default:
return TPResult::False;
}
@@ -1774,6 +1746,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_bool:
case tok::kw_short:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
@@ -1782,6 +1755,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1993,17 +1967,14 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// (a) the previous parameter did, and
// (b) this must be the first declaration of the function, so we can't
// inherit any default arguments from elsewhere.
- // If we see an ')', then we've reached the end of a
- // parameter-declaration-clause, and the last param is missing its default
- // argument.
+ // FIXME: If we reach a ')' without consuming any '>'s, then this must
+ // also be a function parameter (that's missing its default argument).
if (VersusTemplateArgument)
- return Tok.isOneOf(tok::equal, tok::r_paren) ? TPResult::True
- : TPResult::False;
+ return Tok.is(tok::equal) ? TPResult::True : TPResult::False;
if (Tok.is(tok::equal)) {
// '=' assignment-expression
// Parse through assignment-expression.
- // FIXME: assignment-expression may contain an unparenthesized comma.
if (!SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch))
return TPResult::Error;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index 0b778bd24277..764d4e8e9d52 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
@@ -432,16 +433,7 @@ Parser::~Parser() {
PP.clearCodeCompletionHandler();
- if (getLangOpts().DelayedTemplateParsing &&
- !PP.isIncrementalProcessingEnabled() && !TemplateIds.empty()) {
- // If an ASTConsumer parsed delay-parsed templates in their
- // HandleTranslationUnit() method, TemplateIds created there were not
- // guarded by a DestroyTemplateIdAnnotationsRAIIObj object in
- // ParseTopLevelDecl(). Destroy them here.
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
- }
-
- assert(TemplateIds.empty() && "Still alive TemplateIdAnnotations around?");
+ DestroyTemplateIds();
}
/// Initialize - Warm up the parser.
@@ -537,11 +529,10 @@ void Parser::Initialize() {
ConsumeToken();
}
-void Parser::LateTemplateParserCleanupCallback(void *P) {
- // While this RAII helper doesn't bracket any actual work, the destructor will
- // clean up annotations that were created during ActOnEndOfTranslationUnit
- // when incremental processing is enabled.
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
+void Parser::DestroyTemplateIds() {
+ for (TemplateIdAnnotation *Id : TemplateIds)
+ Id->Destroy();
+ TemplateIds.clear();
}
/// Parse the first top-level declaration in a translation unit.
@@ -576,7 +567,7 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
/// declaration
/// [C++20] module-import-declaration
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
// Skip over the EOF token, flagging end of previous input for incremental
// processing
@@ -650,12 +641,18 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
return false;
case tok::eof:
+ // Check whether -fmax-tokens= was reached.
+ if (PP.getMaxTokens() != 0 && PP.getTokenCount() > PP.getMaxTokens()) {
+ PP.Diag(Tok.getLocation(), diag::warn_max_tokens_total)
+ << PP.getTokenCount() << PP.getMaxTokens();
+ SourceLocation OverrideLoc = PP.getMaxTokensOverrideLoc();
+ if (OverrideLoc.isValid()) {
+ PP.Diag(OverrideLoc, diag::note_max_tokens_total_override);
+ }
+ }
+
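[Illustrative aside, not part of the patch] A sketch of what the new eof check reports, assuming a translation unit compiled with -fmax-tokens= or carrying an in-source override; the limit value is hypothetical:

    // foo.cpp
    #pragma clang max_tokens_total 250000
    // If lexing the whole translation unit exceeds 250000 tokens, the eof handling
    // above emits warn_max_tokens_total, plus a note pointing at this override pragma.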
// Late template parsing can begin.
- if (getLangOpts().DelayedTemplateParsing)
- Actions.SetLateTemplateParser(LateTemplateParserCallback,
- PP.isIncrementalProcessingEnabled() ?
- LateTemplateParserCleanupCallback : nullptr,
- this);
+ Actions.SetLateTemplateParser(LateTemplateParserCallback, nullptr, this);
if (!PP.isIncrementalProcessingEnabled())
Actions.ActOnEndOfTranslationUnit();
//else don't tell Sema that we ended parsing: more input might come.
@@ -716,7 +713,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
Parser::DeclGroupPtrTy
Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS) {
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
if (PP.isCodeCompletionReached()) {
@@ -753,6 +750,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
return nullptr;
+ case tok::annot_pragma_float_control:
+ HandlePragmaFloatControl();
+ return nullptr;
case tok::annot_pragma_fp:
HandlePragmaFP();
break;
@@ -1518,13 +1518,13 @@ ExprResult Parser::ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc) {
assert(Tok.is(tok::kw_asm) && "Not an asm!");
SourceLocation Loc = ConsumeToken();
- if (Tok.is(tok::kw_volatile)) {
- // Remove from the end of 'asm' to the end of 'volatile'.
+ if (isGNUAsmQualifier(Tok)) {
+ // Remove from the end of 'asm' to the end of the asm qualifier.
SourceRange RemovalRange(PP.getLocForEndOfToken(Loc),
PP.getLocForEndOfToken(Tok.getLocation()));
-
- Diag(Tok, diag::warn_file_asm_volatile)
- << FixItHint::CreateRemoval(RemovalRange);
+ Diag(Tok, diag::err_global_asm_qualifier_ignored)
+ << GNUAsmQualifiers::getQualifierName(getGNUAsmQualifier(Tok))
+ << FixItHint::CreateRemoval(RemovalRange);
ConsumeToken();
}
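[Illustrative aside, not part of the patch] A file-scope asm qualifier that this change now rejects outright (previously only 'volatile' drew a warning); the fix-it removes the qualifier:

    asm volatile ("nop");   // error: the qualifier is meaningless on global-scope asm; fix-it drops 'volatile'
    asm ("nop");            // fine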
@@ -1594,7 +1594,9 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus &&
- ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext))
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext))
return ANK_Error;
if (Tok.isNot(tok::identifier) || SS.isInvalid()) {
@@ -1690,7 +1692,8 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
}
case Sema::NC_ContextIndependentExpr:
- Tok.setKind(tok::annot_primary_expr);
+ Tok.setKind(Actions.isUnevaluatedContext() ? tok::annot_uneval_primary_expr
+ : tok::annot_primary_expr);
setExprAnnotation(Tok, Classification.getExpression());
Tok.setAnnotationEndLoc(NameLoc);
if (SS.isNotEmpty())
@@ -1831,6 +1834,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false, nullptr,
/*IsTypename*/ true))
return true;
@@ -1864,9 +1868,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
Tok.getLocation());
} else if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- if (TemplateId->Kind != TNK_Type_template &&
- TemplateId->Kind != TNK_Dependent_template_name &&
- TemplateId->Kind != TNK_Undeclared_template) {
+ if (!TemplateId->mightBeType()) {
Diag(Tok, diag::err_typename_refers_to_non_type_template)
<< Tok.getAnnotationRange();
return true;
@@ -1875,14 +1877,13 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc);
+ Ty = TemplateId->isInvalid()
+ ? TypeError()
+ : Actions.ActOnTypenameType(
+ getCurScope(), TypenameLoc, SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
+ TemplateArgsPtr, TemplateId->RAngleLoc);
} else {
Diag(Tok, diag::err_expected_type_name_after_typename)
<< SS.getRange();
@@ -1891,7 +1892,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation EndLoc = Tok.getLastLoc();
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Ty.isInvalid() ? nullptr : Ty.get());
+ setTypeAnnotation(Tok, Ty);
Tok.setAnnotationEndLoc(EndLoc);
Tok.setLocation(TypenameLoc);
PP.AnnotateCachedTokens(Tok);
@@ -1903,7 +1904,9 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext*/ false))
return true;
return TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation);
@@ -2032,7 +2035,9 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
assert(MightBeCXXScopeToken() && "Cannot be a type or scope token!");
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext))
return true;
if (SS.isEmpty())
return false;
@@ -2141,7 +2146,8 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse nested-name-specifier.
if (getLangOpts().CPlusPlus)
- ParseOptionalCXXScopeSpecifier(Result.SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(Result.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
// Check nested-name specifier.
@@ -2152,10 +2158,12 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse the unqualified-id.
SourceLocation TemplateKWLoc; // FIXME: parsed, but unused.
- if (ParseUnqualifiedId(
- Result.SS, /*EnteringContext*/false, /*AllowDestructorName*/true,
- /*AllowConstructorName*/true, /*AllowDeductionGuide*/false, nullptr,
- &TemplateKWLoc, Result.Name)) {
+ if (ParseUnqualifiedId(Result.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext*/ false,
+ /*AllowDestructorName*/ true,
+ /*AllowConstructorName*/ true,
+ /*AllowDeductionGuide*/ false, &TemplateKWLoc,
+ Result.Name)) {
T.skipToEnd();
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 04611dadde66..3b7356893833 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -974,6 +974,14 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
<< Use.getUser()->getSourceRange();
}
+/// Diagnose uninitialized const reference usages.
+static bool DiagnoseUninitializedConstRefUse(Sema &S, const VarDecl *VD,
+ const UninitUse &Use) {
+ S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_const_reference)
+ << VD->getDeclName() << Use.getUser()->getSourceRange();
+ return true;
+}
+
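[Illustrative aside, not part of the patch] The kind of use the new const-reference path diagnoses; names are hypothetical:

    void consume(const int &v);
    void caller() {
      int n;        // never initialized
      consume(n);   // warning: 'n' is uninitialized when passed as a const reference argument
    }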
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
@@ -1506,13 +1514,14 @@ class UninitValsDiagReporter : public UninitVariablesHandler {
// order of diagnostics when calling flushDiagnostics().
typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
UsesMap uses;
+ UsesMap constRefUses;
public:
UninitValsDiagReporter(Sema &S) : S(S) {}
~UninitValsDiagReporter() override { flushDiagnostics(); }
- MappedType &getUses(const VarDecl *vd) {
- MappedType &V = uses[vd];
+ MappedType &getUses(UsesMap &um, const VarDecl *vd) {
+ MappedType &V = um[vd];
if (!V.getPointer())
V.setPointer(new UsesVec());
return V;
@@ -1520,11 +1529,17 @@ public:
void handleUseOfUninitVariable(const VarDecl *vd,
const UninitUse &use) override {
- getUses(vd).getPointer()->push_back(use);
+ getUses(uses, vd).getPointer()->push_back(use);
+ }
+
+ void handleConstRefUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ getUses(constRefUses, vd).getPointer()->push_back(use);
}
void handleSelfInit(const VarDecl *vd) override {
- getUses(vd).setInt(true);
+ getUses(uses, vd).setInt(true);
+ getUses(constRefUses, vd).setInt(true);
}
void flushDiagnostics() {
@@ -1571,6 +1586,32 @@ public:
}
uses.clear();
+
+ // Flush all const reference uses diags.
+ for (const auto &P : constRefUses) {
+ const VarDecl *vd = P.first;
+ const MappedType &V = P.second;
+
+ UsesVec *vec = V.getPointer();
+ bool hasSelfInit = V.getInt();
+
+ if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
+ DiagnoseUninitializedUse(S, vd,
+ UninitUse(vd->getInit()->IgnoreParenCasts(),
+ /* isAlwaysUninit */ true),
+ /* alwaysReportSelfInit */ true);
+ else {
+ for (const auto &U : *vec) {
+ if (DiagnoseUninitializedConstRefUse(S, vd, U))
+ break;
+ }
+ }
+
+ // Release the uses vector.
+ delete vec;
+ }
+
+ constRefUses.clear();
}
private:
@@ -1659,6 +1700,14 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
: getNotes();
}
+ OptionalNotes makeUnlockedHereNote(SourceLocation LocUnlocked,
+ StringRef Kind) {
+ return LocUnlocked.isValid()
+ ? getNotes(PartialDiagnosticAt(
+ LocUnlocked, S.PDiag(diag::note_unlocked_here) << Kind))
+ : getNotes();
+ }
+
public:
ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
: S(S), FunLocation(FL), FunEndLocation(FEL),
@@ -1685,13 +1734,14 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes());
}
- void handleUnmatchedUnlock(StringRef Kind, Name LockName,
- SourceLocation Loc) override {
+ void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc,
+ SourceLocation LocPreviousUnlock) override {
if (Loc.isInvalid())
Loc = FunLocation;
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
<< Kind << LockName);
- Warnings.emplace_back(std::move(Warning), getNotes());
+ Warnings.emplace_back(std::move(Warning),
+ makeUnlockedHereNote(LocPreviousUnlock, Kind));
}
void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
@@ -2184,7 +2234,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
!Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
- !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc())) {
+ !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_uninit_const_reference, D->getBeginLoc())) {
if (CFG *cfg = AC.getCFG()) {
UninitValsDiagReporter reporter(S);
UninitVariablesAnalysisStats stats;
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index b88ff9dd64cd..f1ad8aeaacbb 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
@@ -570,29 +571,10 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
if (const char *BriefComment = CCS->getBriefComment())
OS << " : " << BriefComment;
}
- for (const FixItHint &FixIt : Results[I].FixIts) {
- const SourceLocation BLoc = FixIt.RemoveRange.getBegin();
- const SourceLocation ELoc = FixIt.RemoveRange.getEnd();
-
- SourceManager &SM = SemaRef.SourceMgr;
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
- // Adjust for token ranges.
- if (FixIt.RemoveRange.isTokenRange())
- EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, SemaRef.LangOpts);
-
- OS << " (requires fix-it:"
- << " {" << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
- << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
- << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
- << SM.getColumnNumber(EInfo.first, EInfo.second) << "}"
- << " to \"" << FixIt.CodeToInsert << "\")";
- }
- OS << '\n';
break;
case CodeCompletionResult::RK_Keyword:
- OS << Results[I].Keyword << '\n';
+ OS << Results[I].Keyword;
break;
case CodeCompletionResult::RK_Macro:
@@ -602,13 +584,31 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
includeBriefComments())) {
OS << " : " << CCS->getAsString();
}
- OS << '\n';
break;
case CodeCompletionResult::RK_Pattern:
- OS << "Pattern : " << Results[I].Pattern->getAsString() << '\n';
+ OS << "Pattern : " << Results[I].Pattern->getAsString();
break;
}
+ for (const FixItHint &FixIt : Results[I].FixIts) {
+ const SourceLocation BLoc = FixIt.RemoveRange.getBegin();
+ const SourceLocation ELoc = FixIt.RemoveRange.getEnd();
+
+ SourceManager &SM = SemaRef.SourceMgr;
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+ // Adjust for token ranges.
+ if (FixIt.RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, SemaRef.LangOpts);
+
+ OS << " (requires fix-it:"
+ << " {" << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << SM.getColumnNumber(EInfo.first, EInfo.second) << "}"
+ << " to \"" << FixIt.CodeToInsert << "\")";
+ }
+ OS << '\n';
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
index 94d87974624e..f4c30c90ad27 100644
--- a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/LocInfoType.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Sema.h"
@@ -29,6 +30,9 @@ using namespace clang;
void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
+ assert(!TemplateId->isInvalid() &&
+ "should not convert invalid template-ids to unqualified-ids");
+
Kind = UnqualifiedIdKind::IK_TemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
@@ -37,6 +41,9 @@ void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
+ assert(!TemplateId->isInvalid() &&
+ "should not convert invalid template-ids to unqualified-ids");
+
Kind = UnqualifiedIdKind::IK_ConstructorTemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
@@ -130,6 +137,8 @@ void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) {
Range = Other.getSourceRange();
Builder.Adopt(Other);
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
}
SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const {
@@ -351,6 +360,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_half:
case TST_int:
case TST_int128:
+ case TST_extint:
case TST_struct:
case TST_interface:
case TST_union:
@@ -358,6 +368,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_unspecified:
case TST_void:
case TST_wchar:
+ case TST_BFloat16:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
return false;
@@ -529,6 +540,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
case DeclSpec::TST_int128: return "__int128";
+ case DeclSpec::TST_extint: return "_ExtInt";
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
@@ -555,6 +567,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_underlyingType: return "__underlying_type";
case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
case DeclSpec::TST_atomic: return "_Atomic";
+ case DeclSpec::TST_BFloat16: return "__bf16";
#define GENERIC_IMAGE_TYPE(ImgType, Id) \
case DeclSpec::TST_##ImgType##_t: \
return #ImgType "_t";
@@ -904,6 +917,27 @@ bool DeclSpec::SetTypeSpecError() {
return false;
}
+bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ assert(BitsExpr && "no expression provided!");
+ if (TypeSpecType == TST_error)
+ return false;
+
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+
+ TypeSpecType = TST_extint;
+ ExprRep = BitsExpr;
+ TSTLoc = KWLoc;
+ TSTNameLoc = KWLoc;
+ TypeSpecOwned = false;
+ return false;
+}
+
bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID, const LangOptions &Lang) {
// Duplicates are permitted in C99 onwards, but are not permitted in C89 or
@@ -1116,14 +1150,20 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSS)TypeSpecSign);
}
-
- // Only char/int are valid with vector bool. (PIM 2.1)
+ // Only char/int are valid with vector bool prior to Power10.
+ // Power10 adds instructions that produce vector bool data
+ // for quadwords as well, so allow vector bool __int128.
if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
- (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
+ (TypeSpecType != TST_int) && (TypeSpecType != TST_int128)) ||
+ TypeAltiVecPixel) {
S.Diag(TSTLoc, diag::err_invalid_vector_bool_decl_spec)
<< (TypeAltiVecPixel ? "__pixel" :
getSpecifierName((TST)TypeSpecType, Policy));
}
+ // vector bool __int128 requires Power10.
+ if ((TypeSpecType == TST_int128) &&
+ (!S.Context.getTargetInfo().hasFeature("power10-vector")))
+ S.Diag(TSTLoc, diag::err_invalid_vector_bool_int128_decl_spec);
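[Illustrative aside, not part of the patch] The case the Power10 check above gates, assuming AltiVec keywords are enabled and the power10-vector feature is present:

    // Compiled with e.g. -mcpu=pwr10 -maltivec:
    __vector __bool __int128 mask;   // now accepted; its elements are treated as unsigned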
// Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
@@ -1140,7 +1180,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
- (TypeSpecWidth != TSW_unspecified))
+ (TypeSpecType == TST_int128) || (TypeSpecWidth != TSW_unspecified))
TypeSpecSign = TSS_unsigned;
} else if (TypeSpecType == TST_double) {
// vector long double and vector long long double are never allowed.
@@ -1185,7 +1225,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar &&
- !IsFixedPointType) {
+ !IsFixedPointType && TypeSpecType != TST_extint) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
@@ -1232,11 +1272,13 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.getLocForEndOfToken(getTypeSpecComplexLoc()),
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
- } else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
+ } else if (TypeSpecType == TST_int || TypeSpecType == TST_char ||
+ TypeSpecType == TST_extint) {
// Note that this intentionally doesn't include _Complex _Bool.
if (!S.getLangOpts().CPlusPlus)
S.Diag(TSTLoc, diag::ext_integer_complex);
- } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
+ } else if (TypeSpecType != TST_float && TypeSpecType != TST_double &&
+ TypeSpecType != TST_float128) {
S.Diag(TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecComplex = TSC_unspecified;
diff --git a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
index 960e62d4a2db..b34243edea35 100644
--- a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
@@ -75,6 +75,7 @@ private:
void BuildScopeInformation(Decl *D, unsigned &ParentScope);
void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
unsigned &ParentScope);
+ void BuildScopeInformation(CompoundLiteralExpr *CLE, unsigned &ParentScope);
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
@@ -276,6 +277,16 @@ void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
}
}
+/// Build scope information for compound literals of C struct types that are
+/// non-trivial to destruct.
+void JumpScopeChecker::BuildScopeInformation(CompoundLiteralExpr *CLE,
+ unsigned &ParentScope) {
+ unsigned InDiag = diag::note_enters_compound_literal_scope;
+ unsigned OutDiag = diag::note_exits_compound_literal_scope;
+ Scopes.push_back(GotoScope(ParentScope, InDiag, OutDiag, CLE->getExprLoc()));
+ ParentScope = Scopes.size() - 1;
+}
+
/// BuildScopeInformation - The statements from CI to CE are known to form a
/// coherent VLA scope with a specified parent node. Walk through the
/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
@@ -529,11 +540,15 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// implementable but a lot of work which we haven't felt up to doing.
ExprWithCleanups *EWC = cast<ExprWithCleanups>(S);
for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
- const BlockDecl *BDecl = EWC->getObject(i);
- for (const auto &CI : BDecl->captures()) {
- VarDecl *variable = CI.getVariable();
- BuildScopeInformation(variable, BDecl, origParentScope);
- }
+ if (auto *BDecl = EWC->getObject(i).dyn_cast<BlockDecl *>())
+ for (const auto &CI : BDecl->captures()) {
+ VarDecl *variable = CI.getVariable();
+ BuildScopeInformation(variable, BDecl, origParentScope);
+ }
+ else if (auto *CLE = EWC->getObject(i).dyn_cast<CompoundLiteralExpr *>())
+ BuildScopeInformation(CLE, origParentScope);
+ else
+ llvm_unreachable("unexpected cleanup object type");
}
break;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 2b0cd6b8c4fc..80333e63127e 100644
--- a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -275,6 +275,12 @@ void MultiplexExternalSemaSource::ReadExtVectorDecls(
Sources[i]->ReadExtVectorDecls(Decls);
}
+void MultiplexExternalSemaSource::ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadDeclsToCheckForDeferredDiags(Decls);
+}
+
void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates(
llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
for(size_t i = 0; i < Sources.size(); ++i)
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index 9d6bb411eff8..745363a6b43f 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -60,10 +60,17 @@ def FuncExtKhrLocalInt32ExtendedAtomics : FunctionExtension<"cl_khr_local_int32
def FuncExtKhrInt64BaseAtomics : FunctionExtension<"cl_khr_int64_base_atomics">;
def FuncExtKhrInt64ExtendedAtomics : FunctionExtension<"cl_khr_int64_extended_atomics">;
def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_image">;
+def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
// Multiple extensions
-def FuncExtKhrMipmapAndWrite3d : FunctionExtension<"cl_khr_mipmap_image cl_khr_3d_image_writes">;
+def FuncExtKhrMipmapWritesAndWrite3d : FunctionExtension<"cl_khr_mipmap_image_writes cl_khr_3d_image_writes">;
+
+// Arm extensions.
+def ArmIntegerDotProductInt8 : FunctionExtension<"cl_arm_integer_dot_product_int8">;
+def ArmIntegerDotProductAccumulateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int8">;
+def ArmIntegerDotProductAccumulateInt16 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int16">;
+def ArmIntegerDotProductAccumulateSaturateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_saturate_int8">;
// Qualified Type. These map to ASTContext::QualType.
class QualType<string _Name, bit _IsAbstract=0> {
@@ -120,7 +127,7 @@ class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTName> {
// OpenCL pointer types (e.g. int*, float*, ...).
class PointerType<Type _Ty, AddressSpace _AS = DefaultAS> :
- Type<_Ty.Name, _Ty.QTName> {
+ Type<_Ty.Name, _Ty.QTName> {
let AddrSpace = _AS.Name;
// Inherited fields
let VecWidth = _Ty.VecWidth;
@@ -154,7 +161,7 @@ class VolatileType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
// OpenCL image types (e.g. image2d).
class ImageType<Type _Ty, string _AccessQualifier> :
- Type<_Ty.Name, QualType<_Ty.QTName.Name#_AccessQualifier#"Ty", 0>> {
+ Type<_Ty.Name, QualType<_Ty.QTName.Name#_AccessQualifier#"Ty", 0>> {
let VecWidth = 0;
let AccessQualifier = _AccessQualifier;
// Inherited fields
@@ -165,8 +172,7 @@ class ImageType<Type _Ty, string _AccessQualifier> :
}
// List of Types.
-class TypeList<string _Name, list<Type> _Type> {
- string Name = _Name;
+class TypeList<list<Type> _Type> {
list<Type> List = _Type;
}
@@ -195,7 +201,7 @@ class TypeList<string _Name, list<Type> _Type> {
// A declaration f(GenT, SGenT) results in the combinations
// f(half, half), f(half2, half), f(int, int), f(int2, int) .
class GenericType<string _Ty, TypeList _TypeList, IntList _VectorList> :
- Type<_Ty, QualType<"null", 1>> {
+ Type<_Ty, QualType<"null", 1>> {
// Possible element types of the generic type.
TypeList TypeList = _TypeList;
// Possible vector sizes of the types in the TypeList.
@@ -259,8 +265,8 @@ def Half : Type<"half", QualType<"HalfTy">>;
def Size : Type<"size_t", QualType<"getSizeType()">>;
def PtrDiff : Type<"ptrdiff_t", QualType<"getPointerDiffType()">>;
def IntPtr : Type<"intptr_t", QualType<"getIntPtrType()">>;
-def UIntPtr : Type<"uintPtr_t", QualType<"getUIntPtrType()">>;
-def Void : Type<"void_t", QualType<"VoidTy">>;
+def UIntPtr : Type<"uintptr_t", QualType<"getUIntPtrType()">>;
+def Void : Type<"void", QualType<"VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.2: Built-in Vector Data Types.
// Built-in vector data types are created by TableGen's OpenCLBuiltinEmitter.
@@ -268,21 +274,36 @@ def Void : Type<"void_t", QualType<"VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.3: Other Built-in Data Types.
// The image definitions are "abstract". They should not be used without
// specifying an access qualifier (RO/WO/RW).
-def Image1d : Type<"Image1d", QualType<"OCLImage1d", 1>>;
-def Image2d : Type<"Image2d", QualType<"OCLImage2d", 1>>;
-def Image3d : Type<"Image3d", QualType<"OCLImage3d", 1>>;
-def Image1dArray : Type<"Image1dArray", QualType<"OCLImage1dArray", 1>>;
-def Image1dBuffer : Type<"Image1dBuffer", QualType<"OCLImage1dBuffer", 1>>;
-def Image2dArray : Type<"Image2dArray", QualType<"OCLImage2dArray", 1>>;
-def Image2dDepth : Type<"Image2dDepth", QualType<"OCLImage2dDepth", 1>>;
-def Image2dArrayDepth : Type<"Image2dArrayDepth", QualType<"OCLImage2dArrayDepth", 1>>;
-def Image2dMsaa : Type<"Image2dMsaa", QualType<"OCLImage2dMSAA", 1>>;
-def Image2dArrayMsaa : Type<"Image2dArrayMsaa", QualType<"OCLImage2dArrayMSAA", 1>>;
-def Image2dMsaaDepth : Type<"Image2dMsaaDepth", QualType<"OCLImage2dMSAADepth", 1>>;
-def Image2dArrayMsaaDepth : Type<"Image2dArrayMsaaDepth", QualType<"OCLImage2dArrayMSAADepth", 1>>;
-
-def Sampler : Type<"Sampler", QualType<"OCLSamplerTy">>;
-def Event : Type<"Event", QualType<"OCLEventTy">>;
+def Image1d : Type<"image1d_t", QualType<"OCLImage1d", 1>>;
+def Image2d : Type<"image2d_t", QualType<"OCLImage2d", 1>>;
+def Image3d : Type<"image3d_t", QualType<"OCLImage3d", 1>>;
+def Image1dArray : Type<"image1d_array_t", QualType<"OCLImage1dArray", 1>>;
+def Image1dBuffer : Type<"image1d_buffer_t", QualType<"OCLImage1dBuffer", 1>>;
+def Image2dArray : Type<"image2d_array_t", QualType<"OCLImage2dArray", 1>>;
+def Image2dDepth : Type<"image2d_depth_t", QualType<"OCLImage2dDepth", 1>>;
+def Image2dArrayDepth : Type<"image2d_array_depth_t", QualType<"OCLImage2dArrayDepth", 1>>;
+def Image2dMsaa : Type<"image2d_msaa_t", QualType<"OCLImage2dMSAA", 1>>;
+def Image2dArrayMsaa : Type<"image2d_array_msaa_t", QualType<"OCLImage2dArrayMSAA", 1>>;
+def Image2dMsaaDepth : Type<"image2d_msaa_depth_t", QualType<"OCLImage2dMSAADepth", 1>>;
+def Image2dArrayMsaaDepth : Type<"image2d_array_msaa_depth_t", QualType<"OCLImage2dArrayMSAADepth", 1>>;
+
+def Sampler : Type<"sampler_t", QualType<"OCLSamplerTy">>;
+def ClkEvent : Type<"clk_event_t", QualType<"OCLClkEventTy">>;
+def Event : Type<"event_t", QualType<"OCLEventTy">>;
+def Queue : Type<"queue_t", QualType<"OCLQueueTy">>;
+def ReserveId : Type<"reserve_id_t", QualType<"OCLReserveIDTy">>;
+
+// OpenCL v2.0 s6.13.11: Atomic integer and floating-point types.
+def AtomicInt : Type<"atomic_int", QualType<"getAtomicType(Context.IntTy)">>;
+def AtomicUInt : Type<"atomic_uint", QualType<"getAtomicType(Context.UnsignedIntTy)">>;
+def AtomicLong : Type<"atomic_long", QualType<"getAtomicType(Context.LongTy)">>;
+def AtomicULong : Type<"atomic_ulong", QualType<"getAtomicType(Context.UnsignedLongTy)">>;
+def AtomicFloat : Type<"atomic_float", QualType<"getAtomicType(Context.FloatTy)">>;
+def AtomicDouble : Type<"atomic_double", QualType<"getAtomicType(Context.DoubleTy)">>;
+def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"getAtomicType(Context.getIntPtrType())">>;
+def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"getAtomicType(Context.getUIntPtrType())">>;
+def AtomicSize : Type<"atomic_size_t", QualType<"getAtomicType(Context.getSizeType())">>;
+def AtomicPtrDiff : Type<"atomic_ptrdiff_t", QualType<"getAtomicType(Context.getPointerDiffType())">>;
//===----------------------------------------------------------------------===//
// Definitions of OpenCL gentype variants
@@ -305,20 +326,20 @@ def Vec16 : IntList<"Vec16", [16]>;
def Vec1234 : IntList<"Vec1234", [1, 2, 3, 4]>;
// Type lists.
-def TLAll : TypeList<"TLAll", [Char, UChar, Short, UShort, Int, UInt, Long, ULong, Float, Double, Half]>;
-def TLAllUnsigned : TypeList<"TLAllUnsigned", [UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong, UInt, ULong, UShort]>;
-def TLFloat : TypeList<"TLFloat", [Float, Double, Half]>;
-def TLSignedInts : TypeList<"TLSignedInts", [Char, Short, Int, Long]>;
-def TLUnsignedInts : TypeList<"TLUnsignedInts", [UChar, UShort, UInt, ULong]>;
+def TLAll : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULong, Float, Double, Half]>;
+def TLAllUnsigned : TypeList<[UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong, UInt, ULong, UShort]>;
+def TLFloat : TypeList<[Float, Double, Half]>;
+def TLSignedInts : TypeList<[Char, Short, Int, Long]>;
+def TLUnsignedInts : TypeList<[UChar, UShort, UInt, ULong]>;
-def TLIntLongFloats : TypeList<"TLIntLongFloats", [Int, UInt, Long, ULong, Float, Double, Half]>;
+def TLIntLongFloats : TypeList<[Int, UInt, Long, ULong, Float, Double, Half]>;
// All unsigned integer types twice, to facilitate unsigned return types for e.g.
// uchar abs(char) and
// uchar abs(uchar).
-def TLAllUIntsTwice : TypeList<"TLAllUIntsTwice", [UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong]>;
+def TLAllUIntsTwice : TypeList<[UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong]>;
-def TLAllInts : TypeList<"TLAllInts", [Char, UChar, Short, UShort, Int, UInt, Long, ULong]>;
+def TLAllInts : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULong]>;
// GenType definitions for multiple base types (e.g. all floating point types,
// or all integer types).
@@ -348,8 +369,7 @@ foreach Type = [Char, UChar, Short, UShort,
foreach VecSizes = [VecAndScalar, VecNoScalar] in {
def "GenType" # Type # VecSizes :
GenericType<"GenType" # Type # VecSizes,
- TypeList<"GL" # Type.Name, [Type]>,
- VecSizes>;
+ TypeList<[Type]>, VecSizes>;
}
}
@@ -357,8 +377,7 @@ foreach Type = [Char, UChar, Short, UShort,
foreach Type = [Float, Double, Half] in {
def "GenType" # Type # Vec1234 :
GenericType<"GenType" # Type # Vec1234,
- TypeList<"GL" # Type.Name, [Type]>,
- Vec1234>;
+ TypeList<[Type]>, Vec1234>;
}
@@ -374,7 +393,11 @@ foreach RType = [Float, Double, Half, Char, UChar, Short,
UShort, Int, UInt, Long, ULong] in {
foreach IType = [Float, Double, Half, Char, UChar, Short,
UShort, Int, UInt, Long, ULong] in {
- foreach sat = ["", "_sat"] in {
+ // Conversions to integer types have a sat and a non-sat variant.
+ foreach sat = !cond(!eq(RType.Name, "float") : [""],
+ !eq(RType.Name, "double") : [""],
+ !eq(RType.Name, "half") : [""],
+ 1 : ["", "_sat"]) in {
foreach rnd = ["", "_rte", "_rtn", "_rtp", "_rtz"] in {
def : Builtin<"convert_" # RType.Name # sat # rnd, [RType, IType],
Attr.Const>;
@@ -667,7 +690,7 @@ foreach name = ["isfinite", "isinf", "isnan", "isnormal", "signbit"] in {
def : Builtin<name, [GenTypeShortVecNoScalar, GenTypeHalfVecNoScalar], Attr.Const>;
}
foreach name = ["any", "all"] in {
- def : Builtin<name, [Int, AIGenTypeN], Attr.Const>;
+ def : Builtin<name, [Int, SGenTypeN], Attr.Const>;
}
// --- 2 arguments ---
@@ -722,17 +745,17 @@ let MaxVersion = CL20 in {
def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, AS>]>;
+ def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, AS>]>;
+ def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, AS>]>;
+ def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, AS>]>;
+ def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, AS>]>;
+ def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, AS>]>;
+ def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, AS>]>;
+ def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, AS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, AS>]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, AS>]>;
+ def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, AS>]>;
}
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
@@ -764,17 +787,17 @@ let MinVersion = CL20 in {
def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
}
foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, GenericAS>]>;
}
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
@@ -805,24 +828,21 @@ foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, ConstantAS>]>;
}
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstorea_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, ConstantAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, ConstantAS>]>;
- }
- }
}
let MaxVersion = CL20 in {
foreach AS = [GlobalAS, LocalAS, PrivateAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
}
foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- def : Builtin<"vstore_half" # rnd, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<"vstore_half" # rnd, [Void, Double, Size, PointerType<Half, AS>]>;
+ foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
+ def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
+ }
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vstore_half" # VSize # rnd] in {
def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
@@ -835,14 +855,17 @@ let MaxVersion = CL20 in {
let MinVersion = CL20 in {
foreach AS = [GenericAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
}
foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- def : Builtin<"vstore_half" # rnd, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<"vstore_half" # rnd, [Void, Double, Size, PointerType<Half, AS>]>;
+ foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
+ def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
+ }
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vstore_half" # VSize # rnd] in {
def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
@@ -855,6 +878,7 @@ let MinVersion = CL20 in {
foreach AS = [ConstantAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
@@ -976,6 +1000,45 @@ foreach AS = [GlobalAS, LocalAS] in {
}
}
}
+// OpenCL v2.0 s6.13.11 - Atomic Functions.
+let MinVersion = CL20 in {
+ foreach TypePair = [[AtomicInt, Int], [AtomicUInt, UInt],
+ [AtomicLong, Long], [AtomicULong, ULong],
+ [AtomicFloat, Float], [AtomicDouble, Double]] in {
+ def : Builtin<"atomic_init",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_store",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_load",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>]>;
+ def : Builtin<"atomic_exchange",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ foreach Variant = ["weak", "strong"] in {
+ def : Builtin<"atomic_compare_exchange_" # Variant,
+ [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
+ PointerType<TypePair[1], GenericAS>, TypePair[1]]>;
+ }
+ }
+
+ foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
+ [AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
+ [AtomicIntPtr, IntPtr, PtrDiff],
+ [AtomicUIntPtr, UIntPtr, PtrDiff]] in {
+ foreach ModOp = ["add", "sub"] in {
+ def : Builtin<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ }
+ }
+ foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
+ [AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
+ [AtomicIntPtr, IntPtr, IntPtr],
+ [AtomicUIntPtr, UIntPtr, UIntPtr]] in {
+ foreach ModOp = ["or", "xor", "and", "min", "max"] in {
+ def : Builtin<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ }
+ }
+}
//--------------------------------------------------------------------
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
@@ -1172,14 +1235,43 @@ let MinVersion = CL20 in {
}
-// OpenCL v2.0 s9.17.3: Additions to section 6.13.1: Work-Item Functions
-let MinVersion = CL20 in {
- let Extension = FuncExtKhrSubgroups in {
- def get_sub_group_size : Builtin<"get_sub_group_size", [UInt]>;
- def get_max_sub_group_size : Builtin<"get_max_sub_group_size", [UInt]>;
- def get_num_sub_groups : Builtin<"get_num_sub_groups", [UInt]>;
- }
-}
+//--------------------------------------------------------------------
+// OpenCL v2.0 s6.13.16 - Pipe Functions
+// --- Table 27 ---
+// Defined in Builtins.def
+
+// --- Table 28 ---
+// Builtins taking pipe arguments are defined in Builtins.def
+def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
+
+// --- Table 29 ---
+// Defined in Builtins.def
+
+
+//--------------------------------------------------------------------
+// OpenCL v2.0 s6.13.17 - Enqueuing Kernels
+// --- Table 30 ---
+// Defined in Builtins.def
+
+// --- Table 32 ---
+// Defined in Builtins.def
+
+// --- Table 33 ---
+def : Builtin<"enqueue_marker",
+ [Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
+
+// --- Table 34 ---
+def : Builtin<"retain_event", [Void, ClkEvent]>;
+def : Builtin<"release_event", [Void, ClkEvent]>;
+def : Builtin<"create_user_event", [ClkEvent]>;
+def : Builtin<"is_valid_event", [Bool, ClkEvent]>;
+def : Builtin<"set_user_event_status", [Void, ClkEvent, Int]>;
+// TODO: capture_event_profiling_info
+
+// --- Table 35 ---
+def : Builtin<"get_default_queue", [Queue]>;
+// TODO: ndrange functions
+
//--------------------------------------------------------------------
// End of the builtin functions defined in the OpenCL C specification.
@@ -1274,6 +1366,16 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
+ // Added to section 6.13.14.5
+ foreach aQual = ["RO", "WO", "RW"] in {
+ foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
+ def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
+ }
+ }
+}
+
+// Write functions are enabled using a separate extension.
+let Extension = FuncExtKhrMipmapImageWrites in {
// Added to section 6.13.14.4.
foreach aQual = ["WO"] in {
foreach imgTy = [Image2d] in {
@@ -1298,7 +1400,7 @@ let Extension = FuncExtKhrMipmapImage in {
def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Int, Float]>;
- let Extension = FuncExtKhrMipmapAndWrite3d in {
+ let Extension = FuncExtKhrMipmapWritesAndWrite3d in {
foreach imgTy = [Image3d] in {
def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
@@ -1306,15 +1408,8 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
- // Added to section 6.13.14.5
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
- def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
- }
- }
}
-
//--------------------------------------------------------------------
// OpenCL Extension v2.0 s18.3 - Creating OpenCL Memory Objects from OpenGL MSAA Textures
let Extension = FuncExtKhrGlMsaaSharing in {
@@ -1346,6 +1441,70 @@ let Extension = FuncExtKhrGlMsaaSharing in {
}
def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
}
- def : Builtin<"get_image_array_size", [Size, ImageType<Image2dArrayMsaaDepth, aQual>], Attr.Const>;
+ foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
+ def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
+ }
+ }
+}
+
+//--------------------------------------------------------------------
+// OpenCL Extension v2.0 s28 - Subgroups
+// --- Table 28.2.1 ---
+let Extension = FuncExtKhrSubgroups in {
+ foreach name = ["get_sub_group_size", "get_max_sub_group_size",
+ "get_num_sub_groups", "get_sub_group_id",
+ "get_sub_group_local_id"] in {
+ def : Builtin<name, [UInt]>;
+ }
+ let MinVersion = CL20 in {
+ foreach name = ["get_enqueued_num_sub_groups"] in {
+ def : Builtin<name, [UInt]>;
+ }
+ }
+}
+
+// --- Table 28.2.2 ---
+// TODO: sub_group_barrier
+
+// --- Table 28.2.4 ---
+let Extension = FuncExtKhrSubgroups in {
+ foreach name = ["sub_group_all", "sub_group_any"] in {
+ def : Builtin<name, [Int, Int], Attr.Convergent>;
+ }
+ foreach name = ["sub_group_broadcast"] in {
+ def : Builtin<name, [IntLongFloatGenType1, IntLongFloatGenType1, UInt], Attr.Convergent>;
+ }
+ foreach name = ["sub_group_reduce_", "sub_group_scan_exclusive_",
+ "sub_group_scan_inclusive_"] in {
+ foreach op = ["add", "min", "max"] in {
+ def : Builtin<name # op, [IntLongFloatGenType1, IntLongFloatGenType1], Attr.Convergent>;
+ }
+ }
+}
+
+//--------------------------------------------------------------------
+// Arm extensions.
+let Extension = ArmIntegerDotProductInt8 in {
+ foreach name = ["arm_dot"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateInt8 in {
+ foreach name = ["arm_dot_acc"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>, Int]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateInt16 in {
+ foreach name = ["arm_dot_acc"] in {
+ def : Builtin<name, [UInt, VectorType<UShort, 2>, VectorType<UShort, 2>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Short, 2>, VectorType<Short, 2>, Int]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateSaturateInt8 in {
+ foreach name = ["arm_dot_acc_sat"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>, Int]>;
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
index 5d0a734f237a..3ef8498baffd 100644
--- a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
@@ -19,12 +19,15 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <cstddef>
#include <utility>
using namespace clang;
+LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
+
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
IdentifierInfo *Ident) {
IdentifierLoc *Result = new (Ctx) IdentifierLoc;
@@ -100,47 +103,60 @@ void AttributePool::takePool(AttributePool &pool) {
pool.Attrs.clear();
}
-struct ParsedAttrInfo {
- unsigned NumArgs : 4;
- unsigned OptArgs : 4;
- unsigned HasCustomParsing : 1;
- unsigned IsTargetSpecific : 1;
- unsigned IsType : 1;
- unsigned IsStmt : 1;
- unsigned IsKnownToGCC : 1;
- unsigned IsSupportedByPragmaAttribute : 1;
-
- bool (*DiagAppertainsToDecl)(Sema &S, const ParsedAttr &Attr, const Decl *);
- bool (*DiagLangOpts)(Sema &S, const ParsedAttr &Attr);
- bool (*ExistsInTarget)(const TargetInfo &Target);
- unsigned (*SpellingIndexToSemanticSpelling)(const ParsedAttr &Attr);
- void (*GetPragmaAttributeMatchRules)(
- llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
- const LangOptions &LangOpts);
-};
-
namespace {
#include "clang/Sema/AttrParsedAttrImpl.inc"
} // namespace
-static const ParsedAttrInfo &getInfo(const ParsedAttr &A) {
- return AttrInfoMap[A.getKind()];
+const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
+ // If we have a ParsedAttrInfo for this ParsedAttr then return that.
+ if ((size_t)A.getParsedKind() < llvm::array_lengthof(AttrInfoMap))
+ return *AttrInfoMap[A.getParsedKind()];
+
+ // If this is an ignored attribute then return an appropriate ParsedAttrInfo.
+ static const ParsedAttrInfo IgnoredParsedAttrInfo(
+ AttributeCommonInfo::IgnoredAttribute);
+ if (A.getParsedKind() == AttributeCommonInfo::IgnoredAttribute)
+ return IgnoredParsedAttrInfo;
+
+ // Otherwise this may be an attribute defined by a plugin. First instantiate
+ // all plugin attributes if we haven't already done so.
+ static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
+ PluginAttrInstances;
+ if (PluginAttrInstances->empty())
+ for (auto It : ParsedAttrInfoRegistry::entries())
+ PluginAttrInstances->emplace_back(It.instantiate());
+
+ // Search for a ParsedAttrInfo whose name and syntax match.
+ std::string FullName = A.getNormalizedFullName();
+ AttributeCommonInfo::Syntax SyntaxUsed = A.getSyntax();
+ if (SyntaxUsed == AttributeCommonInfo::AS_ContextSensitiveKeyword)
+ SyntaxUsed = AttributeCommonInfo::AS_Keyword;
+
+ for (auto &Ptr : *PluginAttrInstances)
+ for (auto &S : Ptr->Spellings)
+ if (S.Syntax == SyntaxUsed && S.NormalizedFullName == FullName)
+ return *Ptr;
+
+ // If we failed to find a match then return a default ParsedAttrInfo.
+ static const ParsedAttrInfo DefaultParsedAttrInfo(
+ AttributeCommonInfo::UnknownAttribute);
+ return DefaultParsedAttrInfo;
}
-unsigned ParsedAttr::getMinArgs() const { return getInfo(*this).NumArgs; }
+unsigned ParsedAttr::getMinArgs() const { return getInfo().NumArgs; }
unsigned ParsedAttr::getMaxArgs() const {
- return getMinArgs() + getInfo(*this).OptArgs;
+ return getMinArgs() + getInfo().OptArgs;
}
bool ParsedAttr::hasCustomParsing() const {
- return getInfo(*this).HasCustomParsing;
+ return getInfo().HasCustomParsing;
}
bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
- return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
+ return getInfo().diagAppertainsToDecl(S, *this, D);
}
bool ParsedAttr::appliesToDecl(const Decl *D,
@@ -152,33 +168,33 @@ void ParsedAttr::getMatchRules(
const LangOptions &LangOpts,
SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &MatchRules)
const {
- return getInfo(*this).GetPragmaAttributeMatchRules(MatchRules, LangOpts);
+ return getInfo().getPragmaAttributeMatchRules(MatchRules, LangOpts);
}
bool ParsedAttr::diagnoseLangOpts(Sema &S) const {
- return getInfo(*this).DiagLangOpts(S, *this);
+ return getInfo().diagLangOpts(S, *this);
}
bool ParsedAttr::isTargetSpecificAttr() const {
- return getInfo(*this).IsTargetSpecific;
+ return getInfo().IsTargetSpecific;
}
-bool ParsedAttr::isTypeAttr() const { return getInfo(*this).IsType; }
+bool ParsedAttr::isTypeAttr() const { return getInfo().IsType; }
-bool ParsedAttr::isStmtAttr() const { return getInfo(*this).IsStmt; }
+bool ParsedAttr::isStmtAttr() const { return getInfo().IsStmt; }
bool ParsedAttr::existsInTarget(const TargetInfo &Target) const {
- return getInfo(*this).ExistsInTarget(Target);
+ return getInfo().existsInTarget(Target);
}
-bool ParsedAttr::isKnownToGCC() const { return getInfo(*this).IsKnownToGCC; }
+bool ParsedAttr::isKnownToGCC() const { return getInfo().IsKnownToGCC; }
bool ParsedAttr::isSupportedByPragmaAttribute() const {
- return getInfo(*this).IsSupportedByPragmaAttribute;
+ return getInfo().IsSupportedByPragmaAttribute;
}
unsigned ParsedAttr::getSemanticSpelling() const {
- return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
+ return getInfo().spellingIndexToSemanticSpelling(*this);
}
bool ParsedAttr::hasVariadicArg() const {
@@ -186,5 +202,5 @@ bool ParsedAttr::hasVariadicArg() const {
// claim that as being variadic. If we someday get an attribute that
// legitimately bumps up against that maximum, we can use another bit to track
// whether it's truly variadic or not.
- return getInfo(*this).OptArgs == 15;
+ return getInfo().OptArgs == 15;
}
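The ParsedAttr.cpp change above replaces the file-local ParsedAttrInfo struct and direct table lookup with ParsedAttrInfo::get() backed by an LLVM registry, which is what lets out-of-tree plugins contribute attributes. The sketch below shows roughly how a plugin would hook into that registry; it is a minimal sketch, assuming the public ParsedAttrInfo/ParsedAttrInfoRegistry interface declared in clang/Sema/ParsedAttr.h at this revision, and the attribute name "example_attr" is invented for illustration.

    #include "clang/Sema/ParsedAttr.h"

    using namespace clang;

    namespace {
    struct ExampleAttrInfo : public ParsedAttrInfo {
      ExampleAttrInfo() {
        // One GNU-style spelling: __attribute__((example_attr)).
        static constexpr Spelling S[] = {
            {AttributeCommonInfo::AS_GNU, "example_attr"}};
        Spellings = S;
      }
    };
    } // namespace

    // Registered instances are what ParsedAttrInfo::get() walks (via
    // ParsedAttrInfoRegistry::entries()) when the built-in AttrInfoMap misses;
    // matching is by normalized spelling plus syntax, as the loop over
    // Ptr->Spellings above shows.
    static ParsedAttrInfoRegistry::Add<ExampleAttrInfo>
        X("example_attr", "example attribute plugin");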
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 9cfce5a63b1d..2f2b52106f3d 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/DeclCXX.h"
@@ -22,6 +23,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
@@ -142,10 +144,13 @@ public:
} // end namespace sema
} // end namespace clang
+const unsigned Sema::MaxAlignmentExponent;
+const unsigned Sema::MaximumAlignment;
+
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
: ExternalSource(nullptr), isMultiplexExternalSource(false),
- FPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
+ CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
SourceMgr(PP.getSourceManager()), CollectStats(false),
CodeCompleter(CodeCompleter), CurContext(nullptr),
@@ -154,8 +159,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
LangOpts.getMSPointerToMemberRepresentationMethod()),
VtorDispStack(LangOpts.getVtorDispMode()), PackStack(0),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
- CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr),
- PragmaAttributeCurrentTargetDecl(nullptr),
+ CodeSegStack(nullptr), FpPragmaStack(0xffffffff), CurInitSeg(nullptr),
+ VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
@@ -951,9 +956,7 @@ void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
PerformPendingInstantiations();
}
- // Finalize analysis of OpenMP-specific constructs.
- if (LangOpts.OpenMP)
- finalizeOpenMPDelayedAnalysis();
+ emitDeferredDiags();
assert(LateParsedInstantiations.empty() &&
"end of TU template instantiation should not create more "
@@ -1006,6 +1009,11 @@ void Sema::ActOnEndOfTranslationUnit() {
LateParsedInstantiations.begin(),
LateParsedInstantiations.end());
LateParsedInstantiations.clear();
+
+ if (LangOpts.PCHInstantiateTemplates) {
+ llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
+ PerformPendingInstantiations();
+ }
}
DiagnoseUnterminatedPragmaPack();
@@ -1437,38 +1445,184 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
auto FnIt = S.DeviceKnownEmittedFns.find(FD);
while (FnIt != S.DeviceKnownEmittedFns.end()) {
+ // Respect error limit.
+ if (S.Diags.hasFatalErrorOccurred())
+ return;
DiagnosticBuilder Builder(
S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
Builder << FnIt->second.FD;
- Builder.setForceEmit();
-
FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
}
}
-// Emit any deferred diagnostics for FD and erase them from the map in which
-// they're stored.
-static void emitDeferredDiags(Sema &S, FunctionDecl *FD, bool ShowCallStack) {
- auto It = S.DeviceDeferredDiags.find(FD);
- if (It == S.DeviceDeferredDiags.end())
- return;
- bool HasWarningOrError = false;
- for (PartialDiagnosticAt &PDAt : It->second) {
- const SourceLocation &Loc = PDAt.first;
- const PartialDiagnostic &PD = PDAt.second;
- HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
- PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
- DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
- Builder.setForceEmit();
- PD.Emit(Builder);
+namespace {
+
+/// Helper class that emits deferred diagnostic messages if an entity that
+/// directly or indirectly uses the function causing the deferred diagnostic
+/// messages is known to be emitted.
+///
+/// During parsing of the AST, certain diagnostic messages are recorded as
+/// deferred diagnostics, since it is unknown whether the functions containing
+/// them will be emitted. A list of potentially emitted functions, and of
+/// variables that may trigger emission of functions, is also recorded.
+/// DeferredDiagnosticsEmitter recursively visits the functions used by each
+/// such function in order to emit the deferred diagnostics.
+///
+/// During the visit, certain OpenMP directives or initializers of variables
+/// with certain OpenMP attributes will cause any subsequently visited
+/// functions to enter a state which is called the OpenMP device context in this
+/// implementation. The state is exited when the directive or initializer is
+/// exited. This state can change the emission states of subsequent uses
+/// of functions.
+///
+/// Conceptually the functions or variables to be visited form a use graph
+/// where the parent node uses the child node. At any point of the visit,
+/// the tree nodes traversed from the tree root to the current node form a use
+/// stack. The emission state of the current node depends on two factors:
+/// 1. the emission state of the root node
+/// 2. whether the current node is in OpenMP device context
+/// If the function is decided to be emitted, its contained deferred diagnostics
+/// are emitted, together with the information about the use stack.
+///
+class DeferredDiagnosticsEmitter
+ : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
+public:
+ typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
+
+ // Whether the function is already in the current use-path.
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
+
+ // The current use-path.
+ llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
+
+ // Whether the visiting of the function has been done. Done[0] is for the
+ // case not in OpenMP device context. Done[1] is for the case in OpenMP
+ // device context. We need two sets because diagnostics emission may be
+ // different depending on whether it is in OpenMP device context.
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
+
+ // Emission state of the root node of the current use graph.
+ bool ShouldEmitRootNode;
+
+ // Current OpenMP device context level. It is initialized to 0 and each
+ // entering of device context increases it by 1 and each exit decreases
+ // it by 1. Non-zero value indicates it is currently in device context.
+ unsigned InOMPDeviceContext;
+
+ DeferredDiagnosticsEmitter(Sema &S)
+ : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
+
+ void VisitOMPTargetDirective(OMPTargetDirective *Node) {
+ ++InOMPDeviceContext;
+ Inherited::VisitOMPTargetDirective(Node);
+ --InOMPDeviceContext;
}
- S.DeviceDeferredDiags.erase(It);
- // FIXME: Should this be called after every warning/error emitted in the loop
- // above, instead of just once per function? That would be consistent with
- // how we handle immediate errors, but it also seems like a bit much.
- if (HasWarningOrError && ShowCallStack)
- emitCallStackNotes(S, FD);
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ if (isa<VarDecl>(D))
+ return;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ checkFunc(Loc, FD);
+ else
+ Inherited::visitUsedDecl(Loc, D);
+ }
+
+ void checkVar(VarDecl *VD) {
+ assert(VD->isFileVarDecl() &&
+ "Should only check file-scope variables");
+ if (auto *Init = VD->getInit()) {
+ auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
+ if (IsDev)
+ ++InOMPDeviceContext;
+ this->Visit(Init);
+ if (IsDev)
+ --InOMPDeviceContext;
+ }
+ }
+
+ void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
+ auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
+ FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
+ if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
+ S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
+ return;
+ // Finalize analysis of OpenMP-specific constructs.
+ if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1)
+ S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
+ if (Caller)
+ S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
+    // Always emit deferred diagnostics for the direct users. This does not
+    // lead to an explosion of diagnostics, since each user is visited at most
+    // twice.
+ if (ShouldEmitRootNode || InOMPDeviceContext)
+ emitDeferredDiags(FD, Caller);
+ // Do not revisit a function if the function body has been completely
+ // visited before.
+ if (!Done.insert(FD).second)
+ return;
+ InUsePath.insert(FD);
+ UsePath.push_back(FD);
+ if (auto *S = FD->getBody()) {
+ this->Visit(S);
+ }
+ UsePath.pop_back();
+ InUsePath.erase(FD);
+ }
+
+ void checkRecordedDecl(Decl *D) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
+ Sema::FunctionEmissionStatus::Emitted;
+ checkFunc(SourceLocation(), FD);
+ } else
+ checkVar(cast<VarDecl>(D));
+ }
+
+ // Emit any deferred diagnostics for FD
+ void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
+ auto It = S.DeviceDeferredDiags.find(FD);
+ if (It == S.DeviceDeferredDiags.end())
+ return;
+ bool HasWarningOrError = false;
+ bool FirstDiag = true;
+ for (PartialDiagnosticAt &PDAt : It->second) {
+ // Respect error limit.
+ if (S.Diags.hasFatalErrorOccurred())
+ return;
+ const SourceLocation &Loc = PDAt.first;
+ const PartialDiagnostic &PD = PDAt.second;
+ HasWarningOrError |=
+ S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
+ DiagnosticsEngine::Warning;
+ {
+ DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
+ PD.Emit(Builder);
+ }
+      // Emit the note on the first diagnostic, in case too many diagnostics
+      // would otherwise prevent the note from being emitted.
+ if (FirstDiag && HasWarningOrError && ShowCallStack) {
+ emitCallStackNotes(S, FD);
+ FirstDiag = false;
+ }
+ }
+ }
+};
+} // namespace
+
+void Sema::emitDeferredDiags() {
+ if (ExternalSource)
+ ExternalSource->ReadDeclsToCheckForDeferredDiags(
+ DeclsToCheckForDeferredDiags);
+
+ if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
+ DeclsToCheckForDeferredDiags.empty())
+ return;
+
+ DeferredDiagnosticsEmitter DDE(*this);
+ for (auto D : DeclsToCheckForDeferredDiags)
+ DDE.checkRecordedDecl(D);
}
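To make the control flow above concrete, here is a minimal sketch of the kind of translation unit DeferredDiagnosticsEmitter walks; it assumes an OpenMP offloading compile, and the function names are invented. Roughly speaking, diagnostics recorded in DeviceDeferredDiags for device_helper() are emitted once the visitor reaches it through the target region (with InOMPDeviceContext non-zero), together with a "called by" note pointing at run(), while diagnostics deferred for host_helper() stay suppressed unless run() itself is known to be emitted for the device.

    void host_helper() {}    // visited outside device context
    void device_helper() {}  // visited while InOMPDeviceContext > 0

    void run() {
    #pragma omp target
      device_helper();       // inside the target region: device context
      host_helper();         // plain host call: no device context
    }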
// In CUDA, there are some constructs which may appear in semantically-valid
@@ -1541,71 +1695,6 @@ Sema::DeviceDiagBuilder::~DeviceDiagBuilder() {
}
}
-// Indicate that this function (and thus everything it transtively calls) will
-// be codegen'ed, and emit any deferred diagnostics on this function and its
-// (transitive) callees.
-void Sema::markKnownEmitted(
- Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
- SourceLocation OrigLoc,
- const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted) {
- // Nothing to do if we already know that FD is emitted.
- if (IsKnownEmitted(S, OrigCallee)) {
- assert(!S.DeviceCallGraph.count(OrigCallee));
- return;
- }
-
- // We've just discovered that OrigCallee is known-emitted. Walk our call
- // graph to see what else we can now discover also must be emitted.
-
- struct CallInfo {
- FunctionDecl *Caller;
- FunctionDecl *Callee;
- SourceLocation Loc;
- };
- llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
- llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
- Seen.insert(OrigCallee);
- while (!Worklist.empty()) {
- CallInfo C = Worklist.pop_back_val();
- assert(!IsKnownEmitted(S, C.Callee) &&
- "Worklist should not contain known-emitted functions.");
- S.DeviceKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
- emitDeferredDiags(S, C.Callee, C.Caller);
-
- // If this is a template instantiation, explore its callgraph as well:
- // Non-dependent calls are part of the template's callgraph, while dependent
- // calls are part of to the instantiation's call graph.
- if (auto *Templ = C.Callee->getPrimaryTemplate()) {
- FunctionDecl *TemplFD = Templ->getAsFunction();
- if (!Seen.count(TemplFD) && !S.DeviceKnownEmittedFns.count(TemplFD)) {
- Seen.insert(TemplFD);
- Worklist.push_back(
- {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
- }
- }
-
- // Add all functions called by Callee to our worklist.
- auto CGIt = S.DeviceCallGraph.find(C.Callee);
- if (CGIt == S.DeviceCallGraph.end())
- continue;
-
- for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
- CGIt->second) {
- FunctionDecl *NewCallee = FDLoc.first;
- SourceLocation CallLoc = FDLoc.second;
- if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
- continue;
- Seen.insert(NewCallee);
- Worklist.push_back(
- {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
- }
-
- // C.Callee is now known-emitted, so we no longer need to maintain its list
- // of callees in DeviceCallGraph.
- S.DeviceCallGraph.erase(CGIt);
- }
-}
-
Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
if (LangOpts.OpenMP)
return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID)
@@ -1613,10 +1702,59 @@ Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
+
+ if (getLangOpts().SYCLIsDevice)
+ return SYCLDiagIfDeviceCode(Loc, DiagID);
+
return DeviceDiagBuilder(DeviceDiagBuilder::K_Immediate, Loc, DiagID,
getCurFunctionDecl(), *this);
}
+void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
+ if (isUnevaluatedContext())
+ return;
+
+ Decl *C = cast<Decl>(getCurLexicalContext());
+
+ // Memcpy operations for structs containing a member with unsupported type
+ // are ok, though.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
+ if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+ MD->isTrivial())
+ return;
+
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
+ if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
+ return;
+ }
+
+ auto CheckType = [&](QualType Ty) {
+ if (Ty->isDependentType())
+ return;
+
+ if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
+ ((Ty->isFloat128Type() ||
+ (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
+ !Context.getTargetInfo().hasFloat128Type()) ||
+ (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
+ !Context.getTargetInfo().hasInt128Type())) {
+ targetDiag(Loc, diag::err_device_unsupported_type)
+ << D << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
+ << Context.getTargetInfo().getTriple().str();
+ targetDiag(D->getLocation(), diag::note_defined_here) << D;
+ }
+ };
+
+ QualType Ty = D->getType();
+ CheckType(Ty);
+
+ if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
+ for (const auto &ParamTy : FPTy->param_types())
+ CheckType(ParamTy);
+ CheckType(FPTy->getReturnType());
+ }
+}
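checkDeviceDecl() above rejects uses of types the offload device cannot represent: _Float16 without hardware support, 128-bit floating point, and 128-bit integers. A minimal sketch of what it flags, assuming a device target without native 128-bit integer support; the declaration names are invented.

    __int128 big_counter;   // flagged with err_device_unsupported_type when a
                            // device function uses it; a note_defined_here note
                            // points back at this declaration

    void device_fn() {
      big_counter += 1;     // the use site is where targetDiag() fires
    }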
+
/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
@@ -1818,7 +1956,7 @@ void Sema::PopCompoundScope() {
/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
- return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
+ return getCurFunction()->hasUnrecoverableErrorOccurred();
}
void Sema::setFunctionHasBranchIntoScope() {
@@ -2270,16 +2408,8 @@ std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
template <typename T, typename MapT>
std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
- std::string ExtensionNames = "";
auto Loc = Map.find(FDT);
-
- for (auto const& I : Loc->second) {
- ExtensionNames += I;
- ExtensionNames += " ";
- }
- ExtensionNames.pop_back();
-
- return ExtensionNames;
+ return llvm::join(Loc->second, " ");
}
bool Sema::isOpenCLDisabledDecl(Decl *FD) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index cd2a65276b09..b354e810974c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -256,12 +256,15 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName) {
PragmaClangSection *CSec;
+ int SectionFlags = ASTContext::PSF_Read;
switch (SecKind) {
case PragmaClangSectionKind::PCSK_BSS:
CSec = &PragmaClangBSSSection;
+ SectionFlags |= ASTContext::PSF_Write | ASTContext::PSF_ZeroInit;
break;
case PragmaClangSectionKind::PCSK_Data:
CSec = &PragmaClangDataSection;
+ SectionFlags |= ASTContext::PSF_Write;
break;
case PragmaClangSectionKind::PCSK_Rodata:
CSec = &PragmaClangRodataSection;
@@ -271,6 +274,7 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
break;
case PragmaClangSectionKind::PCSK_Text:
CSec = &PragmaClangTextSection;
+ SectionFlags |= ASTContext::PSF_Execute;
break;
default:
llvm_unreachable("invalid clang section kind");
@@ -281,8 +285,11 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
return;
}
+ if (UnifySection(SecName, SectionFlags, PragmaLoc))
+ return;
+
CSec->Valid = true;
- CSec->SectionName = SecName;
+ CSec->SectionName = std::string(SecName);
CSec->PragmaLocation = PragmaLoc;
}
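With the change above, #pragma clang section registers its sections through UnifySection() with read/write/zero-init/execute flags, so a later section of the same name but incompatible flags is diagnosed as a conflict instead of being silently accepted. A minimal sketch, with invented section names and assuming a target that honours these pragmas:

    #pragma clang section bss=".my_sec"     // registered as read+write+zeroinit
    #pragma clang section rodata=".my_sec"  // same name, read-only flags:
                                            // reported via err_section_conflict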
@@ -407,6 +414,70 @@ void Sema::ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
Consumer.HandleTopLevelDecl(DeclGroupRef(PDMD));
}
+void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
+ PragmaMsStackAction Action,
+ PragmaFloatControlKind Value) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ if ((Action == PSK_Push_Set || Action == PSK_Push || Action == PSK_Pop) &&
+ !(CurContext->isTranslationUnit()) && !CurContext->isNamespace()) {
+ // Push and pop can only occur at file or namespace scope.
+ Diag(Loc, diag::err_pragma_fc_pp_scope);
+ return;
+ }
+ switch (Value) {
+ default:
+ llvm_unreachable("invalid pragma float_control kind");
+ case PFC_Precise:
+ NewFPFeatures.setFPPreciseEnabled(true);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_NoPrecise:
+ if (CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Strict)
+ Diag(Loc, diag::err_pragma_fc_noprecise_requires_noexcept);
+ else if (CurFPFeatures.getAllowFEnvAccess())
+ Diag(Loc, diag::err_pragma_fc_noprecise_requires_nofenv);
+ else
+ NewFPFeatures.setFPPreciseEnabled(false);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_Except:
+ if (!isPreciseFPEnabled())
+ Diag(Loc, diag::err_pragma_fc_except_requires_precise);
+ else
+ NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_NoExcept:
+ NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Ignore);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_Push:
+ FpPragmaStack.Act(Loc, Sema::PSK_Push_Set, StringRef(),
+ NewFPFeatures.getAsOpaqueInt());
+ break;
+ case PFC_Pop:
+ if (FpPragmaStack.Stack.empty()) {
+ Diag(Loc, diag::warn_pragma_pop_failed) << "float_control"
+ << "stack empty";
+ return;
+ }
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures.getAsOpaqueInt());
+ NewValue = FpPragmaStack.CurrentValue;
+ break;
+ }
+ FPOptionsOverride NewOverrides;
+ if (NewValue != FpPragmaStack.DefaultValue)
+ NewOverrides.getFromOpaqueInt(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
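ActOnPragmaFloatControl() above is driven by the new FpPragmaStack and enforces the documented constraints: push/pop only at file or namespace scope, "except" only while precise semantics are enabled, and "noprecise" rejected while strict exceptions or fenv access are active. A minimal usage sketch at file scope (the function name is invented):

    #pragma float_control(push)           // save the current FP state
    #pragma float_control(precise, on)
    #pragma float_control(except, on)     // only accepted while precise is on
    double strict_sum(double a, double b) { return a + b; }
    #pragma float_control(pop)            // restore the saved FP state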
+
void Sema::ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind RepresentationMethod,
SourceLocation PragmaLoc) {
@@ -423,83 +494,52 @@ void Sema::ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
VtorDispStack.Act(PragmaLoc, Action, StringRef(), Mode);
}
-template<typename ValueType>
-void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
- PragmaMsStackAction Action,
- llvm::StringRef StackSlotLabel,
- ValueType Value) {
- if (Action == PSK_Reset) {
- CurrentValue = DefaultValue;
- CurrentPragmaLocation = PragmaLocation;
- return;
- }
- if (Action & PSK_Push)
- Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
- PragmaLocation);
- else if (Action & PSK_Pop) {
- if (!StackSlotLabel.empty()) {
- // If we've got a label, try to find it and jump there.
- auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
- return x.StackSlotLabel == StackSlotLabel;
- });
- // If we found the label so pop from there.
- if (I != Stack.rend()) {
- CurrentValue = I->Value;
- CurrentPragmaLocation = I->PragmaLocation;
- Stack.erase(std::prev(I.base()), Stack.end());
- }
- } else if (!Stack.empty()) {
- // We do not have a label, just pop the last entry.
- CurrentValue = Stack.back().Value;
- CurrentPragmaLocation = Stack.back().PragmaLocation;
- Stack.pop_back();
- }
- }
- if (Action & PSK_Set) {
- CurrentValue = Value;
- CurrentPragmaLocation = PragmaLocation;
- }
-}
-
bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *Decl) {
- auto Section = Context.SectionInfos.find(SectionName);
- if (Section == Context.SectionInfos.end()) {
+ SourceLocation PragmaLocation;
+ if (auto A = Decl->getAttr<SectionAttr>())
+ if (A->isImplicit())
+ PragmaLocation = A->getLocation();
+ auto SectionIt = Context.SectionInfos.find(SectionName);
+ if (SectionIt == Context.SectionInfos.end()) {
Context.SectionInfos[SectionName] =
- ASTContext::SectionInfo(Decl, SourceLocation(), SectionFlags);
+ ASTContext::SectionInfo(Decl, PragmaLocation, SectionFlags);
return false;
}
// A pre-declared section takes precedence w/o diagnostic.
- if (Section->second.SectionFlags == SectionFlags ||
- !(Section->second.SectionFlags & ASTContext::PSF_Implicit))
+ const auto &Section = SectionIt->second;
+ if (Section.SectionFlags == SectionFlags ||
+ ((SectionFlags & ASTContext::PSF_Implicit) &&
+ !(Section.SectionFlags & ASTContext::PSF_Implicit)))
return false;
- auto OtherDecl = Section->second.Decl;
- Diag(Decl->getLocation(), diag::err_section_conflict)
- << Decl << OtherDecl;
- Diag(OtherDecl->getLocation(), diag::note_declared_at)
- << OtherDecl->getName();
- if (auto A = Decl->getAttr<SectionAttr>())
- if (A->isImplicit())
- Diag(A->getLocation(), diag::note_pragma_entered_here);
- if (auto A = OtherDecl->getAttr<SectionAttr>())
- if (A->isImplicit())
- Diag(A->getLocation(), diag::note_pragma_entered_here);
+ Diag(Decl->getLocation(), diag::err_section_conflict) << Decl << Section;
+ if (Section.Decl)
+ Diag(Section.Decl->getLocation(), diag::note_declared_at)
+ << Section.Decl->getName();
+ if (PragmaLocation.isValid())
+ Diag(PragmaLocation, diag::note_pragma_entered_here);
+ if (Section.PragmaSectionLocation.isValid())
+ Diag(Section.PragmaSectionLocation, diag::note_pragma_entered_here);
return true;
}
bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation) {
- auto Section = Context.SectionInfos.find(SectionName);
- if (Section != Context.SectionInfos.end()) {
- if (Section->second.SectionFlags == SectionFlags)
+ auto SectionIt = Context.SectionInfos.find(SectionName);
+ if (SectionIt != Context.SectionInfos.end()) {
+ const auto &Section = SectionIt->second;
+ if (Section.SectionFlags == SectionFlags)
return false;
- if (!(Section->second.SectionFlags & ASTContext::PSF_Implicit)) {
+ if (!(Section.SectionFlags & ASTContext::PSF_Implicit)) {
Diag(PragmaSectionLocation, diag::err_section_conflict)
- << "this" << "a prior #pragma section";
- Diag(Section->second.PragmaSectionLocation,
- diag::note_pragma_entered_here);
+ << "this" << Section;
+ if (Section.Decl)
+ Diag(Section.Decl->getLocation(), diag::note_declared_at)
+ << Section.Decl->getName();
+ if (Section.PragmaSectionLocation.isValid())
+ Diag(Section.PragmaSectionLocation, diag::note_pragma_entered_here);
return true;
}
}
@@ -926,31 +966,85 @@ void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
}
}
-void Sema::ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC) {
+void Sema::ActOnPragmaFPContract(SourceLocation Loc,
+ LangOptions::FPModeKind FPC) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
switch (FPC) {
- case LangOptions::FPC_On:
- FPFeatures.setAllowFPContractWithinStatement();
+ case LangOptions::FPM_On:
+ NewFPFeatures.setAllowFPContractWithinStatement();
break;
- case LangOptions::FPC_Fast:
- FPFeatures.setAllowFPContractAcrossStatement();
+ case LangOptions::FPM_Fast:
+ NewFPFeatures.setAllowFPContractAcrossStatement();
break;
- case LangOptions::FPC_Off:
- FPFeatures.setDisallowFPContract();
+ case LangOptions::FPM_Off:
+ NewFPFeatures.setDisallowFPContract();
break;
}
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
+ FpPragmaStack.Act(Loc, Sema::PSK_Set, StringRef(),
+ NewFPFeatures.getAsOpaqueInt());
}
-void Sema::ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC) {
- switch (FPC) {
- case LangOptions::FEA_On:
- FPFeatures.setAllowFEnvAccess();
- break;
- case LangOptions::FEA_Off:
- FPFeatures.setDisallowFEnvAccess();
- break;
- }
+void Sema::ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
}
+void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setRoundingModeOverride(FPR);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
+
+void Sema::setExceptionMode(SourceLocation Loc,
+ LangOptions::FPExceptionModeKind FPE) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setFPExceptionModeOverride(FPE);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
+
+void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ if (IsEnabled) {
+ // Verify Microsoft restriction:
+ // You can't enable fenv_access unless precise semantics are enabled.
+ // Precise semantics can be enabled either by the float_control
+ // pragma, or by using the /fp:precise or /fp:strict compiler options
+ if (!isPreciseFPEnabled())
+ Diag(Loc, diag::err_pragma_fenv_requires_precise);
+ NewFPFeatures.setAllowFEnvAccessOverride(true);
+ } else
+ NewFPFeatures.setAllowFEnvAccessOverride(false);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
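ActOnPragmaFEnvAccess() now mirrors the Microsoft rule spelled out in the comment: fenv access cannot be enabled unless precise FP semantics are in effect. A short sketch of the resulting behavior, assuming default /fp settings and no -ffast-math:

    #pragma float_control(precise, off)
    #pragma STDC FENV_ACCESS ON   // rejected: err_pragma_fenv_requires_precise

    #pragma float_control(precise, on)
    #pragma STDC FENV_ACCESS ON   // accepted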
void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
new file mode 100644
index 000000000000..74c4b9e16f74
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
@@ -0,0 +1,964 @@
+//===--- SemaAvailability.cpp - Availability attribute handling -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file processes the availability attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+
+using namespace clang;
+using namespace sema;
+
+static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
+ const Decl *D) {
+ // Check each AvailabilityAttr to find the one for this platform.
+ for (const auto *A : D->attrs()) {
+ if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
+ // FIXME: this is copied from CheckAvailability. We should try to
+ // de-duplicate.
+
+ // Check if this is an App Extension "platform", and if so chop off
+ // the suffix for matching with the actual platform.
+ StringRef ActualPlatform = Avail->getPlatform()->getName();
+ StringRef RealizedPlatform = ActualPlatform;
+ if (Context.getLangOpts().AppExt) {
+ size_t suffix = RealizedPlatform.rfind("_app_extension");
+ if (suffix != StringRef::npos)
+ RealizedPlatform = RealizedPlatform.slice(0, suffix);
+ }
+
+ StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+
+ // Match the platform name.
+ if (RealizedPlatform == TargetPlatform)
+ return Avail;
+ }
+ }
+ return nullptr;
+}
+
+/// The diagnostic we should emit for \c D, and the declaration that
+/// originated it, or \c AR_Available.
+///
+/// \param D The declaration to check.
+/// \param Message If non-null, this will be populated with the message from
+/// the availability attribute that is selected.
+/// \param ClassReceiver If we're checking the method of a class message
+/// send, the class. Otherwise nullptr.
+static std::pair<AvailabilityResult, const NamedDecl *>
+ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
+ std::string *Message,
+ ObjCInterfaceDecl *ClassReceiver) {
+ AvailabilityResult Result = D->getAvailability(Message);
+
+  // For typedefs, if the typedef declaration appears available, look
+  // to the underlying type to see if it is more restrictive.
+ while (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (Result == AR_Available) {
+ if (const auto *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ D = TT->getDecl();
+ Result = D->getAvailability(Message);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Forward class declarations get their attributes from their definition.
+ if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (IDecl->getDefinition()) {
+ D = IDecl->getDefinition();
+ Result = D->getAvailability(Message);
+ }
+ }
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const auto *TheEnumDecl = dyn_cast<EnumDecl>(DC)) {
+ Result = TheEnumDecl->getAvailability(Message);
+ D = TheEnumDecl;
+ }
+ }
+
+ // For +new, infer availability from -init.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (S.NSAPIObj && ClassReceiver) {
+ ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
+ S.NSAPIObj->getInitSelector());
+ if (Init && Result == AR_Available && MD->isClassMethod() &&
+ MD->getSelector() == S.NSAPIObj->getNewSelector() &&
+ MD->definedInNSObject(S.getASTContext())) {
+ Result = Init->getAvailability(Message);
+ D = Init;
+ }
+ }
+ }
+
+ return {Result, D};
+}
+
+
+/// Determine whether we should emit a diagnostic for \c K and \c DeclVersion in
+/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
+/// in a deprecated context, but not the other way around.
+static bool
+ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
+ VersionTuple DeclVersion, Decl *Ctx,
+ const NamedDecl *OffendingDecl) {
+ assert(K != AR_Available && "Expected an unavailable declaration here!");
+
+ // Checks if we should emit the availability diagnostic in the context of C.
+ auto CheckContext = [&](const Decl *C) {
+ if (K == AR_NotYetIntroduced) {
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
+ if (AA->getIntroduced() >= DeclVersion)
+ return true;
+ } else if (K == AR_Deprecated) {
+ if (C->isDeprecated())
+ return true;
+ } else if (K == AR_Unavailable) {
+ // It is perfectly fine to refer to an 'unavailable' Objective-C method
+ // when it is referenced from within the @implementation itself. In this
+ // context, we interpret unavailable as a form of access control.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
+ if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
+ if (MD->getClassInterface() == Impl->getClassInterface())
+ return true;
+ }
+ }
+ }
+
+ if (C->isUnavailable())
+ return true;
+ return false;
+ };
+
+ do {
+ if (CheckContext(Ctx))
+ return false;
+
+    // An implementation implicitly has the availability of the interface,
+    // unless it is the "+load" method.
+ if (const auto *MethodD = dyn_cast<ObjCMethodDecl>(Ctx))
+ if (MethodD->isClassMethod() &&
+ MethodD->getSelector().getAsString() == "load")
+ return true;
+
+ if (const auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
+ if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ }
+ // A category implicitly has the availability of the interface.
+ else if (const auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ } while ((Ctx = cast_or_null<Decl>(Ctx->getDeclContext())));
+
+ return true;
+}
+
+static bool
+shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
+ const VersionTuple &DeploymentVersion,
+ const VersionTuple &DeclVersion) {
+ const auto &Triple = Context.getTargetInfo().getTriple();
+ VersionTuple ForceAvailabilityFromVersion;
+ switch (Triple.getOS()) {
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
+ break;
+ case llvm::Triple::WatchOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/4);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
+ break;
+ default:
+ // New targets should always warn about availability.
+ return Triple.getVendor() == llvm::Triple::Apple;
+ }
+ return DeploymentVersion >= ForceAvailabilityFromVersion ||
+ DeclVersion >= ForceAvailabilityFromVersion;
+}
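shouldDiagnoseAvailabilityByDefault() is what decides that unguarded uses warn even without -Wunguarded-availability once either the deployment target or the declaration's introduced version reaches iOS 11 / watchOS 4 / macOS 10.13. A minimal sketch of the pattern it classifies, assuming a macOS deployment target below 10.13 and an invented function name:

    void shiny_api(void) __attribute__((availability(macos, introduced = 10.13)));

    void caller(void) {
      shiny_api();                            // warn_unguarded_availability_new
      if (__builtin_available(macOS 10.13, *))
        shiny_api();                          // guarded: no warning
    }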
+
+static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
+ for (Decl *Ctx = OrigCtx; Ctx;
+ Ctx = cast_or_null<Decl>(Ctx->getDeclContext())) {
+ if (isa<TagDecl>(Ctx) || isa<FunctionDecl>(Ctx) || isa<ObjCMethodDecl>(Ctx))
+ return cast<NamedDecl>(Ctx);
+ if (auto *CD = dyn_cast<ObjCContainerDecl>(Ctx)) {
+ if (auto *Imp = dyn_cast<ObjCImplDecl>(Ctx))
+ return Imp->getClassInterface();
+ return CD;
+ }
+ }
+
+ return dyn_cast<NamedDecl>(OrigCtx);
+}
+
+namespace {
+
+struct AttributeInsertion {
+ StringRef Prefix;
+ SourceLocation Loc;
+ StringRef Suffix;
+
+ static AttributeInsertion createInsertionAfter(const NamedDecl *D) {
+ return {" ", D->getEndLoc(), ""};
+ }
+ static AttributeInsertion createInsertionAfter(SourceLocation Loc) {
+ return {" ", Loc, ""};
+ }
+ static AttributeInsertion createInsertionBefore(const NamedDecl *D) {
+ return {"", D->getBeginLoc(), "\n"};
+ }
+};
+
+} // end anonymous namespace
+
+/// Tries to parse a string as an ObjC method name.
+///
+/// \param Name The string to parse. Expected to originate from availability
+/// attribute argument.
+/// \param SlotNames The vector that will be populated with slot names. In case
+/// of unsuccessful parsing can contain invalid data.
+/// \returns The number of method parameters if parsing was successful, None
+/// otherwise.
+static Optional<unsigned>
+tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
+ const LangOptions &LangOpts) {
+ // Accept replacements starting with - or + as valid ObjC method names.
+ if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
+ Name = Name.drop_front(1);
+ if (Name.empty())
+ return None;
+ Name.split(SlotNames, ':');
+ unsigned NumParams;
+ if (Name.back() == ':') {
+ // Remove an empty string at the end that doesn't represent any slot.
+ SlotNames.pop_back();
+ NumParams = SlotNames.size();
+ } else {
+ if (SlotNames.size() != 1)
+ // Not a valid method name, just a colon-separated string.
+ return None;
+ NumParams = 0;
+ }
+ // Verify all slot names are valid.
+ bool AllowDollar = LangOpts.DollarIdents;
+ for (StringRef S : SlotNames) {
+ if (S.empty())
+ continue;
+ if (!isValidIdentifier(S, AllowDollar))
+ return None;
+ }
+ return NumParams;
+}
+
+/// Returns a source location at which it's appropriate to insert a new
+/// attribute for the given declaration \p D.
+static Optional<AttributeInsertion>
+createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (isa<ObjCPropertyDecl>(D))
+ return AttributeInsertion::createInsertionAfter(D);
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (MD->hasBody())
+ return None;
+ return AttributeInsertion::createInsertionAfter(D);
+ }
+ if (const auto *TD = dyn_cast<TagDecl>(D)) {
+ SourceLocation Loc =
+ Lexer::getLocForEndOfToken(TD->getInnerLocStart(), 0, SM, LangOpts);
+ if (Loc.isInvalid())
+ return None;
+ // Insert after the 'struct'/whatever keyword.
+ return AttributeInsertion::createInsertionAfter(Loc);
+ }
+ return AttributeInsertion::createInsertionBefore(D);
+}
+
+/// Actually emit an availability diagnostic for a reference to an unavailable
+/// decl.
+///
+/// \param Ctx The context that the reference occurred in
+/// \param ReferringDecl The exact declaration that was referenced.
+/// \param OffendingDecl A related decl to \c ReferringDecl that has an
+/// availability attribute corresponding to \c K attached to it. Note that this
+/// may not be the same as ReferringDecl, e.g. if an EnumDecl is annotated and
+/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
+/// and OffendingDecl is the EnumDecl.
+static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
+ Decl *Ctx, const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Diagnostics for deprecated or unavailable.
+ unsigned diag, diag_message, diag_fwdclass_message;
+ unsigned diag_available_here = diag::note_availability_specified_here;
+ SourceLocation NoteLocation = OffendingDecl->getLocation();
+
+ // Matches 'diag::note_property_attribute' options.
+ unsigned property_note_select;
+
+ // Matches diag::note_availability_specified_here.
+ unsigned available_here_select_kind;
+
+ VersionTuple DeclVersion;
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
+ DeclVersion = AA->getIntroduced();
+
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
+ OffendingDecl))
+ return;
+
+ SourceLocation Loc = Locs.front();
+
+  // The declaration can have multiple availability attributes; we are looking
+  // at one of them.
+ const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
+ if (A && A->isInherited()) {
+ for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
+ Redecl = Redecl->getPreviousDecl()) {
+ const AvailabilityAttr *AForRedecl =
+ getAttrForPlatform(S.Context, Redecl);
+ if (AForRedecl && !AForRedecl->isInherited()) {
+ // If D is a declaration with inherited attributes, the note should
+ // point to the declaration with actual attributes.
+ NoteLocation = Redecl->getLocation();
+ break;
+ }
+ }
+ }
+
+ switch (K) {
+ case AR_NotYetIntroduced: {
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified, when the deployment target is iOS 11 (or equivalent) or
+ // newer, or when the declaration itself was introduced in iOS 11
+ // (macOS 10.13, ...) or later.
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(S.getASTContext(), OffendingDecl);
+ VersionTuple Introduced = AA->getIntroduced();
+
+ bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
+ S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
+ Introduced);
+ unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability;
+
+ std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
+ S.getASTContext().getTargetInfo().getPlatformName()));
+
+ S.Diag(Loc, Warning) << OffendingDecl << PlatformName
+ << Introduced.getAsString();
+
+ S.Diag(OffendingDecl->getLocation(),
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
+
+ if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
+ if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
+ if (TD->getDeclName().isEmpty()) {
+ S.Diag(TD->getLocation(),
+ diag::note_decl_unguarded_availability_silence)
+ << /*Anonymous*/ 1 << TD->getKindName();
+ return;
+ }
+ auto FixitNoteDiag =
+ S.Diag(Enclosing->getLocation(),
+ diag::note_decl_unguarded_availability_silence)
+ << /*Named*/ 0 << Enclosing;
+ // Don't offer a fixit for declarations with availability attributes.
+ if (Enclosing->hasAttr<AvailabilityAttr>())
+ return;
+ if (!S.getPreprocessor().isMacroDefined("API_AVAILABLE"))
+ return;
+ Optional<AttributeInsertion> Insertion = createAttributeInsertion(
+ Enclosing, S.getSourceManager(), S.getLangOpts());
+ if (!Insertion)
+ return;
+ std::string PlatformName =
+ AvailabilityAttr::getPlatformNameSourceSpelling(
+ S.getASTContext().getTargetInfo().getPlatformName())
+ .lower();
+ std::string Introduced =
+ OffendingDecl->getVersionIntroduced().getAsString();
+ FixitNoteDiag << FixItHint::CreateInsertion(
+ Insertion->Loc,
+ (llvm::Twine(Insertion->Prefix) + "API_AVAILABLE(" + PlatformName +
+ "(" + Introduced + "))" + Insertion->Suffix)
+ .str());
+ }
+ return;
+ }
+ case AR_Deprecated:
+ diag = !ObjCPropertyAccess ? diag::warn_deprecated
+ : diag::warn_property_method_deprecated;
+ diag_message = diag::warn_deprecated_message;
+ diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
+ property_note_select = /* deprecated */ 0;
+ available_here_select_kind = /* deprecated */ 2;
+ if (const auto *AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ NoteLocation = AL->getLocation();
+ break;
+
+ case AR_Unavailable:
+ diag = !ObjCPropertyAccess ? diag::err_unavailable
+ : diag::err_property_method_unavailable;
+ diag_message = diag::err_unavailable_message;
+ diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
+ property_note_select = /* unavailable */ 1;
+ available_here_select_kind = /* unavailable */ 0;
+
+ if (auto AL = OffendingDecl->getAttr<UnavailableAttr>()) {
+ if (AL->isImplicit() && AL->getImplicitReason()) {
+ // Most of these failures are due to extra restrictions in ARC;
+ // reflect that in the primary diagnostic when applicable.
+ auto flagARCError = [&] {
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ S.getSourceManager().isInSystemHeader(
+ OffendingDecl->getLocation()))
+ diag = diag::err_unavailable_in_arc;
+ };
+
+ switch (AL->getImplicitReason()) {
+ case UnavailableAttr::IR_None: break;
+
+ case UnavailableAttr::IR_ARCForbiddenType:
+ flagARCError();
+ diag_available_here = diag::note_arc_forbidden_type;
+ break;
+
+ case UnavailableAttr::IR_ForbiddenWeak:
+ if (S.getLangOpts().ObjCWeakRuntime)
+ diag_available_here = diag::note_arc_weak_disabled;
+ else
+ diag_available_here = diag::note_arc_weak_no_runtime;
+ break;
+
+ case UnavailableAttr::IR_ARCForbiddenConversion:
+ flagARCError();
+ diag_available_here = diag::note_performs_forbidden_arc_conversion;
+ break;
+
+ case UnavailableAttr::IR_ARCInitReturnsUnrelated:
+ flagARCError();
+ diag_available_here = diag::note_arc_init_returns_unrelated;
+ break;
+
+ case UnavailableAttr::IR_ARCFieldWithOwnership:
+ flagARCError();
+ diag_available_here = diag::note_arc_field_with_ownership;
+ break;
+ }
+ }
+ }
+ break;
+
+ case AR_Available:
+ llvm_unreachable("Warning for availability of available declaration?");
+ }
+
+ SmallVector<FixItHint, 12> FixIts;
+ if (K == AR_Deprecated) {
+ StringRef Replacement;
+ if (auto AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ Replacement = AL->getReplacement();
+ if (auto AL = getAttrForPlatform(S.Context, OffendingDecl))
+ Replacement = AL->getReplacement();
+
+ CharSourceRange UseRange;
+ if (!Replacement.empty())
+ UseRange =
+ CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
+ if (UseRange.isValid()) {
+ if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
+ Selector Sel = MethodDecl->getSelector();
+ SmallVector<StringRef, 12> SelectorSlotNames;
+ Optional<unsigned> NumParams = tryParseObjCMethodName(
+ Replacement, SelectorSlotNames, S.getLangOpts());
+ if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
+ assert(SelectorSlotNames.size() == Locs.size());
+ for (unsigned I = 0; I < Locs.size(); ++I) {
+ if (!Sel.getNameForSlot(I).empty()) {
+ CharSourceRange NameRange = CharSourceRange::getCharRange(
+ Locs[I], S.getLocForEndOfToken(Locs[I]));
+ FixIts.push_back(FixItHint::CreateReplacement(
+ NameRange, SelectorSlotNames[I]));
+ } else
+ FixIts.push_back(
+ FixItHint::CreateInsertion(Locs[I], SelectorSlotNames[I]));
+ }
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ }
+ }
+
+ if (!Message.empty()) {
+ S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else if (!UnknownObjCClass) {
+ S.Diag(Loc, diag) << ReferringDecl << FixIts;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else {
+ S.Diag(Loc, diag_fwdclass_message) << ReferringDecl << FixIts;
+ S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
+ }
+
+ S.Diag(NoteLocation, diag_available_here)
+ << OffendingDecl << available_here_select_kind;
+}
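+
+// A minimal sketch of the deprecation-replacement fixit assembled above,
+// assuming a declaration that names its replacement (the identifiers are
+// hypothetical):
+//   void oldName() __attribute__((deprecated("use newName", "newName")));
+//   void f() { oldName(); }  // warning plus a fixit rewriting 'oldName' to
+//                            // 'newName'; ObjC selectors are rewritten
+//                            // slot-by-slot across the selector pieces.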
+
+void Sema::handleDelayedAvailabilityCheck(DelayedDiagnostic &DD, Decl *Ctx) {
+ assert(DD.Kind == DelayedDiagnostic::Availability &&
+ "Expected an availability diagnostic here");
+
+ DD.Triggered = true;
+ DoEmitAvailabilityWarning(
+ *this, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
+ DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(),
+ DD.getAvailabilitySelectorLocs(), DD.getUnknownObjCClass(),
+ DD.getObjCProperty(), false);
+}
+
+static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
+ const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Delay if we're currently parsing a declaration.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ DelayedDiagnostic::makeAvailability(
+ AR, Locs, ReferringDecl, OffendingDecl, UnknownObjCClass,
+ ObjCProperty, Message, ObjCPropertyAccess));
+ return;
+ }
+
+ Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
+ DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
+ Message, Locs, UnknownObjCClass, ObjCProperty,
+ ObjCPropertyAccess);
+}
+
+namespace {
+
+/// Returns true if the given statement can be a body-like child of \p Parent.
+bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::IfStmtClass:
+ return cast<IfStmt>(Parent)->getThen() == S ||
+ cast<IfStmt>(Parent)->getElse() == S;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(Parent)->getBody() == S;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(Parent)->getBody() == S;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(Parent)->getBody() == S;
+ case Stmt::CXXForRangeStmtClass:
+ return cast<CXXForRangeStmt>(Parent)->getBody() == S;
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ return cast<SwitchCase>(Parent)->getSubStmt() == S;
+ default:
+ return false;
+ }
+}
+
+class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
+ const Stmt *Target;
+
+public:
+ bool VisitStmt(Stmt *S) { return S != Target; }
+
+ /// Returns true if the given statement is present in the given declaration.
+ static bool isContained(const Stmt *Target, const Decl *D) {
+ StmtUSEFinder Visitor;
+ Visitor.Target = Target;
+ return !Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+
+/// Traverses the AST and finds the last statement that used a given
+/// declaration.
+class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
+ const Decl *D;
+
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (DRE->getDecl() == D)
+ return false;
+ return true;
+ }
+
+ static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
+ const CompoundStmt *Scope) {
+ LastDeclUSEFinder Visitor;
+ Visitor.D = D;
+ for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
+ const Stmt *S = *I;
+ if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
+ return S;
+ }
+ return nullptr;
+ }
+};
+
+/// This class implements -Wunguarded-availability.
+///
+/// This is done with a traversal of the AST of a function that makes reference
+/// to a partially available declaration. Whenever we encounter an \c if of the
+/// form: \c if(@available(...)), we use the version from the condition to visit
+/// the then statement.
+class DiagnoseUnguardedAvailability
+ : public RecursiveASTVisitor<DiagnoseUnguardedAvailability> {
+ typedef RecursiveASTVisitor<DiagnoseUnguardedAvailability> Base;
+
+ Sema &SemaRef;
+ Decl *Ctx;
+
+ /// Stack of potentially nested 'if (@available(...))'s.
+ SmallVector<VersionTuple, 8> AvailabilityStack;
+ SmallVector<const Stmt *, 16> StmtStack;
+
+ void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range,
+ ObjCInterfaceDecl *ClassReceiver = nullptr);
+
+public:
+ DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
+ : SemaRef(SemaRef), Ctx(Ctx) {
+ AvailabilityStack.push_back(
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion());
+ }
+
+ bool TraverseDecl(Decl *D) {
+ // Avoid visiting nested functions to prevent duplicate warnings.
+ if (!D || isa<FunctionDecl>(D))
+ return true;
+ return Base::TraverseDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *S) {
+ if (!S)
+ return true;
+ StmtStack.push_back(S);
+ bool Result = Base::TraverseStmt(S);
+ StmtStack.pop_back();
+ return Result;
+ }
+
+ void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
+
+ bool TraverseIfStmt(IfStmt *If);
+
+ bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
+
+ // For 'case X:' statements, don't bother looking at the 'X'; it can't lead
+ // to any useful diagnostics.
+ bool TraverseCaseStmt(CaseStmt *CS) { return TraverseStmt(CS->getSubStmt()); }
+
+ bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
+ if (PRE->isClassReceiver())
+ DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
+ return true;
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
+ if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
+ ObjCInterfaceDecl *ID = nullptr;
+ QualType ReceiverTy = Msg->getClassReceiver();
+ if (!ReceiverTy.isNull() && ReceiverTy->getAsObjCInterfaceType())
+ ID = ReceiverTy->getAsObjCInterfaceType()->getInterface();
+
+ DiagnoseDeclAvailability(
+ D, SourceRange(Msg->getSelectorStartLoc(), Msg->getEndLoc()), ID);
+ }
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ DiagnoseDeclAvailability(DRE->getDecl(),
+ SourceRange(DRE->getBeginLoc(), DRE->getEndLoc()));
+ return true;
+ }
+
+ bool VisitMemberExpr(MemberExpr *ME) {
+ DiagnoseDeclAvailability(ME->getMemberDecl(),
+ SourceRange(ME->getBeginLoc(), ME->getEndLoc()));
+ return true;
+ }
+
+ bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
+ SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
+ << (!SemaRef.getLangOpts().ObjC);
+ return true;
+ }
+
+ bool VisitTypeLoc(TypeLoc Ty);
+};
+
+void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
+ NamedDecl *D, SourceRange Range, ObjCInterfaceDecl *ReceiverClass) {
+ AvailabilityResult Result;
+ const NamedDecl *OffendingDecl;
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(SemaRef, D, nullptr, ReceiverClass);
+ if (Result != AR_Available) {
+ // All other diagnostic kinds have already been handled in
+ // DiagnoseAvailabilityOfDecl.
+ if (Result != AR_NotYetIntroduced)
+ return;
+
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
+ VersionTuple Introduced = AA->getIntroduced();
+
+ if (AvailabilityStack.back() >= Introduced)
+ return;
+
+ // If the context of this function is less available than D, we should not
+ // emit a diagnostic.
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
+ OffendingDecl))
+ return;
+
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified, when the deployment target is iOS 11 (or equivalent) or
+ // newer, or when the declaration itself was introduced in iOS 11
+ // (macOS 10.13, ...) or later.
+ unsigned DiagKind =
+ shouldDiagnoseAvailabilityByDefault(
+ SemaRef.Context,
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
+ ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability;
+
+ std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName()));
+
+ SemaRef.Diag(Range.getBegin(), DiagKind)
+ << Range << D << PlatformName << Introduced.getAsString();
+
+ SemaRef.Diag(OffendingDecl->getLocation(),
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << SemaRef.Context.getTargetInfo()
+ .getPlatformMinVersion()
+ .getAsString();
+
+ auto FixitDiag =
+ SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
+ << Range << D
+ << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
+
+ // Find the statement which should be enclosed in the if @available check.
+ if (StmtStack.empty())
+ return;
+ const Stmt *StmtOfUse = StmtStack.back();
+ const CompoundStmt *Scope = nullptr;
+ for (const Stmt *S : llvm::reverse(StmtStack)) {
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ Scope = CS;
+ break;
+ }
+ if (isBodyLikeChildStmt(StmtOfUse, S)) {
+ // The declaration won't be seen outside of the statement, so we don't
+ // have to wrap the uses of any declared variables in if (@available).
+ // Therefore we can avoid setting Scope here.
+ break;
+ }
+ StmtOfUse = S;
+ }
+ const Stmt *LastStmtOfUse = nullptr;
+ if (isa<DeclStmt>(StmtOfUse) && Scope) {
+ for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
+ if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
+ LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
+ break;
+ }
+ }
+ }
+
+ const SourceManager &SM = SemaRef.getSourceManager();
+ SourceLocation IfInsertionLoc =
+ SM.getExpansionLoc(StmtOfUse->getBeginLoc());
+ SourceLocation StmtEndLoc =
+ SM.getExpansionRange(
+ (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getEndLoc())
+ .getEnd();
+ if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
+ return;
+
+ StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
+ const char *ExtraIndentation = " ";
+ std::string FixItString;
+ llvm::raw_string_ostream FixItOS(FixItString);
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
+ : "__builtin_available")
+ << "("
+ << AvailabilityAttr::getPlatformNameSourceSpelling(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName())
+ << " " << Introduced.getAsString() << ", *)) {\n"
+ << Indentation << ExtraIndentation;
+ FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
+ SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
+ StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
+ /*SkipTrailingWhitespaceAndNewLine=*/false);
+ if (ElseInsertionLoc.isInvalid())
+ ElseInsertionLoc =
+ Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
+ FixItOS.str().clear();
+ FixItOS << "\n"
+ << Indentation << "} else {\n"
+ << Indentation << ExtraIndentation
+ << "// Fallback on earlier versions\n"
+ << Indentation << "}";
+ FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
+ }
+}
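+
+// The fixit built above wraps the offending statement roughly like this, using
+// @available in Objective-C and __builtin_available otherwise (the platform,
+// version, and callee below are hypothetical):
+//   if (__builtin_available(macOS 10.15, *)) {
+//       useNewAPI();
+//   } else {
+//       // Fallback on earlier versions
+//   }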
+
+bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
+ const Type *TyPtr = Ty.getTypePtr();
+ SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
+
+ if (Range.isInvalid())
+ return true;
+
+ if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
+ TagDecl *TD = TT->getDecl();
+ DiagnoseDeclAvailability(TD, Range);
+
+ } else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
+ TypedefNameDecl *D = TD->getDecl();
+ DiagnoseDeclAvailability(D, Range);
+
+ } else if (const auto *ObjCO = dyn_cast<ObjCObjectType>(TyPtr)) {
+ if (NamedDecl *D = ObjCO->getInterface())
+ DiagnoseDeclAvailability(D, Range);
+ }
+
+ return true;
+}
+
+bool DiagnoseUnguardedAvailability::TraverseIfStmt(IfStmt *If) {
+ VersionTuple CondVersion;
+ if (auto *E = dyn_cast<ObjCAvailabilityCheckExpr>(If->getCond())) {
+ CondVersion = E->getVersion();
+
+ // If we're using the '*' case here or if this check is redundant, then we
+ // use the enclosing version to check both branches.
+ if (CondVersion.empty() || CondVersion <= AvailabilityStack.back())
+ return TraverseStmt(If->getThen()) && TraverseStmt(If->getElse());
+ } else {
+ // This isn't an availability checking 'if', we can just continue.
+ return Base::TraverseIfStmt(If);
+ }
+
+ AvailabilityStack.push_back(CondVersion);
+ bool ShouldContinue = TraverseStmt(If->getThen());
+ AvailabilityStack.pop_back();
+
+ return ShouldContinue && TraverseStmt(If->getElse());
+}
+
+} // end anonymous namespace
+
+void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
+ Stmt *Body = nullptr;
+
+ if (auto *FD = D->getAsFunction()) {
+ // FIXME: We only examine the pattern decl for availability violations now,
+ // but we should also examine instantiated templates.
+ if (FD->isTemplateInstantiation())
+ return;
+
+ Body = FD->getBody();
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ Body = MD->getBody();
+ else if (auto *BD = dyn_cast<BlockDecl>(D))
+ Body = BD->getBody();
+
+ assert(Body && "Need a body here!");
+
+ DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
+}
+
+void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess,
+ bool AvoidPartialAvailabilityChecks,
+ ObjCInterfaceDecl *ClassReceiver) {
+ std::string Message;
+ AvailabilityResult Result;
+ const NamedDecl* OffendingDecl;
+ // See if this declaration is unavailable, deprecated, or partial.
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(*this, D, &Message, ClassReceiver);
+ if (Result == AR_Available)
+ return;
+
+ if (Result == AR_NotYetIntroduced) {
+ if (AvoidPartialAvailabilityChecks)
+ return;
+
+ // We need to know the @available context in the current function to
+ // diagnose this use; let DiagnoseUnguardedAvailabilityViolations do that
+ // when we're done parsing the current function.
+ if (getCurFunctionOrMethodDecl()) {
+ getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ } else if (getCurBlock() || getCurLambda()) {
+ getCurFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ }
+ }
+
+ const ObjCPropertyDecl *ObjCPDecl = nullptr;
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
+ AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
+ if (PDeclResult == Result)
+ ObjCPDecl = PD;
+ }
+ }
+
+ EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Locs,
+ UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
index 0c61057e1072..283a04683a32 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
@@ -14,8 +14,10 @@
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
@@ -210,6 +212,20 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
llvm_unreachable("All cases should've been handled by now.");
}
+template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
+ if (!D)
+ return false;
+ if (auto *A = D->getAttr<AttrT>())
+ return A->isImplicit();
+ return D->isImplicit();
+}
+
+bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
+ bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
+ bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
+ return IsImplicitDevAttr && IsImplicitHostAttr;
+}
+
void Sema::EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
@@ -425,6 +441,10 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
if (CD->getParent()->isDynamicClass())
return false;
+ // A union ctor does not call the ctors of its data members.
+ if (CD->getParent()->isUnion())
+ return true;
+
// The only form of initializer allowed is an empty constructor.
// This will recursively check all base classes and member initializers
if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
@@ -464,6 +484,11 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
if (ClassDecl->isDynamicClass())
return false;
+ // A union has no base classes, and a union dtor does not call the dtors of
+ // its data members.
+ if (DD->getParent()->isUnion())
+ return true;
+
// Only empty destructors are allowed. This will recursively check
// destructors for all base classes...
if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
@@ -503,9 +528,14 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
// constructor according to CUDA rules. This deviates from NVCC,
// but allows us to handle things like constexpr constructors.
if (!AllowedInit &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
- AllowedInit = VD->getInit()->isConstantInitializer(
- Context, VD->getType()->isReferenceType());
+ (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>())) {
+ auto *Init = VD->getInit();
+ AllowedInit =
+ ((VD->getType()->isDependentType() || Init->isValueDependent()) &&
+ VD->isConstexpr()) ||
+ Init->isConstantInitializer(Context,
+ VD->getType()->isReferenceType());
+ }
// Also make sure that destructor, if there is one, is empty.
if (AllowedInit)
@@ -602,6 +632,13 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
+void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
+ if (getLangOpts().CUDAIsDevice && VD->isConstexpr() &&
+ (VD->isFileVarDecl() || VD->isStaticDataMember())) {
+ VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
+ }
+}
+
Sema::DeviceDiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
@@ -674,25 +711,6 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// Otherwise, mark the call in our call graph so we can traverse it later.
bool CallerKnownEmitted =
getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
- if (CallerKnownEmitted) {
- // Host-side references to a __global__ function refer to the stub, so the
- // function itself is never emitted and therefore should not be marked.
- if (!shouldIgnoreInHostDeviceCheck(Callee))
- markKnownEmitted(
- *this, Caller, Callee, Loc, [](Sema &S, FunctionDecl *FD) {
- return S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
- });
- } else {
- // If we have
- // host fn calls kernel fn calls host+device,
- // the HD function does not get instantiated on the host. We model this by
- // omitting at the call to the kernel from the callgraph. This ensures
- // that, when compiling for host, only HD functions actually called from the
- // host get marked as known-emitted.
- if (!shouldIgnoreInHostDeviceCheck(Callee))
- DeviceCallGraph[Caller].insert({Callee, Loc});
- }
-
DeviceDiagBuilder::Kind DiagKind = [this, Caller, Callee,
CallerKnownEmitted] {
switch (IdentifyCUDAPreference(Caller, Callee)) {
@@ -729,20 +747,58 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
}
+// Check for a wrong-sided reference capture in a lambda for CUDA/HIP.
+// A lambda may capture a stack variable by reference where it is defined and
+// then use that capture when the lambda is called. When the capture and the
+// use happen on different sides (host vs. device), the capture is invalid and
+// should be diagnosed.
+void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
+ const sema::Capture &Capture) {
+ // In host compilation we only need to check lambdas emitted on the host
+ // side. For such a lambda, a reference capture is invalid only if the lambda
+ // object is populated by a device function or kernel and then passed to and
+ // called by a host function. However, that is impossible: a device function
+ // or kernel can only call device functions, and a kernel cannot pass a
+ // lambda back to a host function, since we cannot define a kernel argument
+ // type that can hold the lambda before the lambda itself is defined.
+ if (!LangOpts.CUDAIsDevice)
+ return;
+
+ // A file-scope lambda can only use init-captures of global variables, which
+ // results in those globals being passed by value.
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ if (!Caller)
+ return;
+
+ // In device compilation, we only need to check lambdas which are emitted on
+ // the device side. For such lambdas, a reference capture is invalid only if
+ // the lambda object is populated by a host function and then passed to and
+ // called in a device function or kernel.
+ bool CalleeIsDevice = Callee->hasAttr<CUDADeviceAttr>();
+ bool CallerIsHost =
+ !Caller->hasAttr<CUDAGlobalAttr>() && !Caller->hasAttr<CUDADeviceAttr>();
+ bool ShouldCheck = CalleeIsDevice && CallerIsHost;
+ if (!ShouldCheck || !Capture.isReferenceCapture())
+ return;
+ auto DiagKind = DeviceDiagBuilder::K_Deferred;
+ if (Capture.isVariableCapture()) {
+ DeviceDiagBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target, Callee, *this)
+ << Capture.getVariable();
+ } else if (Capture.isThisCapture()) {
+ DeviceDiagBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target_this_ptr, Callee, *this);
+ }
+ return;
+}
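+
+// A rough sketch of the pattern diagnosed above during device compilation (the
+// kernel and variable names are hypothetical): a host function builds a lambda
+// that captures a host stack variable by reference and hands it to a kernel,
+// which then calls it on the device.
+//   template <typename F> __global__ void kernel(F f) { f(); }
+//   void hostFn() {
+//     int x = 0;
+//     kernel<<<1, 1>>>([&x] { ++x; });  // by-reference capture of host memory
+//   }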
+
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
return;
- FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
- if (!CurFn)
- return;
- CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
- if (Target == CFT_Global || Target == CFT_Device) {
- Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
- } else if (Target == CFT_HostDevice) {
- Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
- Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
- }
+ Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
}
void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
index 7a8cbca1e3f1..2efe26052c78 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
@@ -48,7 +48,8 @@ enum CastType {
CT_Reinterpret, ///< reinterpret_cast
CT_Dynamic, ///< dynamic_cast
CT_CStyle, ///< (Type)expr
- CT_Functional ///< Type(expr)
+ CT_Functional, ///< Type(expr)
+ CT_Addrspace ///< addrspace_cast
};
namespace {
@@ -88,6 +89,7 @@ namespace {
void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
void CheckCStyleCast();
void CheckBuiltinBitCast();
+ void CheckAddrspaceCast();
void updatePartOfExplicitCastFlags(CastExpr *CE) {
// Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -159,6 +161,30 @@ namespace {
PlaceholderKind = (BuiltinType::Kind) 0;
}
};
+
+ void CheckNoDeref(Sema &S, const QualType FromType, const QualType ToType,
+ SourceLocation OpLoc) {
+ if (const auto *PtrType = dyn_cast<PointerType>(FromType)) {
+ if (PtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ if (const auto *DestType = dyn_cast<PointerType>(ToType)) {
+ if (!DestType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ S.Diag(OpLoc, diag::warn_noderef_to_dereferenceable_pointer);
+ }
+ }
+ }
+ }
+ }
+
+ struct CheckNoDerefRAII {
+ CheckNoDerefRAII(CastOperation &Op) : Op(Op) {}
+ ~CheckNoDerefRAII() {
+ if (!Op.SrcExpr.isInvalid())
+ CheckNoDeref(Op.Self, Op.SrcExpr.get()->getType(), Op.ResultType,
+ Op.OpRange.getBegin());
+ }
+
+ CastOperation &Op;
+ };
}
static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
@@ -225,12 +251,14 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
unsigned &msg);
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- SourceRange OpRange,
- unsigned &msg,
+ SourceRange OpRange, unsigned &msg,
CastKind &Kind);
+static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ unsigned &msg, CastKind &Kind);
-
-/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+/// ActOnCXXNamedCast - Parse
+/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, Declarator &D,
@@ -272,6 +300,16 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
switch (Kind) {
default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_addrspace_cast:
+ if (!TypeDependent) {
+ Op.CheckAddrspaceCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXAddrspaceCastExpr::Create(
+ Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ DestTInfo, OpLoc, Parens.getEnd(), AngleBrackets));
+
case tok::kw_const_cast:
if (!TypeDependent) {
Op.CheckConstCast();
@@ -375,6 +413,7 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
case CT_Const:
case CT_Reinterpret:
case CT_Dynamic:
+ case CT_Addrspace:
return false;
// These do.
@@ -708,6 +747,8 @@ static TryCastResult getCastAwayConstnessCastKind(CastAwayConstnessKind CACK,
/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
/// checked downcasts in class hierarchies.
void CastOperation::CheckDynamicCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
@@ -861,6 +902,8 @@ void CastOperation::CheckDynamicCast() {
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void CastOperation::CheckConstCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
@@ -878,6 +921,18 @@ void CastOperation::CheckConstCast() {
SrcExpr = ExprError();
}
+void CastOperation::CheckAddrspaceCast() {
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ auto TCR =
+ TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ false, msg, Kind);
+ if (TCR != TC_Success && msg != 0) {
+ Self.Diag(OpRange.getBegin(), msg)
+ << CT_Addrspace << SrcExpr.get()->getType() << DestType << OpRange;
+ }
+ if (!isValidCast(TCR))
+ SrcExpr = ExprError();
+}
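+
+// A minimal sketch of the named cast checked above, in C++ for OpenCL (the
+// variable names are hypothetical; the address spaces must overlap and the
+// pointee types must match apart from their address space):
+//   void f(__generic int *g) {
+//     __global int *p = addrspace_cast<__global int *>(g);
+//   }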
+
/// Check that a reinterpret_cast\<DestType\>(SrcExpr) is not used as upcast
/// or downcast between respective pointers or references.
static void DiagnoseReinterpretUpDownCast(Sema &Self, const Expr *SrcExpr,
@@ -1018,6 +1073,8 @@ void CastOperation::CheckReinterpretCast() {
/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
/// implicit conversions explicit and getting rid of data loss warnings.
void CastOperation::CheckStaticCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (isPlaceholder()) {
checkNonOverloadPlaceholders();
if (SrcExpr.isInvalid())
@@ -1961,7 +2018,7 @@ static void DiagnoseCallingConvCast(Sema &Self, const ExprResult &SrcExpr,
<< FD << DstCCName << FixItHint::CreateInsertion(NameLoc, CCAttrText);
}
-static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
+static void checkIntToPointerCast(bool CStyle, const SourceRange &OpRange,
const Expr *SrcExpr, QualType DestType,
Sema &Self) {
QualType SrcType = SrcExpr->getType();
@@ -1983,7 +2040,7 @@ static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
unsigned Diag = DestType->isVoidPointerType() ?
diag::warn_int_to_void_pointer_cast
: diag::warn_int_to_pointer_cast;
- Self.Diag(Loc, Diag) << SrcType << DestType;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
}
}
@@ -2062,6 +2119,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
// FIXME: Use a specific diagnostic for the rest of these cases.
case OK_VectorComponent: inappropriate = "vector element"; break;
+ case OK_MatrixComponent:
+ inappropriate = "matrix element";
+ break;
case OK_ObjCProperty: inappropriate = "property expression"; break;
case OK_ObjCSubscript: inappropriate = "container subscripting expression";
break;
@@ -2204,13 +2264,19 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// C++ 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it; except in Microsoft mode, where the
// integral type size doesn't matter (except we don't allow bool).
- bool MicrosoftException = Self.getLangOpts().MicrosoftExt &&
- !DestType->isBooleanType();
if ((Self.Context.getTypeSize(SrcType) >
- Self.Context.getTypeSize(DestType)) &&
- !MicrosoftException) {
- msg = diag::err_bad_reinterpret_cast_small_int;
- return TC_Failed;
+ Self.Context.getTypeSize(DestType))) {
+ bool MicrosoftException =
+ Self.getLangOpts().MicrosoftExt && !DestType->isBooleanType();
+ if (MicrosoftException) {
+ unsigned Diag = SrcType->isVoidPointerType()
+ ? diag::warn_void_pointer_to_int_cast
+ : diag::warn_pointer_to_int_cast;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
+ } else {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
}
Kind = CK_PointerToIntegral;
return TC_Success;
@@ -2218,8 +2284,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
assert(destIsPtr && "One type must be a pointer");
- checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType,
- Self);
+ checkIntToPointerCast(CStyle, OpRange, SrcExpr.get(), DestType, Self);
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
// C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
@@ -2339,7 +2404,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- unsigned &msg) {
+ unsigned &msg, CastKind &Kind) {
if (!Self.getLangOpts().OpenCL)
// FIXME: As compiler doesn't have any information about overlapping addr
// spaces at the moment we have to be permissive here.
@@ -2348,6 +2413,9 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
// non-OpenCL mode too, we fast-path above because no other languages
// define overlapping address spaces currently.
auto SrcType = SrcExpr.get()->getType();
+ // FIXME: Should this be generalized to references? The reference parameter,
+ // however, becomes a reference pointee type here and is therefore rejected.
+ // Perhaps that is the right behavior according to C++, though.
auto SrcPtrType = SrcType->getAs<PointerType>();
if (!SrcPtrType)
return TC_NotApplicable;
@@ -2356,9 +2424,7 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
auto SrcPointeeType = SrcPtrType->getPointeeType();
auto DestPointeeType = DestPtrType->getPointeeType();
- if (SrcPointeeType.getAddressSpace() == DestPointeeType.getAddressSpace())
- return TC_NotApplicable;
- if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
+ if (!DestPointeeType.isAddressSpaceOverlapping(SrcPointeeType)) {
msg = diag::err_bad_cxx_cast_addr_space_mismatch;
return TC_Failed;
}
@@ -2366,10 +2432,15 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
Self.Context.removeAddrSpaceQualType(SrcPointeeType.getCanonicalType());
auto DestPointeeTypeWithoutAS =
Self.Context.removeAddrSpaceQualType(DestPointeeType.getCanonicalType());
- return Self.Context.hasSameType(SrcPointeeTypeWithoutAS,
- DestPointeeTypeWithoutAS)
- ? TC_Success
- : TC_NotApplicable;
+ if (Self.Context.hasSameType(SrcPointeeTypeWithoutAS,
+ DestPointeeTypeWithoutAS)) {
+ Kind = SrcPointeeType.getAddressSpace() == DestPointeeType.getAddressSpace()
+ ? CK_NoOp
+ : CK_AddressSpaceConversion;
+ return TC_Success;
+ } else {
+ return TC_NotApplicable;
+ }
}
void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
@@ -2396,9 +2467,9 @@ void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
const PointerType *SrcPPtr = cast<PointerType>(SrcPtr);
QualType DestPPointee = DestPPtr->getPointeeType();
QualType SrcPPointee = SrcPPtr->getPointeeType();
- if (Nested ? DestPPointee.getAddressSpace() !=
- SrcPPointee.getAddressSpace()
- : !DestPPtr->isAddressSpaceOverlapping(*SrcPPtr)) {
+ if (Nested
+ ? DestPPointee.getAddressSpace() != SrcPPointee.getAddressSpace()
+ : !DestPPointee.isAddressSpaceOverlapping(SrcPPointee)) {
Self.Diag(OpRange.getBegin(), DiagID)
<< SrcType << DestType << Sema::AA_Casting
<< SrcExpr.get()->getSourceRange();
@@ -2500,22 +2571,21 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
Sema::CheckedConversionKind CCK =
FunctionalStyle ? Sema::CCK_FunctionalCast : Sema::CCK_CStyleCast;
if (tcr == TC_NotApplicable) {
- tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg);
+ tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg,
+ Kind);
if (SrcExpr.isInvalid())
return;
- if (isValidCast(tcr))
- Kind = CK_AddressSpaceConversion;
-
if (tcr == TC_NotApplicable) {
- // ... or if that is not possible, a static_cast, ignoring const, ...
+ // ... or if that is not possible, a static_cast, ignoring const and
+ // addr space, ...
tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange, msg, Kind,
BasePath, ListInitialization);
if (SrcExpr.isInvalid())
return;
if (tcr == TC_NotApplicable) {
- // ... and finally a reinterpret_cast, ignoring const.
+ // ... and finally a reinterpret_cast, ignoring const and addr space.
tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/ true,
OpRange, msg, Kind);
if (SrcExpr.isInvalid())
@@ -2647,6 +2717,13 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Allow casting a sizeless built-in type to itself.
+ if (DestType->isSizelessBuiltinType() &&
+ Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+ Kind = CK_NoOp;
+ return;
+ }
+
if (!DestType->isScalarType() && !DestType->isVectorType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
@@ -2742,6 +2819,20 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Can't cast to or from bfloat16.
+ if (DestType->isBFloat16Type() && !SrcType->isBFloat16Type()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_to_bfloat16)
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ if (SrcType->isBFloat16Type() && !DestType->isBFloat16Type()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_from_bfloat16)
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
// If either type is a pointer, the other type has to be either an
// integer or a pointer.
if (!DestType->isArithmeticType()) {
@@ -2752,8 +2843,8 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
- checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(),
- DestType, Self);
+ checkIntToPointerCast(/* CStyle */ true, OpRange, SrcExpr.get(), DestType,
+ Self);
} else if (!SrcType->isArithmeticType()) {
if (!DestType->isIntegralType(Self.Context) &&
DestType->isArithmeticType()) {
@@ -2763,6 +2854,25 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
+
+ if ((Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) &&
+ !DestType->isBooleanType()) {
+ // C 6.3.2.3p6: Any pointer type may be converted to an integer type.
+ // Except as previously specified, the result is implementation-defined.
+ // If the result cannot be represented in the integer type, the behavior
+ // is undefined. The result need not be in the range of values of any
+ // integer type.
+ unsigned Diag;
+ if (SrcType->isVoidPointerType())
+ Diag = DestType->isEnumeralType() ? diag::warn_void_pointer_to_enum_cast
+ : diag::warn_void_pointer_to_int_cast;
+ else if (DestType->isEnumeralType())
+ Diag = diag::warn_pointer_to_enum_cast;
+ else
+ Diag = diag::warn_pointer_to_int_cast;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
+ }
}
if (Self.getLangOpts().OpenCL &&
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index 74742023d1b3..509d88e25000 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
@@ -87,6 +88,7 @@
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -236,8 +238,8 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
}
Expr::EvalResult AlignResult;
unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
- // We can't check validity of alignment if it is type dependent.
- if (!AlignOp->isInstantiationDependent() &&
+ // We can't check validity of alignment if it is value dependent.
+ if (!AlignOp->isValueDependent() &&
AlignOp->EvaluateAsInt(AlignResult, S.Context,
Expr::SE_AllowSideEffects)) {
llvm::APSInt AlignValue = AlignResult.Val.getInt();
@@ -282,48 +284,60 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
return false;
}
-static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
+ unsigned BuiltinID) {
if (checkArgCount(S, TheCall, 3))
return true;
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
- ExprResult Arg = TheCall->getArg(I);
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(I, Arg.get());
+
QualType Ty = Arg.get()->getType();
if (!Ty->isIntegerType()) {
S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
<< Ty << Arg.get()->getSourceRange();
return true;
}
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- S.getASTContext(), Ty, /*consume*/ false);
- Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
- if (Arg.isInvalid())
- return true;
- TheCall->setArg(I, Arg.get());
}
// Third argument should be a pointer to a non-const integer.
// IRGen correctly handles volatile, restrict, and address spaces, and
// the other qualifiers aren't possible.
{
- ExprResult Arg = TheCall->getArg(2);
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(2, Arg.get());
+
QualType Ty = Arg.get()->getType();
const auto *PtrTy = Ty->getAs<PointerType>();
- if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
- !PtrTy->getPointeeType().isConstQualified())) {
+ if (!PtrTy ||
+ !PtrTy->getPointeeType()->isIntegerType() ||
+ PtrTy->getPointeeType().isConstQualified()) {
S.Diag(Arg.get()->getBeginLoc(),
diag::err_overflow_builtin_must_be_ptr_int)
- << Ty << Arg.get()->getSourceRange();
+ << Ty << Arg.get()->getSourceRange();
return true;
}
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- S.getASTContext(), Ty, /*consume*/ false);
- Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
- if (Arg.isInvalid())
- return true;
- TheCall->setArg(2, Arg.get());
}
+
+ // Disallow signed _ExtInt arguments larger than 128 bits to the mul overflow
+ // builtin until we improve backend support.
+ if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
+ for (unsigned I = 0; I < 3; ++I) {
+ const auto Arg = TheCall->getArg(I);
+ // Third argument will be a pointer.
+ auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
+ if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
+ S.getASTContext().getIntWidth(Ty) > 128)
+ return S.Diag(Arg->getBeginLoc(),
+ diag::err_overflow_builtin_ext_int_max_size)
+ << 128;
+ }
+ }
+
return false;
}
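+
+// A minimal sketch of calls reaching the check above (the variable names are
+// hypothetical). The usual overflow-builtin form is unchanged:
+//   int r; bool ov = __builtin_mul_overflow(a, b, &r);
+// but signed _ExtInt operands wider than 128 bits are now rejected for the
+// multiplication builtin:
+//   _ExtInt(256) x = 1, y = 2, res;
+//   __builtin_mul_overflow(x, y, &res);  // error: exceeds the 128-bit limit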
@@ -390,13 +404,194 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
return false;
}
+namespace {
+
+class EstimateSizeFormatHandler
+ : public analyze_format_string::FormatStringHandler {
+ size_t Size;
+
+public:
+ EstimateSizeFormatHandler(StringRef Format)
+ : Size(std::min(Format.find(0), Format.size()) +
+ 1 /* null byte always written by sprintf */) {}
+
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *, unsigned SpecifierLen) override {
+
+ const size_t FieldWidth = computeFieldWidth(FS);
+ const size_t Precision = computePrecision(FS);
+
+ // The actual format.
+ switch (FS.getConversionSpecifier().getKind()) {
+ // Just a char.
+ case analyze_format_string::ConversionSpecifier::cArg:
+ case analyze_format_string::ConversionSpecifier::CArg:
+ Size += std::max(FieldWidth, (size_t)1);
+ break;
+ // Just an integer.
+ case analyze_format_string::ConversionSpecifier::dArg:
+ case analyze_format_string::ConversionSpecifier::DArg:
+ case analyze_format_string::ConversionSpecifier::iArg:
+ case analyze_format_string::ConversionSpecifier::oArg:
+ case analyze_format_string::ConversionSpecifier::OArg:
+ case analyze_format_string::ConversionSpecifier::uArg:
+ case analyze_format_string::ConversionSpecifier::UArg:
+ case analyze_format_string::ConversionSpecifier::xArg:
+ case analyze_format_string::ConversionSpecifier::XArg:
+ Size += std::max(FieldWidth, Precision);
+ break;
+
+ // A %g style conversion switches between %f and %e style dynamically.
+ // %f always takes less space, so default to it.
+ case analyze_format_string::ConversionSpecifier::gArg:
+ case analyze_format_string::ConversionSpecifier::GArg:
+
+ // Floating point number in the form '[-]ddd.ddd'.
+ case analyze_format_string::ConversionSpecifier::fArg:
+ case analyze_format_string::ConversionSpecifier::FArg:
+ Size += std::max(FieldWidth, 1 /* integer part */ +
+ (Precision ? 1 + Precision
+ : 0) /* period + decimal */);
+ break;
+
+ // Floating point number in the form '[-]d.ddde[+-]dd'.
+ case analyze_format_string::ConversionSpecifier::eArg:
+ case analyze_format_string::ConversionSpecifier::EArg:
+ Size +=
+ std::max(FieldWidth,
+ 1 /* integer part */ +
+ (Precision ? 1 + Precision : 0) /* period + decimal */ +
+ 1 /* e or E letter */ + 2 /* exponent */);
+ break;
+
+ // Floating point number in the form '[-]0xh.hhhhp±dd'.
+ case analyze_format_string::ConversionSpecifier::aArg:
+ case analyze_format_string::ConversionSpecifier::AArg:
+ Size +=
+ std::max(FieldWidth,
+ 2 /* 0x */ + 1 /* integer part */ +
+ (Precision ? 1 + Precision : 0) /* period + decimal */ +
+ 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
+ break;
+
+ // Just a string.
+ case analyze_format_string::ConversionSpecifier::sArg:
+ case analyze_format_string::ConversionSpecifier::SArg:
+ Size += FieldWidth;
+ break;
+
+ // Just a pointer in the form '0xddd'.
+ case analyze_format_string::ConversionSpecifier::pArg:
+ Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
+ break;
+
+ // A plain percent.
+ case analyze_format_string::ConversionSpecifier::PercentArg:
+ Size += 1;
+ break;
+
+ default:
+ break;
+ }
+
+ Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
+
+ if (FS.hasAlternativeForm()) {
+ switch (FS.getConversionSpecifier().getKind()) {
+ default:
+ break;
+ // Force a leading '0'.
+ case analyze_format_string::ConversionSpecifier::oArg:
+ Size += 1;
+ break;
+ // Force a leading '0x'.
+ case analyze_format_string::ConversionSpecifier::xArg:
+ case analyze_format_string::ConversionSpecifier::XArg:
+ Size += 2;
+ break;
+ // Force a period '.' before decimal, even if precision is 0.
+ case analyze_format_string::ConversionSpecifier::aArg:
+ case analyze_format_string::ConversionSpecifier::AArg:
+ case analyze_format_string::ConversionSpecifier::eArg:
+ case analyze_format_string::ConversionSpecifier::EArg:
+ case analyze_format_string::ConversionSpecifier::fArg:
+ case analyze_format_string::ConversionSpecifier::FArg:
+ case analyze_format_string::ConversionSpecifier::gArg:
+ case analyze_format_string::ConversionSpecifier::GArg:
+ Size += (Precision ? 0 : 1);
+ break;
+ }
+ }
+ assert(SpecifierLen <= Size && "no underflow");
+ Size -= SpecifierLen;
+ return true;
+ }
+
+ size_t getSizeLowerBound() const { return Size; }
+
+private:
+ static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
+ const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
+ size_t FieldWidth = 0;
+ if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
+ FieldWidth = FW.getConstantAmount();
+ return FieldWidth;
+ }
+
+ static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
+ const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
+ size_t Precision = 0;
+
+ // See man 3 printf for default precision value based on the specifier.
+ switch (FW.getHowSpecified()) {
+ case analyze_format_string::OptionalAmount::NotSpecified:
+ switch (FS.getConversionSpecifier().getKind()) {
+ default:
+ break;
+ case analyze_format_string::ConversionSpecifier::dArg: // %d
+ case analyze_format_string::ConversionSpecifier::DArg: // %D
+ case analyze_format_string::ConversionSpecifier::iArg: // %i
+ Precision = 1;
+ break;
+ case analyze_format_string::ConversionSpecifier::oArg: // %o
+ case analyze_format_string::ConversionSpecifier::OArg: // %O
+ case analyze_format_string::ConversionSpecifier::uArg: // %u
+ case analyze_format_string::ConversionSpecifier::UArg: // %U
+ case analyze_format_string::ConversionSpecifier::xArg: // %x
+ case analyze_format_string::ConversionSpecifier::XArg: // %X
+ Precision = 1;
+ break;
+ case analyze_format_string::ConversionSpecifier::fArg: // %f
+ case analyze_format_string::ConversionSpecifier::FArg: // %F
+ case analyze_format_string::ConversionSpecifier::eArg: // %e
+ case analyze_format_string::ConversionSpecifier::EArg: // %E
+ case analyze_format_string::ConversionSpecifier::gArg: // %g
+ case analyze_format_string::ConversionSpecifier::GArg: // %G
+ Precision = 6;
+ break;
+ case analyze_format_string::ConversionSpecifier::pArg: // %p
+ Precision = 1;
+ break;
+ }
+ break;
+ case analyze_format_string::OptionalAmount::Constant:
+ Precision = FW.getConstantAmount();
+ break;
+ default:
+ break;
+ }
+ return Precision;
+ }
+};
+
+} // namespace
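+
+// A rough worked example of the lower bound computed by the handler above (the
+// buffer below is hypothetical): for
+//   char buf[4];
+//   sprintf(buf, "%5d", n);
+// the estimate is the 5-character field width plus the terminating null byte,
+// i.e. at least 6 bytes, which exceeds the 4-byte object and will trigger the
+// fortify warning wired up below.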
+
/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
CallExpr *TheCall) {
// FIXME: There are some more useful checks we could be doing here:
- // - Analyze the format string of sprintf to see how much of buffer is used.
// - Evaluate strlen of strcpy arguments, use as object size.
if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
@@ -407,12 +602,55 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (!BuiltinID)
return;
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
+
unsigned DiagID = 0;
bool IsChkVariant = false;
+ Optional<llvm::APSInt> UsedSize;
unsigned SizeIndex, ObjectIndex;
switch (BuiltinID) {
default:
return;
+ case Builtin::BIsprintf:
+ case Builtin::BI__builtin___sprintf_chk: {
+ size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
+ auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
+
+ if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
+
+ if (!Format->isAscii() && !Format->isUTF8())
+ return;
+
+ StringRef FormatStrRef = Format->getString();
+ EstimateSizeFormatHandler H(FormatStrRef);
+ const char *FormatBytes = FormatStrRef.data();
+ const ConstantArrayType *T =
+ Context.getAsConstantArrayType(Format->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+
+ // In case there's a null byte somewhere.
+ size_t StrLen =
+ std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
+ if (!analyze_format_string::ParsePrintfString(
+ H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
+ Context.getTargetInfo(), false)) {
+ DiagID = diag::warn_fortify_source_format_overflow;
+ UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
+ .extOrTrunc(SizeTypeWidth);
+ if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
+ IsChkVariant = true;
+ ObjectIndex = 2;
+ } else {
+ IsChkVariant = false;
+ ObjectIndex = 0;
+ }
+ break;
+ }
+ }
+ return;
+ }
case Builtin::BI__builtin___memcpy_chk:
case Builtin::BI__builtin___memmove_chk:
case Builtin::BI__builtin___memset_chk:
@@ -505,19 +743,19 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
return;
// Get the object size in the target's size_t width.
- const TargetInfo &TI = getASTContext().getTargetInfo();
- unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
}
// Evaluate the number of bytes of the object that this call will use.
- Expr::EvalResult Result;
- Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
- if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
- return;
- llvm::APSInt UsedSize = Result.Val.getInt();
+ if (!UsedSize) {
+ Expr::EvalResult Result;
+ Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
+ if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
+ return;
+ UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
+ }
- if (UsedSize.ule(ObjectSize))
+ if (UsedSize.getValue().ule(ObjectSize))
return;
StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
@@ -533,7 +771,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(DiagID)
<< FunctionName << ObjectSize.toString(/*Radix=*/10)
- << UsedSize.toString(/*Radix=*/10));
+ << UsedSize.getValue().toString(/*Radix=*/10));
}
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
@@ -1152,6 +1390,49 @@ CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
return true;
}
+static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
+ SourceLocation CallSiteLoc);
+
+bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (TI.getTriple().getArch()) {
+ default:
+ // Some builtins don't require additional checking, so just consider these
+ // acceptable.
+ return false;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::hexagon:
+ return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::systemz:
+ return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::amdgcn:
+ return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+ }
+}
+
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -1421,6 +1702,19 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_nontemporal_load:
case Builtin::BI__builtin_nontemporal_store:
return SemaBuiltinNontemporalOverloaded(TheCallResult);
+ case Builtin::BI__builtin_memcpy_inline: {
+ clang::Expr *SizeOp = TheCall->getArg(2);
+ // We warn about copying to or from `nullptr` pointers when `size` is
+ // greater than 0. When `size` is value dependent we cannot evaluate its
+ // value so we bail out.
+ if (SizeOp->isValueDependent())
+ break;
+ if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
+ CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
+ CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
+ }
+ break;
+ }
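A minimal sketch of the calls this new __builtin_memcpy_inline check is aimed at, assuming the documented (destination, source, constant size) form; the sizes here are made up:

void copy_fixed(char *dst, const char *src) {
  // Well-formed: the size operand must be an integer constant expression.
  __builtin_memcpy_inline(dst, src, 16);
}

void copy_from_null(char *dst) {
  // With a non-zero constant size, a null pointer operand should now be
  // flagged by the same non-null argument check used for other builtins.
  __builtin_memcpy_inline(dst, nullptr, 16);
}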
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
@@ -1447,7 +1741,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
- if (SemaBuiltinOverflow(*this, TheCall))
+ if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
@@ -1515,6 +1809,36 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Context.IntTy);
break;
}
+ case Builtin::BI__builtin_expect_with_probability: {
+    // First, ensure the builtin is called with exactly 3 arguments.
+ if (checkArgCount(*this, TheCall, 3))
+ return ExprError();
+    // Then check that the probability is a constant float in the range [0.0, 1.0].
+ const Expr *ProbArg = TheCall->getArg(2);
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
+ Context)) ||
+ !Eval.Val.isFloat()) {
+ Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
+ << ProbArg->getSourceRange();
+ for (const PartialDiagnosticAt &PDiag : Notes)
+ Diag(PDiag.first, PDiag.second);
+ return ExprError();
+ }
+ llvm::APFloat Probability = Eval.Val.getFloat();
+ bool LoseInfo = false;
+ Probability.convert(llvm::APFloat::IEEEdouble(),
+ llvm::RoundingMode::Dynamic, &LoseInfo);
+ if (!(Probability >= llvm::APFloat(0.0) &&
+ Probability <= llvm::APFloat(1.0))) {
+ Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
+ << ProbArg->getSourceRange();
+ return ExprError();
+ }
+ break;
+ }
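A minimal sketch of the accepted shape of __builtin_expect_with_probability, following the three-argument form checked above (condition, expected value, constant probability in [0.0, 1.0]); the 0.9 is arbitrary:

int mostly_zero(int x) {
  // Accepted: the probability operand is a constant float inside [0.0, 1.0].
  // A non-constant or out-of-range value (say 1.5) should instead hit the
  // err_probability_* diagnostics added above.
  if (__builtin_expect_with_probability(x == 0, 1, 0.9))
    return 1;
  return 0;
}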
case Builtin::BI__builtin_preserve_access_index:
if (SemaBuiltinPreserveAI(*this, TheCall))
return ExprError();
@@ -1608,62 +1932,55 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_os_log_format:
+ Cleanup.setExprNeedsCleanups(true);
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_os_log_format_buffer_size:
if (SemaBuiltinOSLogFormat(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_frame_address:
+ case Builtin::BI__builtin_return_address: {
+ if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
+ return ExprError();
+
+ // -Wframe-address warning if non-zero passed to builtin
+ // return/frame address.
+ Expr::EvalResult Result;
+ if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
+ Result.Val.getInt() != 0)
+ Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
+ << ((BuiltinID == Builtin::BI__builtin_return_address)
+ ? "__builtin_return_address"
+ : "__builtin_frame_address")
+ << TheCall->getSourceRange();
+ break;
+ }
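A hedged user-level illustration of the new -Wframe-address warning; level 0 stays quiet, while any non-zero constant level (up to the 0xFFFF bound enforced above) should now draw the warning:

void *current_return_address(void) {
  // Quiet: level 0 is the only value that is reliably supported.
  return __builtin_return_address(0);
}

void *caller_frame(void) {
  // Still accepted, but should now be flagged by -Wframe-address, since
  // walking more than the current frame is not guaranteed to work.
  return __builtin_frame_address(1);
}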
+
+ case Builtin::BI__builtin_matrix_transpose:
+ return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_matrix_column_major_load:
+ return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_matrix_column_major_store:
+ return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
}
// Since the target specific builtins for each arch overlap, only check those
// of the arch we are compiling for.
if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
- switch (Context.getTargetInfo().getTriple().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::aarch64_be:
- if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::bpfeb:
- case llvm::Triple::bpfel:
- if (CheckBPFBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::hexagon:
- if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::systemz:
- if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- default:
- break;
+ if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
+ assert(Context.getAuxTargetInfo() &&
+ "Aux Target Builtin, but not an aux target?");
+
+ if (CheckTSBuiltinFunctionCall(
+ *Context.getAuxTargetInfo(),
+ Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
+ return ExprError();
+ } else {
+ if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
+ TheCall))
+ return ExprError();
}
}
@@ -1697,6 +2014,9 @@ static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
case NeonTypeFlags::Float64:
assert(!shift && "cannot shift float types!");
return (1 << IsQuad) - 1;
+ case NeonTypeFlags::BFloat16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
@@ -1736,11 +2056,135 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
return Context.FloatTy;
case NeonTypeFlags::Float64:
return Context.DoubleTy;
+ case NeonTypeFlags::BFloat16:
+ return Context.BFloat16Ty;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
-bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ // Range check SVE intrinsics that take immediate values.
+ SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SVE_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sve_sema_rangechecks.inc"
+#undef GET_SVE_IMMEDIATE_CHECK
+ }
+
+ // Perform all the immediate checks for this builtin call.
+ bool HasError = false;
+ for (auto &I : ImmChecks) {
+ int ArgNum, CheckTy, ElementSizeInBits;
+ std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
+
+ typedef bool(*OptionSetCheckFnTy)(int64_t Value);
+
+ // Function that checks whether the operand (ArgNum) is an immediate
+ // that is one of the predefined values.
+ auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
+ int ErrDiag) -> bool {
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ llvm::APSInt Imm;
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
+ return true;
+
+ if (!CheckImm(Imm.getSExtValue()))
+ return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
+ return false;
+ };
+
+ switch ((SVETypeFlags::ImmCheckType)CheckTy) {
+ case SVETypeFlags::ImmCheck0_31:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_13:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_16:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_7:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckExtract:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (2048 / ElementSizeInBits) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftRight:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftRightNarrow:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
+ ElementSizeInBits / 2))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftLeft:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ ElementSizeInBits - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndex:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (1 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexCompRotate:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (2 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexDot:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (4 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRot90_270:
+ if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
+ diag::err_rotation_argument_to_cadd))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRotAll90:
+ if (CheckImmediateInSet(
+ [](int64_t V) {
+ return V == 0 || V == 90 || V == 180 || V == 270;
+ },
+ diag::err_rotation_argument_to_cmla))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_1:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_2:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_3:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
+ HasError = true;
+ break;
+ }
+ }
+
+ return HasError;
+}
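To make the rotation checks concrete, a minimal sketch using the ACLE complex-add intrinsic as I recall it (requires an SVE-enabled AArch64 target and arm_sve.h); only the constraint visible above is relied on, namely that the rotation immediate must be 90 or 270:

#include <arm_sve.h>

svfloat32_t rotate_and_add(svbool_t pg, svfloat32_t a, svfloat32_t b) {
  // Accepted: 90 is one of the two rotations allowed for svcadd; any other
  // immediate should now be rejected with err_rotation_argument_to_cadd.
  return svcadd_f32_x(pg, a, b, 90);
}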
+
+bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall) {
llvm::APSInt Result;
uint64_t mask = 0;
unsigned TV = 0;
@@ -1774,12 +2218,11 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
QualType RHSTy = RHS.get()->getType();
- llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+ llvm::Triple::ArchType Arch = TI.getTriple().getArch();
bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be;
- bool IsInt64Long =
- Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
+ bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
QualType EltTy =
getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
if (HasConstPtr)
@@ -1817,6 +2260,47 @@ bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
}
}
+bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ bool Err = false;
+ switch (BuiltinID) {
+ default:
+ return false;
+#include "clang/Basic/arm_cde_builtin_sema.inc"
+ }
+
+ if (Err)
+ return true;
+
+ return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
+}
+
+bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
+ const Expr *CoprocArg, bool WantCDE) {
+ if (isConstantEvaluated())
+ return false;
+
+ // We can't check the value of a dependent argument.
+ if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
+ return false;
+
+ llvm::APSInt CoprocNoAP;
+ bool IsICE = CoprocArg->isIntegerConstantExpr(CoprocNoAP, Context);
+ (void)IsICE;
+  assert(IsICE && "Coprocessor immediate is not a constant expression");
+ int64_t CoprocNo = CoprocNoAP.getExtValue();
+ assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
+
+ uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
+ bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
+
+ if (IsCDECoproc != WantCDE)
+ return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
+ << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
+
+ return false;
+}
+
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
@@ -1932,7 +2416,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
return false;
}
-bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -1955,10 +2440,12 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
BuiltinID == ARM::BI__builtin_arm_wsrp)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
- if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
+ if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
@@ -1981,11 +2468,33 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case ARM::BI__builtin_arm_isb:
case ARM::BI__builtin_arm_dbg:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
- }
-}
-
-bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ case ARM::BI__builtin_arm_cdp:
+ case ARM::BI__builtin_arm_cdp2:
+ case ARM::BI__builtin_arm_mcr:
+ case ARM::BI__builtin_arm_mcr2:
+ case ARM::BI__builtin_arm_mrc:
+ case ARM::BI__builtin_arm_mrc2:
+ case ARM::BI__builtin_arm_mcrr:
+ case ARM::BI__builtin_arm_mcrr2:
+ case ARM::BI__builtin_arm_mrrc:
+ case ARM::BI__builtin_arm_mrrc2:
+ case ARM::BI__builtin_arm_ldc:
+ case ARM::BI__builtin_arm_ldcl:
+ case ARM::BI__builtin_arm_ldc2:
+ case ARM::BI__builtin_arm_ldc2l:
+ case ARM::BI__builtin_arm_stc:
+ case ARM::BI__builtin_arm_stcl:
+ case ARM::BI__builtin_arm_stc2:
+ case ARM::BI__builtin_arm_stc2l:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
+ /*WantCDE*/ false);
+ }
+}
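A hedged sketch of the coprocessor-number check, relying on my recollection of the ACLE operand order for __builtin_arm_mcr (coproc, opc1, value, CRn, CRm, opc2) and assuming an Arm target where no CDE coprocessors are configured, so every coprocessor in 0..15 remains legal for the classic MCR/MRC builtins:

void write_cp15(unsigned value) {
  // Accepted: coprocessor 15 is a constant in range and, with no +cdecp<N>
  // feature enabled, it is not reserved for CDE, so the new
  // CheckARMCoprocessorImmediate call above stays quiet.
  __builtin_arm_mcr(15, 0, value, 7, 10, 5);
}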
+
+bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
@@ -2030,7 +2539,10 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
if (BuiltinID == AArch64::BI__getReg)
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
- if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
// For intrinsics which take an immediate value as part of the instruction,
@@ -2049,17 +2561,33 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id) &&
"unexpected ARM builtin");
if (checkArgCount(*this, TheCall, 2))
return true;
+ Expr *Arg;
+ if (BuiltinID == BPF::BI__builtin_btf_type_id) {
+ // The second argument needs to be a constant int
+ llvm::APSInt Value;
+ Arg = TheCall->getArg(1);
+ if (!Arg->isIntegerConstantExpr(Value, Context)) {
+ Diag(Arg->getBeginLoc(), diag::err_btf_type_id_not_const)
+ << 2 << Arg->getSourceRange();
+ return true;
+ }
+
+ TheCall->setType(Context.UnsignedIntTy);
+ return false;
+ }
+
// The first argument needs to be a record field access.
// If it is an array element access, we delay decision
// to BPF backend to check whether the access is a
// field access or not.
- Expr *Arg = TheCall->getArg(0);
+ Arg = TheCall->getArg(0);
if (Arg->getType()->getAsPlaceholderType() ||
(Arg->IgnoreParens()->getObjectKind() != OK_BitField &&
!dyn_cast<MemberExpr>(Arg->IgnoreParens()) &&
@@ -2070,8 +2598,9 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
}
// The second argument needs to be a constant int
+ Arg = TheCall->getArg(1);
llvm::APSInt Value;
- if (!TheCall->getArg(1)->isIntegerConstantExpr(Value, Context)) {
+ if (!Arg->isIntegerConstantExpr(Value, Context)) {
Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const)
<< 2 << Arg->getSourceRange();
return true;
@@ -2081,825 +2610,6 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
return false;
}
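For the BPF side, a minimal sketch assuming the usual CO-RE style of use and a --target=bpf compile; only the constraints visible in these hunks are relied on (two arguments each, a field access plus a constant for __builtin_preserve_field_info, a constant second argument and an 'unsigned int' result for __builtin_btf_type_id), and the concrete argument values are illustrative:

struct task_info {
  int pid;
};

unsigned probe(struct task_info *t) {
  // First argument must be a record field access, second a constant int.
  unsigned info = __builtin_preserve_field_info(t->pid, 1);
  // Second argument must now also be a constant int; the call itself is
  // given type 'unsigned int' by the change above.
  unsigned id = __builtin_btf_type_id(t->pid, 0);
  return info + id;
}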
-bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- struct BuiltinAndString {
- unsigned BuiltinID;
- const char *Str;
- };
-
- static BuiltinAndString ValidCPU[] = {
- { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
- { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
- { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
- };
-
- static BuiltinAndString ValidHVX[] = {
- { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
- };
-
- // Sort the tables on first execution so we can binary search them.
- auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
- return LHS.BuiltinID < RHS.BuiltinID;
- };
- static const bool SortOnce =
- (llvm::sort(ValidCPU, SortCmp),
- llvm::sort(ValidHVX, SortCmp), true);
- (void)SortOnce;
- auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
- return BI.BuiltinID < BuiltinID;
- };
-
- const TargetInfo &TI = Context.getTargetInfo();
-
- const BuiltinAndString *FC =
- llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp);
- if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
- const TargetOptions &Opts = TI.getTargetOpts();
- StringRef CPU = Opts.CPU;
- if (!CPU.empty()) {
- assert(CPU.startswith("hexagon") && "Unexpected CPU name");
- CPU.consume_front("hexagon");
- SmallVector<StringRef, 3> CPUs;
- StringRef(FC->Str).split(CPUs, ',');
- if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_unsupported_cpu);
- }
- }
-
- const BuiltinAndString *FH =
- llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp);
- if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
- if (!TI.hasFeature("hvx"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_requires_hvx);
-
- SmallVector<StringRef, 3> HVXs;
- StringRef(FH->Str).split(HVXs, ',');
- bool IsValid = llvm::any_of(HVXs,
- [&TI] (StringRef V) {
- std::string F = "hvx" + V.str();
- return TI.hasFeature(F);
- });
- if (!IsValid)
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_unsupported_hvx);
- }
-
- return false;
-}
-
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
struct ArgInfo {
uint8_t OpNum;
@@ -2916,7 +2626,7 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
{ Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
{ Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
{ Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
{ Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
{ Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
{ Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
@@ -3137,17 +2847,17 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
- CheckHexagonBuiltinArgument(BuiltinID, TheCall);
+ return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
-bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- return CheckMipsBuiltinCpu(BuiltinID, TheCall) ||
+bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall) {
+ return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
CheckMipsBuiltinArgument(BuiltinID, TheCall);
}
-bool Sema::CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- const TargetInfo &TI = Context.getTargetInfo();
+bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
BuiltinID <= Mips::BI__builtin_mips_lwx) {
@@ -3340,10 +3050,14 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
}
if (!m)
@@ -3353,15 +3067,13 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
-bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
BuiltinID == PPC::BI__builtin_divdeu ||
BuiltinID == PPC::BI__builtin_bpermd;
- bool IsTarget64Bit = Context.getTargetInfo()
- .getTypeWidth(Context
- .getTargetInfo()
- .getIntPtrType()) == 64;
+ bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
BuiltinID == PPC::BI__builtin_divweu ||
BuiltinID == PPC::BI__builtin_divde ||
@@ -3371,14 +3083,13 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
<< TheCall->getSourceRange();
- if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
- (BuiltinID == PPC::BI__builtin_bpermd &&
- !Context.getTargetInfo().hasFeature("bpermd")))
+ if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) ||
+ (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd")))
return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
<< TheCall->getSourceRange();
auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
- if (!Context.getTargetInfo().hasFeature("vsx"))
+ if (!TI.hasFeature("vsx"))
return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
<< TheCall->getSourceRange();
return false;
@@ -3414,10 +3125,75 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
case PPC::BI__builtin_pack_vector_int128:
return SemaVSXCheck(TheCall);
+ case PPC::BI__builtin_altivec_vgnb:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
+ case PPC::BI__builtin_vsx_xxeval:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
+ case PPC::BI__builtin_altivec_vsldbi:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_altivec_vsrdbi:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_vsx_xxpermx:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+  // Position of the memory order and scope arguments in the builtin.
+ unsigned OrderIndex, ScopeIndex;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ OrderIndex = 2;
+ ScopeIndex = 3;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_fence:
+ OrderIndex = 0;
+ ScopeIndex = 1;
+ break;
+ default:
+ return false;
+ }
+
+ ExprResult Arg = TheCall->getArg(OrderIndex);
+ auto ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult;
+
+ if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
+ return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
+ << ArgExpr->getType();
+ int ord = ArgResult.Val.getInt().getZExtValue();
+
+  // Check validity of the memory ordering as per C11 / C++11's memory model.
+ switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::release:
+ case llvm::AtomicOrderingCABI::acq_rel:
+ case llvm::AtomicOrderingCABI::seq_cst:
+ break;
+ default: {
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << ArgExpr->getSourceRange();
+ }
+ }
+
+ Arg = TheCall->getArg(ScopeIndex);
+ ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult1;
+ // Check that sync scope is a constant literal
+ if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
+ Context))
+ return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
+ << ArgExpr->getType();
+
+ return false;
+}
+
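// Illustrative usage sketch (editorial, not part of the patch; assumes an
// amdgcn target and that these builtin spellings are available): the check
// above accepts only acquire/release/acq_rel/seq_cst orderings and requires
// the synchronization scope to be a constant string such as "workgroup".
void amdgcn_fence_example(void) {
  __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");    // accepted
  // __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup"); // rejected: invalid memory order
}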
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
@@ -3486,7 +3262,8 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
+ CallExpr *TheCall) {
Expr *Arg = TheCall->getArg(0);
// Check if the argument is a string literal.
@@ -3497,7 +3274,7 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
+ if (!TI.validateCpuSupports(Feature))
return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
<< Arg->getSourceRange();
return false;
@@ -3506,7 +3283,7 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
Expr *Arg = TheCall->getArg(0);
// Check if the argument is a string literal.
@@ -3517,7 +3294,7 @@ static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!S.Context.getTargetInfo().validateCpuIs(Feature))
+ if (!TI.validateCpuIs(Feature))
return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
<< Arg->getSourceRange();
return false;
@@ -3831,6 +3608,64 @@ bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
<< Arg->getSourceRange();
}
+enum { TileRegLow = 0, TileRegHigh = 7 };
+
+bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ for (int ArgNum : ArgNums) {
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
+ return true;
+ }
+ return false;
+}
+
+bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum) {
+ return SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh);
+}
+
+bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+  // Because the maximum number of tile registers is TileRegHigh + 1, we use
+  // one bit per register in the bitset to track which registers have been used.
+ std::bitset<TileRegHigh + 1> ArgValues;
+ for (int ArgNum : ArgNums) {
+ llvm::APSInt Arg;
+ SemaBuiltinConstantArg(TheCall, ArgNum, Arg);
+ int ArgExtValue = Arg.getExtValue();
+    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
+           "Incorrect tile register num.");
+ if (ArgValues.test(ArgExtValue))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_x86_builtin_tile_arg_duplicate)
+ << TheCall->getArg(ArgNum)->getSourceRange();
+ ArgValues.set(ArgExtValue);
+ }
+ return false;
+}
+
+bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
+ CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
+}
+
+bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_tileloadd64:
+ case X86::BI__builtin_ia32_tileloaddt164:
+ case X86::BI__builtin_ia32_tilestored64:
+ case X86::BI__builtin_ia32_tilezero:
+ return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
+ case X86::BI__builtin_ia32_tdpbssd:
+ case X86::BI__builtin_ia32_tdpbsud:
+ case X86::BI__builtin_ia32_tdpbusd:
+ case X86::BI__builtin_ia32_tdpbuud:
+ case X86::BI__builtin_ia32_tdpbf16ps:
+ return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
+ }
+}
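// Illustrative usage sketch (editorial, not part of the patch; assumes an
// x86-64 target with AMX enabled and these builtin spellings): tile operands
// must be immediates in [0, 7], and the tdp* builtins require distinct tiles.
void amx_tile_example(void) {
  __builtin_ia32_tilezero(0);         // tile index within [TileRegLow, TileRegHigh]
  __builtin_ia32_tdpbssd(0, 1, 2);    // three distinct tile registers
  // __builtin_ia32_tdpbssd(1, 1, 2); // rejected: duplicate tile register
}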
static bool isX86_32Builtin(unsigned BuiltinID) {
// These builtins only work on x86-32 targets.
switch (BuiltinID) {
@@ -3842,15 +3677,16 @@ static bool isX86_32Builtin(unsigned BuiltinID) {
return false;
}
-bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == X86::BI__builtin_cpu_supports)
- return SemaBuiltinCpuSupports(*this, TheCall);
+ return SemaBuiltinCpuSupports(*this, TI, TheCall);
if (BuiltinID == X86::BI__builtin_cpu_is)
- return SemaBuiltinCpuIs(*this, TheCall);
+ return SemaBuiltinCpuIs(*this, TI, TheCall);
// Check for 32-bit only builtins on a 64-bit target.
- const llvm::Triple &TT = Context.getTargetInfo().getTriple();
+ const llvm::Triple &TT = TI.getTriple();
if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
return Diag(TheCall->getCallee()->getBeginLoc(),
diag::err_32_bit_builtin_64_bit_tgt);
@@ -3863,6 +3699,10 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
return true;
+  // If the intrinsic has tile arguments, make sure they are valid.
+ if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
+ return true;
+
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
int i = 0, l = 0, u = 0;
@@ -4473,6 +4313,24 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
}
}
+ if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
+ auto *AA = FDecl->getAttr<AllocAlignAttr>();
+ const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
+ if (!Arg->isValueDependent()) {
+ Expr::EvalResult Align;
+ if (Arg->EvaluateAsInt(Align, Context)) {
+ const llvm::APSInt &I = Align.Val.getInt();
+ if (!I.isPowerOf2())
+ Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
+ << Arg->getSourceRange();
+
+ if (I > Sema::MaximumAlignment)
+ Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
+ << Arg->getSourceRange() << Sema::MaximumAlignment;
+ }
+ }
+ }
+
if (FD)
diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
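// Illustrative sketch (editorial, not part of the patch; 'my_alloc' is a
// hypothetical function) of the alloc_align check added to checkCall above:
// constant alignment arguments are now validated at the call site.
void *my_alloc(int align, int size) __attribute__((alloc_align(1)));
void alloc_align_example(void) {
  my_alloc(32, 100); // fine: 32 is a power of two
  my_alloc(17, 100); // warns: requested alignment is not a power of two
}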
@@ -5491,6 +5349,15 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// gracefully.
TheCall->setType(ResultType);
+ // Prohibit use of _ExtInt with atomic builtins.
+ // The arguments would have already been converted to the first argument's
+  // type, so we only need to check the first argument.
+ const auto *ExtIntValType = ValType->getAs<ExtIntType>();
+ if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
+ Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
+ return ExprError();
+ }
+
return TheCallResult;
}
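// Illustrative sketch (editorial, not part of the patch) of the _ExtInt
// restriction added above for the overloaded __sync_* atomic builtins.
void ext_int_atomic_example(void) {
  static _ExtInt(64) Ok = 0;
  __sync_fetch_and_add(&Ok, 1);      // accepted: 64 bits is a power of two
  // static _ExtInt(37) Bad = 0;
  // __sync_fetch_and_add(&Bad, 1);  // rejected: 37 bits is not a power of two
}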
@@ -6193,11 +6060,9 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
<< Arg->getSourceRange();
- // Alignment calculations can wrap around if it's greater than 2**29.
- unsigned MaximumAlignment = 536870912;
- if (Result > MaximumAlignment)
+ if (Result > Sema::MaximumAlignment)
Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
- << Arg->getSourceRange() << MaximumAlignment;
+ << Arg->getSourceRange() << Sema::MaximumAlignment;
}
if (NumArgs > 2) {
@@ -6412,7 +6277,8 @@ static bool IsShiftedByte(llvm::APSInt Value) {
/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
-bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
+bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -6424,6 +6290,10 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
+ // Truncate to the given size.
+ Result = Result.getLoBits(ArgBits);
+ Result.setIsUnsigned(true);
+
if (IsShiftedByte(Result))
return false;
@@ -6437,7 +6307,8 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
- int ArgNum) {
+ int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -6449,6 +6320,10 @@ bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
+ // Truncate to the given size.
+ Result = Result.getLoBits(ArgBits);
+ Result.setIsUnsigned(true);
+
// Check to see if it's in either of the required forms.
if (IsShiftedByte(Result) ||
(Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
@@ -10228,6 +10103,9 @@ struct IntRange {
false/*NonNegative*/);
}
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return IntRange(EIT->getNumBits(), EIT->isUnsigned());
+
const BuiltinType *BT = cast<BuiltinType>(T);
assert(BT->isInteger());
@@ -10251,6 +10129,9 @@ struct IntRange {
if (const EnumType *ET = dyn_cast<EnumType>(T))
T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return IntRange(EIT->getNumBits(), EIT->isUnsigned());
+
const BuiltinType *BT = cast<BuiltinType>(T);
assert(BT->isInteger());
@@ -12064,27 +11945,31 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
-static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
SourceLocation CC, QualType T);
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
- if (isa<ConditionalOperator>(E))
- return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
+ if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
+ return CheckConditionalOperator(S, CO, CC, T);
AnalyzeImplicitConversions(S, E, CC);
if (E->getType() != T)
return CheckImplicitConversion(S, E, T, CC, &ICContext);
}
-static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
SourceLocation CC, QualType T) {
AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
+ Expr *TrueExpr = E->getTrueExpr();
+ if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
+ TrueExpr = BCO->getCommon();
+
bool Suspicious = false;
- CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
+ CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
if (T->isBooleanType())
@@ -12103,7 +11988,7 @@ static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
if (E->getType() == T) return;
Suspicious = false;
- CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
+ CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
E->getType(), CC, &Suspicious);
if (!Suspicious)
CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
@@ -12120,24 +12005,44 @@ static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}
-/// AnalyzeImplicitConversions - Find and report any interesting
-/// implicit conversions in the given expression. There are a couple
-/// of competing diagnostics here, -Wconversion and -Wsign-compare.
-static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
- bool IsListInit/*= false*/) {
+namespace {
+struct AnalyzeImplicitConversionsWorkItem {
+ Expr *E;
+ SourceLocation CC;
+ bool IsListInit;
+};
+}
+
+/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
+/// that should be visited are added to WorkList.
+static void AnalyzeImplicitConversions(
+ Sema &S, AnalyzeImplicitConversionsWorkItem Item,
+ llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
+ Expr *OrigE = Item.E;
+ SourceLocation CC = Item.CC;
+
QualType T = OrigE->getType();
Expr *E = OrigE->IgnoreParenImpCasts();
// Propagate whether we are in a C++ list initialization expression.
// If so, we do not issue warnings for implicit int-float conversion
// precision loss, because C++11 narrowing already handles it.
- IsListInit =
- IsListInit || (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
+ bool IsListInit = Item.IsListInit ||
+ (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
if (E->isTypeDependent() || E->isValueDependent())
return;
- if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ Expr *SourceExpr = E;
+ // Examine, but don't traverse into the source expression of an
+ // OpaqueValueExpr, since it may have multiple parents and we don't want to
+  // emit duplicate diagnostics. It's fine to examine the form or attempt to
+ // evaluate it in the context of checking the specific conversion to T though.
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
+ if (auto *Src = OVE->getSourceExpr())
+ SourceExpr = Src;
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
if (UO->getOpcode() == UO_Not &&
UO->getSubExpr()->isKnownToHaveBooleanValue())
S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
@@ -12146,21 +12051,20 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// For conditional operators, we analyze the arguments as if they
// were being fed directly into the output.
- if (isa<ConditionalOperator>(E)) {
- ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
CheckConditionalOperator(S, CO, CC, T);
return;
}
// Check implicit argument conversions for function calls.
- if (CallExpr *Call = dyn_cast<CallExpr>(E))
+ if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
CheckImplicitArgumentConversions(S, Call, CC);
// Go ahead and check any implicit conversions we might have skipped.
// The non-canonical typecheck is just an optimization;
// CheckImplicitConversion will filter out dead implicit conversions.
- if (E->getType() != T)
- CheckImplicitConversion(S, E, T, CC, nullptr, IsListInit);
+ if (SourceExpr->getType() != T)
+ CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
// Now continue drilling into this expression.
@@ -12170,7 +12074,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// FIXME: Use a more uniform representation for this.
for (auto *SE : POE->semantics())
if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
- AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC, IsListInit);
+ WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
}
// Skip past explicit casts.
@@ -12178,7 +12082,8 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
E = CE->getSubExpr()->IgnoreParenImpCasts();
if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
- return AnalyzeImplicitConversions(S, E, CC, IsListInit);
+ WorkList.push_back({E, CC, IsListInit});
+ return;
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
@@ -12217,7 +12122,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// Ignore checking string literals that are in logical and operators.
// This is a common pattern for asserts.
continue;
- AnalyzeImplicitConversions(S, ChildExpr, CC, IsListInit);
+ WorkList.push_back({ChildExpr, CC, IsListInit});
}
if (BO && BO->isLogicalOp()) {
@@ -12241,6 +12146,17 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
}
}
+/// AnalyzeImplicitConversions - Find and report any interesting
+/// implicit conversions in the given expression. There are a couple
+/// of competing diagnostics here, -Wconversion and -Wsign-compare.
+static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
+ bool IsListInit/*= false*/) {
+ llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
+ WorkList.push_back({OrigE, CC, IsListInit});
+ while (!WorkList.empty())
+ AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
+}
+
/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
// Taking into account implicit conversions,
@@ -13182,6 +13098,11 @@ public:
}
void VisitCallExpr(const CallExpr *CE) {
+ // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
+
+ if (CE->isUnevaluatedBuiltinCall(Context))
+ return;
+
// C++11 [intro.execution]p15:
// When calling a function [...], every value computation and side effect
// associated with any argument expression, or with the postfix expression
@@ -13189,10 +13110,165 @@ public:
// expression or statement in the body of the function [and thus before
// the value computation of its result].
SequencedSubexpression Sequenced(*this);
- SemaRef.runWithSufficientStackSpace(CE->getExprLoc(),
- [&] { Base::VisitCallExpr(CE); });
+ SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
+ // C++17 [expr.call]p5
+ // The postfix-expression is sequenced before each expression in the
+ // expression-list and any default argument. [...]
+ SequenceTree::Seq CalleeRegion;
+ SequenceTree::Seq OtherRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ CalleeRegion = Tree.allocate(Region);
+ OtherRegion = Tree.allocate(Region);
+ } else {
+ CalleeRegion = Region;
+ OtherRegion = Region;
+ }
+ SequenceTree::Seq OldRegion = Region;
- // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
+ // Visit the callee expression first.
+ Region = CalleeRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ SequencedSubexpression Sequenced(*this);
+ Visit(CE->getCallee());
+ } else {
+ Visit(CE->getCallee());
+ }
+
+ // Then visit the argument expressions.
+ Region = OtherRegion;
+ for (const Expr *Argument : CE->arguments())
+ Visit(Argument);
+
+ Region = OldRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ Tree.merge(CalleeRegion);
+ Tree.merge(OtherRegion);
+ }
+ });
+ }
+
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
+ // C++17 [over.match.oper]p2:
+ // [...] the operator notation is first transformed to the equivalent
+ // function-call notation as summarized in Table 12 (where @ denotes one
+ // of the operators covered in the specified subclause). However, the
+ // operands are sequenced in the order prescribed for the built-in
+ // operator (Clause 8).
+ //
+ // From the above only overloaded binary operators and overloaded call
+ // operators have sequencing rules in C++17 that we need to handle
+ // separately.
+ if (!SemaRef.getLangOpts().CPlusPlus17 ||
+ (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
+ return VisitCallExpr(CXXOCE);
+
+ enum {
+ NoSequencing,
+ LHSBeforeRHS,
+ RHSBeforeLHS,
+ LHSBeforeRest
+ } SequencingKind;
+ switch (CXXOCE->getOperator()) {
+ case OO_Equal:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_AmpEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ SequencingKind = RHSBeforeLHS;
+ break;
+
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_Comma:
+ case OO_ArrowStar:
+ case OO_Subscript:
+ SequencingKind = LHSBeforeRHS;
+ break;
+
+ case OO_Call:
+ SequencingKind = LHSBeforeRest;
+ break;
+
+ default:
+ SequencingKind = NoSequencing;
+ break;
+ }
+
+ if (SequencingKind == NoSequencing)
+ return VisitCallExpr(CXXOCE);
+
+ // This is a call, so all subexpressions are sequenced before the result.
+ SequencedSubexpression Sequenced(*this);
+
+ SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
+ assert(SemaRef.getLangOpts().CPlusPlus17 &&
+ "Should only get there with C++17 and above!");
+ assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
+ "Should only get there with an overloaded binary operator"
+ " or an overloaded call operator!");
+
+ if (SequencingKind == LHSBeforeRest) {
+ assert(CXXOCE->getOperator() == OO_Call &&
+ "We should only have an overloaded call operator here!");
+
+ // This is very similar to VisitCallExpr, except that we only have the
+ // C++17 case. The postfix-expression is the first argument of the
+ // CXXOperatorCallExpr. The expressions in the expression-list, if any,
+ // are in the following arguments.
+ //
+ // Note that we intentionally do not visit the callee expression since
+ // it is just a decayed reference to a function.
+ SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
+ SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
+ assert(CXXOCE->getNumArgs() >= 1 &&
+ "An overloaded call operator must have at least one argument"
+ " for the postfix-expression!");
+ const Expr *PostfixExpr = CXXOCE->getArgs()[0];
+ llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
+ CXXOCE->getNumArgs() - 1);
+
+ // Visit the postfix-expression first.
+ {
+ Region = PostfixExprRegion;
+ SequencedSubexpression Sequenced(*this);
+ Visit(PostfixExpr);
+ }
+
+ // Then visit the argument expressions.
+ Region = ArgsRegion;
+ for (const Expr *Arg : Args)
+ Visit(Arg);
+
+ Region = OldRegion;
+ Tree.merge(PostfixExprRegion);
+ Tree.merge(ArgsRegion);
+ } else {
+ assert(CXXOCE->getNumArgs() == 2 &&
+ "Should only have two arguments here!");
+ assert((SequencingKind == LHSBeforeRHS ||
+ SequencingKind == RHSBeforeLHS) &&
+ "Unexpected sequencing kind!");
+
+ // We do not visit the callee expression since it is just a decayed
+ // reference to a function.
+ const Expr *E1 = CXXOCE->getArg(0);
+ const Expr *E2 = CXXOCE->getArg(1);
+ if (SequencingKind == RHSBeforeLHS)
+ std::swap(E1, E2);
+
+ return VisitSequencedExpressions(E1, E2);
+ }
+ });
}
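  // Illustrative sketch (editorial, not part of the patch) of the C++17 rule
  // modelled above: operands of an overloaded binary operator follow the
  // built-in operator's sequencing, so code like
  //   struct Sink { Sink &operator<<(int) { return *this; } };
  //   void f(int i) { Sink s; s << i << i++; }
  // is well-defined in C++17 and should no longer draw -Wunsequenced.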
void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
@@ -13323,11 +13399,12 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
// C99 6.9.1p5: If the declarator includes a parameter type list, the
// declaration of each parameter shall include an identifier.
- if (CheckParameterNames &&
- Param->getIdentifier() == nullptr &&
- !Param->isImplicit() &&
- !getLangOpts().CPlusPlus)
- Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ if (CheckParameterNames && Param->getIdentifier() == nullptr &&
+ !Param->isImplicit() && !getLangOpts().CPlusPlus) {
+ // Diagnose this as an extension in C17 and earlier.
+ if (!getLangOpts().C2x)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ }
// C99 6.7.5.3p12:
// If the function declarator is not part of a definition of that
@@ -13380,17 +13457,233 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
return HasInvalidParm;
}
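// Illustrative sketch (editorial, not part of the patch): with the change
// above, a C function definition may omit a parameter name; before C2x it is
// diagnosed as an extension rather than an error.
void unnamed_param_example(int) { /* parameter intentionally unnamed */ }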
-/// A helper function to get the alignment of a Decl referred to by DeclRefExpr
-/// or MemberExpr.
-static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
- ASTContext &Context) {
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- return Context.getDeclAlign(DRE->getDecl());
+static Optional<std::pair<CharUnits, CharUnits>>
+getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
+
+/// Compute the alignment and offset of the base class object given the
+/// derived-to-base cast expression and the alignment and offset of the derived
+/// class object.
+static std::pair<CharUnits, CharUnits>
+getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
+ CharUnits BaseAlignment, CharUnits Offset,
+ ASTContext &Ctx) {
+ for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
+ ++PathI) {
+ const CXXBaseSpecifier *Base = *PathI;
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+ if (Base->isVirtual()) {
+ // The complete object may have a lower alignment than the non-virtual
+ // alignment of the base, in which case the base may be misaligned. Choose
+ // the smaller of the non-virtual alignment and BaseAlignment, which is a
+ // conservative lower bound of the complete object alignment.
+ CharUnits NonVirtualAlignment =
+ Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
+ BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
+ Offset = CharUnits::Zero();
+ } else {
+ const ASTRecordLayout &RL =
+ Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
+ Offset += RL.getBaseClassOffset(BaseDecl);
+ }
+ DerivedType = Base->getType();
+ }
+
+ return std::make_pair(BaseAlignment, Offset);
+}
- if (const auto *ME = dyn_cast<MemberExpr>(E))
- return Context.getDeclAlign(ME->getMemberDecl());
+/// Compute the alignment and offset of a binary additive operator.
+static Optional<std::pair<CharUnits, CharUnits>>
+getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
+ bool IsSub, ASTContext &Ctx) {
+ QualType PointeeType = PtrE->getType()->getPointeeType();
- return TypeAlign;
+ if (!PointeeType->isConstantSizeType())
+ return llvm::None;
+
+ auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);
+
+ if (!P)
+ return llvm::None;
+
+ llvm::APSInt IdxRes;
+ CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
+ if (IntE->isIntegerConstantExpr(IdxRes, Ctx)) {
+ CharUnits Offset = EltSize * IdxRes.getExtValue();
+ if (IsSub)
+ Offset = -Offset;
+ return std::make_pair(P->first, P->second + Offset);
+ }
+
+ // If the integer expression isn't a constant expression, compute the lower
+ // bound of the alignment using the alignment and offset of the pointer
+ // expression and the element size.
+ return std::make_pair(
+ P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
+ CharUnits::Zero());
+}
+
+/// This helper function takes an lvalue expression and returns the alignment of
+/// a VarDecl and a constant offset from the VarDecl.
+static Optional<std::pair<CharUnits, CharUnits>>
+getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ break;
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::ImplicitCastExprClass: {
+ auto *CE = cast<CastExpr>(E);
+ const Expr *From = CE->getSubExpr();
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_NoOp:
+ return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ if (!P)
+ break;
+ return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
+ P->second, Ctx);
+ }
+ }
+ break;
+ }
+ case Stmt::ArraySubscriptExprClass: {
+ auto *ASE = cast<ArraySubscriptExpr>(E);
+ return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
+ false, Ctx);
+ }
+ case Stmt::DeclRefExprClass: {
+ if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
+ // FIXME: If VD is captured by copy or is an escaping __block variable,
+ // use the alignment of VD's type.
+ if (!VD->getType()->isReferenceType())
+ return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
+ if (VD->hasInit())
+ return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
+ }
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ auto *ME = cast<MemberExpr>(E);
+ auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD || FD->getType()->isReferenceType())
+ break;
+ Optional<std::pair<CharUnits, CharUnits>> P;
+ if (ME->isArrow())
+ P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
+ else
+ P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
+ if (!P)
+ break;
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
+ uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
+ return std::make_pair(P->first,
+ P->second + CharUnits::fromQuantity(Offset));
+ }
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(E);
+ switch (UO->getOpcode()) {
+ default:
+ break;
+ case UO_Deref:
+ return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
+ }
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ auto *BO = cast<BinaryOperator>(E);
+ auto Opcode = BO->getOpcode();
+ switch (Opcode) {
+ default:
+ break;
+ case BO_Comma:
+ return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
+ }
+ break;
+ }
+ }
+ return llvm::None;
+}
+
+/// This helper function takes a pointer expression and returns the alignment of
+/// a VarDecl and a constant offset from the VarDecl.
+static Optional<std::pair<CharUnits, CharUnits>>
+getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ break;
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::ImplicitCastExprClass: {
+ auto *CE = cast<CastExpr>(E);
+ const Expr *From = CE->getSubExpr();
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_NoOp:
+ return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
+ case CK_ArrayToPointerDecay:
+ return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
+ if (!P)
+ break;
+ return getDerivedToBaseAlignmentAndOffset(
+ CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
+ }
+ }
+ break;
+ }
+ case Stmt::CXXThisExprClass: {
+ auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
+ CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
+ return std::make_pair(Alignment, CharUnits::Zero());
+ }
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(E);
+ if (UO->getOpcode() == UO_AddrOf)
+ return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ auto *BO = cast<BinaryOperator>(E);
+ auto Opcode = BO->getOpcode();
+ switch (Opcode) {
+ default:
+ break;
+ case BO_Add:
+ case BO_Sub: {
+ const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
+ if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
+ std::swap(LHS, RHS);
+ return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
+ Ctx);
+ }
+ case BO_Comma:
+ return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
+ }
+ break;
+ }
+ }
+ return llvm::None;
+}
+
+static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
+ // See if we can compute the alignment of a VarDecl and an offset from it.
+ Optional<std::pair<CharUnits, CharUnits>> P =
+ getBaseAlignmentAndOffsetFromPtr(E, S.Context);
+
+ if (P)
+ return P->first.alignmentAtOffset(P->second);
+
+ // If that failed, return the type's alignment.
+ return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}
/// CheckCastAlign - Implements -Wcast-align, which warns when a
@@ -13420,21 +13713,13 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
if (!SrcPtr) return;
QualType SrcPointee = SrcPtr->getPointeeType();
- // Whitelist casts from cv void*. We already implicitly
- // whitelisted casts to cv void*, since they have alignment 1.
- // Also whitelist casts involving incomplete types, which implicitly
+ // Explicitly allow casts from cv void*. We already implicitly
+ // allowed casts to cv void*, since they have alignment 1.
+ // Also allow casts involving incomplete types, which implicitly
// includes 'void'.
if (SrcPointee->isIncompleteType()) return;
- CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
-
- if (auto *CE = dyn_cast<CastExpr>(Op)) {
- if (CE->getCastKind() == CK_ArrayToPointerDecay)
- SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
- } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
- if (UO->getOpcode() == UO_AddrOf)
- SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
- }
+ CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);
if (SrcAlign >= DestAlign) return;
@@ -13916,7 +14201,7 @@ static bool isSetterLikeSelector(Selector sel) {
if (str.startswith("set"))
str = str.substr(3);
else if (str.startswith("add")) {
- // Specially whitelist 'addOperationWithBlock:'.
+ // Specially allow 'addOperationWithBlock:'.
if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
return false;
str = str.substr(3);
@@ -14242,12 +14527,12 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
return;
unsigned Attributes = PD->getPropertyAttributes();
- if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (Attributes & ObjCPropertyAttribute::kind_assign) {
// when 'assign' attribute was not explicitly specified
// by user, ignore it and rely on property type itself
// for lifetime info.
unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
- if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
+ if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
LHSType->isObjCRetainableType())
return;
@@ -14259,8 +14544,7 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
}
RHS = cast->getSubExpr();
}
- }
- else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
return;
}
@@ -15045,3 +15329,259 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
_2, _3, _4));
}
+
+ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(*this, TheCall, 1))
+ return ExprError();
+
+ ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
+ if (MatrixArg.isInvalid())
+ return MatrixArg;
+ Expr *Matrix = MatrixArg.get();
+
+ auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
+ if (!MType) {
+ Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
+ return ExprError();
+ }
+
+ // Create returned matrix type by swapping rows and columns of the argument
+ // matrix type.
+ QualType ResultType = Context.getConstantMatrixType(
+ MType->getElementType(), MType->getNumColumns(), MType->getNumRows());
+
+ // Change the return type to the type of the returned matrix.
+ TheCall->setType(ResultType);
+
+ // Update call argument to use the possibly converted matrix argument.
+ TheCall->setArg(0, Matrix);
+ return CallResult;
+}
+
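// Illustrative usage sketch (editorial, not part of the patch; requires the
// matrix extension, -fenable-matrix): the result type of the transpose swaps
// the rows and columns of the argument's matrix type.
typedef float m3x4_t __attribute__((matrix_type(3, 4)));
typedef float m4x3_t __attribute__((matrix_type(4, 3)));
m4x3_t transpose_example(m3x4_t M) {
  return __builtin_matrix_transpose(M); // yields a 4x3 matrix
}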
+// Get and verify the matrix dimensions.
+static llvm::Optional<unsigned>
+getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
+ llvm::APSInt Value(64);
+ SourceLocation ErrorPos;
+ if (!Expr->isIntegerConstantExpr(Value, S.Context, &ErrorPos)) {
+ S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
+ << Name;
+ return {};
+ }
+ uint64_t Dim = Value.getZExtValue();
+ if (!ConstantMatrixType::isDimensionValid(Dim)) {
+ S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
+ << Name << ConstantMatrixType::getMaxElementsPerDimension();
+ return {};
+ }
+ return Dim;
+}
+
+ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (!getLangOpts().MatrixTypes) {
+ Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
+ return ExprError();
+ }
+
+ if (checkArgCount(*this, TheCall, 4))
+ return ExprError();
+
+ unsigned PtrArgIdx = 0;
+ Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
+ Expr *RowsExpr = TheCall->getArg(1);
+ Expr *ColumnsExpr = TheCall->getArg(2);
+ Expr *StrideExpr = TheCall->getArg(3);
+
+ bool ArgError = false;
+
+ // Check pointer argument.
+ {
+ ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
+ if (PtrConv.isInvalid())
+ return PtrConv;
+ PtrExpr = PtrConv.get();
+ TheCall->setArg(0, PtrExpr);
+ if (PtrExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+ }
+
+ auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
+ QualType ElementTy;
+ if (!PtrTy) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ } else {
+ ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
+
+ if (!ConstantMatrixType::isValidElementType(ElementTy)) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ }
+ }
+
+ // Apply default lvalue conversions and convert the expression to size_t.
+ auto ApplyArgumentConversions = [this](Expr *E) {
+ ExprResult Conv = DefaultLvalueConversion(E);
+ if (Conv.isInvalid())
+ return Conv;
+
+ return tryConvertExprToType(Conv.get(), Context.getSizeType());
+ };
+
+ // Apply conversion to row and column expressions.
+ ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
+ if (!RowsConv.isInvalid()) {
+ RowsExpr = RowsConv.get();
+ TheCall->setArg(1, RowsExpr);
+ } else
+ RowsExpr = nullptr;
+
+ ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
+ if (!ColumnsConv.isInvalid()) {
+ ColumnsExpr = ColumnsConv.get();
+ TheCall->setArg(2, ColumnsExpr);
+ } else
+ ColumnsExpr = nullptr;
+
+ // If any part of the result matrix type is still pending, just use
+ // Context.DependentTy until all parts are resolved.
+ if ((RowsExpr && RowsExpr->isTypeDependent()) ||
+ (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
+ TheCall->setType(Context.DependentTy);
+ return CallResult;
+ }
+
+ // Check row and column dimensions.
+ llvm::Optional<unsigned> MaybeRows;
+ if (RowsExpr)
+ MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
+
+ llvm::Optional<unsigned> MaybeColumns;
+ if (ColumnsExpr)
+ MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
+
+ // Check stride argument.
+ ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
+ if (StrideConv.isInvalid())
+ return ExprError();
+ StrideExpr = StrideConv.get();
+ TheCall->setArg(3, StrideExpr);
+
+ llvm::APSInt Value(64);
+ if (MaybeRows && StrideExpr->isIntegerConstantExpr(Value, Context)) {
+ uint64_t Stride = Value.getZExtValue();
+ if (Stride < *MaybeRows) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
+ }
+
+ if (ArgError || !MaybeRows || !MaybeColumns)
+ return ExprError();
+
+ TheCall->setType(
+ Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
+ return CallResult;
+}
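
A hedged sketch of how this builtin is typically called (names are illustrative): the row and column arguments must be integer constant expressions, while the stride may be a runtime value, as the checks above show.

  typedef double dx4x4_t __attribute__((matrix_type(4, 4)));

  dx4x4_t load_tile(const double *Data, unsigned long Stride) {
    // Loads a 4x4 tile stored column-major; a constant stride smaller than
    // the number of rows would be rejected by the check above.
    return __builtin_matrix_column_major_load(Data, 4, 4, Stride);
  }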
+
+ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(*this, TheCall, 3))
+ return ExprError();
+
+ unsigned PtrArgIdx = 1;
+ Expr *MatrixExpr = TheCall->getArg(0);
+ Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
+ Expr *StrideExpr = TheCall->getArg(2);
+
+ bool ArgError = false;
+
+ {
+ ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
+ if (MatrixConv.isInvalid())
+ return MatrixConv;
+ MatrixExpr = MatrixConv.get();
+ TheCall->setArg(0, MatrixExpr);
+ }
+ if (MatrixExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+
+ auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
+ if (!MatrixTy) {
+ Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
+ ArgError = true;
+ }
+
+ {
+ ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
+ if (PtrConv.isInvalid())
+ return PtrConv;
+ PtrExpr = PtrConv.get();
+ TheCall->setArg(1, PtrExpr);
+ if (PtrExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+ }
+
+ // Check pointer argument.
+ auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
+ if (!PtrTy) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ } else {
+ QualType ElementTy = PtrTy->getPointeeType();
+ if (ElementTy.isConstQualified()) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
+ ArgError = true;
+ }
+ ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
+ if (MatrixTy &&
+ !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
+ Diag(PtrExpr->getBeginLoc(),
+ diag::err_builtin_matrix_pointer_arg_mismatch)
+ << ElementTy << MatrixTy->getElementType();
+ ArgError = true;
+ }
+ }
+
+ // Apply default lvalue conversions and convert the stride expression to
+ // size_t.
+ {
+ ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
+ if (StrideConv.isInvalid())
+ return StrideConv;
+
+ StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
+ if (StrideConv.isInvalid())
+ return StrideConv;
+ StrideExpr = StrideConv.get();
+ TheCall->setArg(2, StrideExpr);
+ }
+
+ // Check stride argument.
+ llvm::APSInt Value(64);
+ if (MatrixTy && StrideExpr->isIntegerConstantExpr(Value, Context)) {
+ uint64_t Stride = Value.getZExtValue();
+ if (Stride < MatrixTy->getNumRows()) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
+ }
+
+ if (ArgError)
+ return ExprError();
+
+ return CallResult;
+}
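
The store form is the mirror image; a minimal sketch (again with illustrative names), where the destination pointee must be non-const and match the matrix element type:

  typedef double dx4x4_t __attribute__((matrix_type(4, 4)));

  void store_tile(dx4x4_t M, double *Data, unsigned long Stride) {
    // Writes M column-major starting at Data; the const-ness, element-type
    // and stride checks above apply to these three arguments.
    __builtin_matrix_column_major_store(M, Data, Stride);
  }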
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index 7260977c634d..0a8a27068ebf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -9,25 +9,36 @@
// This file defines the code-completion semantic actions.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/QualTypeNames.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -36,7 +47,9 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include <list>
#include <map>
#include <string>
@@ -1676,11 +1689,9 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
- // typename qualified-id
+ // typename name
Builder.AddTypedTextChunk("typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddPlaceholderChunk("qualifier");
- Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
@@ -1807,6 +1818,18 @@ static void AddTypedefResult(ResultBuilder &Results) {
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
+// using name = type
+static void AddUsingAliasResult(CodeCompletionBuilder &Builder,
+ ResultBuilder &Results) {
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
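
Once its placeholders are filled in, the pattern added here completes to an ordinary C++11 alias-declaration, e.g. (illustrative):

  using Callback = void (*)(int);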
+
static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
const LangOptions &LangOpts) {
switch (CCC) {
@@ -2050,6 +2073,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ AddUsingAliasResult(Builder, Results);
+
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
Builder.AddTypedTextChunk("using typename");
@@ -2130,6 +2156,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Statement: {
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ AddUsingAliasResult(Builder, Results);
+
AddTypedefResult(Results);
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
@@ -2748,7 +2777,7 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
std::string Result;
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
- Result = Param->getIdentifier()->getName();
+ Result = std::string(Param->getIdentifier()->getName());
QualType Type = Param->getType();
if (ObjCSubsts)
@@ -2787,7 +2816,7 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
// for the block; just use the parameter type as a placeholder.
std::string Result;
if (!ObjCMethodParam && Param->getIdentifier())
- Result = Param->getIdentifier()->getName();
+ Result = std::string(Param->getIdentifier()->getName());
QualType Type = Param->getType().getUnqualifiedType();
@@ -3002,7 +3031,7 @@ static void AddTemplateParameterChunks(
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
- PlaceholderStr = NTTP->getIdentifier()->getName();
+ PlaceholderStr = std::string(NTTP->getIdentifier()->getName());
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
@@ -3705,8 +3734,11 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
Result.addBriefComment(RC->getBriefText(S.getASTContext()));
}
AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
- Result.AddTextChunk(
- Result.getAllocator().CopyString(FDecl->getNameAsString()));
+
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ FDecl->getDeclName().print(OS, Policy);
+ Result.AddTextChunk(Result.getAllocator().CopyString(OS.str()));
} else {
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
Proto->getReturnType().getAsString(Policy)));
@@ -4329,7 +4361,7 @@ static void AddLambdaCompletion(ResultBuilder &Results,
First = false;
constexpr llvm::StringLiteral NamePlaceholder = "!#!NAME_GOES_HERE!#!";
- std::string Type = NamePlaceholder;
+ std::string Type = std::string(NamePlaceholder);
Parameter.getAsStringInternal(Type, PrintingPolicy(LangOpts));
llvm::StringRef Prefix, Suffix;
std::tie(Prefix, Suffix) = llvm::StringRef(Type).split(NamePlaceholder);
@@ -4719,6 +4751,386 @@ static void AddRecordMembersCompletionResults(
}
}
+// Returns the RecordDecl inside the BaseType, falling back to the primary
+// template in case of specializations, since we might not have a decl for the
+// instantiation/specialization yet (e.g. in dependent code).
+static RecordDecl *getAsRecordDecl(const QualType BaseType) {
+ if (auto *RD = BaseType->getAsRecordDecl())
+ return RD;
+
+ if (const auto *TST = BaseType->getAs<TemplateSpecializationType>()) {
+ if (const auto *TD = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl())) {
+ return TD->getTemplatedDecl();
+ }
+ }
+
+ return nullptr;
+}
+
+namespace {
+// Collects completion-relevant information about a concept-constrained type T.
+// In particular, examines the constraint expressions to find members of T.
+//
+// The design is very simple: we walk down each constraint looking for
+// expressions of the form T.foo().
+// If we're extra lucky, the return type is specified.
+// We don't do any clever handling of && or || in constraint expressions, we
+// take members from both branches.
+//
+// For example, given:
+// template <class T> concept X = requires (T t, string& s) { t.print(s); };
+// template <X U> void foo(U u) { u.^ }
+// We want to suggest the inferred member function 'print(string)'.
+// We see that u has type U, so X<U> holds.
+// X<U> requires t.print(s) to be valid, where t has type U (substituted for T).
+// By looking at the CallExpr we find the signature of print().
+//
+// While we tend to know in advance which kind of member access (. -> ::) we
+// want, it's simpler just to gather them all and post-filter.
+//
+// FIXME: some of this machinery could be used for non-concept type-parms too,
+// enabling completion for type parameters based on other uses of that param.
+//
+// FIXME: there are other cases where a type can be constrained by a concept,
+// e.g. inside `if constexpr(ConceptSpecializationExpr) { ... }`
+class ConceptInfo {
+public:
+ // Describes a likely member of a type, inferred by concept constraints.
+ // Offered as a code completion in T., T-> and T:: contexts.
+ struct Member {
+ // Always non-null: we only handle members with ordinary identifier names.
+ const IdentifierInfo *Name = nullptr;
+ // Set for functions we've seen called.
+ // We don't have the declared parameter types, only the actual types of
+ // arguments we've seen. These are still valuable, as it's hard to render
+ // a useful function completion with neither parameter types nor names!
+ llvm::Optional<SmallVector<QualType, 1>> ArgTypes;
+ // Whether this is accessed as T.member, T->member, or T::member.
+ enum AccessOperator {
+ Colons,
+ Arrow,
+ Dot,
+ } Operator = Dot;
+ // What's known about the type of a variable or return type of a function.
+ const TypeConstraint *ResultType = nullptr;
+ // FIXME: also track:
+ // - kind of entity (function/variable/type), to expose structured results
+ // - template args kinds/types, as a proxy for template params
+
+ // For now we simply return these results as "pattern" strings.
+ CodeCompletionString *render(Sema &S, CodeCompletionAllocator &Alloc,
+ CodeCompletionTUInfo &Info) const {
+ CodeCompletionBuilder B(Alloc, Info);
+ // Result type
+ if (ResultType) {
+ std::string AsString;
+ {
+ llvm::raw_string_ostream OS(AsString);
+ QualType ExactType = deduceType(*ResultType);
+ if (!ExactType.isNull())
+ ExactType.print(OS, getCompletionPrintingPolicy(S));
+ else
+ ResultType->print(OS, getCompletionPrintingPolicy(S));
+ }
+ B.AddResultTypeChunk(Alloc.CopyString(AsString));
+ }
+ // Member name
+ B.AddTypedTextChunk(Alloc.CopyString(Name->getName()));
+ // Function argument list
+ if (ArgTypes) {
+ B.AddChunk(clang::CodeCompletionString::CK_LeftParen);
+ bool First = true;
+ for (QualType Arg : *ArgTypes) {
+ if (First)
+ First = false;
+ else {
+ B.AddChunk(clang::CodeCompletionString::CK_Comma);
+ B.AddChunk(clang::CodeCompletionString::CK_HorizontalSpace);
+ }
+ B.AddPlaceholderChunk(Alloc.CopyString(
+ Arg.getAsString(getCompletionPrintingPolicy(S))));
+ }
+ B.AddChunk(clang::CodeCompletionString::CK_RightParen);
+ }
+ return B.TakeString();
+ }
+ };
+
+ // BaseType is the type parameter T to infer members from.
+ // T must be accessible within S, as we use it to find the template entity
+ // that T is attached to in order to gather the relevant constraints.
+ ConceptInfo(const TemplateTypeParmType &BaseType, Scope *S) {
+ auto *TemplatedEntity = getTemplatedEntity(BaseType.getDecl(), S);
+ for (const Expr *E : constraintsForTemplatedEntity(TemplatedEntity))
+ believe(E, &BaseType);
+ }
+
+ std::vector<Member> members() {
+ std::vector<Member> Results;
+ for (const auto &E : this->Results)
+ Results.push_back(E.second);
+ llvm::sort(Results, [](const Member &L, const Member &R) {
+ return L.Name->getName() < R.Name->getName();
+ });
+ return Results;
+ }
+
+private:
+ // Infer members of T, given that the expression E (dependent on T) is true.
+ void believe(const Expr *E, const TemplateTypeParmType *T) {
+ if (!E || !T)
+ return;
+ if (auto *CSE = dyn_cast<ConceptSpecializationExpr>(E)) {
+ // If the concept is
+ // template <class A, class B> concept CD = f<A, B>();
+ // And the concept specialization is
+ // CD<int, T>
+ // Then we're substituting T for B, so we want to make f<A, B>() true
+ // by adding members to B - i.e. believe(f<A, B>(), B);
+ //
+ // For simplicity:
+ // - we don't attempt to substitute int for A
+ // - when T is used in other ways (like CD<T*>) we ignore it
+ ConceptDecl *CD = CSE->getNamedConcept();
+ TemplateParameterList *Params = CD->getTemplateParameters();
+ unsigned Index = 0;
+ for (const auto &Arg : CSE->getTemplateArguments()) {
+ if (Index >= Params->size())
+ break; // Won't happen in valid code.
+ if (isApprox(Arg, T)) {
+ auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Params->getParam(Index));
+ if (!TTPD)
+ continue;
+ // T was used as an argument, and bound to the parameter TT.
+ auto *TT = cast<TemplateTypeParmType>(TTPD->getTypeForDecl());
+ // So now we know the constraint as a function of TT is true.
+ believe(CD->getConstraintExpr(), TT);
+ // (concepts themselves have no associated constraints to require)
+ }
+
+ ++Index;
+ }
+ } else if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ // For A && B, we can infer members from both branches.
+ // For A || B, the union is still more useful than the intersection.
+ if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
+ believe(BO->getLHS(), T);
+ believe(BO->getRHS(), T);
+ }
+ } else if (auto *RE = dyn_cast<RequiresExpr>(E)) {
+ // A requires(){...} lets us infer members from each requirement.
+ for (const concepts::Requirement *Req : RE->getRequirements()) {
+ if (!Req->isDependent())
+ continue; // Can't tell us anything about T.
+ // Now Req cannot be a substitution error: those aren't dependent.
+
+ if (auto *TR = dyn_cast<concepts::TypeRequirement>(Req)) {
+ // Do a full traversal so we get `foo` from `typename T::foo::bar`.
+ QualType AssertedType = TR->getType()->getType();
+ ValidVisitor(this, T).TraverseType(AssertedType);
+ } else if (auto *ER = dyn_cast<concepts::ExprRequirement>(Req)) {
+ ValidVisitor Visitor(this, T);
+ // If we have a type constraint on the value of the expression,
+ // AND the whole outer expression describes a member, then we'll
+ // be able to use the constraint to provide the return type.
+ if (ER->getReturnTypeRequirement().isTypeConstraint()) {
+ Visitor.OuterType =
+ ER->getReturnTypeRequirement().getTypeConstraint();
+ Visitor.OuterExpr = ER->getExpr();
+ }
+ Visitor.TraverseStmt(ER->getExpr());
+ } else if (auto *NR = dyn_cast<concepts::NestedRequirement>(Req)) {
+ believe(NR->getConstraintExpr(), T);
+ }
+ }
+ }
+ }
+
+ // This visitor infers members of T based on traversing expressions/types
+ // that involve T. It is invoked with code known to be valid for T.
+ class ValidVisitor : public RecursiveASTVisitor<ValidVisitor> {
+ ConceptInfo *Outer;
+ const TemplateTypeParmType *T;
+
+ CallExpr *Caller = nullptr;
+ Expr *Callee = nullptr;
+
+ public:
+ // If set, OuterExpr is constrained by OuterType.
+ Expr *OuterExpr = nullptr;
+ const TypeConstraint *OuterType = nullptr;
+
+ ValidVisitor(ConceptInfo *Outer, const TemplateTypeParmType *T)
+ : Outer(Outer), T(T) {
+ assert(T);
+ }
+
+ // In T.foo or T->foo, `foo` is a member function/variable.
+ bool VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E) {
+ const Type *Base = E->getBaseType().getTypePtr();
+ bool IsArrow = E->isArrow();
+ if (Base->isPointerType() && IsArrow) {
+ IsArrow = false;
+ Base = Base->getPointeeType().getTypePtr();
+ }
+ if (isApprox(Base, T))
+ addValue(E, E->getMember(), IsArrow ? Member::Arrow : Member::Dot);
+ return true;
+ }
+
+ // In T::foo, `foo` is a static member function/variable.
+ bool VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ if (E->getQualifier() && isApprox(E->getQualifier()->getAsType(), T))
+ addValue(E, E->getDeclName(), Member::Colons);
+ return true;
+ }
+
+ // In T::typename foo, `foo` is a type.
+ bool VisitDependentNameType(DependentNameType *DNT) {
+ const auto *Q = DNT->getQualifier();
+ if (Q && isApprox(Q->getAsType(), T))
+ addType(DNT->getIdentifier());
+ return true;
+ }
+
+ // In T::foo::bar, `foo` must be a type.
+ // VisitNNS() doesn't exist, and TraverseNNS isn't always called :-(
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSL) {
+ if (NNSL) {
+ NestedNameSpecifier *NNS = NNSL.getNestedNameSpecifier();
+ const auto *Q = NNS->getPrefix();
+ if (Q && isApprox(Q->getAsType(), T))
+ addType(NNS->getAsIdentifier());
+ }
+ // FIXME: also handle T::foo<X>::bar
+ return RecursiveASTVisitor::TraverseNestedNameSpecifierLoc(NNSL);
+ }
+
+ // FIXME also handle T::foo<X>
+
+ // Track the innermost caller/callee relationship so we can tell if a
+ // nested expr is being called as a function.
+ bool VisitCallExpr(CallExpr *CE) {
+ Caller = CE;
+ Callee = CE->getCallee();
+ return true;
+ }
+
+ private:
+ void addResult(Member &&M) {
+ auto R = Outer->Results.try_emplace(M.Name);
+ Member &O = R.first->second;
+ // Overwrite existing if the new member has more info.
+ // The preference of . vs :: vs -> is fairly arbitrary.
+ if (/*Inserted*/ R.second ||
+ std::make_tuple(M.ArgTypes.hasValue(), M.ResultType != nullptr,
+ M.Operator) > std::make_tuple(O.ArgTypes.hasValue(),
+ O.ResultType != nullptr,
+ O.Operator))
+ O = std::move(M);
+ }
+
+ void addType(const IdentifierInfo *Name) {
+ if (!Name)
+ return;
+ Member M;
+ M.Name = Name;
+ M.Operator = Member::Colons;
+ addResult(std::move(M));
+ }
+
+ void addValue(Expr *E, DeclarationName Name,
+ Member::AccessOperator Operator) {
+ if (!Name.isIdentifier())
+ return;
+ Member Result;
+ Result.Name = Name.getAsIdentifierInfo();
+ Result.Operator = Operator;
+ // If this is the callee of an immediately-enclosing CallExpr, then
+ // treat it as a method; otherwise it's a variable.
+ if (Caller != nullptr && Callee == E) {
+ Result.ArgTypes.emplace();
+ for (const auto *Arg : Caller->arguments())
+ Result.ArgTypes->push_back(Arg->getType());
+ if (Caller == OuterExpr) {
+ Result.ResultType = OuterType;
+ }
+ } else {
+ if (E == OuterExpr)
+ Result.ResultType = OuterType;
+ }
+ addResult(std::move(Result));
+ }
+ };
+
+ static bool isApprox(const TemplateArgument &Arg, const Type *T) {
+ return Arg.getKind() == TemplateArgument::Type &&
+ isApprox(Arg.getAsType().getTypePtr(), T);
+ }
+
+ static bool isApprox(const Type *T1, const Type *T2) {
+ return T1 && T2 &&
+ T1->getCanonicalTypeUnqualified() ==
+ T2->getCanonicalTypeUnqualified();
+ }
+
+ // Returns the DeclContext immediately enclosed by the template parameter
+ // scope. For primary templates, this is the templated entity, e.g. the CXXRecordDecl.
+ // For specializations, this is e.g. ClassTemplatePartialSpecializationDecl.
+ static DeclContext *getTemplatedEntity(const TemplateTypeParmDecl *D,
+ Scope *S) {
+ if (D == nullptr)
+ return nullptr;
+ Scope *Inner = nullptr;
+ while (S) {
+ if (S->isTemplateParamScope() && S->isDeclScope(D))
+ return Inner ? Inner->getEntity() : nullptr;
+ Inner = S;
+ S = S->getParent();
+ }
+ return nullptr;
+ }
+
+ // Gets all the type constraint expressions that might apply to the type
+ // variables associated with DC (as returned by getTemplatedEntity()).
+ static SmallVector<const Expr *, 1>
+ constraintsForTemplatedEntity(DeclContext *DC) {
+ SmallVector<const Expr *, 1> Result;
+ if (DC == nullptr)
+ return Result;
+ // Primary templates can have constraints.
+ if (const auto *TD = cast<Decl>(DC)->getDescribedTemplate())
+ TD->getAssociatedConstraints(Result);
+ // Partial specializations may have constraints.
+ if (const auto *CTPSD =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(DC))
+ CTPSD->getAssociatedConstraints(Result);
+ if (const auto *VTPSD = dyn_cast<VarTemplatePartialSpecializationDecl>(DC))
+ VTPSD->getAssociatedConstraints(Result);
+ return Result;
+ }
+
+ // Attempt to find the unique type satisfying a constraint.
+ // This lets us show e.g. `int` instead of `std::same_as<int>`.
+ static QualType deduceType(const TypeConstraint &T) {
+ // Assume a same_as<T> return type constraint is std::same_as or equivalent.
+ // In this case the return type is T.
+ DeclarationName DN = T.getNamedConcept()->getDeclName();
+ if (DN.isIdentifier() && DN.getAsIdentifierInfo()->isStr("same_as"))
+ if (const auto *Args = T.getTemplateArgsAsWritten())
+ if (Args->getNumTemplateArgs() == 1) {
+ const auto &Arg = Args->arguments().front().getArgument();
+ if (Arg.getKind() == TemplateArgument::Type)
+ return Arg.getAsType();
+ }
+ return {};
+ }
+
+ llvm::DenseMap<const IdentifierInfo *, Member> Results;
+};
+} // namespace
+
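
A self-contained version of the scenario sketched in the comment above might look as follows (names are illustrative); completing after `u.` can offer print(std::string &), inferred purely from the constraint on U:

  #include <string>

  template <class T>
  concept X = requires(T t, std::string &s) { t.print(s); };

  template <X U> void foo(U u) {
    // u.   <- member completion here can suggest print(std::string &)
  }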
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
@@ -4767,37 +5179,46 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Base = ConvertedBase.get();
QualType BaseType = Base->getType();
+ if (BaseType.isNull())
+ return false;
ExprValueKind BaseKind = Base->getValueKind();
if (IsArrow) {
if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
BaseType = Ptr->getPointeeType();
BaseKind = VK_LValue;
- } else if (BaseType->isObjCObjectPointerType())
- /*Do nothing*/;
- else
+ } else if (BaseType->isObjCObjectPointerType() ||
+ BaseType->isTemplateTypeParmType()) {
+ // Both cases (dot/arrow) handled below.
+ } else {
return false;
+ }
}
- if (const RecordType *Record = BaseType->getAs<RecordType>()) {
+ if (RecordDecl *RD = getAsRecordDecl(BaseType)) {
AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- Record->getDecl(),
- std::move(AccessOpFixIt));
- } else if (const auto *TST =
- BaseType->getAs<TemplateSpecializationType>()) {
- TemplateName TN = TST->getTemplateName();
- if (const auto *TD =
- dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl())) {
- CXXRecordDecl *RD = TD->getTemplatedDecl();
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- RD, std::move(AccessOpFixIt));
+ RD, std::move(AccessOpFixIt));
+ } else if (const auto *TTPT =
+ dyn_cast<TemplateTypeParmType>(BaseType.getTypePtr())) {
+ auto Operator =
+ IsArrow ? ConceptInfo::Member::Arrow : ConceptInfo::Member::Dot;
+ for (const auto &R : ConceptInfo(*TTPT, S).members()) {
+ if (R.Operator != Operator)
+ continue;
+ CodeCompletionResult Result(
+ R.render(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo()));
+ if (AccessOpFixIt)
+ Result.FixIts.push_back(*AccessOpFixIt);
+ Results.AddResult(std::move(Result));
}
- } else if (const auto *ICNT = BaseType->getAs<InjectedClassNameType>()) {
- if (auto *RD = ICNT->getDecl())
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- RD, std::move(AccessOpFixIt));
} else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
- // Objective-C property reference.
+ // Objective-C property reference. Bail if we're performing fix-it code
+ // completion, since Objective-C properties are normally backed by ivars
+ // and most Objective-C fix-its here would have little value.
+ if (AccessOpFixIt.hasValue()) {
+ return false;
+ }
AddedPropertiesSet AddedProperties;
if (const ObjCObjectPointerType *ObjCPtr =
@@ -4817,7 +5238,12 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
/*InOriginalClass*/ false);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
- // Objective-C instance variable access.
+ // Objective-C instance variable access. Bail if we're performing fix-it
+ // code completion, since Objective-C properties are normally backed by
+ // ivars and most Objective-C fix-its here would have little value.
+ if (AccessOpFixIt.hasValue()) {
+ return false;
+ }
ObjCInterfaceDecl *Class = nullptr;
if (const ObjCObjectPointerType *ObjCPtr =
BaseType->getAs<ObjCObjectPointerType>())
@@ -5282,6 +5708,44 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
return QualType();
}
+void Sema::CodeCompleteDesignator(const QualType BaseType,
+ llvm::ArrayRef<Expr *> InitExprs,
+ const Designation &D) {
+ if (BaseType.isNull())
+ return;
+ // FIXME: Handle nested designations, e.g. .x.^
+ if (!D.empty())
+ return;
+
+ const auto *RD = getAsRecordDecl(BaseType);
+ if (!RD)
+ return;
+ if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // The template might not be instantiated yet; fall back to the primary
+ // template in such cases.
+ if (CTSD->getTemplateSpecializationKind() == TSK_Undeclared)
+ RD = CTSD->getSpecializedTemplate()->getTemplatedDecl();
+ }
+ if (RD->fields().empty())
+ return;
+
+ CodeCompletionContext CCC(CodeCompletionContext::CCC_DotMemberAccess,
+ BaseType);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), CCC);
+
+ Results.EnterNewScope();
+ for (const auto *FD : RD->fields()) {
+ // FIXME: Make use of previous designators to mark any fields before them
+ // as inaccessible, and also compute the next initializer priority.
+ ResultBuilder::Result Result(FD, Results.getBasePriority(FD));
+ Results.AddResult(Result, CurContext, /*Hiding=*/nullptr);
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
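
As an illustration of the context this entry point serves (field names are made up): typing `Point P = {.` and invoking completion offers the record's fields, leading to something like:

  struct Point { int x; int y; };

  void init() {
    Point P = {.x = 1, .y = 2}; // completion offered .x and .y after `{.`
    (void)P;
  }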
+
void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D);
if (!VD) {
@@ -5297,7 +5761,7 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
CodeCompleteExpression(S, Data);
}
-void Sema::CodeCompleteAfterIf(Scope *S) {
+void Sema::CodeCompleteAfterIf(Scope *S, bool IsBracedThen) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, PCC_Statement));
@@ -5314,15 +5778,25 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
// "else" block
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
+
+ auto AddElseBodyPattern = [&] {
+ if (IsBracedThen) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ } else {
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("statement");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ }
+ };
Builder.AddTypedTextChunk("else");
- if (Results.includeCodePatterns()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
- }
+ if (Results.includeCodePatterns())
+ AddElseBodyPattern();
Results.AddResult(Builder.TakeString());
// "else if" block
@@ -5335,12 +5809,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
if (Results.includeCodePatterns()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ AddElseBodyPattern();
}
Results.AddResult(Builder.TakeString());
@@ -5393,13 +5862,14 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// Always pretend to enter a context to ensure that a dependent type
// resolves to a dependent record.
DeclContext *Ctx = computeDeclContext(SS, /*EnteringContext=*/true);
- if (!Ctx)
- return;
// Try to instantiate any non-dependent declaration contexts before
- // we look in them.
- if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
- return;
+ // we look in them. Bail out if we fail.
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ if (NNS != nullptr && SS.isValid() && !NNS->isDependent()) {
+ if (Ctx == nullptr || RequireCompleteDeclContext(SS, Ctx))
+ return;
+ }
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CC);
@@ -5409,21 +5879,34 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
- NestedNameSpecifier *NNS = SS.getScopeRep();
+ // FIXME: Results is always empty here; this appears to be dead code.
if (!Results.empty() && NNS->isDependent())
Results.AddResult("template");
+ // If the scope is a concept-constrained type parameter, infer nested
+ // members based on the constraints.
+ if (const auto *TTPT =
+ dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
+ for (const auto &R : ConceptInfo(*TTPT, S).members()) {
+ if (R.Operator != ConceptInfo::Member::Colons)
+ continue;
+ Results.AddResult(CodeCompletionResult(
+ R.render(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo())));
+ }
+ }
+
// Add calls to overridden virtual functions, if there are any.
//
// FIXME: This isn't wonderful, because we don't know whether we're actually
// in a context that permits expressions. This is a general issue with
// qualified-id completions.
- if (!EnteringContext)
+ if (Ctx && !EnteringContext)
MaybeAddOverrideCalls(*this, Ctx, Results);
Results.ExitScope();
- if (CodeCompleter->includeNamespaceLevelDecls() ||
- (!Ctx->isNamespace() && !Ctx->isTranslationUnit())) {
+ if (Ctx &&
+ (CodeCompleter->includeNamespaceLevelDecls() || !Ctx->isFileContext())) {
CodeCompletionDeclConsumer Consumer(Results, Ctx, BaseType);
LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
/*IncludeGlobalScope=*/true,
@@ -5785,6 +6268,53 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
Results.data(), Results.size());
}
+void Sema::CodeCompleteAfterFunctionEquals(Declarator &D) {
+ if (!LangOpts.CPlusPlus11)
+ return;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ auto ShouldAddDefault = [&D, this]() {
+ if (!D.isFunctionDeclarator())
+ return false;
+ auto &Id = D.getName();
+ if (Id.getKind() == UnqualifiedIdKind::IK_DestructorName)
+ return true;
+ // FIXME(liuhui): Ideally, we should check the constructor parameter list
+ // to verify that it is the default, copy, or move constructor.
+ if (Id.getKind() == UnqualifiedIdKind::IK_ConstructorName &&
+ D.getFunctionTypeInfo().NumParams <= 1)
+ return true;
+ if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId) {
+ auto Op = Id.OperatorFunctionId.Operator;
+ // FIXME(liuhui): Ideally, we should check the function parameter list
+ // to verify that it is the copy or move assignment operator.
+ if (Op == OverloadedOperatorKind::OO_Equal)
+ return true;
+ if (LangOpts.CPlusPlus20 &&
+ (Op == OverloadedOperatorKind::OO_EqualEqual ||
+ Op == OverloadedOperatorKind::OO_ExclaimEqual ||
+ Op == OverloadedOperatorKind::OO_Less ||
+ Op == OverloadedOperatorKind::OO_LessEqual ||
+ Op == OverloadedOperatorKind::OO_Greater ||
+ Op == OverloadedOperatorKind::OO_GreaterEqual ||
+ Op == OverloadedOperatorKind::OO_Spaceship))
+ return true;
+ }
+ return false;
+ };
+
+ Results.EnterNewScope();
+ if (ShouldAddDefault())
+ Results.AddResult("default");
+ // FIXME(liuhui): Ideally, we should only provide `delete` completion for the
+ // first function declaration.
+ Results.AddResult("delete");
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
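
The contexts in which this fires look roughly like the sketch below (illustrative type): after the `=` in a special-member declaration, `delete` is always offered, and `default` only where the heuristic above considers it meaningful (destructors, likely default/copy/move constructors, operator=, and C++20 comparison operators):

  struct Widget {
    Widget(const Widget &) = default;                 // offers: default, delete
    Widget &operator=(Widget &&) = delete;            // offers: default, delete
    bool operator==(const Widget &) const = default;  // C++20: default offered too
  };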
+
/// Macro that optionally prepends an "@" to the string literal passed in via
/// Keyword, depending on whether NeedAt is true or false.
#define OBJC_AT_KEYWORD_NAME(NeedAt, Keyword) ((NeedAt) ? "@" Keyword : Keyword)
@@ -6063,22 +6593,24 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
Attributes |= NewFlag;
// Check for collisions with "readonly".
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_readwrite))
return true;
// Check for more than one of { assign, copy, retain, strong, weak }.
unsigned AssignCopyRetMask =
Attributes &
- (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_weak);
- if (AssignCopyRetMask && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak)
+ (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_copy | ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong | ObjCPropertyAttribute::kind_weak);
+ if (AssignCopyRetMask &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_assign &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_unsafe_unretained &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_copy &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_retain &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_strong &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_weak)
return true;
return false;
@@ -6094,32 +6626,41 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_readonly))
Results.AddResult(CodeCompletionResult("readonly"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_assign))
Results.AddResult(CodeCompletionResult("assign"));
if (!ObjCPropertyFlagConflicts(Attributes,
- ObjCDeclSpec::DQ_PR_unsafe_unretained))
+ ObjCPropertyAttribute::kind_unsafe_unretained))
Results.AddResult(CodeCompletionResult("unsafe_unretained"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_readwrite))
Results.AddResult(CodeCompletionResult("readwrite"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_retain))
Results.AddResult(CodeCompletionResult("retain"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_strong))
Results.AddResult(CodeCompletionResult("strong"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCPropertyAttribute::kind_copy))
Results.AddResult(CodeCompletionResult("copy"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_nonatomic))
Results.AddResult(CodeCompletionResult("nonatomic"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_atomic))
Results.AddResult(CodeCompletionResult("atomic"));
// Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
if (getLangOpts().ObjCWeak || getLangOpts().getGC() != LangOptions::NonGC)
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_weak))
Results.AddResult(CodeCompletionResult("weak"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_setter)) {
CodeCompletionBuilder Setter(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Setter.AddTypedTextChunk("setter");
@@ -6127,7 +6668,8 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Setter.AddPlaceholderChunk("method");
Results.AddResult(CodeCompletionResult(Setter.TakeString()));
}
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_getter)) {
CodeCompletionBuilder Getter(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Getter.AddTypedTextChunk("getter");
@@ -6135,7 +6677,8 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Getter.AddPlaceholderChunk("method");
Results.AddResult(CodeCompletionResult(Getter.TakeString()));
}
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nullability)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_nullability)) {
Results.AddResult(CodeCompletionResult("nonnull"));
Results.AddResult(CodeCompletionResult("nullable"));
Results.AddResult(CodeCompletionResult("null_unspecified"));
@@ -7602,7 +8145,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
} Key(Allocator, PropName->getName());
// The uppercased name of the property name.
- std::string UpperKey = PropName->getName();
+ std::string UpperKey = std::string(PropName->getName());
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
@@ -7660,8 +8203,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
- Builder.AddTypedTextChunk(":");
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName() + ":"));
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
Builder);
Builder.AddTextChunk(Key);
@@ -8249,39 +8792,43 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Selector Sel = Method->getSelector();
- // Add the first part of the selector to the pattern.
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
-
- // Add parameters to the pattern.
- unsigned I = 0;
- for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
- P != PEnd; (void)++P, ++I) {
- // Add the part of the selector name.
- if (I == 0)
- Builder.AddTypedTextChunk(":");
- else if (I < Sel.getNumArgs()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
- } else
- break;
-
- // Add the parameter type.
- QualType ParamType;
- if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- ParamType = (*P)->getType();
- else
- ParamType = (*P)->getOriginalType();
- ParamType = ParamType.substObjCTypeArgs(
- Context, {}, ObjCSubstitutionContext::Parameter);
- AttributedType::stripOuterNullability(ParamType);
- AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(), Context,
- Policy, Builder);
+ if (Sel.isUnarySelector()) {
+ // Unary selectors have no arguments.
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
+ } else {
+ // Add all parameters to the pattern.
+ unsigned I = 0;
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++I) {
+ // Add the part of the selector name.
+ if (I == 0)
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ else if (I < Sel.getNumArgs()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ } else
+ break;
- if (IdentifierInfo *Id = (*P)->getIdentifier())
- Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
+ // Add the parameter type.
+ QualType ParamType;
+ if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ ParamType = (*P)->getType();
+ else
+ ParamType = (*P)->getOriginalType();
+ ParamType = ParamType.substObjCTypeArgs(
+ Context, {}, ObjCSubstitutionContext::Parameter);
+ AttributedType::stripOuterNullability(ParamType);
+ AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(),
+ Context, Policy, Builder);
+
+ if (IdentifierInfo *Id = (*P)->getIdentifier())
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(Id->getName()));
+ }
}
if (Method->isVariadic()) {
@@ -8723,7 +9270,16 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
if (++Count == 2500) // If we happen to hit a huge directory,
break; // bail out early so we're not too slow.
StringRef Filename = llvm::sys::path::filename(It->path());
- switch (It->type()) {
+
+ // To know whether a symlink should be treated as a file or a directory, we
+ // have to stat it. This should be cheap enough as there shouldn't be many
+ // symlinks.
+ llvm::sys::fs::file_type Type = It->type();
+ if (Type == llvm::sys::fs::file_type::symlink_file) {
+ if (auto FileStatus = FS.status(It->path()))
+ Type = FileStatus->getType();
+ }
+ switch (Type) {
case llvm::sys::fs::file_type::directory_file:
// All entries in a framework directory must have a ".framework" suffix,
// but the suffix does not appear in the source code's include/import.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index 290e4cbff4fd..ddd95faebe99 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -28,21 +28,47 @@
using namespace clang;
using namespace sema;
-bool
-Sema::CheckConstraintExpression(Expr *ConstraintExpression, Token NextToken,
- bool *PossibleNonPrimary,
- bool IsTrailingRequiresClause) {
+namespace {
+class LogicalBinOp {
+ OverloadedOperatorKind Op = OO_None;
+ const Expr *LHS = nullptr;
+ const Expr *RHS = nullptr;
+
+public:
+ LogicalBinOp(const Expr *E) {
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ Op = BinaryOperator::getOverloadedOperator(BO->getOpcode());
+ LHS = BO->getLHS();
+ RHS = BO->getRHS();
+ } else if (auto *OO = dyn_cast<CXXOperatorCallExpr>(E)) {
+ Op = OO->getOperator();
+ LHS = OO->getArg(0);
+ RHS = OO->getArg(1);
+ }
+ }
+
+ bool isAnd() const { return Op == OO_AmpAmp; }
+ bool isOr() const { return Op == OO_PipePipe; }
+ explicit operator bool() const { return isAnd() || isOr(); }
+
+ const Expr *getLHS() const { return LHS; }
+ const Expr *getRHS() const { return RHS; }
+};
+}
+
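
The helper above lets the constraint-handling code below treat a built-in `&&`/`||` and an overloaded `operator&&`/`operator||` call uniformly when decomposing a constraint. For a conjunction/disjunction such as the following (illustrative), CheckConstraintExpression and friends recurse into each operand separately:

  #include <concepts>

  template <class T>
  concept SmallIntegral =
      std::integral<T> && (sizeof(T) <= 4 || std::same_as<T, long>);

  static_assert(SmallIntegral<int>);
  static_assert(!SmallIntegral<double>);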
+bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
+ Token NextToken, bool *PossibleNonPrimary,
+ bool IsTrailingRequiresClause) {
// C++2a [temp.constr.atomic]p1
// ..E shall be a constant expression of type bool.
ConstraintExpression = ConstraintExpression->IgnoreParenImpCasts();
- if (auto *BinOp = dyn_cast<BinaryOperator>(ConstraintExpression)) {
- if (BinOp->getOpcode() == BO_LAnd || BinOp->getOpcode() == BO_LOr)
- return CheckConstraintExpression(BinOp->getLHS(), NextToken,
- PossibleNonPrimary) &&
- CheckConstraintExpression(BinOp->getRHS(), NextToken,
- PossibleNonPrimary);
+ if (LogicalBinOp BO = ConstraintExpression) {
+ return CheckConstraintExpression(BO.getLHS(), NextToken,
+ PossibleNonPrimary) &&
+ CheckConstraintExpression(BO.getRHS(), NextToken,
+ PossibleNonPrimary);
} else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpression))
return CheckConstraintExpression(C->getSubExpr(), NextToken,
PossibleNonPrimary);
@@ -60,7 +86,7 @@ Sema::CheckConstraintExpression(Expr *ConstraintExpression, Token NextToken,
(NextToken.is(tok::l_paren) &&
(IsTrailingRequiresClause ||
(Type->isDependentType() &&
- IsDependentFunctionNameExpr(ConstraintExpression)) ||
+ isa<UnresolvedLookupExpr>(ConstraintExpression)) ||
Type->isFunctionType() ||
Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
// We have the following case:
@@ -99,39 +125,37 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
AtomicEvaluator &&Evaluator) {
ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
- if (auto *BO = dyn_cast<BinaryOperator>(ConstraintExpr)) {
- if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
- if (calculateConstraintSatisfaction(S, BO->getLHS(), Satisfaction,
- Evaluator))
- return true;
+ if (LogicalBinOp BO = ConstraintExpr) {
+ if (calculateConstraintSatisfaction(S, BO.getLHS(), Satisfaction,
+ Evaluator))
+ return true;
- bool IsLHSSatisfied = Satisfaction.IsSatisfied;
+ bool IsLHSSatisfied = Satisfaction.IsSatisfied;
- if (BO->getOpcode() == BO_LOr && IsLHSSatisfied)
- // [temp.constr.op] p3
- // A disjunction is a constraint taking two operands. To determine if
- // a disjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is satisfied, the disjunction is satisfied.
- // Otherwise, the disjunction is satisfied if and only if the second
- // operand is satisfied.
- return false;
+ if (BO.isOr() && IsLHSSatisfied)
+ // [temp.constr.op] p3
+ // A disjunction is a constraint taking two operands. To determine if
+ // a disjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is satisfied, the disjunction is satisfied.
+ // Otherwise, the disjunction is satisfied if and only if the second
+ // operand is satisfied.
+ return false;
- if (BO->getOpcode() == BO_LAnd && !IsLHSSatisfied)
- // [temp.constr.op] p2
- // A conjunction is a constraint taking two operands. To determine if
- // a conjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is not satisfied, the conjunction is not
- // satisfied. Otherwise, the conjunction is satisfied if and only if
- // the second operand is satisfied.
- return false;
+ if (BO.isAnd() && !IsLHSSatisfied)
+ // [temp.constr.op] p2
+ // A conjunction is a constraint taking two operands. To determine if
+ // a conjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is not satisfied, the conjunction is not
+ // satisfied. Otherwise, the conjunction is satisfied if and only if
+ // the second operand is satisfied.
+ return false;
- return calculateConstraintSatisfaction(S, BO->getRHS(), Satisfaction,
- std::forward<AtomicEvaluator>(Evaluator));
- }
- }
- else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr))
+ return calculateConstraintSatisfaction(
+ S, BO.getRHS(), Satisfaction, std::forward<AtomicEvaluator>(Evaluator));
+ } else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
return calculateConstraintSatisfaction(S, C->getSubExpr(), Satisfaction,
std::forward<AtomicEvaluator>(Evaluator));
+ }
// An atomic constraint expression
ExprResult SubstitutedAtomicExpr = Evaluator(ConstraintExpr);
@@ -725,19 +749,16 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
// - The normal form of an expression (E) is the normal form of E.
// [...]
E = E->IgnoreParenImpCasts();
- if (auto *BO = dyn_cast<const BinaryOperator>(E)) {
- if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
- auto LHS = fromConstraintExpr(S, D, BO->getLHS());
- if (!LHS)
- return None;
- auto RHS = fromConstraintExpr(S, D, BO->getRHS());
- if (!RHS)
- return None;
+ if (LogicalBinOp BO = E) {
+ auto LHS = fromConstraintExpr(S, D, BO.getLHS());
+ if (!LHS)
+ return None;
+ auto RHS = fromConstraintExpr(S, D, BO.getRHS());
+ if (!RHS)
+ return None;
- return NormalizedConstraint(
- S.Context, std::move(*LHS), std::move(*RHS),
- BO->getOpcode() == BO_LAnd ? CCK_Conjunction : CCK_Disjunction);
- }
+ return NormalizedConstraint(S.Context, std::move(*LHS), std::move(*RHS),
+ BO.isAnd() ? CCK_Conjunction : CCK_Disjunction);
} else if (auto *CSE = dyn_cast<const ConceptSpecializationExpr>(E)) {
const NormalizedConstraint *SubNF;
{
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index 6dc9e342beb9..992cccac6405 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -24,6 +24,7 @@
#include "clang/Sema/Overload.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/SmallSet.h"
using namespace clang;
using namespace sema;
@@ -390,7 +391,13 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
return nullptr;
Expr *JustAddress = AddressExpr.get();
- // FIXME: Check that the type of AddressExpr is void*
+
+ // Check that the type of AddressExpr is void*
+ if (!JustAddress->getType().getTypePtr()->isVoidPointerType())
+ S.Diag(cast<CallExpr>(JustAddress)->getCalleeDecl()->getLocation(),
+ diag::warn_coroutine_handle_address_invalid_return_type)
+ << JustAddress->getType();
+
return buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_resume,
JustAddress);
}
@@ -502,8 +509,9 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
return nullptr;
auto *ScopeInfo = getCurFunction();
- // Build a list of arguments, based on the coroutine functions arguments,
- // that will be passed to the promise type's constructor.
+
+ // Build a list of arguments, based on the coroutine function's arguments,
+ // that, if present, will be passed to the promise type's constructor.
llvm::SmallVector<Expr *, 4> CtorArgExprs;
// Add implicit object parameter.
@@ -519,6 +527,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
}
}
+ // Add the coroutine function's parameters.
auto &Moves = ScopeInfo->CoroutineParameterMoves;
for (auto *PD : FD->parameters()) {
if (PD->getType()->isDependentType())
@@ -540,28 +549,33 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
CtorArgExprs.push_back(RefExpr.get());
}
- // Create an initialization sequence for the promise type using the
- // constructor arguments, wrapped in a parenthesized list expression.
- Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
- CtorArgExprs, FD->getLocation());
- InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
- InitializationKind Kind = InitializationKind::CreateForInit(
- VD->getLocation(), /*DirectInit=*/true, PLE);
- InitializationSequence InitSeq(*this, Entity, Kind, CtorArgExprs,
- /*TopLevelOfInitList=*/false,
- /*TreatUnavailableAsInvalid=*/false);
-
- // Attempt to initialize the promise type with the arguments.
- // If that fails, fall back to the promise type's default constructor.
- if (InitSeq) {
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
- if (Result.isInvalid()) {
- VD->setInvalidDecl();
- } else if (Result.get()) {
- VD->setInit(MaybeCreateExprWithCleanups(Result.get()));
- VD->setInitStyle(VarDecl::CallInit);
- CheckCompleteVariableDeclaration(VD);
- }
+ // If we have a non-zero number of constructor arguments, try to use them.
+ // Otherwise, fall back to the promise type's default constructor.
+ if (!CtorArgExprs.empty()) {
+ // Create an initialization sequence for the promise type using the
+ // constructor arguments, wrapped in a parenthesized list expression.
+ Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
+ CtorArgExprs, FD->getLocation());
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ VD->getLocation(), /*DirectInit=*/true, PLE);
+ InitializationSequence InitSeq(*this, Entity, Kind, CtorArgExprs,
+ /*TopLevelOfInitList=*/false,
+ /*TreatUnavailableAsInvalid=*/false);
+
+ // Attempt to initialize the promise type with the arguments.
+ // If that fails, fall back to the promise type's default constructor.
+ if (InitSeq) {
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
+ if (Result.isInvalid()) {
+ VD->setInvalidDecl();
+ } else if (Result.get()) {
+ VD->setInit(MaybeCreateExprWithCleanups(Result.get()));
+ VD->setInitStyle(VarDecl::CallInit);
+ CheckCompleteVariableDeclaration(VD);
+ }
+ } else
+ ActOnUninitializedDecl(VD);
} else
ActOnUninitializedDecl(VD);
@@ -597,6 +611,80 @@ static FunctionScopeInfo *checkCoroutineContext(Sema &S, SourceLocation Loc,
return ScopeInfo;
}
+/// Recursively check \p E and all its children, collecting into
+/// \p ThrowingDecls every call target (including constructors) that is not
+/// declared noexcept, and any returned value's destructor that can throw.
+static void checkNoThrow(Sema &S, const Stmt *E,
+ llvm::SmallPtrSetImpl<const Decl *> &ThrowingDecls) {
+ auto checkDeclNoexcept = [&](const Decl *D, bool IsDtor = false) {
+ // In the case of dtor, the call to dtor is implicit and hence we should
+ // pass nullptr to canCalleeThrow.
+ if (Sema::canCalleeThrow(S, IsDtor ? nullptr : cast<Expr>(E), D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // co_await promise.final_suspend() could end up calling
+ // __builtin_coro_resume for symmetric transfer if await_suspend()
+        // returns a handle. In that case, even though __builtin_coro_resume
+        // is not declared noexcept and may throw, it does not throw _into_
+        // the coroutine that just suspended; rather, it throws back out from
+        // whoever called coroutine_handle::resume(), so we treat it as
+        // logically non-throwing.
+ if (FD->getBuiltinID() == Builtin::BI__builtin_coro_resume)
+ return;
+ }
+ if (ThrowingDecls.empty()) {
+ // First time seeing an error, emit the error message.
+ S.Diag(cast<FunctionDecl>(S.CurContext)->getLocation(),
+ diag::err_coroutine_promise_final_suspend_requires_nothrow);
+ }
+ ThrowingDecls.insert(D);
+ }
+ };
+ auto SC = E->getStmtClass();
+ if (SC == Expr::CXXConstructExprClass) {
+ auto const *Ctor = cast<CXXConstructExpr>(E)->getConstructor();
+ checkDeclNoexcept(Ctor);
+    // Also check the destructor of the type being constructed.
+ checkDeclNoexcept(Ctor->getParent()->getDestructor(), true);
+ } else if (SC == Expr::CallExprClass || SC == Expr::CXXMemberCallExprClass ||
+ SC == Expr::CXXOperatorCallExprClass) {
+ if (!cast<CallExpr>(E)->isTypeDependent()) {
+ checkDeclNoexcept(cast<CallExpr>(E)->getCalleeDecl());
+ auto ReturnType = cast<CallExpr>(E)->getCallReturnType(S.getASTContext());
+ // Check the destructor of the call return type, if any.
+ if (ReturnType.isDestructedType() ==
+ QualType::DestructionKind::DK_cxx_destructor) {
+ const auto *T =
+ cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
+ checkDeclNoexcept(
+ dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(), true);
+ }
+ }
+ }
+ for (const auto *Child : E->children()) {
+ if (!Child)
+ continue;
+ checkNoThrow(S, Child, ThrowingDecls);
+ }
+}
+
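+/// Check that co_await promise.final_suspend() cannot throw; returns true if
+/// no problem is found. Illustrative example of code this rejects
+/// (hypothetical user code, not part of this change):
+///   struct promise_type {
+///     auto final_suspend() { return Awaiter{}; } // not noexcept -> diagnosed
+///     ...
+///   };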
+bool Sema::checkFinalSuspendNoThrow(const Stmt *FinalSuspend) {
+ llvm::SmallPtrSet<const Decl *, 4> ThrowingDecls;
+  // We first collect all declarations that should not throw but are not
+  // declared noexcept. Collecting them in a set avoids emitting the same note
+  // multiple times for one declaration; sorting them by source location
+  // before printing gives the notes a deterministic order.
+ checkNoThrow(*this, FinalSuspend, ThrowingDecls);
+ auto SortedDecls = llvm::SmallVector<const Decl *, 4>{ThrowingDecls.begin(),
+ ThrowingDecls.end()};
+ sort(SortedDecls, [](const Decl *A, const Decl *B) {
+ return A->getEndLoc() < B->getEndLoc();
+ });
+ for (const auto *D : SortedDecls) {
+ Diag(D->getEndLoc(), diag::note_coroutine_function_declare_noexcept);
+ }
+ return ThrowingDecls.empty();
+}
+
bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
StringRef Keyword) {
if (!checkCoroutineContext(*this, KWLoc, Keyword))
@@ -639,7 +727,7 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
return true;
StmtResult FinalSuspend = buildSuspends("final_suspend");
- if (FinalSuspend.isInvalid())
+ if (FinalSuspend.isInvalid() || !checkFinalSuspendNoThrow(FinalSuspend.get()))
return true;
ScopeInfo->setCoroutineSuspends(InitSuspend.get(), FinalSuspend.get());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 64146f4a912f..3e2b61ae8cdf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -22,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/StmtCXX.h"
@@ -48,6 +48,7 @@
#include <algorithm>
#include <cstring>
#include <functional>
+#include <unordered_map>
using namespace clang;
using namespace sema;
@@ -137,6 +138,7 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
@@ -748,7 +750,10 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
- else if (isDependentScopeSpecifier(*SS)) {
+ else if (SS->isValid() && SS->getScopeRep()->containsErrors()) {
+ SuggestedType =
+ ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
+ } else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
@@ -925,7 +930,7 @@ Corrected:
return NameClassification::NonType(D);
}
- if (getLangOpts().CPlusPlus2a && SS.isEmpty() && NextToken.is(tok::less)) {
+ if (getLangOpts().CPlusPlus20 && SS.isEmpty() && NextToken.is(tok::less)) {
// In C++20 onwards, this could be an ADL-only call to a function
// template, and we're required to assume that this is a template name.
//
@@ -1068,7 +1073,7 @@ Corrected:
Result, /*AllowFunctionTemplates=*/true,
/*AllowDependent=*/false,
/*AllowNonTemplateFunctions*/ SS.isEmpty() &&
- getLangOpts().CPlusPlus2a))) {
+ getLangOpts().CPlusPlus20))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
// an operator-function-id or a literal- operator-id refers to a set of
@@ -1255,47 +1260,8 @@ Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
return TemplateNameKindForDiagnostics::DependentTemplate;
}
-// Determines the context to return to after temporarily entering a
-// context. This depends in an unnecessarily complicated way on the
-// exact ordering of callbacks from the parser.
-DeclContext *Sema::getContainingDC(DeclContext *DC) {
-
- // Functions defined inline within classes aren't parsed until we've
- // finished parsing the top-level class, so the top-level class is
- // the context we'll need to return to.
- // A Lambda call operator whose parent is a class must not be treated
- // as an inline member function. A Lambda can be used legally
- // either as an in-class member initializer or a default argument. These
- // are parsed once the class has been marked complete and so the containing
- // context would be the nested class (when the lambda is defined in one);
- // If the class is not complete, then the lambda is being used in an
- // ill-formed fashion (such as to specify the width of a bit-field, or
- // in an array-bound) - in which case we still want to return the
- // lexically containing DC (which could be a nested class).
- if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) {
- DC = DC->getLexicalParent();
-
- // A function not defined within a class will always return to its
- // lexical context.
- if (!isa<CXXRecordDecl>(DC))
- return DC;
-
- // A C++ inline method/friend is parsed *after* the topmost class
- // it was declared in is fully parsed ("complete"); the topmost
- // class is the context we need to return to.
- while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
- DC = RD;
-
- // Return the declaration context of the topmost class the inline method is
- // declared in.
- return DC;
- }
-
- return DC->getLexicalParent();
-}
-
void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
- assert(getContainingDC(DC) == CurContext &&
+ assert(DC->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = DC;
S->setEntity(DC);
@@ -1304,7 +1270,7 @@ void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
void Sema::PopDeclContext() {
assert(CurContext && "DeclContext imbalance!");
- CurContext = getContainingDC(CurContext);
+ CurContext = CurContext->getLexicalParent();
assert(CurContext && "Popped translation unit!");
}
@@ -1356,6 +1322,12 @@ void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
CurContext = DC;
S->setEntity(DC);
+
+ if (S->getParent()->isTemplateParamScope()) {
+ // Also set the corresponding entities for all immediately-enclosing
+ // template parameter scopes.
+ EnterTemplatedContext(S->getParent(), DC);
+ }
}
void Sema::ExitDeclaratorContext(Scope *S) {
@@ -1371,6 +1343,49 @@ void Sema::ExitDeclaratorContext(Scope *S) {
// disappear.
}
+void Sema::EnterTemplatedContext(Scope *S, DeclContext *DC) {
+ assert(S->isTemplateParamScope() &&
+ "expected to be initializing a template parameter scope");
+
+ // C++20 [temp.local]p7:
+ // In the definition of a member of a class template that appears outside
+ // of the class template definition, the name of a member of the class
+ // template hides the name of a template-parameter of any enclosing class
+ // templates (but not a template-parameter of the member if the member is a
+ // class or function template).
+ // C++20 [temp.local]p9:
+ // In the definition of a class template or in the definition of a member
+ // of such a template that appears outside of the template definition, for
+ // each non-dependent base class (13.8.2.1), if the name of the base class
+ // or the name of a member of the base class is the same as the name of a
+ // template-parameter, the base class name or member name hides the
+ // template-parameter name (6.4.10).
+ //
+ // This means that a template parameter scope should be searched immediately
+ // after searching the DeclContext for which it is a template parameter
+ // scope. For example, for
+ // template<typename T> template<typename U> template<typename V>
+ // void N::A<T>::B<U>::f(...)
+ // we search V then B<U> (and base classes) then U then A<T> (and base
+ // classes) then T then N then ::.
+ unsigned ScopeDepth = getTemplateDepth(S);
+ for (; S && S->isTemplateParamScope(); S = S->getParent(), --ScopeDepth) {
+ DeclContext *SearchDCAfterScope = DC;
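+    // Find the enclosing context whose template parameters this scope
+    // declares; that determines where lookup continues after this scope.
+    // (Scope depths and TemplateParameterList depths are off by one, hence
+    // the +1 below.)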
+ for (; DC; DC = DC->getLookupParent()) {
+ if (const TemplateParameterList *TPL =
+ cast<Decl>(DC)->getDescribedTemplateParams()) {
+ unsigned DCDepth = TPL->getDepth() + 1;
+ if (DCDepth > ScopeDepth)
+ continue;
+ if (ScopeDepth == DCDepth)
+ SearchDCAfterScope = DC = DC->getLookupParent();
+ break;
+ }
+ }
+ S->setLookupEntity(SearchDCAfterScope);
+ }
+}
+
void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
// We assume that the caller has already called
// ActOnReenterTemplateScope so getTemplatedDecl() works.
@@ -2591,11 +2606,15 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
- NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid());
+ NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid(), UA->getGuidDecl());
else if (const auto *SLHA = dyn_cast<SpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeSpeculativeLoadHardeningAttr(D, *SLHA);
else if (const auto *SLHA = dyn_cast<NoSpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeNoSpeculativeLoadHardeningAttr(D, *SLHA);
+ else if (const auto *IMA = dyn_cast<WebAssemblyImportModuleAttr>(Attr))
+ NewAttr = S.mergeImportModuleAttr(D, *IMA);
+ else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
+ NewAttr = S.mergeImportNameAttr(D, *INA);
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2712,6 +2731,18 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
--E;
continue;
}
+ } else if (isa<LoaderUninitializedAttr>(NewAttribute)) {
+ // If there is a C definition followed by a redeclaration with this
+      // attribute, then there are two different definitions. In C++, prefer the
+ // standard diagnostics.
+ if (!S.getLangOpts().CPlusPlus) {
+ S.Diag(NewAttribute->getLocation(),
+ diag::err_loader_uninitialized_redeclaration);
+ S.Diag(Def->getLocation(), diag::note_previous_definition);
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ continue;
+ }
} else if (isa<SelectAnyAttr>(NewAttribute) &&
cast<VarDecl>(New)->isInline() &&
!cast<VarDecl>(New)->isInlineSpecified()) {
@@ -2721,6 +2752,11 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
// honored it.
++I;
continue;
+ } else if (isa<OMPDeclareVariantAttr>(NewAttribute)) {
+      // We allow OMP[Begin]DeclareVariantAttr to be added to declarations
+      // after definitions.
+ ++I;
+ continue;
}
S.Diag(NewAttribute->getLocation(),
@@ -2741,23 +2777,21 @@ static void diagnoseMissingConstinit(Sema &S, const VarDecl *InitDecl,
// enough of the attribute list spelling information to extract that without
// heroics.
std::string SuitableSpelling;
- if (S.getLangOpts().CPlusPlus2a)
- SuitableSpelling =
- S.PP.getLastMacroWithSpelling(InsertLoc, {tok::kw_constinit});
+ if (S.getLangOpts().CPlusPlus20)
+ SuitableSpelling = std::string(
+ S.PP.getLastMacroWithSpelling(InsertLoc, {tok::kw_constinit}));
if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus11)
- SuitableSpelling = S.PP.getLastMacroWithSpelling(
- InsertLoc,
- {tok::l_square, tok::l_square, S.PP.getIdentifierInfo("clang"),
- tok::coloncolon,
- S.PP.getIdentifierInfo("require_constant_initialization"),
- tok::r_square, tok::r_square});
+ SuitableSpelling = std::string(S.PP.getLastMacroWithSpelling(
+ InsertLoc, {tok::l_square, tok::l_square,
+ S.PP.getIdentifierInfo("clang"), tok::coloncolon,
+ S.PP.getIdentifierInfo("require_constant_initialization"),
+ tok::r_square, tok::r_square}));
if (SuitableSpelling.empty())
- SuitableSpelling = S.PP.getLastMacroWithSpelling(
- InsertLoc,
- {tok::kw___attribute, tok::l_paren, tok::r_paren,
- S.PP.getIdentifierInfo("require_constant_initialization"),
- tok::r_paren, tok::r_paren});
- if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus2a)
+ SuitableSpelling = std::string(S.PP.getLastMacroWithSpelling(
+ InsertLoc, {tok::kw___attribute, tok::l_paren, tok::r_paren,
+ S.PP.getIdentifierInfo("require_constant_initialization"),
+ tok::r_paren, tok::r_paren}));
+ if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus20)
SuitableSpelling = "constinit";
if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus11)
SuitableSpelling = "[[clang::require_constant_initialization]]";
@@ -3889,11 +3923,11 @@ void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
if (!NewArray->isIncompleteArrayType() && !NewArray->isDependentType()) {
for (VarDecl *PrevVD = Old->getMostRecentDecl(); PrevVD;
PrevVD = PrevVD->getPreviousDecl()) {
- const ArrayType *PrevVDTy = Context.getAsArrayType(PrevVD->getType());
+ QualType PrevVDTy = PrevVD->getType();
if (PrevVDTy->isIncompleteArrayType() || PrevVDTy->isDependentType())
continue;
- if (!Context.hasSameType(NewArray, PrevVDTy))
+ if (!Context.hasSameType(New->getType(), PrevVDTy))
return diagnoseVarDeclTypeMismatch(*this, New, PrevVD);
}
}
@@ -4349,6 +4383,87 @@ void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
}
}
+namespace {
+struct NonCLikeKind {
+ enum {
+ None,
+ BaseClass,
+ DefaultMemberInit,
+ Lambda,
+ Friend,
+ OtherMember,
+ Invalid,
+ } Kind = None;
+ SourceRange Range;
+
+ explicit operator bool() { return Kind != None; }
+};
+}
+
+/// Determine whether a class is C-like, according to the rules of C++
+/// [dcl.typedef] for anonymous classes with typedef names for linkage.
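+/// For example (illustrative), in `typedef struct { int x = 0; } X;` the
+/// default member initializer makes the unnamed struct non-C-like.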
+static NonCLikeKind getNonCLikeKindForAnonymousStruct(const CXXRecordDecl *RD) {
+ if (RD->isInvalidDecl())
+ return {NonCLikeKind::Invalid, {}};
+
+ // C++ [dcl.typedef]p9: [P1766R1]
+ // An unnamed class with a typedef name for linkage purposes shall not
+ //
+ // -- have any base classes
+ if (RD->getNumBases())
+ return {NonCLikeKind::BaseClass,
+ SourceRange(RD->bases_begin()->getBeginLoc(),
+ RD->bases_end()[-1].getEndLoc())};
+ bool Invalid = false;
+ for (Decl *D : RD->decls()) {
+ // Don't complain about things we already diagnosed.
+ if (D->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ // -- have any [...] default member initializers
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer()) {
+ auto *Init = FD->getInClassInitializer();
+ return {NonCLikeKind::DefaultMemberInit,
+ Init ? Init->getSourceRange() : D->getSourceRange()};
+ }
+ continue;
+ }
+
+ // FIXME: We don't allow friend declarations. This violates the wording of
+ // P1766, but not the intent.
+ if (isa<FriendDecl>(D))
+ return {NonCLikeKind::Friend, D->getSourceRange()};
+
+ // -- declare any members other than non-static data members, member
+ // enumerations, or member classes,
+ if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D) ||
+ isa<EnumDecl>(D))
+ continue;
+ auto *MemberRD = dyn_cast<CXXRecordDecl>(D);
+ if (!MemberRD) {
+ if (D->isImplicit())
+ continue;
+ return {NonCLikeKind::OtherMember, D->getSourceRange()};
+ }
+
+ // -- contain a lambda-expression,
+ if (MemberRD->isLambda())
+ return {NonCLikeKind::Lambda, MemberRD->getSourceRange()};
+
+ // and all member classes shall also satisfy these requirements
+ // (recursively).
+ if (MemberRD->isThisDeclarationADefinition()) {
+ if (auto Kind = getNonCLikeKindForAnonymousStruct(MemberRD))
+ return Kind;
+ }
+ }
+
+ return {Invalid ? NonCLikeKind::Invalid : NonCLikeKind::None, {}};
+}
+
void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD) {
if (TagFromDeclSpec->isInvalidDecl())
@@ -4369,27 +4484,51 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
return;
}
- // If we've already computed linkage for the anonymous tag, then
- // adding a typedef name for the anonymous decl can change that
- // linkage, which might be a serious problem. Diagnose this as
- // unsupported and ignore the typedef name. TODO: we should
- // pursue this as a language defect and establish a formal rule
- // for how to handle it.
- if (TagFromDeclSpec->hasLinkageBeenComputed()) {
- Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);
+ // C++ [dcl.typedef]p9: [P1766R1, applied as DR]
+ // An unnamed class with a typedef name for linkage purposes shall [be
+ // C-like].
+ //
+ // FIXME: Also diagnose if we've already computed the linkage. That ideally
+ // shouldn't happen, but there are constructs that the language rule doesn't
+ // disallow for which we can't reasonably avoid computing linkage early.
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TagFromDeclSpec);
+ NonCLikeKind NonCLike = RD ? getNonCLikeKindForAnonymousStruct(RD)
+ : NonCLikeKind();
+ bool ChangesLinkage = TagFromDeclSpec->hasLinkageBeenComputed();
+ if (NonCLike || ChangesLinkage) {
+ if (NonCLike.Kind == NonCLikeKind::Invalid)
+ return;
+
+ unsigned DiagID = diag::ext_non_c_like_anon_struct_in_typedef;
+ if (ChangesLinkage) {
+ // If the linkage changes, we can't accept this as an extension.
+ if (NonCLike.Kind == NonCLikeKind::None)
+ DiagID = diag::err_typedef_changes_linkage;
+ else
+ DiagID = diag::err_non_c_like_anon_struct_in_typedef;
+ }
- SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
- tagLoc = getLocForEndOfToken(tagLoc);
+ SourceLocation FixitLoc =
+ getLocForEndOfToken(TagFromDeclSpec->getInnerLocStart());
+ llvm::SmallString<40> TextToInsert;
+ TextToInsert += ' ';
+ TextToInsert += NewTD->getIdentifier()->getName();
- llvm::SmallString<40> textToInsert;
- textToInsert += ' ';
- textToInsert += NewTD->getIdentifier()->getName();
- Diag(tagLoc, diag::note_typedef_changes_linkage)
- << FixItHint::CreateInsertion(tagLoc, textToInsert);
- return;
+ Diag(FixitLoc, DiagID)
+ << isa<TypeAliasDecl>(NewTD)
+ << FixItHint::CreateInsertion(FixitLoc, TextToInsert);
+ if (NonCLike.Kind != NonCLikeKind::None) {
+ Diag(NonCLike.Range.getBegin(), diag::note_non_c_like_anon_struct)
+ << NonCLike.Kind - 1 << NonCLike.Range;
+ }
+ Diag(NewTD->getLocation(), diag::note_typedef_for_linkage_here)
+ << NewTD << isa<TypeAliasDecl>(NewTD);
+
+ if (ChangesLinkage)
+ return;
}
- // Otherwise, set this is the anon-decl typedef for the tag.
+ // Otherwise, set this as the anon-decl typedef for the tag.
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
@@ -4920,6 +5059,10 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// define non-static data members. [Note: nested types and
// functions cannot be declared within an anonymous union. ]
for (auto *Mem : Record->decls()) {
+ // Ignore invalid declarations; we already diagnosed them.
+ if (Mem->isInvalidDecl())
+ continue;
+
if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
// C++ [class.union]p3:
// An anonymous union shall not have private or protected
@@ -5143,8 +5286,8 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
Chain.push_back(Anon);
RecordDecl *RecordDef = Record->getDefinition();
- if (RequireCompleteType(Anon->getLocation(), RecTy,
- diag::err_field_incomplete) ||
+ if (RequireCompleteSizedType(Anon->getLocation(), RecTy,
+ diag::err_field_incomplete_or_sizeless) ||
InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
AS_none, Chain)) {
Anon->setInvalidDecl();
@@ -6147,6 +6290,8 @@ bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
void Sema::deduceOpenCLAddressSpace(ValueDecl *Decl) {
if (Decl->getType().hasAddressSpace())
return;
+ if (Decl->getType()->isDependentType())
+ return;
if (VarDecl *Var = dyn_cast<VarDecl>(Decl)) {
QualType Type = Var->getType();
if (Type->isSamplerT() || Type->isVoidType())
@@ -6760,28 +6905,49 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (SC == SC_Static && CurContext->isRecord()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
- if (RD->isLocalClass())
+ // Walk up the enclosing DeclContexts to check for any that are
+ // incompatible with static data members.
+ const DeclContext *FunctionOrMethod = nullptr;
+ const CXXRecordDecl *AnonStruct = nullptr;
+ for (DeclContext *Ctxt = DC; Ctxt; Ctxt = Ctxt->getParent()) {
+ if (Ctxt->isFunctionOrMethod()) {
+ FunctionOrMethod = Ctxt;
+ break;
+ }
+ const CXXRecordDecl *ParentDecl = dyn_cast<CXXRecordDecl>(Ctxt);
+ if (ParentDecl && !ParentDecl->getDeclName()) {
+ AnonStruct = ParentDecl;
+ break;
+ }
+ }
+ if (FunctionOrMethod) {
+ // C++ [class.static.data]p5: A local class shall not have static data
+ // members.
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
- << Name << RD->getDeclName();
-
- // C++98 [class.union]p1: If a union contains a static data member,
- // the program is ill-formed. C++11 drops this restriction.
- if (RD->isUnion())
+ << Name << RD->getDeclName() << RD->getTagKind();
+ } else if (AnonStruct) {
+ // C++ [class.static.data]p4: Unnamed classes and classes contained
+ // directly or indirectly within unnamed classes shall not contain
+ // static data members.
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_anon_struct)
+ << Name << AnonStruct->getTagKind();
+ Invalid = true;
+ } else if (RD->isUnion()) {
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_static_data_member_in_union
: diag::ext_static_data_member_in_union) << Name;
- // We conservatively disallow static data members in anonymous structs.
- else if (!RD->getDeclName())
- Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_anon_struct)
- << Name << RD->isUnion();
+ }
}
}
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
+ bool InvalidScope = false;
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
@@ -6789,7 +6955,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
- /*never a friend*/ false, IsMemberSpecialization, Invalid);
+ /*never a friend*/ false, IsMemberSpecialization, InvalidScope);
+ Invalid |= InvalidScope;
if (TemplateParams) {
if (!TemplateParams->size() &&
@@ -6925,7 +7092,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ getLangOpts().SYCLIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
// error should be ignored.
@@ -6953,6 +7121,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
case CSK_constexpr:
NewVD->setConstexpr(true);
+ MaybeAddCUDAConstantAttr(NewVD);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
@@ -7026,13 +7195,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ getLangOpts().SYCLIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
(getLangOpts().OpenMPIsDevice &&
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(NewVD))))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
+
+ if (EmitTLSUnsupportedError &&
+ (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)))
+ targetDiag(D.getIdentifierLoc(), diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
if (SC == SC_None && S->getFnParent() != nullptr &&
@@ -7687,6 +7861,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
+ !T->isDependentType() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
(getLangOpts().OpenCLVersion == 200 ||
@@ -7829,6 +8004,12 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
+ if (!NewVD->hasLocalStorage() && T->isSizelessType()) {
+ Diag(NewVD->getLocation(), diag::err_sizeless_nonlocal) << T;
+ NewVD->setInvalidDecl();
+ return;
+ }
+
if (isVM && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_vm);
NewVD->setInvalidDecl();
@@ -7916,30 +8097,8 @@ struct FindOverriddenMethod {
return false;
}
};
-
-enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
} // end anonymous namespace
-/// Report an error regarding overriding, along with any relevant
-/// overridden methods.
-///
-/// \param DiagID the primary error to report.
-/// \param MD the overriding method.
-/// \param OEK which overrides to include as notes.
-static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
- OverrideErrorKind OEK = OEK_All) {
- S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
- for (const CXXMethodDecl *O : MD->overridden_methods()) {
- // This check (& the OEK parameter) could be replaced by a predicate, but
- // without lambdas that would be overkill. This is still nicer than writing
- // out the diag loop 3 times.
- if ((OEK == OEK_All) ||
- (OEK == OEK_NonDeleted && !O->isDeleted()) ||
- (OEK == OEK_Deleted && O->isDeleted()))
- S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
- }
-}
-
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
@@ -7948,8 +8107,6 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
FindOverriddenMethod FOM;
FOM.Method = MD;
FOM.S = this;
- bool hasDeletedOverridenMethods = false;
- bool hasNonDeletedOverridenMethods = false;
bool AddedAny = false;
if (DC->lookupInBases(FOM, Paths)) {
for (auto *I : Paths.found_decls()) {
@@ -7959,21 +8116,12 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
!CheckOverridingFunctionAttributes(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
- hasDeletedOverridenMethods |= OldMD->isDeleted();
- hasNonDeletedOverridenMethods |= !OldMD->isDeleted();
AddedAny = true;
}
}
}
}
- if (hasDeletedOverridenMethods && !MD->isDeleted()) {
- ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
- }
- if (hasNonDeletedOverridenMethods && MD->isDeleted()) {
- ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
- }
-
return AddedAny;
}
@@ -8666,6 +8814,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
QualType R = TInfo->getType();
assert(R->isFunctionType());
+ if (R.getCanonicalType()->castAs<FunctionType>()->getCmseNSCallAttr())
+ Diag(D.getIdentifierLoc(), diag::err_function_decl_cmse_ns_call);
+
SmallVector<TemplateParameterList *, 4> TemplateParamLists;
for (TemplateParameterList *TPL : TemplateParamListsRef)
TemplateParamLists.push_back(TPL);
@@ -8933,9 +9084,24 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// C++11 [dcl.constexpr]p3: functions declared constexpr are required to
// be either constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
- if (isa<CXXDestructorDecl>(NewFD) && !getLangOpts().CPlusPlus2a) {
+ if (isa<CXXDestructorDecl>(NewFD) &&
+ (!getLangOpts().CPlusPlus20 || ConstexprKind == CSK_consteval)) {
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor)
<< ConstexprKind;
+ NewFD->setConstexprKind(getLangOpts().CPlusPlus20 ? CSK_unspecified : CSK_constexpr);
+ }
+ // C++20 [dcl.constexpr]p2: An allocation function, or a
+ // deallocation function shall not be declared with the consteval
+ // specifier.
+ if (ConstexprKind == CSK_consteval &&
+ (NewFD->getOverloadedOperator() == OO_New ||
+ NewFD->getOverloadedOperator() == OO_Array_New ||
+ NewFD->getOverloadedOperator() == OO_Delete ||
+ NewFD->getOverloadedOperator() == OO_Array_Delete)) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_invalid_consteval_decl_kind)
+ << NewFD;
+ NewFD->setConstexprKind(CSK_constexpr);
}
}
@@ -8964,8 +9130,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// If a function is defined as defaulted or deleted, mark it as such now.
- // FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
- // definition kind to FDK_Definition.
+ // We'll do the relevant checks on defaulted / deleted functions later.
switch (D.getFunctionDefinitionKind()) {
case FDK_Declaration:
case FDK_Definition:
@@ -9827,6 +9992,18 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
return false;
}
+// Provide a white-list of attributes that are allowed to be combined with
+// multiversion functions.
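+// For example (illustrative), __attribute__((used)) is accepted on a
+// target-based multiversioned function, while unrelated attributes are still
+// treated as incompatible.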
+static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
+ MultiVersionKind MVType) {
+ switch (Kind) {
+ default:
+ return false;
+ case attr::Used:
+ return MVType == MultiVersionKind::Target;
+ }
+}
+
static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
MultiVersionKind MVType) {
for (const Attr *A : FD->attrs()) {
@@ -9842,7 +10019,9 @@ static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
return true;
break;
default:
- return true;
+ if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
+ return true;
+ break;
}
}
return false;
@@ -10581,9 +10760,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
return Redeclaration;
}
}
- } else if (CXXConversionDecl *Conversion
- = dyn_cast<CXXConversionDecl>(NewFD)) {
- ActOnConversionDeclarator(Conversion);
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(NewFD)) {
if (auto *TD = Guide->getDescribedFunctionTemplate())
CheckDeductionGuideTemplate(TD);
@@ -10600,12 +10776,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (!Method->isFunctionTemplateSpecialization() &&
!Method->getDescribedFunctionTemplate() &&
Method->isCanonicalDecl()) {
- if (AddOverriddenMethods(Method->getParent(), Method)) {
- // If the function was marked as "static", we have a problem.
- if (NewFD->getStorageClass() == SC_Static) {
- ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
- }
- }
+ AddOverriddenMethods(Method->getParent(), Method);
}
if (Method->isVirtual() && NewFD->getTrailingRequiresClause())
// C++2a [class.virtual]p6
@@ -10617,6 +10788,9 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
checkThisInStaticMemberFunctionType(Method);
}
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
+ ActOnConversionDeclarator(Conversion);
+
// Extra checking for C++ overloaded operators (C++ [over.oper]).
if (NewFD->isOverloadedOperator() &&
CheckOverloadedOperatorDeclaration(NewFD)) {
@@ -11382,6 +11556,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init) {
+ assert(!Init || !Init->containsErrors());
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
@@ -11415,6 +11590,9 @@ bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
void Sema::checkNonTrivialCUnionInInitializer(const Expr *Init,
SourceLocation Loc) {
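+  // Strip an enclosing ExprWithCleanups so the checks below see the
+  // underlying initializer.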
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
+ Init = EWC->getSubExpr();
+
if (auto *CE = dyn_cast<ConstantExpr>(Init))
Init = CE->getSubExpr();
@@ -11717,9 +11895,17 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// TypoExpr.
ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
if (!Res.isUsable()) {
+ // There are unresolved typos in Init, just drop them.
+ // FIXME: improve the recovery strategy to preserve the Init.
RealDecl->setInvalidDecl();
return;
}
+ if (Res.get()->containsErrors()) {
+ // Invalidate the decl as we don't know the type for recovery-expr yet.
+ RealDecl->setInvalidDecl();
+ VDecl->setInit(Res.get());
+ return;
+ }
Init = Res.get();
if (DeduceVariableDeclarationType(VDecl, DirectInit, Init))
@@ -11809,6 +11995,13 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ // The LoaderUninitialized attribute acts as a definition (of undef).
+ if (VDecl->hasAttr<LoaderUninitializedAttr>()) {
+ Diag(VDecl->getLocation(), diag::err_loader_uninitialized_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
// Get the decls type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
@@ -11840,7 +12033,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Try to correct any TypoExprs in the initialization arguments.
for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
ExprResult Res = CorrectDelayedTyposInExpr(
- Args[Idx], VDecl, [this, Entity, Kind](Expr *E) {
+ Args[Idx], VDecl, /*RecoverUncorrectedTypos=*/false,
+ [this, Entity, Kind](Expr *E) {
InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
return Init.Failed() ? ExprError() : E;
});
@@ -11858,7 +12052,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
/*TreatUnavailableAsInvalid=*/false);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
+      // If the provided initializer fails to initialize the var decl,
+ // we attach a recovery expr for better recovery.
+ auto RecoveryExpr =
+ CreateRecoveryExpr(Init->getBeginLoc(), Init->getEndLoc(), Args);
+ if (RecoveryExpr.get())
+ VDecl->setInit(RecoveryExpr.get());
return;
}
@@ -12119,6 +12318,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
VDecl->setInitStyle(VarDecl::ListInit);
}
+ if (LangOpts.OpenMP && VDecl->isFileVarDecl())
+ DeclsToCheckForDeferredDiags.push_back(VDecl);
CheckCompleteVariableDeclaration(VDecl);
}
@@ -12139,7 +12340,7 @@ void Sema::ActOnInitializerError(Decl *D) {
BD->setInvalidDecl();
// Auto types are meaningless if we can't make sense of the initializer.
- if (ParsingInitForAutoVars.count(D)) {
+ if (VD->getType()->isUndeducedType()) {
D->setInvalidDecl();
return;
}
@@ -12222,6 +12423,22 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
+ if (!Var->isInvalidDecl() && RealDecl->hasAttr<LoaderUninitializedAttr>()) {
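+    // The attribute requires a type with a trivial default constructor and is
+    // not allowed on 'extern' declarations; diagnose both cases here.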
+ if (CXXRecordDecl *RD = Var->getType()->getAsCXXRecordDecl()) {
+ if (!RD->hasTrivialDefaultConstructor()) {
+ Diag(Var->getLocation(), diag::err_loader_uninitialized_trivial_ctor);
+ Var->setInvalidDecl();
+ return;
+ }
+ }
+ if (Var->getStorageClass() == SC_Extern) {
+ Diag(Var->getLocation(), diag::err_loader_uninitialized_extern_decl)
+ << Var;
+ Var->setInvalidDecl();
+ return;
+ }
+ }
+
VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
if (!Var->isInvalidDecl() && DefKind != VarDecl::DeclarationOnly &&
Var->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion())
@@ -12279,9 +12496,9 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (!Var->isInvalidDecl()) {
if (const IncompleteArrayType *ArrayT
= Context.getAsIncompleteArrayType(Type)) {
- if (RequireCompleteType(Var->getLocation(),
- ArrayT->getElementType(),
- diag::err_illegal_decl_array_incomplete_type))
+ if (RequireCompleteSizedType(
+ Var->getLocation(), ArrayT->getElementType(),
+ diag::err_array_incomplete_or_sizeless_type))
Var->setInvalidDecl();
} else if (Var->getStorageClass() == SC_Static) {
// C99 6.9.2p3: If the declaration of an identifier for an object is
@@ -12397,12 +12614,18 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
InitializationSequence InitSeq(*this, Entity, Kind, None);
ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
- if (Init.isInvalid())
- Var->setInvalidDecl();
- else if (Init.get()) {
+
+ if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
// This is important for template substitution.
Var->setInitStyle(VarDecl::CallInit);
+ } else if (Init.isInvalid()) {
+ // If default-init fails, attach a recovery-expr initializer to track
+ // that initialization was attempted and failed.
+ auto RecoveryExpr =
+ CreateRecoveryExpr(Var->getLocation(), Var->getLocation(), {});
+ if (RecoveryExpr.get())
+ Var->setInit(RecoveryExpr.get());
}
CheckCompleteVariableDeclaration(Var);
@@ -12579,7 +12802,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (GlobalStorage && var->isThisDeclarationADefinition() &&
!inTemplateInstantiation()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
- int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
+ int SectionFlags = ASTContext::PSF_Read;
if (var->getType().isConstQualified())
Stack = &ConstSegStack;
else if (!var->getInit()) {
@@ -12589,14 +12812,19 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Stack = &DataSegStack;
SectionFlags |= ASTContext::PSF_Write;
}
- if (Stack->CurrentValue && !var->hasAttr<SectionAttr>())
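+    // If the variable already has an explicit section attribute, unify with
+    // that section; otherwise inherit the current #pragma section, if any, as
+    // an implicit attribute.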
+ if (const SectionAttr *SA = var->getAttr<SectionAttr>()) {
+ if (SA->getSyntax() == AttributeCommonInfo::AS_Declspec)
+ SectionFlags |= ASTContext::PSF_Implicit;
+ UnifySection(SA->getName(), SectionFlags, var);
+ } else if (Stack->CurrentValue) {
+ SectionFlags |= ASTContext::PSF_Implicit;
+ auto SectionName = Stack->CurrentValue->getString();
var->addAttr(SectionAttr::CreateImplicit(
- Context, Stack->CurrentValue->getString(),
- Stack->CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate));
- if (const SectionAttr *SA = var->getAttr<SectionAttr>())
- if (UnifySection(SA->getName(), SectionFlags, var))
+ Context, SectionName, Stack->CurrentPragmaLocation,
+ AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
+ if (UnifySection(SectionName, SectionFlags, var))
var->dropAttr<SectionAttr>();
+ }
// Apply the init_seg attribute if this has an initializer. If the
// initializer turns out to not be dynamic, we'll end up ignoring this
@@ -13033,13 +13261,15 @@ Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group) {
DeducedDecl = D;
} else if (!Context.hasSameType(DT->getDeducedType(), Deduced)) {
auto *AT = dyn_cast<AutoType>(DT);
- Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
- diag::err_auto_different_deductions)
- << (AT ? (unsigned)AT->getKeyword() : 3)
- << Deduced << DeducedDecl->getDeclName()
- << DT->getDeducedType() << D->getDeclName()
- << DeducedDecl->getInit()->getSourceRange()
- << D->getInit()->getSourceRange();
+ auto Dia = Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ diag::err_auto_different_deductions)
+ << (AT ? (unsigned)AT->getKeyword() : 3) << Deduced
+ << DeducedDecl->getDeclName() << DT->getDeducedType()
+ << D->getDeclName();
+ if (DeducedDecl->hasInit())
+ Dia << DeducedDecl->getInit()->getSourceRange();
+ if (D->getInit())
+ Dia << D->getInit()->getSourceRange();
D->setInvalidDecl();
break;
}
@@ -13418,9 +13648,28 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
+ // Check if we are in an `omp begin/end declare variant` scope. If we are, and
+  // this defines a non-templated function, we will create a declaration
+ // instead (=BaseFD), and emit the definition with a mangled name afterwards.
+ // The base function declaration will have the equivalent of an `omp declare
+ // variant` annotation which specifies the mangled definition as a
+ // specialization function under the OpenMP context defined as part of the
+ // `omp begin declare variant`.
+ FunctionDecl *BaseFD = nullptr;
+ if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope() &&
+ TemplateParameterLists.empty())
+ BaseFD = ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ ParentScope, D);
+
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
- return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+ Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+
+ if (BaseFD)
+ ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+ cast<FunctionDecl>(Dcl), BaseFD);
+
+ return Dcl;
}
void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
@@ -13613,13 +13862,12 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
VarDecl *VD = C.getCapturedVar();
if (VD->isInitCapture())
S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
- QualType CaptureType = VD->getType();
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
- CaptureType, /*Invalid*/false);
+ I->getType(), /*Invalid*/false);
} else if (C.capturesThis()) {
LSI->addThisCapture(/*Nested*/ false, C.getLocation(), I->getType(),
@@ -13652,7 +13900,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// Do not push if it is a lambda because one is already pushed when building
// the lambda in ActOnStartOfLambdaDefinition().
if (!isLambdaCallOperator(FD))
- PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+ PushExpressionEvaluationContext(
+ FD->isConsteval() ? ExpressionEvaluationContext::ConstantEvaluated
+ : ExprEvalContexts.back().Context);
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
@@ -14016,11 +14266,48 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
: FixItHint{});
}
} else {
+ // Returns true if the token beginning at this Loc is `const`.
+ auto isLocAtConst = [&](SourceLocation Loc, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return false;
+
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return false;
+
+ if (LocInfo.second > Buffer.size())
+ return false;
+
+ const char *LexStart = Buffer.data() + LocInfo.second;
+ StringRef StartTok(LexStart, Buffer.size() - LocInfo.second);
+
+ return StartTok.consume_front("const") &&
+ (StartTok.empty() || isWhitespace(StartTok[0]) ||
+ StartTok.startswith("/*") || StartTok.startswith("//"));
+ };
+
+ auto findBeginLoc = [&]() {
+        // If the return type has a `const` qualifier, we want to insert
+        // `static` before `const` (and not before the typename).
+        if ((FD->getReturnType()->isAnyPointerType() &&
+             FD->getReturnType()->getPointeeType().isConstQualified()) ||
+            FD->getReturnType().isConstQualified()) {
+          // But only do this if we can determine where the `const` is.
+          if (isLocAtConst(FD->getBeginLoc(), getSourceManager(),
+                           getLangOpts()))
+            return FD->getBeginLoc();
+ }
+ return FD->getTypeSpecStartLoc();
+ };
Diag(FD->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
<< /* function */ 1
<< (FD->getStorageClass() == SC_None
- ? FixItHint::CreateInsertion(FD->getTypeSpecStartLoc(),
- "static ")
+ ? FixItHint::CreateInsertion(findBeginLoc(), "static ")
: FixItHint{});
}
@@ -14028,11 +14315,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Warn if K&R function is defined without a previous declaration.
// This warning is issued only if the definition itself does not provide
// a prototype. Only K&R definitions do not provide a prototype.
- // An empty list in a function declarator that is part of a definition
- // of that function specifies that the function has no parameters
- // (C99 6.7.5.3p14)
- if (!FD->hasWrittenPrototype() && FD->getNumParams() > 0 &&
- !LangOpts.CPlusPlus) {
+ if (!FD->hasWrittenPrototype()) {
TypeSourceInfo *TI = FD->getTypeSourceInfo();
TypeLoc TL = TI->getTypeLoc();
FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
@@ -14162,7 +14445,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasErrorOccurred() ||
+ if (getDiagnostics().hasUncompilableErrorOccurred() ||
getDiagnostics().getSuppressAllDiagnostics()) {
DiscardCleanupsInEvaluationContext();
}
@@ -14218,10 +14501,17 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasErrorOccurred()) {
+ if (getDiagnostics().hasUncompilableErrorOccurred()) {
DiscardCleanupsInEvaluationContext();
}
+ if (LangOpts.OpenMP || LangOpts.CUDA || LangOpts.SYCLIsDevice) {
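+  // Record functions that may be emitted under OpenMP, CUDA, or SYCL so that
+  // their deferred diagnostics can be checked later.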
+ auto ES = getEmissionStatus(FD);
+ if (ES == Sema::FunctionEmissionStatus::Emitted ||
+ ES == Sema::FunctionEmissionStatus::Unknown)
+ DeclsToCheckForDeferredDiags.push_back(FD);
+ }
+
return dcl;
}
@@ -14353,6 +14643,77 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
return FD;
}
+/// If this function is a C++ replaceable global allocation function
+/// (C++2a [basic.stc.dynamic.allocation], C++2a [new.delete]),
+/// adds any function attributes that we know a priori based on the standard.
+///
+/// We need to check for duplicate attributes both here and where user-written
+/// attributes are applied to declarations.
+void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
+ FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return;
+
+ if (FD->getDeclName().getCXXOverloadedOperator() != OO_New &&
+ FD->getDeclName().getCXXOverloadedOperator() != OO_Array_New)
+ return;
+
+ Optional<unsigned> AlignmentParam;
+ bool IsNothrow = false;
+ if (!FD->isReplaceableGlobalAllocationFunction(&AlignmentParam, &IsNothrow))
+ return;
+
+ // C++2a [basic.stc.dynamic.allocation]p4:
+ // An allocation function that has a non-throwing exception specification
+ // indicates failure by returning a null pointer value. Any other allocation
+ // function never returns a null pointer value and indicates failure only by
+ // throwing an exception [...]
+ if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>())
+ FD->addAttr(ReturnsNonNullAttr::CreateImplicit(Context, FD->getLocation()));
+
+ // C++2a [basic.stc.dynamic.allocation]p2:
+ // An allocation function attempts to allocate the requested amount of
+ // storage. [...] If the request succeeds, the value returned by a
+ // replaceable allocation function is a [...] pointer value p0 different
+ // from any previously returned value p1 [...]
+ //
+ // However, this particular information is being added in codegen,
+ // because there is an opt-out switch for it (-fno-assume-sane-operator-new)
+
+ // C++2a [basic.stc.dynamic.allocation]p2:
+ // An allocation function attempts to allocate the requested amount of
+ // storage. If it is successful, it returns the address of the start of a
+ // block of storage whose length in bytes is at least as large as the
+ // requested size.
+ if (!FD->hasAttr<AllocSizeAttr>()) {
+ FD->addAttr(AllocSizeAttr::CreateImplicit(
+ Context, /*ElemSizeParam=*/ParamIdx(1, FD),
+ /*NumElemsParam=*/ParamIdx(), FD->getLocation()));
+ }
+
+ // C++2a [basic.stc.dynamic.allocation]p3:
+ // For an allocation function [...], the pointer returned on a successful
+ // call shall represent the address of storage that is aligned as follows:
+ // (3.1) If the allocation function takes an argument of type
+  //           std::align_val_t, the storage will have the alignment
+ // specified by the value of this argument.
+ if (AlignmentParam.hasValue() && !FD->hasAttr<AllocAlignAttr>()) {
+ FD->addAttr(AllocAlignAttr::CreateImplicit(
+ Context, ParamIdx(AlignmentParam.getValue(), FD), FD->getLocation()));
+ }
+
+ // FIXME:
+ // C++2a [basic.stc.dynamic.allocation]p3:
+ // For an allocation function [...], the pointer returned on a successful
+ // call shall represent the address of storage that is aligned as follows:
+ // (3.2) Otherwise, if the allocation function is named operator new[],
+ // the storage is aligned for any object that does not have
+ // new-extended alignment ([basic.align]) and is no larger than the
+ // requested size.
+ // (3.3) Otherwise, the storage is aligned for any object that does not
+ // have new-extended alignment and is of the requested size.
+}
+
/// Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
@@ -14453,6 +14814,8 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
}
}
+ AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD);
+
// If C++ exceptions are enabled but we are told extern "C" functions cannot
// throw, add an implicit nothrow attribute to any extern "C" function we come
// across.
@@ -14558,12 +14921,16 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
if (T->isDependentType())
return false;
+ // This doesn't use 'isIntegralType' despite the error message mentioning
+ // integral type because isIntegralType would also allow enum types in C.
if (const BuiltinType *BT = T->getAs<BuiltinType>())
if (BT->isInteger())
return false;
- Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
- return true;
+ if (T->isExtIntType())
+ return false;
+
+ return Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
}
/// Check whether this is a valid redeclaration of a previous enumeration.
@@ -15322,16 +15689,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
-
- // If this is an elaborated-type-specifier for a scoped enumeration,
- // the 'class' keyword is not necessary and not permitted.
- if (TUK == TUK_Reference || TUK == TUK_Friend) {
- if (ScopedEnum)
- Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
- << PrevEnum->isScoped()
- << FixItHint::CreateRemoval(ScopedEnumKWLoc);
+ if (TUK == TUK_Reference || TUK == TUK_Friend)
return PrevTagDecl;
- }
QualType EnumUnderlyingTy;
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
@@ -15809,7 +16168,7 @@ Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
assert(isa<ObjCContainerDecl>(IDecl) &&
"ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
DeclContext *OCD = cast<DeclContext>(IDecl);
- assert(getContainingDC(OCD) == CurContext &&
+ assert(OCD->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = OCD;
return IDecl;
@@ -15920,6 +16279,10 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth) {
+ assert(BitWidth);
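+  // A bit-width that contains errors has already been diagnosed; give up
+  // rather than trying to evaluate it.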
+ if (BitWidth->containsErrors())
+ return ExprError();
+
// Default to true; that shouldn't confuse checks for emptiness
if (ZeroWidth)
*ZeroWidth = true;
@@ -15927,8 +16290,9 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
- // Handle incomplete types with specific error.
- if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
+ // Handle incomplete and sizeless types with a specific error.
+ if (RequireCompleteSizedType(FieldLoc, FieldTy,
+ diag::err_field_incomplete_or_sizeless))
return ExprError();
if (FieldName)
return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
@@ -16138,14 +16502,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// If we receive a broken type, recover by assuming 'int' and
// marking this declaration as invalid.
- if (T.isNull()) {
+ if (T.isNull() || T->containsErrors()) {
InvalidDecl = true;
T = Context.IntTy;
}
QualType EltTy = Context.getBaseElementType(T);
- if (!EltTy->isDependentType()) {
- if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
+ if (!EltTy->isDependentType() && !EltTy->containsErrors()) {
+ if (RequireCompleteSizedType(Loc, EltTy,
+ diag::err_field_incomplete_or_sizeless)) {
// Fields of incomplete type force their record to be invalid.
Record->setInvalidDecl();
InvalidDecl = true;
@@ -16234,6 +16599,14 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
BitWidth = nullptr;
ZeroWidth = false;
}
+
+ // Only data members can have in-class initializers.
+ if (BitWidth && !II && InitStyle) {
+ Diag(Loc, diag::err_anon_bitfield_init);
+ InvalidDecl = true;
+ BitWidth = nullptr;
+ ZeroWidth = false;
+ }
}
// Check that 'mutable' is consistent with the type of the declaration.
@@ -16689,8 +17062,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// elsewhere, after synthesized ivars are known.
}
} else if (!FDTy->isDependentType() &&
- RequireCompleteType(FD->getLocation(), FD->getType(),
- diag::err_field_incomplete)) {
+ RequireCompleteSizedType(
+ FD->getLocation(), FD->getType(),
+ diag::err_field_incomplete_or_sizeless)) {
// Incomplete type
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
@@ -16748,8 +17122,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Context, "", UnavailableAttr::IR_ARCFieldWithOwnership,
FD->getLocation()));
} else if (getLangOpts().ObjC &&
- getLangOpts().getGC() != LangOptions::NonGC &&
- Record && !Record->hasObjectMember()) {
+ getLangOpts().getGC() != LangOptions::NonGC && Record &&
+ !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong())
Record->setHasObjectMember(true);
@@ -16813,10 +17187,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
I.setAccess((*I)->getAccess());
}
- if (!CXXRecord->isDependentType()) {
- // Add any implicitly-declared members to this class.
- AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ if (!CXXRecord->isDependentType()) {
if (!CXXRecord->isInvalidDecl()) {
// If we have virtual base classes, we may end up finding multiple
// final overriders for a given virtual function. Check for this
@@ -17375,9 +17749,11 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
+
+ // DenseMaps cannot contain the all-ones int64_t value, so use unordered_map.
typedef std::unordered_map<int64_t, DeclOrVector> ValueToVectorMap;
- // Use int64_t as a key to avoid needing special handling for DenseMap keys.
+ // Use int64_t as a key to avoid needing special handling for map keys.
auto EnumConstantToKey = [](const EnumConstantDecl *D) {
llvm::APSInt Val = D->getInitVal();
return Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue();
@@ -17807,7 +18183,13 @@ Decl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
-Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
+Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
+ bool Final) {
+ // SYCL functions can be templates, so we check if they have the appropriate
+ // attribute prior to checking if they are templates.
+ if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelAttr>())
+ return FunctionEmissionStatus::Emitted;
+
// Templates are emitted when they're instantiated.
if (FD->isDependentContext())
return FunctionEmissionStatus::TemplateDiscarded;
@@ -17819,8 +18201,10 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
if (DevTy.hasValue()) {
if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
OMPES = FunctionEmissionStatus::OMPDiscarded;
- else if (DeviceKnownEmittedFns.count(FD) > 0)
+ else if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any) {
OMPES = FunctionEmissionStatus::Emitted;
+ }
}
} else if (LangOpts.OpenMP) {
// In OpenMP 4.5 all the functions are host functions.
@@ -17836,10 +18220,11 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
if (DevTy.hasValue()) {
if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
OMPES = FunctionEmissionStatus::OMPDiscarded;
- } else if (DeviceKnownEmittedFns.count(FD) > 0) {
+ } else if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any)
OMPES = FunctionEmissionStatus::Emitted;
- }
- }
+ } else if (Final)
+ OMPES = FunctionEmissionStatus::Emitted;
}
}
if (OMPES == FunctionEmissionStatus::OMPDiscarded ||
@@ -17874,9 +18259,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
// Otherwise, the function is known-emitted if it's in our set of
// known-emitted functions.
- return (DeviceKnownEmittedFns.count(FD) > 0)
- ? FunctionEmissionStatus::Emitted
- : FunctionEmissionStatus::Unknown;
+ return FunctionEmissionStatus::Unknown;
}
bool Sema::shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index 849bc09063b3..1a0594512a60 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -225,8 +225,7 @@ static bool checkAttributeAtMostNumArgs(Sema &S, const ParsedAttr &AL,
/// A helper function to provide Attribute Location for the Attr types
/// AND the ParsedAttr.
template <typename AttrInfo>
-static typename std::enable_if<std::is_base_of<Attr, AttrInfo>::value,
- SourceLocation>::type
+static std::enable_if_t<std::is_base_of<Attr, AttrInfo>::value, SourceLocation>
getAttrLoc(const AttrInfo &AL) {
return AL.getLocation();
}
@@ -1100,7 +1099,7 @@ static void handleNoBuiltinAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AddBuiltinName(BuiltinName);
else
S.Diag(LiteralLoc, diag::warn_attribute_no_builtin_invalid_builtin_name)
- << BuiltinName << AL.getAttrName()->getName();
+ << BuiltinName << AL;
}
// Repeating the same attribute is fine.
@@ -1111,7 +1110,7 @@ static void handleNoBuiltinAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (HasWildcard && Names.size() > 1)
S.Diag(D->getLocation(),
diag::err_attribute_no_builtin_wildcard_or_builtin_name)
- << AL.getAttrName()->getName();
+ << AL;
if (D->hasAttr<NoBuiltinAttr>())
D->dropAttr<NoBuiltinAttr>();
@@ -1177,8 +1176,7 @@ static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
if (!RD->hasAttr<ConsumableAttr>()) {
- S.Diag(AL.getLoc(), diag::warn_attr_on_unconsumable_class) <<
- RD->getNameAsString();
+ S.Diag(AL.getLoc(), diag::warn_attr_on_unconsumable_class) << RD;
return false;
}
@@ -1625,6 +1623,10 @@ void Sema::AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
<< E->getSourceRange();
return;
}
+
+ if (I > Sema::MaximumAlignment)
+ Diag(CI.getLoc(), diag::warn_assume_aligned_too_great)
+ << CI.getRange() << Sema::MaximumAlignment;
}
if (OE) {
@@ -1663,7 +1665,8 @@ void Sema::AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
return;
QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
- if (!Ty->isDependentType() && !Ty->isIntegralType(Context)) {
+ if (!Ty->isDependentType() && !Ty->isIntegralType(Context) &&
+ !Ty->isAlignValT()) {
Diag(ParamExpr->getBeginLoc(), diag::err_attribute_integers_only)
<< &TmpAttr
<< FuncDecl->getParamDecl(Idx.getASTIndex())->getSourceRange();
@@ -1989,6 +1992,21 @@ static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(CA);
}
+static void handleCmseNSEntryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (S.LangOpts.CPlusPlus && !D->getDeclContext()->isExternCContext()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL;
+ return;
+ }
+
+ const auto *FD = cast<FunctionDecl>(D);
+ if (!FD->isExternallyVisible()) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CmseNSEntryAttr(S.Context, AL));
+}
+
static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL))
return;
@@ -2809,6 +2827,12 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
if ((AL.isCXX11Attribute() || AL.isC2xAttribute()) && !AL.getScopeName()) {
+ // The standard attribute cannot be applied to variable declarations such
+ // as a function pointer.
+ if (isa<VarDecl>(D))
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
+ << AL << "functions, classes, or enumerations";
+
// If this is spelled as the standard C++17 attribute, but not in C++17,
// warn about using it as an extension. If there are attribute arguments,
// then claim it's a C++2a extension instead.
@@ -2816,8 +2840,8 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
// extension warning for C2x mode.
const LangOptions &LO = S.getLangOpts();
if (AL.getNumArgs() == 1) {
- if (LO.CPlusPlus && !LO.CPlusPlus2a)
- S.Diag(AL.getLoc(), diag::ext_cxx2a_attr) << AL;
+ if (LO.CPlusPlus && !LO.CPlusPlus20)
+ S.Diag(AL.getLoc(), diag::ext_cxx20_attr) << AL;
// Since this is spelled [[nodiscard]], get the optional string
// literal. If in C++ mode, but not in C++2a mode, diagnose as an
@@ -3672,7 +3696,7 @@ void Sema::AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E) {
if (!T->isDependentType() && !T->isAnyPointerType() &&
!T->isReferenceType() && !T->isMemberPointerType()) {
Diag(AttrLoc, diag::warn_attribute_pointer_or_reference_only)
- << &TmpAttr /*TmpAttr.getName()*/ << T << D->getSourceRange();
+ << &TmpAttr << T << D->getSourceRange();
return;
}
@@ -3809,13 +3833,12 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
}
- // Alignment calculations can wrap around if it's greater than 2**28.
- unsigned MaxValidAlignment =
- Context.getTargetInfo().getTriple().isOSBinFormatCOFF() ? 8192
- : 268435456;
- if (AlignVal > MaxValidAlignment) {
- Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment
- << E->getSourceRange();
+ unsigned MaximumAlignment = Sema::MaximumAlignment;
+ if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
+ MaximumAlignment = std::min(MaximumAlignment, 8192u);
+ if (AlignVal > MaximumAlignment) {
+ Diag(AttrLoc, diag::err_attribute_aligned_too_great)
+ << MaximumAlignment << E->getSourceRange();
return;
}
@@ -3865,6 +3888,7 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
// not specify an alignment that is less strict than the alignment that
// would otherwise be required for the entity being declared.
AlignedAttr *AlignasAttr = nullptr;
+ AlignedAttr *LastAlignedAttr = nullptr;
unsigned Align = 0;
for (auto *I : D->specific_attrs<AlignedAttr>()) {
if (I->isAlignmentDependent())
@@ -3872,9 +3896,13 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
if (I->isAlignas())
AlignasAttr = I;
Align = std::max(Align, I->getAlignment(Context));
+ LastAlignedAttr = I;
}
- if (AlignasAttr && Align) {
+ if (Align && DiagTy->isSizelessType()) {
+ Diag(LastAlignedAttr->getLocation(), diag::err_attribute_sizeless_type)
+ << LastAlignedAttr << DiagTy;
+ } else if (AlignasAttr && Align) {
CharUnits RequestedAlign = Context.toCharUnitsFromBits(Align);
CharUnits NaturalAlign = Context.getTypeAlignInChars(UnderlyingTy);
if (NaturalAlign > RequestedAlign)
@@ -3907,15 +3935,15 @@ bool Sema::checkMSInheritanceAttrOnDefinition(
Diag(Range.getBegin(), diag::err_mismatched_ms_inheritance)
<< 0 /*definition*/;
- Diag(RD->getDefinition()->getLocation(), diag::note_defined_here)
- << RD->getNameAsString();
+ Diag(RD->getDefinition()->getLocation(), diag::note_defined_here) << RD;
return true;
}
/// parseModeAttrArg - Parses attribute mode string and returns parsed type
/// attribute.
static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
- bool &IntegerMode, bool &ComplexMode) {
+ bool &IntegerMode, bool &ComplexMode,
+ bool &ExplicitIEEE) {
IntegerMode = true;
ComplexMode = false;
switch (Str.size()) {
@@ -3936,7 +3964,12 @@ static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
case 'X':
DestWidth = 96;
break;
+ case 'K': // KFmode - IEEE quad precision (__float128)
+ ExplicitIEEE = true;
+ DestWidth = Str[1] == 'I' ? 0 : 128;
+ break;
case 'T':
+ ExplicitIEEE = false;
DestWidth = 128;
break;
}
@@ -3997,6 +4030,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
unsigned DestWidth = 0;
bool IntegerMode = true;
bool ComplexMode = false;
+ bool ExplicitIEEE = false;
llvm::APInt VectorSize(64, 0);
if (Str.size() >= 4 && Str[0] == 'V') {
// Minimal length of vector mode is 4: 'V' + NUMBER(>=1) + TYPE(>=2).
@@ -4009,7 +4043,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
!Str.substr(1, VectorStringLength).getAsInteger(10, VectorSize) &&
VectorSize.isPowerOf2()) {
parseModeAttrArg(*this, Str.substr(VectorStringLength + 1), DestWidth,
- IntegerMode, ComplexMode);
+ IntegerMode, ComplexMode, ExplicitIEEE);
// Avoid duplicate warning from template instantiation.
if (!InInstantiation)
Diag(AttrLoc, diag::warn_vector_mode_deprecated);
@@ -4019,7 +4053,8 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
}
if (!VectorSize)
- parseModeAttrArg(*this, Str, DestWidth, IntegerMode, ComplexMode);
+ parseModeAttrArg(*this, Str, DestWidth, IntegerMode, ComplexMode,
+ ExplicitIEEE);
// FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
// and friends, at least with glibc.
@@ -4061,8 +4096,9 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
Diag(AttrLoc, diag::err_enum_mode_vector_type) << Name << CI.getRange();
return;
}
- bool IntegralOrAnyEnumType =
- OldElemTy->isIntegralOrEnumerationType() || OldElemTy->getAs<EnumType>();
+ bool IntegralOrAnyEnumType = (OldElemTy->isIntegralOrEnumerationType() &&
+ !OldElemTy->isExtIntType()) ||
+ OldElemTy->getAs<EnumType>();
if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType() &&
!IntegralOrAnyEnumType)
@@ -4084,7 +4120,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
NewElemTy = Context.getIntTypeForBitwidth(DestWidth,
OldElemTy->isSignedIntegerType());
else
- NewElemTy = Context.getRealTypeForBitwidth(DestWidth);
+ NewElemTy = Context.getRealTypeForBitwidth(DestWidth, ExplicitIEEE);
if (NewElemTy.isNull()) {
Diag(AttrLoc, diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
@@ -4333,6 +4369,12 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(FD->getBeginLoc(), diag::warn_kern_is_inline) << FD;
D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
+ // In host compilation the kernel is emitted as a stub function, which is
+ // a helper function for launching the kernel. The instructions in the helper
+ // function have nothing to do with the source code of the kernel. Do not emit
+ // debug info for the stub function to avoid confusing the debugger.
+ if (S.LangOpts.HIP && !S.LangOpts.CUDAIsDevice)
+ D->addAttr(NoDebugAttr::CreateImplicit(S.Context));
}
static void handleGNUInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -4934,17 +4976,58 @@ static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
PatchableFunctionEntryAttr(S.Context, AL, Count, Offset));
}
-static bool ArmMveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+namespace {
+struct IntrinToName {
+ uint32_t Id;
+ int32_t FullName;
+ int32_t ShortName;
+};
+} // unnamed namespace
+
+static bool ArmBuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
+ ArrayRef<IntrinToName> Map,
+ const char *IntrinNames) {
if (AliasName.startswith("__arm_"))
AliasName = AliasName.substr(6);
- switch (BuiltinID) {
+ const IntrinToName *It = std::lower_bound(
+ Map.begin(), Map.end(), BuiltinID,
+ [](const IntrinToName &L, unsigned Id) { return L.Id < Id; });
+ if (It == Map.end() || It->Id != BuiltinID)
+ return false;
+ StringRef FullName(&IntrinNames[It->FullName]);
+ if (AliasName == FullName)
+ return true;
+ if (It->ShortName == -1)
+ return false;
+ StringRef ShortName(&IntrinNames[It->ShortName]);
+ return AliasName == ShortName;
+}
+
+static bool ArmMveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
+ // The included file defines:
+ // - ArrayRef<IntrinToName> Map
+ // - const char IntrinNames[]
+ return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+static bool ArmCdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
+#include "clang/Basic/arm_cde_builtin_aliases.inc"
+ return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+static bool ArmSveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ switch (BuiltinID) {
default:
return false;
+#define GET_SVE_BUILTINS
+#define BUILTIN(name, types, attr) case SVE::BI##name:
+#include "clang/Basic/arm_sve_builtins.inc"
+ return true;
}
}
-static void handleArmMveAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 1 << AANT_ArgumentIdentifier;
@@ -4953,14 +5036,17 @@ static void handleArmMveAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
unsigned BuiltinID = Ident->getBuiltinID();
+ StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
- if (!ArmMveAliasValid(BuiltinID,
- cast<FunctionDecl>(D)->getIdentifier()->getName())) {
- S.Diag(AL.getLoc(), diag::err_attribute_arm_mve_alias);
+ bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
+ if ((IsAArch64 && !ArmSveAliasValid(BuiltinID, AliasName)) ||
+ (!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
+ !ArmCdeAliasValid(BuiltinID, AliasName))) {
+ S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
return;
}
- D->addAttr(::new (S.Context) ArmMveAliasAttr(S.Context, AL, Ident));
+ D->addAttr(::new (S.Context) ArmBuiltinAliasAttr(S.Context, AL, Ident));
}
//===----------------------------------------------------------------------===//
@@ -5407,9 +5493,9 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
//===----------------------------------------------------------------------===//
UuidAttr *Sema::mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
- StringRef Uuid) {
+ StringRef UuidAsWritten, MSGuidDecl *GuidDecl) {
if (const auto *UA = D->getAttr<UuidAttr>()) {
- if (UA->getGuid().equals_lower(Uuid))
+ if (declaresSameEntity(UA->getGuidDecl(), GuidDecl))
return nullptr;
if (!UA->getGuid().empty()) {
Diag(UA->getLocation(), diag::err_mismatched_uuid);
@@ -5418,7 +5504,7 @@ UuidAttr *Sema::mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
}
}
- return ::new (Context) UuidAttr(Context, CI, Uuid);
+ return ::new (Context) UuidAttr(Context, CI, UuidAsWritten, GuidDecl);
}
static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -5428,13 +5514,14 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- StringRef StrRef;
+ StringRef OrigStrRef;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, StrRef, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, OrigStrRef, &LiteralLoc))
return;
// GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
// "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}", normalize to the former.
+ StringRef StrRef = OrigStrRef;
if (StrRef.size() == 38 && StrRef.front() == '{' && StrRef.back() == '}')
StrRef = StrRef.drop_front().drop_back();
@@ -5456,6 +5543,16 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+ // Convert to our parsed format and canonicalize.
+ MSGuidDecl::Parts Parsed;
+ StrRef.substr(0, 8).getAsInteger(16, Parsed.Part1);
+ StrRef.substr(9, 4).getAsInteger(16, Parsed.Part2);
+ StrRef.substr(14, 4).getAsInteger(16, Parsed.Part3);
+ for (unsigned i = 0; i != 8; ++i)
+ StrRef.substr(19 + 2 * i + (i >= 2 ? 1 : 0), 2)
+ .getAsInteger(16, Parsed.Part4And5[i]);
+ MSGuidDecl *Guid = S.Context.getMSGuidDecl(Parsed);
+
// FIXME: It'd be nice to also emit a fixit removing uuid(...) (and, if it's
// the only thing in the [] list, the [] too), and add an insertion of
// __declspec(uuid(...)). But sadly, neither the SourceLocs of the commas
@@ -5465,7 +5562,7 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
S.Diag(AL.getLoc(), diag::warn_atl_uuid_deprecated);
- UuidAttr *UA = S.mergeUuidAttr(D, AL, StrRef);
+ UuidAttr *UA = S.mergeUuidAttr(D, AL, OrigStrRef, Guid);
if (UA)
D->addAttr(UA);
}
@@ -5795,45 +5892,75 @@ static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D, const ParsedAttr &
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
-static void handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'import_module'" << ExpectedFunction;
- return;
+WebAssemblyImportModuleAttr *
+Sema::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ if (ExistingAttr->getImportModule() == AL.getImportModule())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
+ << ExistingAttr->getImportModule() << AL.getImportModule();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
}
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return nullptr;
+ }
+ return ::new (Context) WebAssemblyImportModuleAttr(Context, AL,
+ AL.getImportModule());
+}
+WebAssemblyImportNameAttr *
+Sema::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ if (ExistingAttr->getImportName() == AL.getImportName())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
+ << ExistingAttr->getImportName() << AL.getImportName();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return nullptr;
}
+ return ::new (Context) WebAssemblyImportNameAttr(Context, AL,
+ AL.getImportName());
+}
+
+static void
+handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
StringRef Str;
SourceLocation ArgLoc;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
+ if (FD->hasBody()) {
+ S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return;
+ }
FD->addAttr(::new (S.Context)
WebAssemblyImportModuleAttr(S.Context, AL, Str));
}
-static void handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'import_name'" << ExpectedFunction;
- return;
- }
-
+static void
+handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
- }
StringRef Str;
SourceLocation ArgLoc;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
+ if (FD->hasBody()) {
+ S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return;
+ }
FD->addAttr(::new (S.Context) WebAssemblyImportNameAttr(S.Context, AL, Str));
}
@@ -6199,11 +6326,6 @@ static void handleCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
!S.checkStringLiteralArgumentAttr(AL, 0, N, &LiteralLoc))
return;
- // Currently, there are only two names allowed for a capability: role and
- // mutex (case insensitive). Diagnose other capability names.
- if (!N.equals_lower("mutex") && !N.equals_lower("role"))
- S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N;
-
D->addAttr(::new (S.Context) CapabilityAttr(S.Context, AL, N));
}
@@ -6567,7 +6689,9 @@ static void handleObjCExternallyRetainedAttr(Sema &S, Decl *D,
// If D is a function-like declaration (method, block, or function), then we
// make every parameter pseudo-strong.
- for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
+ unsigned NumParams =
+ hasFunctionProto(D) ? getFunctionOrMethodNumParams(D) : 0;
+ for (unsigned I = 0; I != NumParams; ++I) {
auto *PVD = const_cast<ParmVarDecl *>(getFunctionOrMethodParam(D, I));
QualType Ty = PVD->getType();
@@ -6620,7 +6744,7 @@ static void handleMSAllocatorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
handleSimpleAttribute<MSAllocatorAttr>(S, D, AL);
}
-static void handeAcquireHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleAcquireHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.isUsedAsTypeAttr())
return;
// Warn if the parameter is definitely not an output parameter.
@@ -6700,6 +6824,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
switch (AL.getKind()) {
default:
+ if (AL.getInfo().handleDeclAttribute(S, D, AL) != ParsedAttrInfo::NotHandled)
+ break;
if (!AL.isStmtAttr()) {
// Type attributes are handled elsewhere; silently move on.
assert(AL.isTypeAttr() && "Non-type attribute not handled");
@@ -6722,15 +6848,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttributeWithExclusions<Mips16Attr, MicroMipsAttr,
MipsInterruptAttr>(S, D, AL);
break;
- case ParsedAttr::AT_NoMips16:
- handleSimpleAttribute<NoMips16Attr>(S, D, AL);
- break;
case ParsedAttr::AT_MicroMips:
handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, AL);
break;
- case ParsedAttr::AT_NoMicroMips:
- handleSimpleAttribute<NoMicroMipsAttr>(S, D, AL);
- break;
case ParsedAttr::AT_MipsLongCall:
handleSimpleAttributeWithExclusions<MipsLongCallAttr, MipsShortCallAttr>(
S, D, AL);
@@ -6766,9 +6886,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_WebAssemblyImportName:
handleWebAssemblyImportNameAttr(S, D, AL);
break;
- case ParsedAttr::AT_IBAction:
- handleSimpleAttribute<IBActionAttr>(S, D, AL);
- break;
case ParsedAttr::AT_IBOutlet:
handleIBOutlet(S, D, AL);
break;
@@ -6793,9 +6910,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AlwaysInline:
handleAlwaysInlineAttr(S, D, AL);
break;
- case ParsedAttr::AT_Artificial:
- handleSimpleAttribute<ArtificialAttr>(S, D, AL);
- break;
case ParsedAttr::AT_AnalyzerNoReturn:
handleAnalyzerNoReturnAttr(S, D, AL);
break;
@@ -6825,16 +6939,20 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handlePassObjectSizeAttr(S, D, AL);
break;
case ParsedAttr::AT_Constructor:
- handleConstructorAttr(S, D, AL);
- break;
- case ParsedAttr::AT_CXX11NoReturn:
- handleSimpleAttribute<CXX11NoReturnAttr>(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error(
+ "'constructor' attribute is not yet supported on AIX");
+ else
+ handleConstructorAttr(S, D, AL);
break;
case ParsedAttr::AT_Deprecated:
handleDeprecatedAttr(S, D, AL);
break;
case ParsedAttr::AT_Destructor:
- handleDestructorAttr(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error("'destructor' attribute is not yet supported on AIX");
+ else
+ handleDestructorAttr(S, D, AL);
break;
case ParsedAttr::AT_EnableIf:
handleEnableIfAttr(S, D, AL);
@@ -6857,15 +6975,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_OptimizeNone:
handleOptimizeNoneAttr(S, D, AL);
break;
- case ParsedAttr::AT_FlagEnum:
- handleSimpleAttribute<FlagEnumAttr>(S, D, AL);
- break;
case ParsedAttr::AT_EnumExtensibility:
handleEnumExtensibilityAttr(S, D, AL);
break;
- case ParsedAttr::AT_Flatten:
- handleSimpleAttribute<FlattenAttr>(S, D, AL);
- break;
case ParsedAttr::AT_SYCLKernel:
handleSYCLKernelAttr(S, D, AL);
break;
@@ -6888,9 +7000,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_CUDAHost:
handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
break;
- case ParsedAttr::AT_HIPPinnedShadow:
- handleSimpleAttributeWithExclusions<HIPPinnedShadowAttr, CUDADeviceAttr,
- CUDAConstantAttr>(S, D, AL);
+ case ParsedAttr::AT_CUDADeviceBuiltinSurfaceType:
+ handleSimpleAttributeWithExclusions<CUDADeviceBuiltinSurfaceTypeAttr,
+ CUDADeviceBuiltinTextureTypeAttr>(S, D,
+ AL);
+ break;
+ case ParsedAttr::AT_CUDADeviceBuiltinTextureType:
+ handleSimpleAttributeWithExclusions<CUDADeviceBuiltinTextureTypeAttr,
+ CUDADeviceBuiltinSurfaceTypeAttr>(S, D,
+ AL);
break;
case ParsedAttr::AT_GNUInline:
handleGNUInlineAttr(S, D, AL);
@@ -6901,27 +7019,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Restrict:
handleRestrictAttr(S, D, AL);
break;
- case ParsedAttr::AT_LifetimeBound:
- handleSimpleAttribute<LifetimeBoundAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MayAlias:
- handleSimpleAttribute<MayAliasAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Mode:
handleModeAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoAlias:
- handleSimpleAttribute<NoAliasAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoCommon:
- handleSimpleAttribute<NoCommonAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoSplitStack:
- handleSimpleAttribute<NoSplitStackAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoUniqueAddress:
- handleSimpleAttribute<NoUniqueAddressAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NonNull:
if (auto *PVD = dyn_cast<ParmVarDecl>(D))
handleNonNullAttrParameter(S, PVD, AL);
@@ -6940,9 +7040,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AllocAlign:
handleAllocAlignAttr(S, D, AL);
break;
- case ParsedAttr::AT_Overloadable:
- handleSimpleAttribute<OverloadableAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Ownership:
handleOwnershipAttr(S, D, AL);
break;
@@ -6998,9 +7095,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCRuntimeName:
handleObjCRuntimeName(S, D, AL);
break;
- case ParsedAttr::AT_ObjCRuntimeVisible:
- handleSimpleAttribute<ObjCRuntimeVisibleAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCBoxable:
handleObjCBoxable(S, D, AL);
break;
@@ -7018,12 +7112,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
S.AddXConsumedAttr(D, AL, parsedAttrToRetainOwnershipKind(AL),
/*IsTemplateInstantiation=*/false);
break;
- case ParsedAttr::AT_NSConsumesSelf:
- handleSimpleAttribute<NSConsumesSelfAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_OSConsumesThis:
- handleSimpleAttribute<OSConsumesThisAttr>(S, D, AL);
- break;
case ParsedAttr::AT_OSReturnsRetainedOnZero:
handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnZeroAttr>(
S, D, AL, isValidOSObjectOutParameter(D),
@@ -7057,11 +7145,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_VecTypeHint:
handleVecTypeHint(S, D, AL);
break;
- case ParsedAttr::AT_ConstInit:
- handleSimpleAttribute<ConstInitAttr>(S, D, AL);
- break;
case ParsedAttr::AT_InitPriority:
- handleInitPriorityAttr(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error(
+ "'init_priority' attribute is not yet supported on AIX");
+ else
+ handleInitPriorityAttr(S, D, AL);
break;
case ParsedAttr::AT_Packed:
handlePackedAttr(S, D, AL);
@@ -7090,12 +7179,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Unavailable:
handleAttrWithMessage<UnavailableAttr>(S, D, AL);
break;
- case ParsedAttr::AT_ArcWeakrefUnavailable:
- handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCRootClass:
- handleSimpleAttribute<ObjCRootClassAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCDirect:
handleObjCDirectAttr(S, D, AL);
break;
@@ -7103,27 +7186,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleObjCDirectMembersAttr(S, D, AL);
handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
break;
- case ParsedAttr::AT_ObjCNonLazyClass:
- handleSimpleAttribute<ObjCNonLazyClassAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCSubclassingRestricted:
- handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCClassStub:
- handleSimpleAttribute<ObjCClassStubAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCExplicitProtocolImpl:
handleObjCSuppresProtocolAttr(S, D, AL);
break;
- case ParsedAttr::AT_ObjCRequiresPropertyDefs:
- handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Unused:
handleUnusedAttr(S, D, AL);
break;
- case ParsedAttr::AT_ReturnsTwice:
- handleSimpleAttribute<ReturnsTwiceAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NotTailCalled:
handleSimpleAttributeWithExclusions<NotTailCalledAttr, AlwaysInlineAttr>(
S, D, AL);
@@ -7132,24 +7200,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttributeWithExclusions<DisableTailCallsAttr, NakedAttr>(S, D,
AL);
break;
- case ParsedAttr::AT_Used:
- handleSimpleAttribute<UsedAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Visibility:
handleVisibilityAttr(S, D, AL, false);
break;
case ParsedAttr::AT_TypeVisibility:
handleVisibilityAttr(S, D, AL, true);
break;
- case ParsedAttr::AT_WarnUnused:
- handleSimpleAttribute<WarnUnusedAttr>(S, D, AL);
- break;
case ParsedAttr::AT_WarnUnusedResult:
handleWarnUnusedResult(S, D, AL);
break;
- case ParsedAttr::AT_Weak:
- handleSimpleAttribute<WeakAttr>(S, D, AL);
- break;
case ParsedAttr::AT_WeakRef:
handleWeakRefAttr(S, D, AL);
break;
@@ -7159,9 +7218,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_TransparentUnion:
handleTransparentUnionAttr(S, D, AL);
break;
- case ParsedAttr::AT_ObjCException:
- handleSimpleAttribute<ObjCExceptionAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCMethodFamily:
handleObjCMethodFamilyAttr(S, D, AL);
break;
@@ -7177,36 +7233,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Sentinel:
handleSentinelAttr(S, D, AL);
break;
- case ParsedAttr::AT_Const:
- handleSimpleAttribute<ConstAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_Pure:
- handleSimpleAttribute<PureAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Cleanup:
handleCleanupAttr(S, D, AL);
break;
case ParsedAttr::AT_NoDebug:
handleNoDebugAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoDuplicate:
- handleSimpleAttribute<NoDuplicateAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_Convergent:
- handleSimpleAttribute<ConvergentAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoInline:
- handleSimpleAttribute<NoInlineAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoInstrumentFunction: // Interacts with -pg.
- handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoStackProtector:
- // Interacts with -fstack-protector options.
- handleSimpleAttribute<NoStackProtectorAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_CFICanonicalJumpTable:
- handleSimpleAttribute<CFICanonicalJumpTableAttr>(S, D, AL);
+ case ParsedAttr::AT_CmseNSEntry:
+ handleCmseNSEntryAttr(S, D, AL);
break;
case ParsedAttr::AT_StdCall:
case ParsedAttr::AT_CDecl:
@@ -7232,9 +7266,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Pointer:
handleLifetimeCategoryAttr(S, D, AL);
break;
- case ParsedAttr::AT_OpenCLKernel:
- handleSimpleAttribute<OpenCLKernelAttr>(S, D, AL);
- break;
case ParsedAttr::AT_OpenCLAccess:
handleOpenCLAccessAttr(S, D, AL);
break;
@@ -7253,38 +7284,17 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
- case ParsedAttr::AT_ExcludeFromExplicitInstantiation:
- handleSimpleAttribute<ExcludeFromExplicitInstantiationAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_LTOVisibilityPublic:
- handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, AL);
- break;
// Microsoft attributes:
- case ParsedAttr::AT_EmptyBases:
- handleSimpleAttribute<EmptyBasesAttr>(S, D, AL);
- break;
case ParsedAttr::AT_LayoutVersion:
handleLayoutVersion(S, D, AL);
break;
- case ParsedAttr::AT_TrivialABI:
- handleSimpleAttribute<TrivialABIAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MSNoVTable:
- handleSimpleAttribute<MSNoVTableAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MSStruct:
- handleSimpleAttribute<MSStructAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Uuid:
handleUuidAttr(S, D, AL);
break;
case ParsedAttr::AT_MSInheritance:
handleMSInheritanceAttr(S, D, AL);
break;
- case ParsedAttr::AT_SelectAny:
- handleSimpleAttribute<SelectAnyAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Thread:
handleDeclspecThreadAttr(S, D, AL);
break;
@@ -7303,24 +7313,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AssertSharedLock:
handleAssertSharedLockAttr(S, D, AL);
break;
- case ParsedAttr::AT_GuardedVar:
- handleSimpleAttribute<GuardedVarAttr>(S, D, AL);
- break;
case ParsedAttr::AT_PtGuardedVar:
handlePtGuardedVarAttr(S, D, AL);
break;
- case ParsedAttr::AT_ScopedLockable:
- handleSimpleAttribute<ScopedLockableAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NoSanitize:
handleNoSanitizeAttr(S, D, AL);
break;
case ParsedAttr::AT_NoSanitizeSpecific:
handleNoSanitizeSpecificAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoThreadSafetyAnalysis:
- handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, AL);
- break;
case ParsedAttr::AT_GuardedBy:
handleGuardedByAttr(S, D, AL);
break;
@@ -7372,12 +7373,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Consumable:
handleConsumableAttr(S, D, AL);
break;
- case ParsedAttr::AT_ConsumableAutoCast:
- handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ConsumableSetOnRead:
- handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, AL);
- break;
case ParsedAttr::AT_CallableWhen:
handleCallableWhenAttr(S, D, AL);
break;
@@ -7401,16 +7396,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_TypeTagForDatatype:
handleTypeTagForDatatypeAttr(S, D, AL);
break;
- case ParsedAttr::AT_AnyX86NoCallerSavedRegisters:
- handleSimpleAttribute<AnyX86NoCallerSavedRegistersAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_RenderScriptKernel:
- handleSimpleAttribute<RenderScriptKernelAttr>(S, D, AL);
- break;
+
// XRay attributes.
- case ParsedAttr::AT_XRayInstrument:
- handleSimpleAttribute<XRayInstrumentAttr>(S, D, AL);
- break;
case ParsedAttr::AT_XRayLogArgs:
handleXRayLogArgsAttr(S, D, AL);
break;
@@ -7419,11 +7406,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handlePatchableFunctionEntryAttr(S, D, AL);
break;
- // Move semantics attribute.
- case ParsedAttr::AT_Reinitializes:
- handleSimpleAttribute<ReinitializesAttr>(S, D, AL);
- break;
-
case ParsedAttr::AT_AlwaysDestroy:
case ParsedAttr::AT_NoDestroy:
handleDestroyAttr(S, D, AL);
@@ -7433,6 +7415,10 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleUninitializedAttr(S, D, AL);
break;
+ case ParsedAttr::AT_LoaderUninitialized:
+ handleSimpleAttribute<LoaderUninitializedAttr>(S, D, AL);
+ break;
+
case ParsedAttr::AT_ObjCExternallyRetained:
handleObjCExternallyRetainedAttr(S, D, AL);
break;
@@ -7445,12 +7431,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleMSAllocatorAttr(S, D, AL);
break;
- case ParsedAttr::AT_ArmMveAlias:
- handleArmMveAliasAttr(S, D, AL);
+ case ParsedAttr::AT_ArmBuiltinAlias:
+ handleArmBuiltinAliasAttr(S, D, AL);
break;
case ParsedAttr::AT_AcquireHandle:
- handeAcquireHandleAttr(S, D, AL);
+ handleAcquireHandleAttr(S, D, AL);
break;
case ParsedAttr::AT_ReleaseHandle:
@@ -7782,534 +7768,6 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &DD,
DD.Triggered = true;
}
-static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
- const Decl *D) {
- // Check each AvailabilityAttr to find the one for this platform.
- for (const auto *A : D->attrs()) {
- if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
- // FIXME: this is copied from CheckAvailability. We should try to
- // de-duplicate.
-
- // Check if this is an App Extension "platform", and if so chop off
- // the suffix for matching with the actual platform.
- StringRef ActualPlatform = Avail->getPlatform()->getName();
- StringRef RealizedPlatform = ActualPlatform;
- if (Context.getLangOpts().AppExt) {
- size_t suffix = RealizedPlatform.rfind("_app_extension");
- if (suffix != StringRef::npos)
- RealizedPlatform = RealizedPlatform.slice(0, suffix);
- }
-
- StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
-
- // Match the platform name.
- if (RealizedPlatform == TargetPlatform)
- return Avail;
- }
- }
- return nullptr;
-}
-
-/// The diagnostic we should emit for \c D, and the declaration that
-/// originated it, or \c AR_Available.
-///
-/// \param D The declaration to check.
-/// \param Message If non-null, this will be populated with the message from
-/// the availability attribute that is selected.
-/// \param ClassReceiver If we're checking the method of a class message
-/// send, the class. Otherwise nullptr.
-static std::pair<AvailabilityResult, const NamedDecl *>
-ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
- std::string *Message,
- ObjCInterfaceDecl *ClassReceiver) {
- AvailabilityResult Result = D->getAvailability(Message);
-
- // For typedefs, if the typedef declaration appears available look
- // to the underlying type to see if it is more restrictive.
- while (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
- if (Result == AR_Available) {
- if (const auto *TT = TD->getUnderlyingType()->getAs<TagType>()) {
- D = TT->getDecl();
- Result = D->getAvailability(Message);
- continue;
- }
- }
- break;
- }
-
- // Forward class declarations get their attributes from their definition.
- if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
- if (IDecl->getDefinition()) {
- D = IDecl->getDefinition();
- Result = D->getAvailability(Message);
- }
- }
-
- if (const auto *ECD = dyn_cast<EnumConstantDecl>(D))
- if (Result == AR_Available) {
- const DeclContext *DC = ECD->getDeclContext();
- if (const auto *TheEnumDecl = dyn_cast<EnumDecl>(DC)) {
- Result = TheEnumDecl->getAvailability(Message);
- D = TheEnumDecl;
- }
- }
-
- // For +new, infer availability from -init.
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (S.NSAPIObj && ClassReceiver) {
- ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
- S.NSAPIObj->getInitSelector());
- if (Init && Result == AR_Available && MD->isClassMethod() &&
- MD->getSelector() == S.NSAPIObj->getNewSelector() &&
- MD->definedInNSObject(S.getASTContext())) {
- Result = Init->getAvailability(Message);
- D = Init;
- }
- }
- }
-
- return {Result, D};
-}
-
-
-/// whether we should emit a diagnostic for \c K and \c DeclVersion in
-/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
-/// in a deprecated context, but not the other way around.
-static bool
-ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
- VersionTuple DeclVersion, Decl *Ctx,
- const NamedDecl *OffendingDecl) {
- assert(K != AR_Available && "Expected an unavailable declaration here!");
-
- // Checks if we should emit the availability diagnostic in the context of C.
- auto CheckContext = [&](const Decl *C) {
- if (K == AR_NotYetIntroduced) {
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
- if (AA->getIntroduced() >= DeclVersion)
- return true;
- } else if (K == AR_Deprecated) {
- if (C->isDeprecated())
- return true;
- } else if (K == AR_Unavailable) {
- // It is perfectly fine to refer to an 'unavailable' Objective-C method
- // when it is referenced from within the @implementation itself. In this
- // context, we interpret unavailable as a form of access control.
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
- if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
- if (MD->getClassInterface() == Impl->getClassInterface())
- return true;
- }
- }
- }
-
- if (C->isUnavailable())
- return true;
- return false;
- };
-
- do {
- if (CheckContext(Ctx))
- return false;
-
- // An implementation implicitly has the availability of the interface.
- // Unless it is "+load" method.
- if (const auto *MethodD = dyn_cast<ObjCMethodDecl>(Ctx))
- if (MethodD->isClassMethod() &&
- MethodD->getSelector().getAsString() == "load")
- return true;
-
- if (const auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
- if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
- if (CheckContext(Interface))
- return false;
- }
- // A category implicitly has the availability of the interface.
- else if (const auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
- if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
- if (CheckContext(Interface))
- return false;
- } while ((Ctx = cast_or_null<Decl>(Ctx->getDeclContext())));
-
- return true;
-}
-
-static bool
-shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
- const VersionTuple &DeploymentVersion,
- const VersionTuple &DeclVersion) {
- const auto &Triple = Context.getTargetInfo().getTriple();
- VersionTuple ForceAvailabilityFromVersion;
- switch (Triple.getOS()) {
- case llvm::Triple::IOS:
- case llvm::Triple::TvOS:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
- break;
- case llvm::Triple::WatchOS:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/4);
- break;
- case llvm::Triple::Darwin:
- case llvm::Triple::MacOSX:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
- break;
- default:
- // New targets should always warn about availability.
- return Triple.getVendor() == llvm::Triple::Apple;
- }
- return DeploymentVersion >= ForceAvailabilityFromVersion ||
- DeclVersion >= ForceAvailabilityFromVersion;
-}
-
-static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
- for (Decl *Ctx = OrigCtx; Ctx;
- Ctx = cast_or_null<Decl>(Ctx->getDeclContext())) {
- if (isa<TagDecl>(Ctx) || isa<FunctionDecl>(Ctx) || isa<ObjCMethodDecl>(Ctx))
- return cast<NamedDecl>(Ctx);
- if (auto *CD = dyn_cast<ObjCContainerDecl>(Ctx)) {
- if (auto *Imp = dyn_cast<ObjCImplDecl>(Ctx))
- return Imp->getClassInterface();
- return CD;
- }
- }
-
- return dyn_cast<NamedDecl>(OrigCtx);
-}
-
-namespace {
-
-struct AttributeInsertion {
- StringRef Prefix;
- SourceLocation Loc;
- StringRef Suffix;
-
- static AttributeInsertion createInsertionAfter(const NamedDecl *D) {
- return {" ", D->getEndLoc(), ""};
- }
- static AttributeInsertion createInsertionAfter(SourceLocation Loc) {
- return {" ", Loc, ""};
- }
- static AttributeInsertion createInsertionBefore(const NamedDecl *D) {
- return {"", D->getBeginLoc(), "\n"};
- }
-};
-
-} // end anonymous namespace
-
-/// Tries to parse a string as ObjC method name.
-///
-/// \param Name The string to parse. Expected to originate from availability
-/// attribute argument.
-/// \param SlotNames The vector that will be populated with slot names. In case
-/// of unsuccessful parsing can contain invalid data.
-/// \returns A number of method parameters if parsing was successful, None
-/// otherwise.
-static Optional<unsigned>
-tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
- const LangOptions &LangOpts) {
- // Accept replacements starting with - or + as valid ObjC method names.
- if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
- Name = Name.drop_front(1);
- if (Name.empty())
- return None;
- Name.split(SlotNames, ':');
- unsigned NumParams;
- if (Name.back() == ':') {
- // Remove an empty string at the end that doesn't represent any slot.
- SlotNames.pop_back();
- NumParams = SlotNames.size();
- } else {
- if (SlotNames.size() != 1)
- // Not a valid method name, just a colon-separated string.
- return None;
- NumParams = 0;
- }
- // Verify all slot names are valid.
- bool AllowDollar = LangOpts.DollarIdents;
- for (StringRef S : SlotNames) {
- if (S.empty())
- continue;
- if (!isValidIdentifier(S, AllowDollar))
- return None;
- }
- return NumParams;
-}
-
-/// Returns a source location in which it's appropriate to insert a new
-/// attribute for the given declaration \D.
-static Optional<AttributeInsertion>
-createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
- const LangOptions &LangOpts) {
- if (isa<ObjCPropertyDecl>(D))
- return AttributeInsertion::createInsertionAfter(D);
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (MD->hasBody())
- return None;
- return AttributeInsertion::createInsertionAfter(D);
- }
- if (const auto *TD = dyn_cast<TagDecl>(D)) {
- SourceLocation Loc =
- Lexer::getLocForEndOfToken(TD->getInnerLocStart(), 0, SM, LangOpts);
- if (Loc.isInvalid())
- return None;
- // Insert after the 'struct'/whatever keyword.
- return AttributeInsertion::createInsertionAfter(Loc);
- }
- return AttributeInsertion::createInsertionBefore(D);
-}
-
-/// Actually emit an availability diagnostic for a reference to an unavailable
-/// decl.
-///
-/// \param Ctx The context that the reference occurred in
-/// \param ReferringDecl The exact declaration that was referenced.
-/// \param OffendingDecl A related decl to \c ReferringDecl that has an
-/// availability attribute corresponding to \c K attached to it. Note that this
-/// may not be the same as ReferringDecl, i.e. if an EnumDecl is annotated and
-/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
-/// and OffendingDecl is the EnumDecl.
-static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
- Decl *Ctx, const NamedDecl *ReferringDecl,
- const NamedDecl *OffendingDecl,
- StringRef Message,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
- // Diagnostics for deprecated or unavailable.
- unsigned diag, diag_message, diag_fwdclass_message;
- unsigned diag_available_here = diag::note_availability_specified_here;
- SourceLocation NoteLocation = OffendingDecl->getLocation();
-
- // Matches 'diag::note_property_attribute' options.
- unsigned property_note_select;
-
- // Matches diag::note_availability_specified_here.
- unsigned available_here_select_kind;
-
- VersionTuple DeclVersion;
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
- DeclVersion = AA->getIntroduced();
-
- if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
- OffendingDecl))
- return;
-
- SourceLocation Loc = Locs.front();
-
- // The declaration can have multiple availability attributes, we are looking
- // at one of them.
- const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
- if (A && A->isInherited()) {
- for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
- Redecl = Redecl->getPreviousDecl()) {
- const AvailabilityAttr *AForRedecl =
- getAttrForPlatform(S.Context, Redecl);
- if (AForRedecl && !AForRedecl->isInherited()) {
- // If D is a declaration with inherited attributes, the note should
- // point to the declaration with actual attributes.
- NoteLocation = Redecl->getLocation();
- break;
- }
- }
- }
-
- switch (K) {
- case AR_NotYetIntroduced: {
- // We would like to emit the diagnostic even if -Wunguarded-availability is
- // not specified for deployment targets >= to iOS 11 or equivalent or
- // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
- // later.
- const AvailabilityAttr *AA =
- getAttrForPlatform(S.getASTContext(), OffendingDecl);
- VersionTuple Introduced = AA->getIntroduced();
-
- bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
- S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
- Introduced);
- unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
-
- std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
- S.getASTContext().getTargetInfo().getPlatformName());
-
- S.Diag(Loc, Warning) << OffendingDecl << PlatformName
- << Introduced.getAsString();
-
- S.Diag(OffendingDecl->getLocation(),
- diag::note_partial_availability_specified_here)
- << OffendingDecl << PlatformName << Introduced.getAsString()
- << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
-
- if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
- if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
- if (TD->getDeclName().isEmpty()) {
- S.Diag(TD->getLocation(),
- diag::note_decl_unguarded_availability_silence)
- << /*Anonymous*/ 1 << TD->getKindName();
- return;
- }
- auto FixitNoteDiag =
- S.Diag(Enclosing->getLocation(),
- diag::note_decl_unguarded_availability_silence)
- << /*Named*/ 0 << Enclosing;
- // Don't offer a fixit for declarations with availability attributes.
- if (Enclosing->hasAttr<AvailabilityAttr>())
- return;
- if (!S.getPreprocessor().isMacroDefined("API_AVAILABLE"))
- return;
- Optional<AttributeInsertion> Insertion = createAttributeInsertion(
- Enclosing, S.getSourceManager(), S.getLangOpts());
- if (!Insertion)
- return;
- std::string PlatformName =
- AvailabilityAttr::getPlatformNameSourceSpelling(
- S.getASTContext().getTargetInfo().getPlatformName())
- .lower();
- std::string Introduced =
- OffendingDecl->getVersionIntroduced().getAsString();
- FixitNoteDiag << FixItHint::CreateInsertion(
- Insertion->Loc,
- (llvm::Twine(Insertion->Prefix) + "API_AVAILABLE(" + PlatformName +
- "(" + Introduced + "))" + Insertion->Suffix)
- .str());
- }
- return;
- }
- case AR_Deprecated:
- diag = !ObjCPropertyAccess ? diag::warn_deprecated
- : diag::warn_property_method_deprecated;
- diag_message = diag::warn_deprecated_message;
- diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
- property_note_select = /* deprecated */ 0;
- available_here_select_kind = /* deprecated */ 2;
- if (const auto *AL = OffendingDecl->getAttr<DeprecatedAttr>())
- NoteLocation = AL->getLocation();
- break;
-
- case AR_Unavailable:
- diag = !ObjCPropertyAccess ? diag::err_unavailable
- : diag::err_property_method_unavailable;
- diag_message = diag::err_unavailable_message;
- diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
- property_note_select = /* unavailable */ 1;
- available_here_select_kind = /* unavailable */ 0;
-
- if (auto AL = OffendingDecl->getAttr<UnavailableAttr>()) {
- if (AL->isImplicit() && AL->getImplicitReason()) {
- // Most of these failures are due to extra restrictions in ARC;
- // reflect that in the primary diagnostic when applicable.
- auto flagARCError = [&] {
- if (S.getLangOpts().ObjCAutoRefCount &&
- S.getSourceManager().isInSystemHeader(
- OffendingDecl->getLocation()))
- diag = diag::err_unavailable_in_arc;
- };
-
- switch (AL->getImplicitReason()) {
- case UnavailableAttr::IR_None: break;
-
- case UnavailableAttr::IR_ARCForbiddenType:
- flagARCError();
- diag_available_here = diag::note_arc_forbidden_type;
- break;
-
- case UnavailableAttr::IR_ForbiddenWeak:
- if (S.getLangOpts().ObjCWeakRuntime)
- diag_available_here = diag::note_arc_weak_disabled;
- else
- diag_available_here = diag::note_arc_weak_no_runtime;
- break;
-
- case UnavailableAttr::IR_ARCForbiddenConversion:
- flagARCError();
- diag_available_here = diag::note_performs_forbidden_arc_conversion;
- break;
-
- case UnavailableAttr::IR_ARCInitReturnsUnrelated:
- flagARCError();
- diag_available_here = diag::note_arc_init_returns_unrelated;
- break;
-
- case UnavailableAttr::IR_ARCFieldWithOwnership:
- flagARCError();
- diag_available_here = diag::note_arc_field_with_ownership;
- break;
- }
- }
- }
- break;
-
- case AR_Available:
- llvm_unreachable("Warning for availability of available declaration?");
- }
-
- SmallVector<FixItHint, 12> FixIts;
- if (K == AR_Deprecated) {
- StringRef Replacement;
- if (auto AL = OffendingDecl->getAttr<DeprecatedAttr>())
- Replacement = AL->getReplacement();
- if (auto AL = getAttrForPlatform(S.Context, OffendingDecl))
- Replacement = AL->getReplacement();
-
- CharSourceRange UseRange;
- if (!Replacement.empty())
- UseRange =
- CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
- if (UseRange.isValid()) {
- if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
- Selector Sel = MethodDecl->getSelector();
- SmallVector<StringRef, 12> SelectorSlotNames;
- Optional<unsigned> NumParams = tryParseObjCMethodName(
- Replacement, SelectorSlotNames, S.getLangOpts());
- if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
- assert(SelectorSlotNames.size() == Locs.size());
- for (unsigned I = 0; I < Locs.size(); ++I) {
- if (!Sel.getNameForSlot(I).empty()) {
- CharSourceRange NameRange = CharSourceRange::getCharRange(
- Locs[I], S.getLocForEndOfToken(Locs[I]));
- FixIts.push_back(FixItHint::CreateReplacement(
- NameRange, SelectorSlotNames[I]));
- } else
- FixIts.push_back(
- FixItHint::CreateInsertion(Locs[I], SelectorSlotNames[I]));
- }
- } else
- FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
- } else
- FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
- }
- }
-
- if (!Message.empty()) {
- S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
- if (ObjCProperty)
- S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
- } else if (!UnknownObjCClass) {
- S.Diag(Loc, diag) << ReferringDecl << FixIts;
- if (ObjCProperty)
- S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
- } else {
- S.Diag(Loc, diag_fwdclass_message) << ReferringDecl << FixIts;
- S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
- }
-
- S.Diag(NoteLocation, diag_available_here)
- << OffendingDecl << available_here_select_kind;
-}
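// Illustrative sketch only, not part of the patch: the kind of user code that
// reaches the AR_Deprecated path above. The names OldAPI/NewAPI are made up;
// the second argument of the deprecated attribute is the replacement string
// that DoEmitAvailabilityWarning turns into a fix-it over the use range.
void NewAPI(int);
void OldAPI(int) __attribute__((deprecated("use NewAPI instead", "NewAPI")));

void caller() {
  OldAPI(1); // warning: 'OldAPI' is deprecated: use NewAPI instead
             // fix-it: replace 'OldAPI' with 'NewAPI'
}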
-
-static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
- Decl *Ctx) {
- assert(DD.Kind == DelayedDiagnostic::Availability &&
- "Expected an availability diagnostic here");
-
- DD.Triggered = true;
- DoEmitAvailabilityWarning(
- S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
- DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(),
- DD.getAvailabilitySelectorLocs(), DD.getUnknownObjCClass(),
- DD.getObjCProperty(), false);
-}
void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
assert(DelayedDiagnostics.getCurrentPool());
@@ -8343,7 +7801,7 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
// Don't bother giving deprecation/unavailable diagnostics if
// the decl is invalid.
if (!decl->isInvalidDecl())
- handleDelayedAvailabilityCheck(*this, diag, decl);
+ handleDelayedAvailabilityCheck(diag, decl);
break;
case DelayedDiagnostic::Access:
@@ -8373,415 +7831,3 @@ void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
assert(curPool && "re-emitting in undelayed context not supported");
curPool->steal(pool);
}
-
-static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
- const NamedDecl *ReferringDecl,
- const NamedDecl *OffendingDecl,
- StringRef Message,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
- // Delay if we're currently parsing a declaration.
- if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
- S.DelayedDiagnostics.add(
- DelayedDiagnostic::makeAvailability(
- AR, Locs, ReferringDecl, OffendingDecl, UnknownObjCClass,
- ObjCProperty, Message, ObjCPropertyAccess));
- return;
- }
-
- Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
- DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
- Message, Locs, UnknownObjCClass, ObjCProperty,
- ObjCPropertyAccess);
-}
-
-namespace {
-
-/// Returns true if the given statement can be a body-like child of \p Parent.
-bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
- switch (Parent->getStmtClass()) {
- case Stmt::IfStmtClass:
- return cast<IfStmt>(Parent)->getThen() == S ||
- cast<IfStmt>(Parent)->getElse() == S;
- case Stmt::WhileStmtClass:
- return cast<WhileStmt>(Parent)->getBody() == S;
- case Stmt::DoStmtClass:
- return cast<DoStmt>(Parent)->getBody() == S;
- case Stmt::ForStmtClass:
- return cast<ForStmt>(Parent)->getBody() == S;
- case Stmt::CXXForRangeStmtClass:
- return cast<CXXForRangeStmt>(Parent)->getBody() == S;
- case Stmt::ObjCForCollectionStmtClass:
- return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
- case Stmt::CaseStmtClass:
- case Stmt::DefaultStmtClass:
- return cast<SwitchCase>(Parent)->getSubStmt() == S;
- default:
- return false;
- }
-}
-
-class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
- const Stmt *Target;
-
-public:
- bool VisitStmt(Stmt *S) { return S != Target; }
-
- /// Returns true if the given statement is present in the given declaration.
- static bool isContained(const Stmt *Target, const Decl *D) {
- StmtUSEFinder Visitor;
- Visitor.Target = Target;
- return !Visitor.TraverseDecl(const_cast<Decl *>(D));
- }
-};
-
-/// Traverses the AST and finds the last statement that used a given
-/// declaration.
-class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
- const Decl *D;
-
-public:
- bool VisitDeclRefExpr(DeclRefExpr *DRE) {
- if (DRE->getDecl() == D)
- return false;
- return true;
- }
-
- static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
- const CompoundStmt *Scope) {
- LastDeclUSEFinder Visitor;
- Visitor.D = D;
- for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
- const Stmt *S = *I;
- if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
- return S;
- }
- return nullptr;
- }
-};
-
-/// This class implements -Wunguarded-availability.
-///
-/// This is done with a traversal of the AST of a function that makes reference
-/// to a partially available declaration. Whenever we encounter an \c if of the
-/// form: \c if(@available(...)), we use the version from the condition to visit
-/// the then statement.
-class DiagnoseUnguardedAvailability
- : public RecursiveASTVisitor<DiagnoseUnguardedAvailability> {
- typedef RecursiveASTVisitor<DiagnoseUnguardedAvailability> Base;
-
- Sema &SemaRef;
- Decl *Ctx;
-
- /// Stack of potentially nested 'if (@available(...))'s.
- SmallVector<VersionTuple, 8> AvailabilityStack;
- SmallVector<const Stmt *, 16> StmtStack;
-
- void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range,
- ObjCInterfaceDecl *ClassReceiver = nullptr);
-
-public:
- DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
- : SemaRef(SemaRef), Ctx(Ctx) {
- AvailabilityStack.push_back(
- SemaRef.Context.getTargetInfo().getPlatformMinVersion());
- }
-
- bool TraverseDecl(Decl *D) {
- // Avoid visiting nested functions to prevent duplicate warnings.
- if (!D || isa<FunctionDecl>(D))
- return true;
- return Base::TraverseDecl(D);
- }
-
- bool TraverseStmt(Stmt *S) {
- if (!S)
- return true;
- StmtStack.push_back(S);
- bool Result = Base::TraverseStmt(S);
- StmtStack.pop_back();
- return Result;
- }
-
- void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
-
- bool TraverseIfStmt(IfStmt *If);
-
- bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
-
- // for 'case X:' statements, don't bother looking at the 'X'; it can't lead
- // to any useful diagnostics.
- bool TraverseCaseStmt(CaseStmt *CS) { return TraverseStmt(CS->getSubStmt()); }
-
- bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
- if (PRE->isClassReceiver())
- DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
- return true;
- }
-
- bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
- if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
- ObjCInterfaceDecl *ID = nullptr;
- QualType ReceiverTy = Msg->getClassReceiver();
- if (!ReceiverTy.isNull() && ReceiverTy->getAsObjCInterfaceType())
- ID = ReceiverTy->getAsObjCInterfaceType()->getInterface();
-
- DiagnoseDeclAvailability(
- D, SourceRange(Msg->getSelectorStartLoc(), Msg->getEndLoc()), ID);
- }
- return true;
- }
-
- bool VisitDeclRefExpr(DeclRefExpr *DRE) {
- DiagnoseDeclAvailability(DRE->getDecl(),
- SourceRange(DRE->getBeginLoc(), DRE->getEndLoc()));
- return true;
- }
-
- bool VisitMemberExpr(MemberExpr *ME) {
- DiagnoseDeclAvailability(ME->getMemberDecl(),
- SourceRange(ME->getBeginLoc(), ME->getEndLoc()));
- return true;
- }
-
- bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
- SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
- << (!SemaRef.getLangOpts().ObjC);
- return true;
- }
-
- bool VisitTypeLoc(TypeLoc Ty);
-};
-
-void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
- NamedDecl *D, SourceRange Range, ObjCInterfaceDecl *ReceiverClass) {
- AvailabilityResult Result;
- const NamedDecl *OffendingDecl;
- std::tie(Result, OffendingDecl) =
- ShouldDiagnoseAvailabilityOfDecl(SemaRef, D, nullptr, ReceiverClass);
- if (Result != AR_Available) {
- // All other diagnostic kinds have already been handled in
- // DiagnoseAvailabilityOfDecl.
- if (Result != AR_NotYetIntroduced)
- return;
-
- const AvailabilityAttr *AA =
- getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
- VersionTuple Introduced = AA->getIntroduced();
-
- if (AvailabilityStack.back() >= Introduced)
- return;
-
- // If the context of this function is less available than D, we should not
- // emit a diagnostic.
- if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
- OffendingDecl))
- return;
-
- // We would like to emit the diagnostic even if -Wunguarded-availability is
- // not specified for deployment targets >= to iOS 11 or equivalent or
- // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
- // later.
- unsigned DiagKind =
- shouldDiagnoseAvailabilityByDefault(
- SemaRef.Context,
- SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
- ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
-
- std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
- SemaRef.getASTContext().getTargetInfo().getPlatformName());
-
- SemaRef.Diag(Range.getBegin(), DiagKind)
- << Range << D << PlatformName << Introduced.getAsString();
-
- SemaRef.Diag(OffendingDecl->getLocation(),
- diag::note_partial_availability_specified_here)
- << OffendingDecl << PlatformName << Introduced.getAsString()
- << SemaRef.Context.getTargetInfo()
- .getPlatformMinVersion()
- .getAsString();
-
- auto FixitDiag =
- SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
- << Range << D
- << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
- : /*__builtin_available*/ 1);
-
- // Find the statement which should be enclosed in the if @available check.
- if (StmtStack.empty())
- return;
- const Stmt *StmtOfUse = StmtStack.back();
- const CompoundStmt *Scope = nullptr;
- for (const Stmt *S : llvm::reverse(StmtStack)) {
- if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
- Scope = CS;
- break;
- }
- if (isBodyLikeChildStmt(StmtOfUse, S)) {
- // The declaration won't be seen outside of the statement, so we don't
- // have to wrap the uses of any declared variables in if (@available).
- // Therefore we can avoid setting Scope here.
- break;
- }
- StmtOfUse = S;
- }
- const Stmt *LastStmtOfUse = nullptr;
- if (isa<DeclStmt>(StmtOfUse) && Scope) {
- for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
- if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
- LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
- break;
- }
- }
- }
-
- const SourceManager &SM = SemaRef.getSourceManager();
- SourceLocation IfInsertionLoc =
- SM.getExpansionLoc(StmtOfUse->getBeginLoc());
- SourceLocation StmtEndLoc =
- SM.getExpansionRange(
- (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getEndLoc())
- .getEnd();
- if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
- return;
-
- StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
- const char *ExtraIndentation = " ";
- std::string FixItString;
- llvm::raw_string_ostream FixItOS(FixItString);
- FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
- : "__builtin_available")
- << "("
- << AvailabilityAttr::getPlatformNameSourceSpelling(
- SemaRef.getASTContext().getTargetInfo().getPlatformName())
- << " " << Introduced.getAsString() << ", *)) {\n"
- << Indentation << ExtraIndentation;
- FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
- SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
- StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
- /*SkipTrailingWhitespaceAndNewLine=*/false);
- if (ElseInsertionLoc.isInvalid())
- ElseInsertionLoc =
- Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
- FixItOS.str().clear();
- FixItOS << "\n"
- << Indentation << "} else {\n"
- << Indentation << ExtraIndentation
- << "// Fallback on earlier versions\n"
- << Indentation << "}";
- FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
- }
-}
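// Illustrative sketch only, not part of the patch: the guard shape that the
// fix-it built above inserts around an unguarded use. The function name and
// versions are hypothetical, and the deployment target is assumed to be older
// than the 'introduced' version so the warning actually fires.
void PartiallyAvailable(void) __attribute__((availability(macos, introduced=10.15)));

void use(void) {
  if (__builtin_available(macOS 10.15, *)) {
    PartiallyAvailable();
  } else {
    // Fallback on earlier versions
  }
}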
-
-bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
- const Type *TyPtr = Ty.getTypePtr();
- SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
-
- if (Range.isInvalid())
- return true;
-
- if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
- TagDecl *TD = TT->getDecl();
- DiagnoseDeclAvailability(TD, Range);
-
- } else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
- TypedefNameDecl *D = TD->getDecl();
- DiagnoseDeclAvailability(D, Range);
-
- } else if (const auto *ObjCO = dyn_cast<ObjCObjectType>(TyPtr)) {
- if (NamedDecl *D = ObjCO->getInterface())
- DiagnoseDeclAvailability(D, Range);
- }
-
- return true;
-}
-
-bool DiagnoseUnguardedAvailability::TraverseIfStmt(IfStmt *If) {
- VersionTuple CondVersion;
- if (auto *E = dyn_cast<ObjCAvailabilityCheckExpr>(If->getCond())) {
- CondVersion = E->getVersion();
-
- // If we're using the '*' case here or if this check is redundant, then we
- // use the enclosing version to check both branches.
- if (CondVersion.empty() || CondVersion <= AvailabilityStack.back())
- return TraverseStmt(If->getThen()) && TraverseStmt(If->getElse());
- } else {
- // This isn't an availability checking 'if', we can just continue.
- return Base::TraverseIfStmt(If);
- }
-
- AvailabilityStack.push_back(CondVersion);
- bool ShouldContinue = TraverseStmt(If->getThen());
- AvailabilityStack.pop_back();
-
- return ShouldContinue && TraverseStmt(If->getElse());
-}
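// Illustrative sketch only: how TraverseIfStmt treats nested checks. An inner
// check whose version is '*' or not newer than the enclosing one is redundant,
// so both of its branches are visited against the enclosing version; a strictly
// newer version is pushed for the then-branch only.
void nested_checks(void) {
  if (__builtin_available(macOS 10.15, *)) {
    if (__builtin_available(macOS 10.14, *)) { // redundant: 10.14 <= 10.15, so no
      // both branches here are checked         // new version is pushed on the stack
    }
  }
}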
-
-} // end anonymous namespace
-
-void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
- Stmt *Body = nullptr;
-
- if (auto *FD = D->getAsFunction()) {
- // FIXME: We only examine the pattern decl for availability violations now,
- // but we should also examine instantiated templates.
- if (FD->isTemplateInstantiation())
- return;
-
- Body = FD->getBody();
- } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
- Body = MD->getBody();
- else if (auto *BD = dyn_cast<BlockDecl>(D))
- Body = BD->getBody();
-
- assert(Body && "Need a body here!");
-
- DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
-}
-
-void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- bool ObjCPropertyAccess,
- bool AvoidPartialAvailabilityChecks,
- ObjCInterfaceDecl *ClassReceiver) {
- std::string Message;
- AvailabilityResult Result;
- const NamedDecl* OffendingDecl;
- // See if this declaration is unavailable, deprecated, or partial.
- std::tie(Result, OffendingDecl) =
- ShouldDiagnoseAvailabilityOfDecl(*this, D, &Message, ClassReceiver);
- if (Result == AR_Available)
- return;
-
- if (Result == AR_NotYetIntroduced) {
- if (AvoidPartialAvailabilityChecks)
- return;
-
- // We need to know the @available context in the current function to
- // diagnose this use, let DiagnoseUnguardedAvailabilityViolations do that
- // when we're done parsing the current function.
- if (getCurFunctionOrMethodDecl()) {
- getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
- return;
- } else if (getCurBlock() || getCurLambda()) {
- getCurFunction()->HasPotentialAvailabilityViolations = true;
- return;
- }
- }
-
- const ObjCPropertyDecl *ObjCPDecl = nullptr;
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
- AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
- if (PDeclResult == Result)
- ObjCPDecl = PD;
- }
- }
-
- EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Locs,
- UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
-}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index 831e55046e80..22bf35dbd0cb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -38,8 +38,9 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include <map>
#include <set>
@@ -51,102 +52,109 @@ using namespace clang;
//===----------------------------------------------------------------------===//
namespace {
- /// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses
- /// the default argument of a parameter to determine whether it
- /// contains any ill-formed subexpressions. For example, this will
- /// diagnose the use of local variables or parameters within the
- /// default argument expression.
- class CheckDefaultArgumentVisitor
- : public StmtVisitor<CheckDefaultArgumentVisitor, bool> {
- Expr *DefaultArg;
- Sema *S;
+/// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses
+/// the default argument of a parameter to determine whether it
+/// contains any ill-formed subexpressions. For example, this will
+/// diagnose the use of local variables or parameters within the
+/// default argument expression.
+class CheckDefaultArgumentVisitor
+ : public ConstStmtVisitor<CheckDefaultArgumentVisitor, bool> {
+ Sema &S;
+ const Expr *DefaultArg;
- public:
- CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
- : DefaultArg(defarg), S(s) {}
-
- bool VisitExpr(Expr *Node);
- bool VisitDeclRefExpr(DeclRefExpr *DRE);
- bool VisitCXXThisExpr(CXXThisExpr *ThisE);
- bool VisitLambdaExpr(LambdaExpr *Lambda);
- bool VisitPseudoObjectExpr(PseudoObjectExpr *POE);
- };
+public:
+ CheckDefaultArgumentVisitor(Sema &S, const Expr *DefaultArg)
+ : S(S), DefaultArg(DefaultArg) {}
+
+ bool VisitExpr(const Expr *Node);
+ bool VisitDeclRefExpr(const DeclRefExpr *DRE);
+ bool VisitCXXThisExpr(const CXXThisExpr *ThisE);
+ bool VisitLambdaExpr(const LambdaExpr *Lambda);
+ bool VisitPseudoObjectExpr(const PseudoObjectExpr *POE);
+};
- /// VisitExpr - Visit all of the children of this expression.
- bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
- bool IsInvalid = false;
- for (Stmt *SubStmt : Node->children())
- IsInvalid |= Visit(SubStmt);
- return IsInvalid;
- }
-
- /// VisitDeclRefExpr - Visit a reference to a declaration, to
- /// determine whether this declaration can be used in the default
- /// argument expression.
- bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) {
- NamedDecl *Decl = DRE->getDecl();
- if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) {
- // C++ [dcl.fct.default]p9
- // Default arguments are evaluated each time the function is
- // called. The order of evaluation of function arguments is
- // unspecified. Consequently, parameters of a function shall not
- // be used in default argument expressions, even if they are not
- // evaluated. Parameters of a function declared before a default
- // argument expression are in scope and can hide namespace and
- // class member names.
- return S->Diag(DRE->getBeginLoc(),
- diag::err_param_default_argument_references_param)
+/// VisitExpr - Visit all of the children of this expression.
+bool CheckDefaultArgumentVisitor::VisitExpr(const Expr *Node) {
+ bool IsInvalid = false;
+ for (const Stmt *SubStmt : Node->children())
+ IsInvalid |= Visit(SubStmt);
+ return IsInvalid;
+}
+
+/// VisitDeclRefExpr - Visit a reference to a declaration, to
+/// determine whether this declaration can be used in the default
+/// argument expression.
+bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(const DeclRefExpr *DRE) {
+ const NamedDecl *Decl = DRE->getDecl();
+ if (const auto *Param = dyn_cast<ParmVarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p9:
+ // [...] parameters of a function shall not be used in default
+ // argument expressions, even if they are not evaluated. [...]
+ //
+ // C++17 [dcl.fct.default]p9 (by CWG 2082):
+ // [...] A parameter shall not appear as a potentially-evaluated
+ // expression in a default argument. [...]
+ //
+ if (DRE->isNonOdrUse() != NOUR_Unevaluated)
+ return S.Diag(DRE->getBeginLoc(),
+ diag::err_param_default_argument_references_param)
<< Param->getDeclName() << DefaultArg->getSourceRange();
- } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
- // C++ [dcl.fct.default]p7
- // Local variables shall not be used in default argument
- // expressions.
- if (VDecl->isLocalVarDecl())
- return S->Diag(DRE->getBeginLoc(),
- diag::err_param_default_argument_references_local)
- << VDecl->getDeclName() << DefaultArg->getSourceRange();
- }
-
- return false;
- }
-
- /// VisitCXXThisExpr - Visit a C++ "this" expression.
- bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) {
- // C++ [dcl.fct.default]p8:
- // The keyword this shall not be used in a default argument of a
- // member function.
- return S->Diag(ThisE->getBeginLoc(),
- diag::err_param_default_argument_references_this)
- << ThisE->getSourceRange();
+ } else if (const auto *VDecl = dyn_cast<VarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p7:
+ // Local variables shall not be used in default argument
+ // expressions.
+ //
+ // C++17 [dcl.fct.default]p7 (by CWG 2082):
+ // A local variable shall not appear as a potentially-evaluated
+ // expression in a default argument.
+ //
+ // C++20 [dcl.fct.default]p7 (DR as part of P0588R1, see also CWG 2346):
+ // Note: A local variable cannot be odr-used (6.3) in a default argument.
+ //
+ if (VDecl->isLocalVarDecl() && !DRE->isNonOdrUse())
+ return S.Diag(DRE->getBeginLoc(),
+ diag::err_param_default_argument_references_local)
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
}
- bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
- bool Invalid = false;
- for (PseudoObjectExpr::semantics_iterator
- i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
- Expr *E = *i;
+ return false;
+}
- // Look through bindings.
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
- E = OVE->getSourceExpr();
- assert(E && "pseudo-object binding without source expression?");
- }
+/// VisitCXXThisExpr - Visit a C++ "this" expression.
+bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(const CXXThisExpr *ThisE) {
+ // C++ [dcl.fct.default]p8:
+ // The keyword this shall not be used in a default argument of a
+ // member function.
+ return S.Diag(ThisE->getBeginLoc(),
+ diag::err_param_default_argument_references_this)
+ << ThisE->getSourceRange();
+}
- Invalid |= Visit(E);
+bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(
+ const PseudoObjectExpr *POE) {
+ bool Invalid = false;
+ for (const Expr *E : POE->semantics()) {
+ // Look through bindings.
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ E = OVE->getSourceExpr();
+ assert(E && "pseudo-object binding without source expression?");
}
- return Invalid;
+
+ Invalid |= Visit(E);
}
+ return Invalid;
+}
- bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) {
- // C++11 [expr.lambda.prim]p13:
- // A lambda-expression appearing in a default argument shall not
- // implicitly or explicitly capture any entity.
- if (Lambda->capture_begin() == Lambda->capture_end())
- return false;
+bool CheckDefaultArgumentVisitor::VisitLambdaExpr(const LambdaExpr *Lambda) {
+ // C++11 [expr.lambda.prim]p13:
+ // A lambda-expression appearing in a default argument shall not
+ // implicitly or explicitly capture any entity.
+ if (Lambda->capture_begin() == Lambda->capture_end())
+ return false;
- return S->Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
- }
+ return S.Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
}
+} // namespace
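// Illustrative sketch only: the cases the rewritten visitor distinguishes.
// Unevaluated references to parameters and locals are now tolerated (CWG 2082);
// odr-uses, 'this', and capturing lambdas are still rejected.
void f(int a, int b = sizeof(a));        // OK: 'a' only appears in an unevaluated operand
void g(int a, int b = a);                // error: parameter used in default argument
void h(int n = [] { return 42; }());     // OK: the lambda captures nothing
void k(int n = [m = 1] { return m; }()); // error: lambda in default argument captures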
void
Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
@@ -246,14 +254,12 @@ void Sema::ImplicitExceptionSpecification::CalledStmt(Stmt *S) {
ComputedEST = EST_None;
}
-bool
-Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
- SourceLocation EqualLoc) {
+ExprResult Sema::ConvertParamDefaultArgument(const ParmVarDecl *Param,
+ Expr *Arg,
+ SourceLocation EqualLoc) {
if (RequireCompleteType(Param->getLocation(), Param->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
- Param->setInvalidDecl();
+ diag::err_typecheck_decl_incomplete_type))
return true;
- }
// C++ [dcl.fct.default]p5
// A default argument expression is implicitly converted (clause
@@ -274,7 +280,12 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
CheckCompletedExpr(Arg, EqualLoc);
Arg = MaybeCreateExprWithCleanups(Arg);
- // Okay: add the default argument to the parameter
+ return Arg;
+}
+
+void Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
+ SourceLocation EqualLoc) {
+ // Add the default argument to the parameter
Param->setDefaultArg(Arg);
// We have already instantiated this parameter; provide each of the
@@ -288,8 +299,6 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
// We're done tracking this parameter's instantiations.
UnparsedDefaultArgInstantiations.erase(InstPos);
}
-
- return false;
}
/// ActOnParamDefaultArgument - Check whether the default argument
@@ -304,18 +313,22 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
ParmVarDecl *Param = cast<ParmVarDecl>(param);
UnparsedDefaultArgLocs.erase(Param);
+ auto Fail = [&] {
+ Param->setInvalidDecl();
+ Param->setDefaultArg(new (Context) OpaqueValueExpr(
+ EqualLoc, Param->getType().getNonReferenceType(), VK_RValue));
+ };
+
// Default arguments are only permitted in C++
if (!getLangOpts().CPlusPlus) {
Diag(EqualLoc, diag::err_param_default_argument)
<< DefaultArg->getSourceRange();
- Param->setInvalidDecl();
- return;
+ return Fail();
}
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
- Param->setInvalidDecl();
- return;
+ return Fail();
}
// C++11 [dcl.fct.default]p3
@@ -324,15 +337,21 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
if (Param->isParameterPack()) {
Diag(EqualLoc, diag::err_param_default_argument_on_parameter_pack)
<< DefaultArg->getSourceRange();
+ // Recover by discarding the default argument.
+ Param->setDefaultArg(nullptr);
return;
}
+ ExprResult Result = ConvertParamDefaultArgument(Param, DefaultArg, EqualLoc);
+ if (Result.isInvalid())
+ return Fail();
+
+ DefaultArg = Result.getAs<Expr>();
+
// Check that the default argument is well-formed
- CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this);
- if (DefaultArgChecker.Visit(DefaultArg)) {
- Param->setInvalidDecl();
- return;
- }
+ CheckDefaultArgumentVisitor DefaultArgChecker(*this, DefaultArg);
+ if (DefaultArgChecker.Visit(DefaultArg))
+ return Fail();
SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
}
@@ -419,14 +438,9 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
}
static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) {
- for (unsigned NumParams = FD->getNumParams(); NumParams > 0; --NumParams) {
- const ParmVarDecl *PVD = FD->getParamDecl(NumParams-1);
- if (!PVD->hasDefaultArg())
- return false;
- if (!PVD->hasInheritedDefaultArg())
- return true;
- }
- return false;
+ return std::any_of(FD->param_begin(), FD->param_end(), [](ParmVarDecl *P) {
+ return P->hasDefaultArg() && !P->hasInheritedDefaultArg();
+ });
}
/// MergeCXXFunctionDecl - Merge two declarations of the same C++
@@ -664,7 +678,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
// for the same class template shall not have equivalent
// parameter-declaration-clauses.
if (isa<CXXDeductionGuideDecl>(New) &&
- !New->isFunctionTemplateSpecialization()) {
+ !New->isFunctionTemplateSpecialization() && isVisible(Old)) {
Diag(New->getLocation(), diag::err_deduction_guide_redeclared);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
@@ -761,7 +775,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Err << SourceRange(Loc, Loc);
} else if (!CPlusPlus20Specifiers.empty()) {
auto &&Warn = Diag(CPlusPlus20SpecifierLocs.front(),
- getLangOpts().CPlusPlus2a
+ getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_decomp_decl_spec
: diag::ext_decomp_decl_spec);
Warn << (int)CPlusPlus20Specifiers.size()
@@ -778,7 +792,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// C++2a [dcl.struct.bind]p1:
// A cv that includes volatile is deprecated
if ((DS.getTypeQualifiers() & DeclSpec::TQ_volatile) &&
- getLangOpts().CPlusPlus2a)
+ getLangOpts().CPlusPlus20)
Diag(DS.getVolatileSpecLoc(),
diag::warn_deprecated_volatile_structured_binding);
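// Illustrative sketch only: the C++20 deprecation diagnosed just above. The
// aggregate type is made up; any cv-qualifier-seq containing 'volatile' on a
// structured binding declaration triggers the warning in C++20 mode.
struct Point { int x, y; };
void demo() {
  Point p{1, 2};
  volatile auto [x, y] = p; // warning: volatile-qualified structured binding is deprecated
}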
@@ -952,7 +966,7 @@ static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
Arg.getArgument().print(PrintingPolicy, OS);
First = false;
}
- return OS.str();
+ return std::string(OS.str());
}
static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
@@ -1052,7 +1066,7 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
TemplateArgumentListInfo &Args;
ICEDiagnoser(LookupResult &R, TemplateArgumentListInfo &Args)
: R(R), Args(Args) {}
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
<< printTemplateArgs(S.Context.getPrintingPolicy(), Args);
}
@@ -1100,16 +1114,17 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
}
namespace {
-struct BindingDiagnosticTrap {
+struct InitializingBinding {
Sema &S;
- DiagnosticErrorTrap Trap;
- BindingDecl *BD;
-
- BindingDiagnosticTrap(Sema &S, BindingDecl *BD)
- : S(S), Trap(S.Diags), BD(BD) {}
- ~BindingDiagnosticTrap() {
- if (Trap.hasErrorOccurred())
- S.Diag(BD->getLocation(), diag::note_in_binding_decl_init) << BD;
+ InitializingBinding(Sema &S, BindingDecl *BD) : S(S) {
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::InitializingStructuredBinding;
+ Ctx.PointOfInstantiation = BD->getLocation();
+ Ctx.Entity = BD;
+ S.pushCodeSynthesisContext(Ctx);
+ }
+ ~InitializingBinding() {
+ S.popCodeSynthesisContext();
}
};
}
@@ -1158,7 +1173,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
unsigned I = 0;
for (auto *B : Bindings) {
- BindingDiagnosticTrap Trap(S, B);
+ InitializingBinding InitContext(S, B);
SourceLocation Loc = B->getLocation();
ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
@@ -1528,25 +1543,34 @@ void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
/// [dcl.fct.default].
void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
unsigned NumParams = FD->getNumParams();
- unsigned p;
+ unsigned ParamIdx = 0;
+
+ // This checking doesn't make sense for explicit specializations; their
+ // default arguments are determined by the declaration we're specializing,
+ // not by FD.
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return;
+ if (auto *FTD = FD->getDescribedFunctionTemplate())
+ if (FTD->isMemberSpecialization())
+ return;
// Find first parameter with a default argument
- for (p = 0; p < NumParams; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
+ for (; ParamIdx < NumParams; ++ParamIdx) {
+ ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
if (Param->hasDefaultArg())
break;
}
- // C++11 [dcl.fct.default]p4:
+ // C++20 [dcl.fct.default]p4:
// In a given function declaration, each parameter subsequent to a parameter
// with a default argument shall have a default argument supplied in this or
- // a previous declaration or shall be a function parameter pack. A default
- // argument shall not be redefined by a later declaration (not even to the
- // same value).
- unsigned LastMissingDefaultArg = 0;
- for (; p < NumParams; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
- if (!Param->hasDefaultArg() && !Param->isParameterPack()) {
+ // a previous declaration, unless the parameter was expanded from a
+ // parameter pack, or shall be a function parameter pack.
+ for (; ParamIdx < NumParams; ++ParamIdx) {
+ ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
+ if (!Param->hasDefaultArg() && !Param->isParameterPack() &&
+ !(CurrentInstantiationScope &&
+ CurrentInstantiationScope->isLocalPackExpansion(Param))) {
if (Param->isInvalidDecl())
/* We already complained about this parameter. */;
else if (Param->getIdentifier())
@@ -1556,21 +1580,6 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
else
Diag(Param->getLocation(),
diag::err_param_default_argument_missing);
-
- LastMissingDefaultArg = p;
- }
- }
-
- if (LastMissingDefaultArg > 0) {
- // Some default arguments were missing. Clear out all of the
- // default arguments up to (and including) the last missing
- // default argument, so that we leave the function parameters
- // in a semantically valid state.
- for (p = 0; p <= LastMissingDefaultArg; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
- if (Param->hasDefaultArg()) {
- Param->setDefaultArg(nullptr);
- }
}
}
}
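// Illustrative sketch only: the basic rule enforced above, independent of the
// new pack-expansion exemption.
void ok(int a, int b = 1, int c = 2); // OK: every parameter after 'b' has a default
void bad(int a, int b = 1, int c);    // error: missing default argument on 'c'
template <typename... T>
void pack(int a = 0, T... rest);      // OK: a trailing function parameter pack is exempt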
@@ -1716,7 +1725,7 @@ bool Sema::CheckConstexprFunctionDefinition(const FunctionDecl *NewFD,
// - it shall not be virtual; (removed in C++20)
const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
if (Method && Method->isVirtual()) {
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
if (Kind == CheckConstexprKind::Diagnose)
Diag(Method->getLocation(), diag::warn_cxx17_compat_constexpr_virtual);
} else {
@@ -1856,11 +1865,11 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(
VD->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_local_var_no_init
: diag::ext_constexpr_local_var_no_init)
<< isa<CXXConstructorDecl>(Dcl);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
continue;
@@ -1919,7 +1928,7 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
Sema::CheckConstexprKind Kind) {
// In C++20 onwards, there's nothing to check for validity.
if (Kind == Sema::CheckConstexprKind::CheckValid &&
- SemaRef.getLangOpts().CPlusPlus2a)
+ SemaRef.getLangOpts().CPlusPlus20)
return true;
if (Field->isInvalidDecl())
@@ -1941,14 +1950,14 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
if (!Diagnosed) {
SemaRef.Diag(Dcl->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_ctor_missing_init
: diag::ext_constexpr_ctor_missing_init);
Diagnosed = true;
}
SemaRef.Diag(Field->getLocation(),
diag::note_constexpr_ctor_missing_init);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
} else if (Field->isAnonymousStructOrUnion()) {
@@ -2132,14 +2141,14 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
// apply the general constexpr rules.
switch (Kind) {
case Sema::CheckConstexprKind::CheckValid:
- if (!SemaRef.getLangOpts().CPlusPlus2a)
+ if (!SemaRef.getLangOpts().CPlusPlus20)
return false;
break;
case Sema::CheckConstexprKind::Diagnose:
SemaRef.Diag(Body->getBeginLoc(),
- !SemaRef.getLangOpts().CPlusPlus2a
- ? diag::ext_constexpr_function_try_block_cxx2a
+ !SemaRef.getLangOpts().CPlusPlus20
+ ? diag::ext_constexpr_function_try_block_cxx20
: diag::warn_cxx17_compat_constexpr_function_try_block)
<< isa<CXXConstructorDecl>(Dcl);
break;
@@ -2162,14 +2171,14 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::CheckValid) {
// If this is only valid as an extension, report that we don't satisfy the
// constraints of the current language.
- if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus2a) ||
+ if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
(Cxx1yLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus17))
return false;
} else if (Cxx2aLoc.isValid()) {
SemaRef.Diag(Cxx2aLoc,
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_body_invalid_stmt
- : diag::ext_constexpr_body_invalid_stmt_cxx2a)
+ : diag::ext_constexpr_body_invalid_stmt_cxx20)
<< isa<CXXConstructorDecl>(Dcl);
} else if (Cxx1yLoc.isValid()) {
SemaRef.Diag(Cxx1yLoc,
@@ -2194,10 +2203,10 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(
Dcl->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_union_ctor_no_init
: diag::ext_constexpr_union_ctor_no_init);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
}
@@ -2306,7 +2315,7 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
!Expr::isPotentialConstantExpr(Dcl, Diags)) {
SemaRef.Diag(Dcl->getLocation(),
diag::ext_constexpr_function_never_constant_expr)
- << isa<CXXConstructorDecl>(Dcl);
+ << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval();
for (size_t I = 0, N = Diags.size(); I != N; ++I)
SemaRef.Diag(Diags[I].first, Diags[I].second);
// Don't return false here: we allow this for compatibility in
@@ -2417,7 +2426,10 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
-
+ if (BaseType->containsErrors()) {
+ // Already emitted a diagnostic when parsing the error type.
+ return nullptr;
+ }
// C++ [class.union]p1:
// A union shall not have base classes.
if (Class->isUnion()) {
@@ -2821,13 +2833,13 @@ void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
/// if there is an error, and Range is the source range to highlight
/// if there is an error.
///
-/// If either InaccessibleBaseID or AmbigiousBaseConvID are 0, then the
+/// If either InaccessibleBaseID or AmbiguousBaseConvID are 0, then the
/// diagnostic for the respective type of error will be suppressed, but the
/// check for ill-formed code will still be performed.
bool
Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
- unsigned AmbigiousBaseConvID,
+ unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
@@ -2853,7 +2865,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
for (const CXXBasePath &PossiblePath : Paths) {
if (PossiblePath.size() == 1) {
Path = &PossiblePath;
- if (AmbigiousBaseConvID)
+ if (AmbiguousBaseConvID)
Diag(Loc, diag::ext_ms_ambiguous_direct_base)
<< Base << Derived << Range;
break;
@@ -2881,7 +2893,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
return false;
}
- if (AmbigiousBaseConvID) {
+ if (AmbiguousBaseConvID) {
// We know that the derived-to-base conversion is ambiguous, and
// we're going to produce a diagnostic. Perform the derived-to-base
// search just one more time to compute all of the possible paths so
@@ -2900,7 +2912,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
// to each base class subobject.
std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
- Diag(Loc, AmbigiousBaseConvID)
+ Diag(Loc, AmbiguousBaseConvID)
<< Derived << Base << PathDisplayStr << Range << Name;
}
return true;
@@ -3033,7 +3045,7 @@ void Sema::CheckOverrideControl(NamedDecl *D) {
<< MD->getDeclName();
}
-void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
+void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent) {
if (D->isInvalidDecl() || D->hasAttr<OverrideAttr>())
return;
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
@@ -3049,12 +3061,22 @@ void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
return;
if (MD->size_overridden_methods() > 0) {
- unsigned DiagID = isa<CXXDestructorDecl>(MD)
- ? diag::warn_destructor_marked_not_override_overriding
- : diag::warn_function_marked_not_override_overriding;
- Diag(MD->getLocation(), DiagID) << MD->getDeclName();
- const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
- Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ auto EmitDiag = [&](unsigned DiagInconsistent, unsigned DiagSuggest) {
+ unsigned DiagID =
+ Inconsistent && !Diags.isIgnored(DiagInconsistent, MD->getLocation())
+ ? DiagInconsistent
+ : DiagSuggest;
+ Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
+ Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ };
+ if (isa<CXXDestructorDecl>(MD))
+ EmitDiag(
+ diag::warn_inconsistent_destructor_marked_not_override_overriding,
+ diag::warn_suggest_destructor_marked_not_override_overriding);
+ else
+ EmitDiag(diag::warn_inconsistent_function_marked_not_override_overriding,
+ diag::warn_suggest_function_marked_not_override_overriding);
}
}
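// Illustrative sketch only: the warning split introduced above. When 'override'
// is used elsewhere in the class, the stricter "inconsistent" form fires; with
// no 'override' anywhere, only the opt-in suggestion (-Wsuggest-override) applies.
struct Base {
  virtual void f();
  virtual void g();
};
struct Derived : Base {
  void f() override; // 'override' present here...
  void g();          // ...so this one is diagnosed as inconsistently missing 'override'
};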
@@ -5443,6 +5465,15 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
// subobjects.
bool VisitVirtualBases = !ClassDecl->isAbstract();
+ // If the destructor exists and has already been marked used in the MS ABI,
+ // then virtual base destructors have already been checked and marked used.
+ // Skip checking them again to avoid duplicate diagnostics.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ CXXDestructorDecl *Dtor = ClassDecl->getDestructor();
+ if (Dtor && Dtor->isUsed())
+ VisitVirtualBases = false;
+ }
+
llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
// Bases.
@@ -5477,16 +5508,21 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
DiagnoseUseOfDecl(Dtor, Location);
}
- if (!VisitVirtualBases)
- return;
+ if (VisitVirtualBases)
+ MarkVirtualBaseDestructorsReferenced(Location, ClassDecl,
+ &DirectVirtualBases);
+}
+void Sema::MarkVirtualBaseDestructorsReferenced(
+ SourceLocation Location, CXXRecordDecl *ClassDecl,
+ llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases) {
// Virtual bases.
for (const auto &VBase : ClassDecl->vbases()) {
// Bases are always records in a well-formed non-dependent class.
const RecordType *RT = VBase.getType()->castAs<RecordType>();
- // Ignore direct virtual bases.
- if (DirectVirtualBases.count(RT))
+ // Ignore already visited direct virtual bases.
+ if (DirectVirtualBases && DirectVirtualBases->count(RT))
continue;
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
@@ -5788,6 +5824,23 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// declaration.
return;
+ // Add a context note to explain how we got to any diagnostics produced below.
+ struct MarkingClassDllexported {
+ Sema &S;
+ MarkingClassDllexported(Sema &S, CXXRecordDecl *Class,
+ SourceLocation AttrLoc)
+ : S(S) {
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::MarkingClassDllexported;
+ Ctx.PointOfInstantiation = AttrLoc;
+ Ctx.Entity = Class;
+ S.pushCodeSynthesisContext(Ctx);
+ }
+ ~MarkingClassDllexported() {
+ S.popCodeSynthesisContext();
+ }
+ } MarkingDllexportedContext(S, Class, ClassAttr->getLocation());
+
if (S.Context.getTargetInfo().getTriple().isWindowsGNUEnvironment())
S.MarkVTableUsed(Class->getLocation(), Class, true);
@@ -5823,13 +5876,7 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// defaulted methods, and the copy and move assignment operators. The
// latter are exported even if they are trivial, because the address of
// an operator can be taken and should compare equal across libraries.
- DiagnosticErrorTrap Trap(S.Diags);
S.MarkFunctionReferenced(Class->getLocation(), MD);
- if (Trap.hasErrorOccurred()) {
- S.Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
- << Class << !S.getLangOpts().CPlusPlus11;
- break;
- }
// There is no later point when we will see the definition of this
// function, so pass it to the consumer now.
@@ -5877,6 +5924,123 @@ static void checkForMultipleExportedDefaultConstructors(Sema &S,
}
}
+static void checkCUDADeviceBuiltinSurfaceClassTemplate(Sema &S,
+ CXXRecordDecl *Class) {
+ bool ErrorReported = false;
+ auto reportIllegalClassTemplate = [&ErrorReported](Sema &S,
+ ClassTemplateDecl *TD) {
+ if (ErrorReported)
+ return;
+ S.Diag(TD->getLocation(),
+ diag::err_cuda_device_builtin_surftex_cls_template)
+ << /*surface*/ 0 << TD;
+ ErrorReported = true;
+ };
+
+ ClassTemplateDecl *TD = Class->getDescribedClassTemplate();
+ if (!TD) {
+ auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(Class);
+ if (!SD) {
+ S.Diag(Class->getLocation(),
+ diag::err_cuda_device_builtin_surftex_ref_decl)
+ << /*surface*/ 0 << Class;
+ S.Diag(Class->getLocation(),
+ diag::note_cuda_device_builtin_surftex_should_be_template_class)
+ << Class;
+ return;
+ }
+ TD = SD->getSpecializedTemplate();
+ }
+
+ TemplateParameterList *Params = TD->getTemplateParameters();
+ unsigned N = Params->size();
+
+ if (N != 2) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_n_args)
+ << TD << 2;
+ }
+ if (N > 0 && !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*1st*/ 0 << /*type*/ 0;
+ }
+ if (N > 1) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*2nd*/ 1 << /*integer*/ 1;
+ }
+ }
+}
+
+static void checkCUDADeviceBuiltinTextureClassTemplate(Sema &S,
+ CXXRecordDecl *Class) {
+ bool ErrorReported = false;
+ auto reportIllegalClassTemplate = [&ErrorReported](Sema &S,
+ ClassTemplateDecl *TD) {
+ if (ErrorReported)
+ return;
+ S.Diag(TD->getLocation(),
+ diag::err_cuda_device_builtin_surftex_cls_template)
+ << /*texture*/ 1 << TD;
+ ErrorReported = true;
+ };
+
+ ClassTemplateDecl *TD = Class->getDescribedClassTemplate();
+ if (!TD) {
+ auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(Class);
+ if (!SD) {
+ S.Diag(Class->getLocation(),
+ diag::err_cuda_device_builtin_surftex_ref_decl)
+ << /*texture*/ 1 << Class;
+ S.Diag(Class->getLocation(),
+ diag::note_cuda_device_builtin_surftex_should_be_template_class)
+ << Class;
+ return;
+ }
+ TD = SD->getSpecializedTemplate();
+ }
+
+ TemplateParameterList *Params = TD->getTemplateParameters();
+ unsigned N = Params->size();
+
+ if (N != 3) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_n_args)
+ << TD << 3;
+ }
+ if (N > 0 && !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*1st*/ 0 << /*type*/ 0;
+ }
+ if (N > 1) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*2nd*/ 1 << /*integer*/ 1;
+ }
+ }
+ if (N > 2) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(2));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*3rd*/ 2 << /*integer*/ 1;
+ }
+ }
+}
+
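// Illustrative sketch only: the template shape the two checks above expect.
// The attribute spellings are assumed to match the GNU spellings used by the
// CUDA headers, and the parameter names are made up.
template <class T, int dim>
struct __attribute__((device_builtin_surface_type)) surface_ref {}; // type + 1 integral

template <class T, int texType, int hostMode>
struct __attribute__((device_builtin_texture_type)) texture_ref {}; // type + 2 integrals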
void Sema::checkClassLevelCodeSegAttribute(CXXRecordDecl *Class) {
// Mark any compiler-generated routines with the implicit code_seg attribute.
for (auto *Method : Class->methods()) {
@@ -6151,7 +6315,7 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
case OO_Spaceship:
// No point allowing this if <=> doesn't exist in the current language mode.
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
break;
return DefaultedComparisonKind::ThreeWay;
@@ -6160,7 +6324,7 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
case OO_Greater:
case OO_GreaterEqual:
// No point allowing this if <=> doesn't exist in the current language mode.
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
break;
return DefaultedComparisonKind::Relational;
@@ -6172,27 +6336,31 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
return DefaultedFunctionKind();
}
-static void DefineImplicitSpecialMember(Sema &S, CXXMethodDecl *MD,
- SourceLocation DefaultLoc) {
- switch (S.getSpecialMember(MD)) {
+static void DefineDefaultedFunction(Sema &S, FunctionDecl *FD,
+ SourceLocation DefaultLoc) {
+ Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
+ if (DFK.isComparison())
+ return S.DefineDefaultedComparison(DefaultLoc, FD, DFK.asComparison());
+
+ switch (DFK.asSpecialMember()) {
case Sema::CXXDefaultConstructor:
S.DefineImplicitDefaultConstructor(DefaultLoc,
- cast<CXXConstructorDecl>(MD));
+ cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXCopyConstructor:
- S.DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ S.DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXCopyAssignment:
- S.DefineImplicitCopyAssignment(DefaultLoc, MD);
+ S.DefineImplicitCopyAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
case Sema::CXXDestructor:
- S.DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(MD));
+ S.DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(FD));
break;
case Sema::CXXMoveConstructor:
- S.DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ S.DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXMoveAssignment:
- S.DefineImplicitMoveAssignment(DefaultLoc, MD);
+ S.DefineImplicitMoveAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
case Sema::CXXInvalid:
llvm_unreachable("Invalid special member.");
@@ -6313,6 +6481,27 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
return HasNonDeletedCopyOrMove;
}
+/// Report an error regarding overriding, along with any relevant
+/// overridden methods.
+///
+/// \param DiagID the primary error to report.
+/// \param MD the overriding method.
+static bool
+ReportOverrides(Sema &S, unsigned DiagID, const CXXMethodDecl *MD,
+ llvm::function_ref<bool(const CXXMethodDecl *)> Report) {
+ bool IssuedDiagnostic = false;
+ for (const CXXMethodDecl *O : MD->overridden_methods()) {
+ if (Report(O)) {
+ if (!IssuedDiagnostic) {
+ S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ IssuedDiagnostic = true;
+ }
+ S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
+ }
+ }
+ return IssuedDiagnostic;
+}
+
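// Illustrative sketch only: the override mismatches that ReportOverrides is
// used to reject further below (static-ness, deleted-ness, and consteval-ness
// must agree between an overrider and the functions it overrides).
struct B {
  virtual void f();
  virtual void g();
};
struct D : B {
  void f() = delete;  // error: deleted function cannot override a non-deleted function
  consteval void g(); // error: consteval function cannot override a non-consteval function
};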
/// Perform semantic checks on a class definition that has been
/// completing, introducing implicitly-declared members, checking for
/// abstract types, etc.
@@ -6427,21 +6616,64 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// primary comparison functions (==, <=>).
llvm::SmallVector<FunctionDecl*, 5> DefaultedSecondaryComparisons;
- auto CheckForDefaultedFunction = [&](FunctionDecl *FD) {
- if (!FD || FD->isInvalidDecl() || !FD->isExplicitlyDefaulted())
+ // Perform checks that can't be done until we know all the properties of a
+ // member function (whether it's defaulted, deleted, virtual, overriding,
+ // ...).
+ auto CheckCompletedMemberFunction = [&](CXXMethodDecl *MD) {
+ // A static function cannot override anything.
+ if (MD->getStorageClass() == SC_Static) {
+ if (ReportOverrides(*this, diag::err_static_overrides_virtual, MD,
+ [](const CXXMethodDecl *) { return true; }))
+ return;
+ }
+
+ // A deleted function cannot override a non-deleted function and vice
+ // versa.
+ if (ReportOverrides(*this,
+ MD->isDeleted() ? diag::err_deleted_override
+ : diag::err_non_deleted_override,
+ MD, [&](const CXXMethodDecl *V) {
+ return MD->isDeleted() != V->isDeleted();
+ })) {
+ if (MD->isDefaulted() && MD->isDeleted())
+ // Explain why this defaulted function was deleted.
+ DiagnoseDeletedDefaultedFunction(MD);
+ return;
+ }
+
+ // A consteval function cannot override a non-consteval function and vice
+ // versa.
+ if (ReportOverrides(*this,
+ MD->isConsteval() ? diag::err_consteval_override
+ : diag::err_non_consteval_override,
+ MD, [&](const CXXMethodDecl *V) {
+ return MD->isConsteval() != V->isConsteval();
+ })) {
+ if (MD->isDefaulted() && MD->isDeleted())
+ // Explain why this defaulted function was deleted.
+ DiagnoseDeletedDefaultedFunction(MD);
return;
+ }
+ };
+
+ auto CheckForDefaultedFunction = [&](FunctionDecl *FD) -> bool {
+ if (!FD || FD->isInvalidDecl() || !FD->isExplicitlyDefaulted())
+ return false;
DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
if (DFK.asComparison() == DefaultedComparisonKind::NotEqual ||
- DFK.asComparison() == DefaultedComparisonKind::Relational)
+ DFK.asComparison() == DefaultedComparisonKind::Relational) {
DefaultedSecondaryComparisons.push_back(FD);
- else
- CheckExplicitlyDefaultedFunction(S, FD);
+ return true;
+ }
+
+ CheckExplicitlyDefaultedFunction(S, FD);
+ return false;
};
auto CompleteMemberFunction = [&](CXXMethodDecl *M) {
// Check whether the explicitly-defaulted members are valid.
- CheckForDefaultedFunction(M);
+ bool Incomplete = CheckForDefaultedFunction(M);
// Skip the rest of the checks for a member of a dependent class.
if (Record->isDependentType())
@@ -6488,7 +6720,10 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// function right away.
// FIXME: We can defer doing this until the vtable is marked as used.
if (M->isDefaulted() && M->isConstexpr() && M->size_overridden_methods())
- DefineImplicitSpecialMember(*this, M, M->getLocation());
+ DefineDefaultedFunction(*this, M, M->getLocation());
+
+ if (!Incomplete)
+ CheckCompletedMemberFunction(M);
};
// Check the destructor before any other member function. We need to
@@ -6524,19 +6759,21 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
}
}
- if (HasMethodWithOverrideControl &&
- HasOverridingMethodWithoutOverrideControl) {
- // At least one method has the 'override' control declared.
- // Diagnose all other overridden methods which do not have 'override'
- // specified on them.
+ if (HasOverridingMethodWithoutOverrideControl) {
+ bool HasInconsistentOverrideControl = HasMethodWithOverrideControl;
for (auto *M : Record->methods())
- DiagnoseAbsenceOfOverrideControl(M);
+ DiagnoseAbsenceOfOverrideControl(M, HasInconsistentOverrideControl);
}
// Check the defaulted secondary comparisons after any other member functions.
- for (FunctionDecl *FD : DefaultedSecondaryComparisons)
+ for (FunctionDecl *FD : DefaultedSecondaryComparisons) {
CheckExplicitlyDefaultedFunction(S, FD);
+ // If this is a member function, we deferred checking it until now.
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ CheckCompletedMemberFunction(MD);
+ }
+
// ms_struct is a request to use the same ABI rules as MSVC. Check
// whether this class uses any C++ features that are implemented
// completely differently in MSVC, and if so, emit a diagnostic.
@@ -6546,7 +6783,11 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// headers, sweeping up a bunch of types that the project doesn't
// really rely on MSVC-compatible layout for. We must therefore
// support "ms_struct except for C++ stuff" as a secondary ABI.
- if (Record->isMsStruct(Context) &&
+ // Don't emit this diagnostic if the feature was enabled as a
+ // language option (as opposed to via a pragma or attribute), as
+ // the option -mms-bitfields otherwise essentially makes it impossible
+ // to build C++ code, unless this diagnostic is turned off.
+ if (Record->isMsStruct(Context) && !Context.getLangOpts().MSBitfields &&
(Record->isPolymorphic() || Record->getNumBases())) {
Diag(Record->getLocation(), diag::warn_cxx_ms_struct);
}
@@ -6581,6 +6822,13 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// is especially required for cases like vtable assumption loads.
MarkVTableUsed(Record->getInnerLocStart(), Record);
}
+
+ if (getLangOpts().CUDA) {
+ if (Record->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>())
+ checkCUDADeviceBuiltinSurfaceClassTemplate(*this, Record);
+ else if (Record->hasAttr<CUDADeviceBuiltinTextureTypeAttr>())
+ checkCUDADeviceBuiltinTextureClassTemplate(*this, Record);
+ }
}
/// Look up the special member function that would be called by a special
@@ -6955,7 +7203,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// C++2a changes the second bullet to instead delete the function if it's
// defaulted on its first declaration, unless it's "an assignment operator,
// and its return type differs or its parameter type is not a reference".
- bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus2a && First;
+ bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus20 && First;
bool ShouldDeleteForTypeMismatch = false;
unsigned ExpectedParams = 1;
if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
@@ -7065,7 +7313,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// FIXME: This should not apply if the member is deleted.
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
HasConstParam);
- if ((getLangOpts().CPlusPlus2a ||
+ if ((getLangOpts().CPlusPlus20 ||
(getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
: isa<CXXConstructorDecl>(MD))) &&
MD->isConstexpr() && !Constexpr &&
@@ -7083,7 +7331,9 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// If a function is explicitly defaulted on its first declaration, it is
// implicitly considered to be constexpr if the implicit declaration
// would be.
- MD->setConstexprKind(Constexpr ? CSK_constexpr : CSK_unspecified);
+ MD->setConstexprKind(
+ Constexpr ? (MD->isConsteval() ? CSK_consteval : CSK_constexpr)
+ : CSK_unspecified);
if (!Type->hasExceptionSpec()) {
// C++2a [except.spec]p3:
@@ -9463,27 +9713,57 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
}
void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
- auto PrintDiagAndRemoveAttr = [&]() {
+ auto PrintDiagAndRemoveAttr = [&](unsigned N) {
// No diagnostics if this is a template instantiation.
- if (!isTemplateInstantiation(RD.getTemplateSpecializationKind()))
+ if (!isTemplateInstantiation(RD.getTemplateSpecializationKind())) {
Diag(RD.getAttr<TrivialABIAttr>()->getLocation(),
diag::ext_cannot_use_trivial_abi) << &RD;
+ Diag(RD.getAttr<TrivialABIAttr>()->getLocation(),
+ diag::note_cannot_use_trivial_abi_reason) << &RD << N;
+ }
RD.dropAttr<TrivialABIAttr>();
};
+ // Ill-formed if the copy and move constructors are deleted.
+ auto HasNonDeletedCopyOrMoveConstructor = [&]() {
+ // If the type is dependent, assume it might have an implicit copy or
+ // move constructor, because we can't tell at this point.
+ if (RD.isDependentType())
+ return true;
+ if (RD.needsImplicitCopyConstructor() &&
+ !RD.defaultedCopyConstructorIsDeleted())
+ return true;
+ if (RD.needsImplicitMoveConstructor() &&
+ !RD.defaultedMoveConstructorIsDeleted())
+ return true;
+ for (const CXXConstructorDecl *CD : RD.ctors())
+ if (CD->isCopyOrMoveConstructor() && !CD->isDeleted())
+ return true;
+ return false;
+ };
+
+ if (!HasNonDeletedCopyOrMoveConstructor()) {
+ PrintDiagAndRemoveAttr(0);
+ return;
+ }
+
// Ill-formed if the struct has virtual functions.
if (RD.isPolymorphic()) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(1);
return;
}
for (const auto &B : RD.bases()) {
// Ill-formed if the base class is non-trivial for the purpose of calls or a
// virtual base.
- if ((!B.getType()->isDependentType() &&
- !B.getType()->getAsCXXRecordDecl()->canPassInRegisters()) ||
- B.isVirtual()) {
- PrintDiagAndRemoveAttr();
+ if (!B.getType()->isDependentType() &&
+ !B.getType()->getAsCXXRecordDecl()->canPassInRegisters()) {
+ PrintDiagAndRemoveAttr(2);
+ return;
+ }
+
+ if (B.isVirtual()) {
+ PrintDiagAndRemoveAttr(3);
return;
}
}
@@ -9493,14 +9773,14 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
// non-trivial for the purpose of calls.
QualType FT = FD->getType();
if (FT.getObjCLifetime() == Qualifiers::OCL_Weak) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(4);
return;
}
if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs<RecordType>())
if (!RT->isDependentType() &&
!cast<CXXRecordDecl>(RT->getDecl())->canPassInRegisters()) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(5);
return;
}
}
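
A minimal sketch of the new rejection path (reason index 0) added at the top of checkIllFormedTrivialABIStruct, assuming the usual [[clang::trivial_abi]] spelling; the struct name is made up:

    struct [[clang::trivial_abi]] S {
      S(const S &) = delete;  // no non-deleted copy constructor
      S(S &&) = delete;       // no non-deleted move constructor
    };
    // The attribute is dropped, and the new note (reason 0) explains why.
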
@@ -9573,86 +9853,95 @@ static void findImplicitlyDeclaredEqualityComparisons(
/// [special]p1). This routine can only be executed just before the
/// definition of the class is complete.
void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
- if (ClassDecl->needsImplicitDefaultConstructor()) {
- ++getASTContext().NumImplicitDefaultConstructors;
+ // Don't add implicit special members to templated classes.
+ // FIXME: This means unqualified lookups for 'operator=' within a class
+ // template don't work properly.
+ if (!ClassDecl->isDependentType()) {
+ if (ClassDecl->needsImplicitDefaultConstructor()) {
+ ++getASTContext().NumImplicitDefaultConstructors;
- if (ClassDecl->hasInheritedConstructor())
- DeclareImplicitDefaultConstructor(ClassDecl);
- }
+ if (ClassDecl->hasInheritedConstructor())
+ DeclareImplicitDefaultConstructor(ClassDecl);
+ }
- if (ClassDecl->needsImplicitCopyConstructor()) {
- ++getASTContext().NumImplicitCopyConstructors;
+ if (ClassDecl->needsImplicitCopyConstructor()) {
+ ++getASTContext().NumImplicitCopyConstructors;
- // If the properties or semantics of the copy constructor couldn't be
- // determined while the class was being declared, force a declaration
- // of it now.
- if (ClassDecl->needsOverloadResolutionForCopyConstructor() ||
- ClassDecl->hasInheritedConstructor())
- DeclareImplicitCopyConstructor(ClassDecl);
- // For the MS ABI we need to know whether the copy ctor is deleted. A
- // prerequisite for deleting the implicit copy ctor is that the class has a
- // move ctor or move assignment that is either user-declared or whose
- // semantics are inherited from a subobject. FIXME: We should provide a more
- // direct way for CodeGen to ask whether the constructor was deleted.
- else if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- (ClassDecl->hasUserDeclaredMoveConstructor() ||
- ClassDecl->needsOverloadResolutionForMoveConstructor() ||
- ClassDecl->hasUserDeclaredMoveAssignment() ||
- ClassDecl->needsOverloadResolutionForMoveAssignment()))
- DeclareImplicitCopyConstructor(ClassDecl);
- }
+ // If the properties or semantics of the copy constructor couldn't be
+ // determined while the class was being declared, force a declaration
+ // of it now.
+ if (ClassDecl->needsOverloadResolutionForCopyConstructor() ||
+ ClassDecl->hasInheritedConstructor())
+ DeclareImplicitCopyConstructor(ClassDecl);
+ // For the MS ABI we need to know whether the copy ctor is deleted. A
+ // prerequisite for deleting the implicit copy ctor is that the class has
+ // a move ctor or move assignment that is either user-declared or whose
+ // semantics are inherited from a subobject. FIXME: We should provide a
+ // more direct way for CodeGen to ask whether the constructor was deleted.
+ else if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ (ClassDecl->hasUserDeclaredMoveConstructor() ||
+ ClassDecl->needsOverloadResolutionForMoveConstructor() ||
+ ClassDecl->hasUserDeclaredMoveAssignment() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment()))
+ DeclareImplicitCopyConstructor(ClassDecl);
+ }
- if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
- ++getASTContext().NumImplicitMoveConstructors;
+ if (getLangOpts().CPlusPlus11 &&
+ ClassDecl->needsImplicitMoveConstructor()) {
+ ++getASTContext().NumImplicitMoveConstructors;
- if (ClassDecl->needsOverloadResolutionForMoveConstructor() ||
- ClassDecl->hasInheritedConstructor())
- DeclareImplicitMoveConstructor(ClassDecl);
- }
+ if (ClassDecl->needsOverloadResolutionForMoveConstructor() ||
+ ClassDecl->hasInheritedConstructor())
+ DeclareImplicitMoveConstructor(ClassDecl);
+ }
- if (ClassDecl->needsImplicitCopyAssignment()) {
- ++getASTContext().NumImplicitCopyAssignmentOperators;
+ if (ClassDecl->needsImplicitCopyAssignment()) {
+ ++getASTContext().NumImplicitCopyAssignmentOperators;
- // If we have a dynamic class, then the copy assignment operator may be
- // virtual, so we have to declare it immediately. This ensures that, e.g.,
- // it shows up in the right place in the vtable and that we diagnose
- // problems with the implicit exception specification.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForCopyAssignment() ||
- ClassDecl->hasInheritedAssignment())
- DeclareImplicitCopyAssignment(ClassDecl);
- }
+ // If we have a dynamic class, then the copy assignment operator may be
+ // virtual, so we have to declare it immediately. This ensures that, e.g.,
+ // it shows up in the right place in the vtable and that we diagnose
+ // problems with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForCopyAssignment() ||
+ ClassDecl->hasInheritedAssignment())
+ DeclareImplicitCopyAssignment(ClassDecl);
+ }
- if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
- ++getASTContext().NumImplicitMoveAssignmentOperators;
+ if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
+ ++getASTContext().NumImplicitMoveAssignmentOperators;
- // Likewise for the move assignment operator.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForMoveAssignment() ||
- ClassDecl->hasInheritedAssignment())
- DeclareImplicitMoveAssignment(ClassDecl);
- }
+ // Likewise for the move assignment operator.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment() ||
+ ClassDecl->hasInheritedAssignment())
+ DeclareImplicitMoveAssignment(ClassDecl);
+ }
- if (ClassDecl->needsImplicitDestructor()) {
- ++getASTContext().NumImplicitDestructors;
+ if (ClassDecl->needsImplicitDestructor()) {
+ ++getASTContext().NumImplicitDestructors;
- // If we have a dynamic class, then the destructor may be virtual, so we
- // have to declare the destructor immediately. This ensures that, e.g., it
- // shows up in the right place in the vtable and that we diagnose problems
- // with the implicit exception specification.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForDestructor())
- DeclareImplicitDestructor(ClassDecl);
+ // If we have a dynamic class, then the destructor may be virtual, so we
+ // have to declare the destructor immediately. This ensures that, e.g., it
+ // shows up in the right place in the vtable and that we diagnose problems
+ // with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForDestructor())
+ DeclareImplicitDestructor(ClassDecl);
+ }
}
// C++2a [class.compare.default]p3:
// If the member-specification does not explicitly declare any member or
// friend named operator==, an == operator function is declared implicitly
- // for each defaulted three-way comparison operator function defined in the
- // member-specification
+ // for each defaulted three-way comparison operator function defined in
+ // the member-specification
// FIXME: Consider doing this lazily.
- if (getLangOpts().CPlusPlus2a) {
- llvm::SmallVector<FunctionDecl*, 4> DefaultedSpaceships;
+ // We do this during the initial parse for a class template, not during
+ // instantiation, so that we can handle unqualified lookups for 'operator=='
+ // when parsing the template.
+ if (getLangOpts().CPlusPlus20 && !inTemplateInstantiation()) {
+ llvm::SmallVector<FunctionDecl *, 4> DefaultedSpaceships;
findImplicitlyDeclaredEqualityComparisons(Context, ClassDecl,
DefaultedSpaceships);
for (auto *FD : DefaultedSpaceships)
@@ -9660,19 +9949,17 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
}
-unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
+unsigned
+Sema::ActOnReenterTemplateScope(Decl *D,
+ llvm::function_ref<Scope *()> EnterScope) {
if (!D)
return 0;
+ AdjustDeclIfTemplate(D);
- // The order of template parameters is not important here. All names
- // get added to the same scope.
+ // In order to get name lookup right, reenter template scopes in order from
+ // outermost to innermost.
SmallVector<TemplateParameterList *, 4> ParameterLists;
-
- if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
- D = TD->getTemplatedDecl();
-
- if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
- ParameterLists.push_back(PSD->getTemplateParameters());
+ DeclContext *LookupDC = dyn_cast<DeclContext>(D);
if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
for (unsigned i = 0; i < DD->getNumTemplateParameterLists(); ++i)
@@ -9681,31 +9968,49 @@ unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
ParameterLists.push_back(FTD->getTemplateParameters());
- }
- }
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ LookupDC = VD->getDeclContext();
- if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (VarTemplateDecl *VTD = VD->getDescribedVarTemplate())
+ ParameterLists.push_back(VTD->getTemplateParameters());
+ else if (auto *PSD = dyn_cast<VarTemplatePartialSpecializationDecl>(D))
+ ParameterLists.push_back(PSD->getTemplateParameters());
+ }
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
for (unsigned i = 0; i < TD->getNumTemplateParameterLists(); ++i)
ParameterLists.push_back(TD->getTemplateParameterList(i));
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
ParameterLists.push_back(CTD->getTemplateParameters());
+ else if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
+ ParameterLists.push_back(PSD->getTemplateParameters());
}
}
+ // FIXME: Alias declarations and concepts.
unsigned Count = 0;
+ Scope *InnermostTemplateScope = nullptr;
for (TemplateParameterList *Params : ParameterLists) {
- if (Params->size() > 0)
- // Ignore explicit specializations; they don't contribute to the template
- // depth.
- ++Count;
+ // Ignore explicit specializations; they don't contribute to the template
+ // depth.
+ if (Params->size() == 0)
+ continue;
+
+ InnermostTemplateScope = EnterScope();
for (NamedDecl *Param : *Params) {
if (Param->getDeclName()) {
- S->AddDecl(Param);
+ InnermostTemplateScope->AddDecl(Param);
IdResolver.AddDecl(Param);
}
}
+ ++Count;
+ }
+
+ // Associate the new template scopes with the corresponding entities.
+ if (InnermostTemplateScope) {
+ assert(LookupDC && "no enclosing DeclContext for template lookup");
+ EnterTemplatedContext(InnermostTemplateScope, LookupDC);
}
return Count;
@@ -9757,11 +10062,6 @@ void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
ParmVarDecl *Param = cast<ParmVarDecl>(ParamD);
- // If this parameter has an unparsed default argument, clear it out
- // to make way for the parsed default argument.
- if (Param->hasUnparsedDefaultArg())
- Param->setDefaultArg(nullptr);
-
S->AddDecl(Param);
if (Param->getDeclName())
IdResolver.AddDecl(Param);
@@ -9895,11 +10195,9 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
// either there are no other parameters or else all other
// parameters have default arguments.
if (!Constructor->isInvalidDecl() &&
- ((Constructor->getNumParams() == 1) ||
- (Constructor->getNumParams() > 1 &&
- Constructor->getParamDecl(1)->hasDefaultArg())) &&
- Constructor->getTemplateSpecializationKind()
- != TSK_ImplicitInstantiation) {
+ Constructor->hasOneParamOrDefaultArgs() &&
+ Constructor->getTemplateSpecializationKind() !=
+ TSK_ImplicitInstantiation) {
QualType ParamType = Constructor->getParamDecl(0)->getType();
QualType ClassTy = Context.getTagDeclType(ClassDecl);
if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
@@ -9984,12 +10282,12 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
// declaration.
QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName);
if (const TypedefType *TT = DeclaratorType->getAs<TypedefType>())
- Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ Diag(D.getIdentifierLoc(), diag::ext_destructor_typedef_name)
<< DeclaratorType << isa<TypeAliasDecl>(TT->getDecl());
else if (const TemplateSpecializationType *TST =
DeclaratorType->getAs<TemplateSpecializationType>())
if (TST->isTypeAlias())
- Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ Diag(D.getIdentifierLoc(), diag::ext_destructor_typedef_name)
<< DeclaratorType << 1;
// C++ [class.dtor]p2:
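
The two diagnostic swaps above downgrade naming a destructor through a typedef or alias from a hard error to an extension warning; a hedged example of code that should now be accepted with a warning (names are illustrative):

    struct S { ~S(); };
    typedef S T;
    T::~T() { }  // was err_destructor_typedef_name, now ext_destructor_typedef_name
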
@@ -10251,7 +10549,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
- if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus2a)
+ if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus20)
Diag(DS.getExplicitSpecLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_explicit_conversion_functions
@@ -10270,15 +10568,12 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
// Make sure we aren't redeclaring the conversion function.
QualType ConvType = Context.getCanonicalType(Conversion->getConversionType());
-
// C++ [class.conv.fct]p1:
// [...] A conversion function is never used to convert a
// (possibly cv-qualified) object to the (possibly cv-qualified)
// same object type (or a reference to it), to a (possibly
// cv-qualified) base class of that type (or a reference to it),
// or to (possibly cv-qualified) void.
- // FIXME: Suppress this warning if the conversion function ends up being a
- // virtual function that overrides a virtual function in a base class.
QualType ClassType
= Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
@@ -10286,6 +10581,8 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
/* Suppress diagnostics for instantiations. */;
+ else if (Conversion->size_overridden_methods() != 0)
+ /* Suppress diagnostics when overriding a virtual function in a base class. */;
else if (ConvType->isRecordType()) {
ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
if (ConvType == ClassType)
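
A sketch of the case the new size_overridden_methods() branch is meant to keep quiet, per the FIXME removed above: a conversion function that converts to a base class only because it overrides a virtual one (class names are invented):

    struct A { };
    struct Base : A {
      virtual operator A &();   // still warns: converts to a base class
    };
    struct Derived : Base {
      operator A &() override;  // same conversion, but it overrides a virtual
                                // function, so the warning is now suppressed
    };
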
@@ -10960,8 +11257,7 @@ bool Sema::isInitListConstructor(const FunctionDecl *Ctor) {
// is of type std::initializer_list<E> or reference to possibly cv-qualified
// std::initializer_list<E> for some type E, and either there are no other
// parameters or else all other parameters have default arguments.
- if (Ctor->getNumParams() < 1 ||
- (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg()))
+ if (!Ctor->hasOneParamOrDefaultArgs())
return false;
QualType ArgType = Ctor->getParamDecl(0)->getType();
@@ -13000,6 +13296,25 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
}
}
+void Sema::CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Destructor) {
+ if (Destructor->isInvalidDecl())
+ return;
+
+ CXXRecordDecl *ClassDecl = Destructor->getParent();
+ assert(Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ "implicit complete dtors unneeded outside MS ABI");
+ assert(ClassDecl->getNumVBases() > 0 &&
+ "complete dtor only exists for classes with vbases");
+
+ SynthesizedFunctionScope Scope(*this, Destructor);
+
+ // Add a context note for diagnostics produced after this point.
+ Scope.addContextNote(CurrentLocation);
+
+ MarkVirtualBaseDestructorsReferenced(Destructor->getLocation(), ClassDecl);
+}
+
/// Perform any semantic analysis which needs to be delayed until all
/// pending class member declarations have been parsed.
void Sema::ActOnFinishCXXMemberDecls() {
@@ -13021,7 +13336,7 @@ void Sema::ActOnFinishCXXNonNestedClass() {
SmallVector<CXXMethodDecl*, 4> WorkList;
std::swap(DelayedDllExportMemberFunctions, WorkList);
for (CXXMethodDecl *M : WorkList) {
- DefineImplicitSpecialMember(*this, M, M->getLocation());
+ DefineDefaultedFunction(*this, M, M->getLocation());
// Pass the method to the consumer to get emitted. This is not necessary
// for explicit instantiation definitions, as they will get emitted
@@ -13220,13 +13535,13 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
// directly construct UnaryOperators here because semantic analysis
// does not permit us to take the address of an xvalue.
Expr *From = FromB.build(S, Loc);
- From = new (S.Context) UnaryOperator(From, UO_AddrOf,
- S.Context.getPointerType(From->getType()),
- VK_RValue, OK_Ordinary, Loc, false);
+ From = UnaryOperator::Create(
+ S.Context, From, UO_AddrOf, S.Context.getPointerType(From->getType()),
+ VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
Expr *To = ToB.build(S, Loc);
- To = new (S.Context) UnaryOperator(To, UO_AddrOf,
- S.Context.getPointerType(To->getType()),
- VK_RValue, OK_Ordinary, Loc, false);
+ To = UnaryOperator::Create(
+ S.Context, To, UO_AddrOf, S.Context.getPointerType(To->getType()),
+ VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
const Type *E = T->getBaseElementTypeUnsafe();
bool NeedsCollectableMemCpy =
@@ -13460,18 +13775,17 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// Create the comparison against the array bound.
llvm::APInt Upper
= ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
- Expr *Comparison
- = new (S.Context) BinaryOperator(IterationVarRefRVal.build(S, Loc),
- IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
- BO_NE, S.Context.BoolTy,
- VK_RValue, OK_Ordinary, Loc, FPOptions());
+ Expr *Comparison = BinaryOperator::Create(
+ S.Context, IterationVarRefRVal.build(S, Loc),
+ IntegerLiteral::Create(S.Context, Upper, SizeType, Loc), BO_NE,
+ S.Context.BoolTy, VK_RValue, OK_Ordinary, Loc, S.CurFPFeatureOverrides());
// Create the pre-increment of the iteration variable. We can determine
// whether the increment will overflow based on the value of the array
// bound.
- Expr *Increment = new (S.Context)
- UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc, SizeType,
- VK_LValue, OK_Ordinary, Loc, Upper.isMaxValue());
+ Expr *Increment = UnaryOperator::Create(
+ S.Context, IterationVarRef.build(S, Loc), UO_PreInc, SizeType, VK_LValue,
+ OK_Ordinary, Loc, Upper.isMaxValue(), S.CurFPFeatureOverrides());
// Construct the loop that copies all elements of this array.
return S.ActOnForStmt(
@@ -13569,8 +13883,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyAssignment);
- if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment))
+ if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment)) {
+ ClassDecl->setImplicitCopyAssignmentIsDeleted();
SetDeclDeleted(CopyAssignment, ClassLoc);
+ }
if (S)
PushOnScopeChains(CopyAssignment, S, false);
@@ -14682,13 +14998,18 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
MarkFunctionReferenced(ConstructLoc, Constructor);
if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
return ExprError();
+ if (getLangOpts().SYCLIsDevice &&
+ !checkSYCLDeviceFunction(ConstructLoc, Constructor))
+ return ExprError();
- return CXXConstructExpr::Create(
- Context, DeclInitType, ConstructLoc, Constructor, Elidable,
- ExprArgs, HadMultipleCandidates, IsListInitialization,
- IsStdInitListInitialization, RequiresZeroInit,
- static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
- ParenRange);
+ return CheckForImmediateInvocation(
+ CXXConstructExpr::Create(
+ Context, DeclInitType, ConstructLoc, Constructor, Elidable, ExprArgs,
+ HadMultipleCandidates, IsListInitialization,
+ IsStdInitListInitialization, RequiresZeroInit,
+ static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
+ ParenRange),
+ Constructor);
}
ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
@@ -14766,6 +15087,10 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (VD->isInvalidDecl()) return;
+ // If initializing the variable failed, don't also diagnose problems with
+ // the destructor; they're likely related.
+ if (VD->getInit() && VD->getInit()->containsErrors())
+ return;
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
if (ClassDecl->isInvalidDecl()) return;
@@ -14792,10 +15117,13 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
// If the destructor is constexpr, check whether the variable has constant
// destruction now.
- if (Destructor->isConstexpr() && VD->getInit() &&
- !VD->getInit()->isValueDependent() && VD->evaluateValue()) {
+ if (Destructor->isConstexpr()) {
+ bool HasConstantInit = false;
+ if (VD->getInit() && !VD->getInit()->isValueDependent())
+ HasConstantInit = VD->evaluateValue();
SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!VD->evaluateDestruction(Notes) && VD->isConstexpr()) {
+ if (!VD->evaluateDestruction(Notes) && VD->isConstexpr() &&
+ HasConstantInit) {
Diag(VD->getLocation(),
diag::err_constexpr_var_requires_const_destruction) << VD;
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
@@ -14895,12 +15223,6 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
QualType ResultType =
FnDecl->getType()->castAs<FunctionType>()->getReturnType();
- // Check that the result type is not dependent.
- if (ResultType->isDependentType())
- return SemaRef.Diag(FnDecl->getLocation(),
- diag::err_operator_new_delete_dependent_result_type)
- << FnDecl->getDeclName() << ExpectedResultType;
-
// The operator is valid on any address space for OpenCL.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
if (auto *PtrTy = ResultType->getAs<PointerType>()) {
@@ -14909,10 +15231,16 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
}
// Check that the result type is what we expect.
- if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType)
- return SemaRef.Diag(FnDecl->getLocation(),
- diag::err_operator_new_delete_invalid_result_type)
- << FnDecl->getDeclName() << ExpectedResultType;
+ if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType) {
+ // Reject even if the type is dependent; an operator delete function is
+ // required to have a non-dependent result type.
+ return SemaRef.Diag(
+ FnDecl->getLocation(),
+ ResultType->isDependentType()
+ ? diag::err_operator_new_delete_dependent_result_type
+ : diag::err_operator_new_delete_invalid_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+ }
// A function template must have at least 2 parameters.
if (FnDecl->getDescribedFunctionTemplate() && FnDecl->getNumParams() < 2)
@@ -14926,13 +15254,7 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
diag::err_operator_new_delete_too_few_parameters)
<< FnDecl->getDeclName();
- // Check the first parameter type is not dependent.
QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
- if (FirstParamType->isDependentType())
- return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag)
- << FnDecl->getDeclName() << ExpectedFirstParamType;
-
- // Check that the first parameter type is what we expect.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
// The operator is valid on any address space for OpenCL.
if (auto *PtrTy =
@@ -14940,10 +15262,18 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
FirstParamType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
}
}
+
+ // Check that the first parameter type is what we expect.
if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() !=
- ExpectedFirstParamType)
- return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag)
- << FnDecl->getDeclName() << ExpectedFirstParamType;
+ ExpectedFirstParamType) {
+ // The first parameter type is not allowed to be dependent. As a tentative
+ // DR resolution, we allow a dependent parameter type if it is the right
+ // type anyway, to allow destroying operator delete in class templates.
+ return SemaRef.Diag(FnDecl->getLocation(), FirstParamType->isDependentType()
+ ? DependentParamTypeDiag
+ : InvalidParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+ }
return false;
}
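
Roughly, the reordered checks above are what allow a destroying operator delete inside a class template: the first parameter type is dependent, but it canonically matches the expected type, so only a genuine mismatch is diagnosed. A hedged sketch assuming C++20 and std::destroying_delete_t from <new>:

    #include <new>
    template <typename T>
    struct Node {
      // 'Node *' here means 'Node<T> *', which is dependent but is exactly the
      // expected first parameter type, so the dependent-type error is no longer
      // emitted for it.
      void operator delete(Node *p, std::destroying_delete_t);
    };
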
@@ -15482,6 +15812,11 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
!BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
Invalid = true;
+ if (!Invalid && Mode != 1 && BaseType->isSizelessType()) {
+ Diag(Loc, diag::err_catch_sizeless) << (Mode == 2 ? 1 : 0) << BaseType;
+ Invalid = true;
+ }
+
if (!Invalid && !ExDeclType->isDependentType() &&
RequireNonAbstractType(Loc, ExDeclType,
diag::err_abstract_type_in_decl,
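
For context, the new isSizelessType() check rejects exception declarations of sizeless types; a hedged example assuming an AArch64 SVE target where svint8_t is a sizeless type:

    #include <arm_sve.h>
    void f() {
      try {
      } catch (svint8_t) {  // sizeless type: now rejected with err_catch_sizeless
      }
    }
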
@@ -16344,9 +16679,16 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Diag(Prev->getLocation().isInvalid() ? DelLoc : Prev->getLocation(),
Prev->isImplicit() ? diag::note_previous_implicit_declaration
: diag::note_previous_declaration);
+ // We can't recover from this; the declaration might have already
+ // been used.
+ Fn->setInvalidDecl();
+ return;
}
- // If the declaration wasn't the first, we delete the function anyway for
- // recovery.
+
+ // To maintain the invariant that functions are only deleted on their first
+ // declaration, mark the implicitly-instantiated declaration of the
+ // explicitly-specialized function as deleted instead of marking the
+ // instantiated redeclaration.
Fn = Fn->getCanonicalDecl();
}
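
A short illustration of the tightened '= delete' handling in SetDeclDeleted: deleting anything but the first declaration is no longer "recovered" by deleting the function anyway, since it may already have been used (the function name is arbitrary):

    void g();           // first declaration, not deleted
    void g() = delete;  // error: still diagnosed, but the function is now marked
                        // invalid instead of being retroactively deleted
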
@@ -16356,9 +16698,6 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Fn->setInvalidDecl();
}
- if (Fn->isDeleted())
- return;
-
// C++11 [basic.start.main]p3:
// A program that defines main as deleted [...] is ill-formed.
if (Fn->isMain())
@@ -16368,25 +16707,6 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
// A deleted function is implicitly inline.
Fn->setImplicitlyInline();
Fn->setDeletedAsWritten();
-
- // See if we're deleting a function which is already known to override a
- // non-deleted virtual function.
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
- bool IssuedDiagnostic = false;
- for (const CXXMethodDecl *O : MD->overridden_methods()) {
- if (!(*MD->begin_overridden_methods())->isDeleted()) {
- if (!IssuedDiagnostic) {
- Diag(DelLoc, diag::err_deleted_override) << MD->getDeclName();
- IssuedDiagnostic = true;
- }
- Diag(O->getLocation(), diag::note_overridden_virtual_function);
- }
- }
- // If this function was implicitly deleted because it was defaulted,
- // explain why it was deleted.
- if (IssuedDiagnostic && MD->isDefaulted())
- DiagnoseDeletedDefaultedFunction(MD);
- }
}
void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
@@ -16403,7 +16723,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
}
Diag(DefaultLoc, diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return;
}
@@ -16417,7 +16737,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
(!isa<CXXConstructorDecl>(FD) &&
FD->getDeclName().getCXXOverloadedOperator() != OO_Equal))) {
Diag(DefaultLoc, diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return;
}
@@ -16432,7 +16752,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
// 'operator<=>' when parsing the '<=>' token.
if (DefKind.isComparison() &&
DefKind.asComparison() != DefaultedComparisonKind::ThreeWay) {
- Diag(DefaultLoc, getLangOpts().CPlusPlus2a
+ Diag(DefaultLoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_defaulted_comparison
: diag::ext_defaulted_comparison);
}
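
As a quick illustration of the diagnostic pair referenced just above (the type is invented):

    struct P {
      int x;
      bool operator==(const P &) const = default;
      // before C++20: ext_defaulted_comparison (extension warning)
      // in C++20: accepted; the C++17-compatibility warning group maps to
      //           warn_cxx17_compat_defaulted_comparison
    };
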
@@ -16468,10 +16788,12 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
if (Primary->getCanonicalDecl()->isDefaulted())
return;
+ // FIXME: Once we support defining comparisons out of class, check for a
+ // defaulted comparison here.
if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember()))
MD->setInvalidDecl();
else
- DefineImplicitSpecialMember(*this, MD, DefaultLoc);
+ DefineDefaultedFunction(*this, MD, DefaultLoc);
}
static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
@@ -16783,7 +17105,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
return;
// Do not mark as used if compiling for the device outside of the target
// region.
- if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
!isInOpenMPDeclareTargetContext() &&
!isInOpenMPTargetExecutionDirective()) {
if (!DefinitionRequired)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
index 5fdf6aeed5b4..d376880a40e8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -937,8 +938,7 @@ static bool checkTypeParamListConsistency(Sema &S,
// Override the new type parameter's bound type with the previous type,
// so that it's consistent.
- newTypeParam->setTypeSourceInfo(
- S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ S.Context.adjustObjCTypeParamBoundType(prevTypeParam, newTypeParam);
continue;
}
@@ -965,8 +965,7 @@ static bool checkTypeParamListConsistency(Sema &S,
}
// Update the new type parameter's bound to match the previous one.
- newTypeParam->setTypeSourceInfo(
- S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ S.Context.adjustObjCTypeParamBoundType(prevTypeParam, newTypeParam);
}
return false;
@@ -1273,7 +1272,8 @@ Decl *Sema::ActOnStartProtocolInterface(
static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
ObjCProtocolDecl *&UndefinedProtocol) {
- if (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()) {
+ if (!PDecl->hasDefinition() ||
+ !PDecl->getDefinition()->isUnconditionallyVisible()) {
UndefinedProtocol = PDecl;
return true;
}
@@ -2360,7 +2360,7 @@ static bool CheckMethodOverrideReturn(Sema &S,
: diag::warn_conflicting_ret_types;
// Mismatches between ObjC pointers go into a different warning
- // category, and sometimes they're even completely whitelisted.
+ // category, and sometimes they're even explicitly allowed.
if (const ObjCObjectPointerType *ImplPtrTy =
MethodImpl->getReturnType()->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *IfacePtrTy =
@@ -2444,7 +2444,7 @@ static bool CheckMethodOverrideParam(Sema &S,
: diag::warn_conflicting_param_types;
// Mismatches between ObjC pointers go into a different warning
- // category, and sometimes they're even completely whitelisted.
+ // category, and sometimes they're even explicitly allowed.
if (const ObjCObjectPointerType *ImplPtrTy =
ImplTy->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *IfacePtrTy =
@@ -3236,7 +3236,7 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
return false;
// If either is hidden, it is not considered to match.
- if (left->isHidden() || right->isHidden())
+ if (!left->isUnconditionallyVisible() || !right->isUnconditionallyVisible())
return false;
if (left->isDirectMethod() != right->isDirectMethod())
@@ -3495,7 +3495,7 @@ bool Sema::CollectMultipleMethodsInGlobalPool(
ObjCMethodList &MethList = InstanceFirst ? Pos->second.first :
Pos->second.second;
for (ObjCMethodList *M = &MethList; M; M = M->getNext())
- if (M->getMethod() && !M->getMethod()->isHidden()) {
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible()) {
if (FilterMethodsByTypeBound(M->getMethod(), TypeBound))
Methods.push_back(M->getMethod());
}
@@ -3511,7 +3511,7 @@ bool Sema::CollectMultipleMethodsInGlobalPool(
ObjCMethodList &MethList2 = InstanceFirst ? Pos->second.second :
Pos->second.first;
for (ObjCMethodList *M = &MethList2; M; M = M->getNext())
- if (M->getMethod() && !M->getMethod()->isHidden()) {
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible()) {
if (FilterMethodsByTypeBound(M->getMethod(), TypeBound))
Methods.push_back(M->getMethod());
}
@@ -3558,7 +3558,7 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
SmallVector<ObjCMethodDecl *, 4> Methods;
for (ObjCMethodList *M = &MethList; M; M = M->getNext()) {
- if (M->getMethod() && !M->getMethod()->isHidden())
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible())
return M->getMethod();
}
return nullptr;
@@ -4580,6 +4580,62 @@ static void checkObjCMethodX86VectorTypes(Sema &SemaRef,
<< (Triple.isMacOSX() ? "macOS 10.11" : "iOS 9");
}
+static void mergeObjCDirectMembers(Sema &S, Decl *CD, ObjCMethodDecl *Method) {
+ if (!Method->isDirectMethod() && !Method->hasAttr<UnavailableAttr>() &&
+ CD->hasAttr<ObjCDirectMembersAttr>()) {
+ Method->addAttr(
+ ObjCDirectAttr::CreateImplicit(S.Context, Method->getLocation()));
+ }
+}
+
+static void checkObjCDirectMethodClashes(Sema &S, ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *Method,
+ ObjCImplDecl *ImpDecl = nullptr) {
+ auto Sel = Method->getSelector();
+ bool isInstance = Method->isInstanceMethod();
+ bool diagnosed = false;
+
+ auto diagClash = [&](const ObjCMethodDecl *IMD) {
+ if (diagnosed || IMD->isImplicit())
+ return;
+ if (Method->isDirectMethod() || IMD->isDirectMethod()) {
+ S.Diag(Method->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << Method->isDirectMethod() << /* method */ 0 << IMD->isDirectMethod()
+ << Method->getDeclName();
+ S.Diag(IMD->getLocation(), diag::note_previous_declaration);
+ diagnosed = true;
+ }
+ };
+
+ // Look for any other declaration of this method anywhere we can see in this
+ // compilation unit.
+ //
+ // We do not use IDecl->lookupMethod() because we have specific needs:
+ //
+ // - we absolutely do not need to walk protocols, because
+ // diag::err_objc_direct_on_protocol has already been emitted
+ // during parsing if there's a conflict,
+ //
+ // - when we do not find a match in a given @interface container,
+ // we also need to look it up in the @implementation block, if the
+ // translation unit has seen one, to find more clashes.
+
+ if (auto *IMD = IDecl->getMethod(Sel, isInstance))
+ diagClash(IMD);
+ else if (auto *Impl = IDecl->getImplementation())
+ if (Impl != ImpDecl)
+ if (auto *IMD = IDecl->getImplementation()->getMethod(Sel, isInstance))
+ diagClash(IMD);
+
+ for (const auto *Cat : IDecl->visible_categories())
+ if (auto *IMD = Cat->getMethod(Sel, isInstance))
+ diagClash(IMD);
+ else if (auto CatImpl = Cat->getImplementation())
+ if (CatImpl != ImpDecl)
+ if (auto *IMD = Cat->getMethod(Sel, isInstance))
+ diagClash(IMD);
+}
+
Decl *Sema::ActOnMethodDeclaration(
Scope *S, SourceLocation MethodLoc, SourceLocation EndLoc,
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
@@ -4808,9 +4864,9 @@ Decl *Sema::ActOnMethodDeclaration(
Diag(ObjCMethod->getLocation(), diag::warn_dealloc_in_category)
<< ObjCMethod->getDeclName();
}
- } else if (ImpDecl->hasAttr<ObjCDirectMembersAttr>()) {
- ObjCMethod->addAttr(
- ObjCDirectAttr::CreateImplicit(Context, ObjCMethod->getLocation()));
+ } else {
+ mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
+ checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod, ImpDecl);
}
// Warn if a method declared in a protocol to which a category or
@@ -4831,39 +4887,16 @@ Decl *Sema::ActOnMethodDeclaration(
}
} else {
if (!isa<ObjCProtocolDecl>(ClassDecl)) {
- if (!ObjCMethod->isDirectMethod() &&
- ClassDecl->hasAttr<ObjCDirectMembersAttr>()) {
- ObjCMethod->addAttr(
- ObjCDirectAttr::CreateImplicit(Context, ObjCMethod->getLocation()));
- }
+ mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
- // There can be a single declaration in any @interface container
- // for a given direct method, look for clashes as we add them.
- //
- // For valid code, we should always know the primary interface
- // declaration by now, however for invalid code we'll keep parsing
- // but we won't find the primary interface and IDecl will be nil.
ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
if (!IDecl)
IDecl = cast<ObjCCategoryDecl>(ClassDecl)->getClassInterface();
-
+ // For valid code, we should always know the primary interface
+ // declaration by now; however, for invalid code we'll keep parsing
+ // but we won't find the primary interface and IDecl will be nil.
if (IDecl)
- if (auto *IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
- ObjCMethod->isInstanceMethod(),
- /*shallowCategoryLookup=*/false,
- /*followSuper=*/false)) {
- if (isa<ObjCProtocolDecl>(IMD->getDeclContext())) {
- // Do not emit a diagnostic for the Protocol case:
- // diag::err_objc_direct_on_protocol has already been emitted
- // during parsing for these with a nicer diagnostic.
- } else if (ObjCMethod->isDirectMethod() || IMD->isDirectMethod()) {
- Diag(ObjCMethod->getLocation(),
- diag::err_objc_direct_duplicate_decl)
- << ObjCMethod->isDirectMethod() << IMD->isDirectMethod()
- << ObjCMethod->getDeclName();
- Diag(IMD->getLocation(), diag::note_previous_declaration);
- }
- }
+ checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod);
}
cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index 193eaa3e01f9..d7695f9d7d7a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -167,6 +167,14 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
+ // The MSVC compatibility mode doesn't extend to sizeless types,
+ // so diagnose them separately.
+ if (PointeeT->isSizelessType() && Kind != 1) {
+ Diag(Range.getBegin(), diag::err_sizeless_in_exception_spec)
+ << (Kind == 2 ? 1 : 0) << PointeeT << Range;
+ return true;
+ }
+
return false;
}
@@ -991,10 +999,8 @@ static CanThrowResult canSubStmtsThrow(Sema &Self, const Stmt *S) {
return R;
}
-/// Determine whether the callee of a particular function call can throw.
-/// E and D are both optional, but at least one of E and Loc must be specified.
-static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
- SourceLocation Loc = SourceLocation()) {
+CanThrowResult Sema::canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
+ SourceLocation Loc) {
// As an extension, we assume that __attribute__((nothrow)) functions don't
// throw.
if (D && isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
@@ -1040,7 +1046,8 @@ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
if (!FT)
return CT_Can;
- FT = S.ResolveExceptionSpec(Loc.isInvalid() ? E->getBeginLoc() : Loc, FT);
+ if (Loc.isValid() || (Loc.isInvalid() && E))
+ FT = S.ResolveExceptionSpec(Loc.isInvalid() ? E->getBeginLoc() : Loc, FT);
if (!FT)
return CT_Can;
@@ -1061,7 +1068,7 @@ static CanThrowResult canVarDeclThrow(Sema &Self, const VarDecl *VD) {
VD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
if (auto *Dtor = RD->getDestructor()) {
CT = mergeCanThrow(
- CT, canCalleeThrow(Self, nullptr, Dtor, VD->getLocation()));
+ CT, Sema::canCalleeThrow(Self, nullptr, Dtor, VD->getLocation()));
}
}
}
@@ -1281,6 +1288,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::CompoundLiteralExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::BuiltinBitCastExprClass:
// FIXME: Properly determine whether a variably-modified type can throw.
@@ -1290,7 +1298,10 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
// Some might be dependent for other reasons.
case Expr::ArraySubscriptExprClass:
+ case Expr::MatrixSubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::BinaryOperatorClass:
case Expr::DependentCoawaitExprClass:
case Expr::CompoundAssignOperatorClass:
@@ -1332,6 +1343,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::CXXUnresolvedConstructExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXFoldExprClass:
+ case Expr::RecoveryExprClass:
return CT_Dependent;
case Expr::AsTypeExprClass:
@@ -1430,6 +1442,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPDistributeParallelForSimdDirectiveClass:
case Stmt::OMPDistributeSimdDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPDepobjDirectiveClass:
+ case Stmt::OMPScanDirectiveClass:
case Stmt::OMPForDirectiveClass:
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index 04a7038d6754..ccae79636f32 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
+#include "UsedDeclVisitor.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
@@ -46,8 +47,10 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace sema;
+using llvm::RoundingMode;
/// Determine whether the use of this declaration is valid, without
/// emitting diagnostics.
@@ -290,6 +293,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
return true;
+
+ if (getLangOpts().SYCLIsDevice && !checkSYCLDeviceFunction(Loc, FD))
+ return true;
}
if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@@ -349,6 +355,16 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
+ if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ checkDeviceDecl(VD, Loc);
+
+ if (!Context.getTargetInfo().isTLSSupported())
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->getTLSKind() != VarDecl::TLS_None)
+ targetDiag(*Locs.begin(), diag::err_thread_unsupported);
+ }
+
if (isa<ParmVarDecl>(D) && isa<RequiresExprBodyDecl>(D->getDeclContext()) &&
!isUnevaluatedContext()) {
// C++ [expr.prim.req.nested] p3
@@ -603,6 +619,10 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
QualType T = E->getType();
assert(!T.isNull() && "r-value conversion on typeless expression?");
+ // lvalue-to-rvalue conversion cannot be applied to function or array types.
+ if (T->isFunctionType() || T->isArrayType())
+ return E;
+
// We don't want to throw lvalue-to-rvalue casts on top of
// expressions of certain types in C++.
if (getLangOpts().CPlusPlus &&
@@ -671,6 +691,9 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (E->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
Cleanup.setExprNeedsCleanups(true);
+ if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ Cleanup.setExprNeedsCleanups(true);
+
// C++ [conv.lval]p3:
// If T is cv std::nullptr_t, the result is a null pointer constant.
CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
@@ -707,7 +730,7 @@ ExprResult Sema::CallExprUnaryConversions(Expr *E) {
// to function type.
if (Ty->isFunctionType()) {
Res = ImpCastExprToType(E, Context.getPointerType(Ty),
- CK_FunctionToPointerDecay).get();
+ CK_FunctionToPointerDecay);
if (Res.isInvalid())
return ExprError();
}
@@ -941,6 +964,11 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
ExprResult ExprRes = DefaultArgumentPromotion(E);
if (ExprRes.isInvalid())
return ExprError();
+
+ // Copy blocks to the heap.
+ if (ExprRes.get()->getType()->isBlockPointerType())
+ maybeExtendBlockObject(ExprRes);
+
E = ExprRes.get();
// Diagnostics regarding non-POD argument types are
@@ -1385,8 +1413,8 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
bool IsCompAssign = ACK == Sema::ACK_CompAssign;
if ((!IsCompAssign && LEnum && R->isFloatingType()) ||
(REnum && L->isFloatingType())) {
- S.Diag(Loc, S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_enum_float_cxx2a
+ S.Diag(Loc, S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_enum_float_cxx20
: diag::warn_arith_conv_enum_float)
<< LHS->getSourceRange() << RHS->getSourceRange()
<< (int)ACK << LEnum << L << R;
@@ -1398,24 +1426,24 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
// If either enumeration type is unnamed, it's less likely that the
// user cares about this, but this situation is still deprecated in
// C++2a. Use a different warning group.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_mixed_anon_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_mixed_anon_enum_types_cxx20
: diag::warn_arith_conv_mixed_anon_enum_types;
} else if (ACK == Sema::ACK_Conditional) {
// Conditional expressions are separated out because they have
// historically had a different warning flag.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_conditional_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_conditional_mixed_enum_types_cxx20
: diag::warn_conditional_mixed_enum_types;
} else if (ACK == Sema::ACK_Comparison) {
// Comparison expressions are separated out because they have
// historically had a different warning flag.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_comparison_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_comparison_mixed_enum_types_cxx20
: diag::warn_comparison_mixed_enum_types;
} else {
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_mixed_enum_types_cxx20
: diag::warn_arith_conv_mixed_enum_types;
}
S.Diag(Loc, DiagID) << LHS->getSourceRange() << RHS->getSourceRange()
@@ -1476,6 +1504,11 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
if (LHSType == RHSType)
return LHSType;
+ // ExtInt types aren't subject to usual arithmetic conversions, either between
+ // themselves or with normal integers, so this fails.
+ if (LHSType->isExtIntType() || RHSType->isExtIntType())
+ return QualType();
+
// At this point, we have two different arithmetic types.
// Diagnose attempts to convert between __float128 and long double where
@@ -1760,15 +1793,15 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
// Warn on initializing an array of char from a u8 string literal; this
// becomes ill-formed in C++2a.
- if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus2a &&
+ if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus20 &&
!getLangOpts().Char8 && Kind == StringLiteral::UTF8) {
- Diag(StringTokLocs.front(), diag::warn_cxx2a_compat_utf8_string);
+ Diag(StringTokLocs.front(), diag::warn_cxx20_compat_utf8_string);
// Create removals for all 'u8' prefixes in the string literal(s). This
// ensures C++2a compatibility (but may change the program behavior when
// built by non-Clang compilers for which the execution character set is
// not always UTF-8).
- auto RemovalDiag = PDiag(diag::note_cxx2a_compat_utf8_string_remove_u8);
+ auto RemovalDiag = PDiag(diag::note_cxx20_compat_utf8_string_remove_u8);
SourceLocation RemovalDiagLoc;
for (const Token &Tok : StringToks) {
if (Tok.getKind() == tok::utf8_string_literal) {
@@ -1914,7 +1947,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
bool RefersToCapturedVariable =
isa<VarDecl>(D) &&
NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
-
+
DeclRefExpr *E = DeclRefExpr::Create(
Context, NNS, TemplateKWLoc, D, RefersToCapturedVariable, NameInfo, Ty,
VK, FoundD, TemplateArgs, getNonOdrUseReasonInCurrentContext(D));
@@ -3123,6 +3156,11 @@ ExprResult Sema::BuildDeclarationNameExpr(
return ExprError();
ExprValueKind valueKind = VK_RValue;
+ // In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of
+ // a reference to 'V' is simply (unexpanded) 'T'. The type, like the value,
+ // is expanded by some outer '...' in the context of the use.
+ type = type.getNonPackExpansionType();
+
switch (D->getKind()) {
// Ignore all the non-ValueDecl kinds.
#define ABSTRACT_DECL(kind)
@@ -3268,6 +3306,9 @@ ExprResult Sema::BuildDeclarationNameExpr(
llvm_unreachable("building reference to deduction guide");
case Decl::MSProperty:
+ case Decl::MSGuid:
+ // FIXME: Should MSGuidDecl be subject to capture in OpenMP,
+ // or duplicated between host and device?
valueKind = VK_LValue;
break;
@@ -3368,6 +3409,70 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
+static std::pair<QualType, StringLiteral *>
+GetUniqueStableNameInfo(ASTContext &Context, QualType OpType,
+ SourceLocation OpLoc, PredefinedExpr::IdentKind K) {
+ std::pair<QualType, StringLiteral*> Result{{}, nullptr};
+
+ if (OpType->isDependentType()) {
+ Result.first = Context.DependentTy;
+ return Result;
+ }
+
+ std::string Str = PredefinedExpr::ComputeName(Context, K, OpType);
+ llvm::APInt Length(32, Str.length() + 1);
+ Result.first =
+ Context.adjustStringLiteralBaseType(Context.CharTy.withConst());
+ Result.first = Context.getConstantArrayType(
+ Result.first, Length, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0);
+ Result.second = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ /*Pascal*/ false, Result.first, OpLoc);
+ return Result;
+}
+
+ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
+ TypeSourceInfo *Operand) {
+ QualType ResultTy;
+ StringLiteral *SL;
+ std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
+ Context, Operand->getType(), OpLoc, PredefinedExpr::UniqueStableNameType);
+
+ return PredefinedExpr::Create(Context, OpLoc, ResultTy,
+ PredefinedExpr::UniqueStableNameType, SL,
+ Operand);
+}
+
+ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
+ Expr *E) {
+ QualType ResultTy;
+ StringLiteral *SL;
+ std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
+ Context, E->getType(), OpLoc, PredefinedExpr::UniqueStableNameExpr);
+
+ return PredefinedExpr::Create(Context, OpLoc, ResultTy,
+ PredefinedExpr::UniqueStableNameExpr, SL, E);
+}
+
+ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation L, SourceLocation R,
+ ParsedType Ty) {
+ TypeSourceInfo *TInfo = nullptr;
+ QualType T = GetTypeFromParser(Ty, &TInfo);
+
+ if (T.isNull())
+ return ExprError();
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildUniqueStableName(OpLoc, TInfo);
+}
+
+ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation L, SourceLocation R,
+ Expr *E) {
+ return BuildUniqueStableName(OpLoc, E);
+}
+
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
PredefinedExpr::IdentKind IK;
@@ -3529,7 +3634,9 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Invalid)
return ExprError();
- NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP);
+ NumericLiteralParser Literal(TokSpelling, Tok.getLocation(),
+ PP.getSourceManager(), PP.getLangOpts(),
+ PP.getTargetInfo(), PP.getDiagnostics());
if (Literal.hadError)
return ExprError();
@@ -3882,7 +3989,7 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
TraitKind == UETT_PreferredAlignOf)) {
// sizeof(function)/alignof(function) is allowed as an extension.
S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
- << TraitKind << ArgRange;
+ << getTraitSpelling(TraitKind) << ArgRange;
return false;
}
@@ -3891,7 +3998,7 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
if (T->isVoidType()) {
unsigned DiagID = S.LangOpts.OpenCL ? diag::err_opencl_sizeof_alignof_type
: diag::ext_sizeof_alignof_void_type;
- S.Diag(Loc, DiagID) << TraitKind << ArgRange;
+ S.Diag(Loc, DiagID) << getTraitSpelling(TraitKind) << ArgRange;
return false;
}
@@ -3958,7 +4065,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange());
- // Whitelist some types as extensions
+ // Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange(), ExprKind))
return false;
@@ -3968,14 +4075,15 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// be complete (and will attempt to complete it if it's an array of unknown
// bound).
if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
- if (RequireCompleteType(E->getExprLoc(),
- Context.getBaseElementType(E->getType()),
- diag::err_sizeof_alignof_incomplete_type, ExprKind,
- E->getSourceRange()))
+ if (RequireCompleteSizedType(
+ E->getExprLoc(), Context.getBaseElementType(E->getType()),
+ diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), E->getSourceRange()))
return true;
} else {
- if (RequireCompleteExprType(E, diag::err_sizeof_alignof_incomplete_type,
- ExprKind, E->getSourceRange()))
+ if (RequireCompleteSizedExprType(
+ E, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), E->getSourceRange()))
return true;
}
@@ -3985,7 +4093,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
if (ExprTy->isFunctionType()) {
Diag(E->getExprLoc(), diag::err_sizeof_alignof_function_type)
- << ExprKind << E->getSourceRange();
+ << getTraitSpelling(ExprKind) << E->getSourceRange();
return true;
}
@@ -4067,19 +4175,19 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
if (ExprKind == UETT_VecStep)
return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
- // Whitelist some types as extensions
+ // Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
ExprKind))
return false;
- if (RequireCompleteType(OpLoc, ExprType,
- diag::err_sizeof_alignof_incomplete_type,
- ExprKind, ExprRange))
+ if (RequireCompleteSizedType(
+ OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), ExprRange))
return true;
if (ExprType->isFunctionType()) {
Diag(OpLoc, diag::err_sizeof_alignof_function_type)
- << ExprKind << ExprRange;
+ << getTraitSpelling(ExprKind) << ExprRange;
return true;
}
@@ -4178,6 +4286,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
@@ -4187,6 +4296,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::ObjCObjectPointer:
case Type::ObjCTypeParam:
case Type::Pipe:
+ case Type::ExtInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
@@ -4452,7 +4562,8 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
if (base && !base->getType().isNull() &&
base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
- /*Length=*/nullptr, rbLoc);
+ SourceLocation(), /*Length*/ nullptr,
+ /*Stride=*/nullptr, rbLoc);
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
@@ -4461,8 +4572,55 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
base = result.get();
}
+ // Check if base and idx form a MatrixSubscriptExpr.
+ //
+ // Helper to check for comma expressions, which are not allowed as indices for
+ // matrix subscript expressions.
+ auto CheckAndReportCommaError = [this, base, rbLoc](Expr *E) {
+ if (isa<BinaryOperator>(E) && cast<BinaryOperator>(E)->isCommaOp()) {
+ Diag(E->getExprLoc(), diag::err_matrix_subscript_comma)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ return true;
+ }
+ return false;
+ };
+  // The matrix subscript operator ([][]) is considered a single operator.
+  // Separating the index expressions by parentheses is not allowed.
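+  //
+  // For illustration, a sketch of the matrix-extension syntax this handles
+  // (the typedef and variable names are made up):
+  //   typedef float m4x4 __attribute__((matrix_type(4, 4)));
+  //   m4x4 M;
+  //   M[1][2];    // OK: folded into a single MatrixSubscriptExpr
+  //   (M[1])[2];  // error: the row index is a separated, incomplete subscript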
+ if (base->getType()->isSpecificPlaceholderType(
+ BuiltinType::IncompleteMatrixIdx) &&
+ !isa<MatrixSubscriptExpr>(base)) {
+ Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ return ExprError();
+ }
+ // If the base is either a MatrixSubscriptExpr or a matrix type, try to create
+ // a new MatrixSubscriptExpr.
+ auto *matSubscriptE = dyn_cast<MatrixSubscriptExpr>(base);
+ if (matSubscriptE) {
+ if (CheckAndReportCommaError(idx))
+ return ExprError();
+
+ assert(matSubscriptE->isIncomplete() &&
+ "base has to be an incomplete matrix subscript");
+ return CreateBuiltinMatrixSubscriptExpr(
+ matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc);
+ }
+ Expr *matrixBase = base;
+ bool IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
+ if (!IsMSPropertySubscript) {
+ ExprResult result = CheckPlaceholderExpr(base);
+ if (!result.isInvalid())
+ matrixBase = result.get();
+ }
+ if (matrixBase->getType()->isMatrixType()) {
+ if (CheckAndReportCommaError(idx))
+ return ExprError();
+
+ return CreateBuiltinMatrixSubscriptExpr(matrixBase, idx, nullptr, rbLoc);
+ }
+
// A comma-expression as the index is deprecated in C++2a onwards.
- if (getLangOpts().CPlusPlus2a &&
+ if (getLangOpts().CPlusPlus20 &&
((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
(isa<CXXOperatorCallExpr>(idx) &&
cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
@@ -4475,7 +4633,6 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// operand might be an overloadable type, in which case the overload
// resolution for the operator overload should get the first crack
// at the overload.
- bool IsMSPropertySubscript = false;
if (base->getType()->isNonOverloadPlaceholderType()) {
IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
if (!IsMSPropertySubscript) {
@@ -4536,6 +4693,79 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return Res;
}
+ExprResult Sema::tryConvertExprToType(Expr *E, QualType Ty) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty);
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(E->getBeginLoc(), SourceLocation());
+ InitializationSequence InitSeq(*this, Entity, Kind, E);
+ return InitSeq.Perform(*this, Entity, Kind, E);
+}
+
+ExprResult Sema::CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
+ Expr *ColumnIdx,
+ SourceLocation RBLoc) {
+ ExprResult BaseR = CheckPlaceholderExpr(Base);
+ if (BaseR.isInvalid())
+ return BaseR;
+ Base = BaseR.get();
+
+ ExprResult RowR = CheckPlaceholderExpr(RowIdx);
+ if (RowR.isInvalid())
+ return RowR;
+ RowIdx = RowR.get();
+
+ if (!ColumnIdx)
+ return new (Context) MatrixSubscriptExpr(
+ Base, RowIdx, ColumnIdx, Context.IncompleteMatrixIdxTy, RBLoc);
+
+ // Build an unanalyzed expression if any of the operands is type-dependent.
+ if (Base->isTypeDependent() || RowIdx->isTypeDependent() ||
+ ColumnIdx->isTypeDependent())
+ return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ Context.DependentTy, RBLoc);
+
+ ExprResult ColumnR = CheckPlaceholderExpr(ColumnIdx);
+ if (ColumnR.isInvalid())
+ return ColumnR;
+ ColumnIdx = ColumnR.get();
+
+ // Check that IndexExpr is an integer expression. If it is a constant
+ // expression, check that it is less than Dim (= the number of elements in the
+ // corresponding dimension).
+ auto IsIndexValid = [&](Expr *IndexExpr, unsigned Dim,
+ bool IsColumnIdx) -> Expr * {
+ if (!IndexExpr->getType()->isIntegerType() &&
+ !IndexExpr->isTypeDependent()) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_not_integer)
+ << IsColumnIdx;
+ return nullptr;
+ }
+
+ llvm::APSInt Idx;
+ if (IndexExpr->isIntegerConstantExpr(Idx, Context) &&
+ (Idx < 0 || Idx >= Dim)) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range)
+ << IsColumnIdx << Dim;
+ return nullptr;
+ }
+
+ ExprResult ConvExpr =
+ tryConvertExprToType(IndexExpr, Context.getSizeType());
+ assert(!ConvExpr.isInvalid() &&
+ "should be able to convert any integer type to size type");
+ return ConvExpr.get();
+ };
+
+ auto *MTy = Base->getType()->getAs<ConstantMatrixType>();
+ RowIdx = IsIndexValid(RowIdx, MTy->getNumRows(), false);
+ ColumnIdx = IsIndexValid(ColumnIdx, MTy->getNumColumns(), true);
+ if (!RowIdx || !ColumnIdx)
+ return ExprError();
+
+ return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ MTy->getElementType(), RBLoc);
+}
+
void Sema::CheckAddressOfNoDeref(const Expr *E) {
ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
const Expr *StrippedExpr = E->IgnoreParenImpCasts();
@@ -4583,7 +4813,9 @@ void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
- SourceLocation ColonLoc, Expr *Length,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
SourceLocation RBLoc) {
if (Base->getType()->isPlaceholderType() &&
!Base->getType()->isSpecificPlaceholderType(
@@ -4611,15 +4843,25 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
Length = Result.get();
}
+ if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Stride);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Stride = Result.get();
+ }
  // Build an unanalyzed expression if any operand is type-dependent.
if (Base->isTypeDependent() ||
(LowerBound &&
(LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
- (Length && (Length->isTypeDependent() || Length->isValueDependent()))) {
- return new (Context)
- OMPArraySectionExpr(Base, LowerBound, Length, Context.DependentTy,
- VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ (Length && (Length->isTypeDependent() || Length->isValueDependent())) ||
+ (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) {
+ return new (Context) OMPArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
}
// Perform default conversions.
@@ -4663,6 +4905,20 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
<< 1 << Length->getSourceRange();
}
+ if (Stride) {
+ ExprResult Res =
+ PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride);
+ if (Res.isInvalid())
+ return ExprError(Diag(Stride->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Stride->getSourceRange());
+ Stride = Res.get();
+
+ if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Stride->getSourceRange();
+ }
// C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
// C++ [expr.sub]p1: The type "T" shall be a completely-defined object
@@ -4681,7 +4937,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
if (LowerBound && !OriginalTy->isAnyPointerType()) {
Expr::EvalResult Result;
if (LowerBound->EvaluateAsInt(Result, Context)) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// The array section must be a subset of the original array.
llvm::APSInt LowerBoundValue = Result.Val.getInt();
if (LowerBoundValue.isNegative()) {
@@ -4695,7 +4951,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
if (Length) {
Expr::EvalResult Result;
if (Length->EvaluateAsInt(Result, Context)) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// The length must evaluate to non-negative integers.
llvm::APSInt LengthValue = Result.Val.getInt();
if (LengthValue.isNegative()) {
@@ -4705,17 +4961,32 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
}
}
- } else if (ColonLoc.isValid() &&
+ } else if (ColonLocFirst.isValid() &&
(OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
!OriginalTy->isVariableArrayType()))) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// When the size of the array dimension is not known, the length must be
// specified explicitly.
- Diag(ColonLoc, diag::err_omp_section_length_undefined)
+ Diag(ColonLocFirst, diag::err_omp_section_length_undefined)
<< (!OriginalTy.isNull() && OriginalTy->isArrayType());
return ExprError();
}
+ if (Stride) {
+ Expr::EvalResult Result;
+ if (Stride->EvaluateAsInt(Result, Context)) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // The stride must evaluate to a positive integer.
+ llvm::APSInt StrideValue = Result.Val.getInt();
+ if (!StrideValue.isStrictlyPositive()) {
+ Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive)
+ << StrideValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << Stride->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
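+
+  // For illustration, the OpenMP 5.0 array-section syntax handled here is
+  // base[lower-bound : length : stride], e.g. (a sketch with made-up names):
+  //   int a[16];
+  //   a[0:8:2]   // elements 0, 2, 4, ..., 14
+  // The stride, when present, must evaluate to a positive integer.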
+
if (!Base->getType()->isSpecificPlaceholderType(
BuiltinType::OMPArraySection)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Base);
@@ -4723,9 +4994,371 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
Base = Result.get();
}
- return new (Context)
- OMPArraySectionExpr(Base, LowerBound, Length, Context.OMPArraySectionTy,
- VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ return new (Context) OMPArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
+}
+
+ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> Brackets) {
+ if (Base->getType()->isPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ QualType BaseTy = Base->getType();
+ // Delay analysis of the types/expressions if instantiation/specialization is
+ // required.
+ if (!BaseTy->isPointerType() && Base->isTypeDependent())
+ return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base,
+ LParenLoc, RParenLoc, Dims, Brackets);
+ if (!BaseTy->isPointerType() ||
+ (!Base->isTypeDependent() &&
+ BaseTy->getPointeeType()->isIncompleteType()))
+ return ExprError(Diag(Base->getExprLoc(),
+ diag::err_omp_non_pointer_type_array_shaping_base)
+ << Base->getSourceRange());
+
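+  // For illustration, a sketch of the OpenMP 5.0 array-shaping syntax handled
+  // here (p, n and m are made-up names):
+  //   ([n][m])p   // view the pointer p as an n x m array, e.g. in
+  //               // "#pragma omp target update to( ([n][m])p )"
+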
+ SmallVector<Expr *, 4> NewDims;
+ bool ErrorFound = false;
+ for (Expr *Dim : Dims) {
+ if (Dim->getType()->isPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Dim = Result.get();
+ }
+ if (!Dim->isTypeDependent()) {
+ ExprResult Result =
+ PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer)
+ << Dim->getSourceRange();
+ continue;
+ }
+ Dim = Result.get();
+ Expr::EvalResult EvResult;
+ if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) {
+ // OpenMP 5.0, [2.1.4 Array Shaping]
+ // Each si is an integral type expression that must evaluate to a
+ // positive integer.
+ llvm::APSInt Value = EvResult.Val.getInt();
+ if (!Value.isStrictlyPositive()) {
+ Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive)
+ << Value.toString(/*Radix=*/10, /*Signed=*/true)
+ << Dim->getSourceRange();
+ ErrorFound = true;
+ continue;
+ }
+ }
+ }
+ NewDims.push_back(Dim);
+ }
+ if (ErrorFound)
+ return ExprError();
+ return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base,
+ LParenLoc, RParenLoc, NewDims, Brackets);
+}
+
+ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
+ SourceLocation LLoc, SourceLocation RLoc,
+ ArrayRef<OMPIteratorData> Data) {
+ SmallVector<OMPIteratorExpr::IteratorDefinition, 4> ID;
+ bool IsCorrect = true;
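+  // For illustration, a sketch of the OpenMP 5.0 iterator modifier handled
+  // here (the clause and names are a made-up example):
+  //   #pragma omp task depend(iterator(i = 0:n), in : a[i])
+  // Each iterator-specifier in the modifier yields one IteratorDefinition.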
+ for (const OMPIteratorData &D : Data) {
+ TypeSourceInfo *TInfo = nullptr;
+ SourceLocation StartLoc;
+ QualType DeclTy;
+ if (!D.Type.getAsOpaquePtr()) {
+ // OpenMP 5.0, 2.1.6 Iterators
+ // In an iterator-specifier, if the iterator-type is not specified then
+ // the type of that iterator is of int type.
+ DeclTy = Context.IntTy;
+ StartLoc = D.DeclIdentLoc;
+ } else {
+ DeclTy = GetTypeFromParser(D.Type, &TInfo);
+ StartLoc = TInfo->getTypeLoc().getBeginLoc();
+ }
+
+ bool IsDeclTyDependent = DeclTy->isDependentType() ||
+ DeclTy->containsUnexpandedParameterPack() ||
+ DeclTy->isInstantiationDependentType();
+ if (!IsDeclTyDependent) {
+ if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must be an integral or pointer type.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ if (DeclTy.isConstant(Context)) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must not be const qualified.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ }
+
+ // Iterator declaration.
+ assert(D.DeclIdent && "Identifier expected.");
+    // Always try to create the iterator declaration to avoid extra error
+    // messages about uses of unknown declarations.
+ auto *VD = VarDecl::Create(Context, CurContext, StartLoc, D.DeclIdentLoc,
+ D.DeclIdent, DeclTy, TInfo, SC_None);
+ VD->setImplicit();
+ if (S) {
+ // Check for conflicting previous declaration.
+ DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc);
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForVisibleRedeclaration);
+ Previous.suppressDiagnostics();
+ LookupName(Previous, S);
+
+ FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
+ if (!Previous.empty()) {
+ NamedDecl *Old = Previous.getRepresentativeDecl();
+ Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ } else {
+ PushOnScopeChains(VD, S);
+ }
+ } else {
+ CurContext->addDecl(VD);
+ }
+ Expr *Begin = D.Range.Begin;
+ if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) {
+ ExprResult BeginRes =
+ PerformImplicitConversion(Begin, DeclTy, AA_Converting);
+ Begin = BeginRes.get();
+ }
+ Expr *End = D.Range.End;
+ if (!IsDeclTyDependent && End && !End->isTypeDependent()) {
+ ExprResult EndRes = PerformImplicitConversion(End, DeclTy, AA_Converting);
+ End = EndRes.get();
+ }
+ Expr *Step = D.Range.Step;
+ if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) {
+ if (!Step->getType()->isIntegralType(Context)) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ llvm::APSInt Result;
+ bool IsConstant = Step->isIntegerConstantExpr(Result, Context);
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions
+ // If the step expression of a range-specification equals zero, the
+ // behavior is unspecified.
+ if (IsConstant && Result.isNullValue()) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ }
+ if (!Begin || !End || !IsCorrect) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back();
+ IDElem.IteratorDecl = VD;
+ IDElem.AssignmentLoc = D.AssignLoc;
+ IDElem.Range.Begin = Begin;
+ IDElem.Range.End = End;
+ IDElem.Range.Step = Step;
+ IDElem.ColonLoc = D.ColonLoc;
+ IDElem.SecondColonLoc = D.SecColonLoc;
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ SmallVector<OMPIteratorHelperData, 4> Helpers;
+ if (!CurContext->isDependentContext()) {
+    // Build the number of iterations for each iteration range.
+    // Ni = (Stepi > 0) ? ((Endi + Stepi - 1 - Begini) / Stepi)
+    //                  : ((Begini - Stepi - 1 - Endi) / -Stepi);
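+    //
+    // For example, for iterator(i = 0:10:3):
+    //   N = (10 + 3 - 1 - 0) / 3 = 4, so i takes the values 0, 3, 6 and 9.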
+ for (OMPIteratorExpr::IteratorDefinition &D : ID) {
+ // (Endi - Begini)
+ ExprResult Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.End,
+ D.Range.Begin);
+      if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult St, St1;
+ if (D.Range.Step) {
+ St = D.Range.Step;
+ // (Endi - Begini) + Stepi
+ Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Endi - Begini) + Stepi - 1
+ Res =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res.get(),
+ ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Endi - Begini) + Stepi - 1) / Stepi
+ Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ St1 = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, D.Range.Step);
+ // (Begini - Endi)
+ ExprResult Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub,
+ D.Range.Begin, D.Range.End);
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi - 1
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res1.get(),
+ ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Begini - Endi) - Stepi - 1) / (-Stepi)
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // Stepi > 0.
+ ExprResult CmpRes =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_GT, D.Range.Step,
+ ActOnIntegerConstant(D.AssignmentLoc, 0).get());
+ if (!CmpRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ Res = ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, CmpRes.get(),
+ Res.get(), Res1.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ }
+ Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false);
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+
+      // Build the counter update.
+      // First build the counter variable itself.
+ auto *CounterVD =
+ VarDecl::Create(Context, CurContext, D.IteratorDecl->getBeginLoc(),
+ D.IteratorDecl->getBeginLoc(), nullptr,
+ Res.get()->getType(), nullptr, SC_None);
+ CounterVD->setImplicit();
+ ExprResult RefRes =
+ BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue,
+ D.IteratorDecl->getBeginLoc());
+ // Build counter update.
+ // I = Begini + counter * Stepi;
+ ExprResult UpdateRes;
+ if (D.Range.Step) {
+ UpdateRes = CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Mul,
+ DefaultLvalueConversion(RefRes.get()).get(), St.get());
+ } else {
+ UpdateRes = DefaultLvalueConversion(RefRes.get());
+ }
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, D.Range.Begin,
+ UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult VDRes =
+ BuildDeclRefExpr(cast<VarDecl>(D.IteratorDecl),
+ cast<VarDecl>(D.IteratorDecl)->getType(), VK_LValue,
+ D.IteratorDecl->getBeginLoc());
+ UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, VDRes.get(),
+ UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes =
+ ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true);
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult CounterUpdateRes =
+ CreateBuiltinUnaryOp(D.AssignmentLoc, UO_PreInc, RefRes.get());
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ CounterUpdateRes =
+ ActOnFinishFullExpr(CounterUpdateRes.get(), /*DiscardedValue=*/true);
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorHelperData &HD = Helpers.emplace_back();
+ HD.CounterVD = CounterVD;
+ HD.Upper = Res.get();
+ HD.Update = UpdateRes.get();
+ HD.CounterUpdate = CounterUpdateRes.get();
+ }
+ } else {
+ Helpers.assign(ID.size(), {});
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc,
+ LLoc, RLoc, ID, Helpers);
}
ExprResult
@@ -4883,8 +5516,9 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// See IsCForbiddenLValueType.
if (!ResultType.hasQualifiers()) VK = VK_RValue;
} else if (!ResultType->isDependentType() &&
- RequireCompleteType(LLoc, ResultType,
- diag::err_subscript_incomplete_type, BaseExpr))
+ RequireCompleteSizedType(
+ LLoc, ResultType,
+ diag::err_subscript_incomplete_or_sizeless_type, BaseExpr))
return ExprError();
assert(VK == VK_RValue || LangOpts.CPlusPlus ||
@@ -4924,6 +5558,15 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param) {
if (Param->hasUnparsedDefaultArg()) {
+ // If we've already cleared out the location for the default argument,
+ // that means we're parsing it right now.
+ if (!UnparsedDefaultArgLocs.count(Param)) {
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
+ Diag(CallLoc, diag::note_recursive_default_argument_used_here);
+ Param->setInvalidDecl();
+ return true;
+ }
+
Diag(CallLoc,
diag::err_use_of_default_argument_to_function_declared_later) <<
FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
@@ -4932,90 +5575,11 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
return true;
}
- if (Param->hasUninstantiatedDefaultArg()) {
- Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
-
- EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
-
- // Instantiate the expression.
- //
- // FIXME: Pass in a correct Pattern argument, otherwise
- // getTemplateInstantiationArgs uses the lexical context of FD, e.g.
- //
- // template<typename T>
- // struct A {
- // static int FooImpl();
- //
- // template<typename Tp>
- // // bug: default argument A<T>::FooImpl() is evaluated with 2-level
- // // template argument list [[T], [Tp]], should be [[Tp]].
- // friend A<Tp> Foo(int a);
- // };
- //
- // template<typename T>
- // A<T> Foo(int a = A<T>::FooImpl());
- MultiLevelTemplateArgumentList MutiLevelArgList
- = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
-
- InstantiatingTemplate Inst(*this, CallLoc, Param,
- MutiLevelArgList.getInnermost());
- if (Inst.isInvalid())
- return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
-
- ExprResult Result;
- {
- // C++ [dcl.fct.default]p5:
- // The names in the [default argument] expression are bound, and
- // the semantic constraints are checked, at the point where the
- // default argument expression appears.
- ContextRAII SavedContext(*this, FD);
- LocalInstantiationScope Local(*this);
- runWithSufficientStackSpace(CallLoc, [&] {
- Result = SubstInitializer(UninstExpr, MutiLevelArgList,
- /*DirectInit*/false);
- });
- }
- if (Result.isInvalid())
- return true;
-
- // Check the expression as an initializer for the parameter.
- InitializedEntity Entity
- = InitializedEntity::InitializeParameter(Context, Param);
- InitializationKind Kind = InitializationKind::CreateCopy(
- Param->getLocation(),
- /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
- Expr *ResultE = Result.getAs<Expr>();
-
- InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
- Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
- if (Result.isInvalid())
- return true;
-
- Result =
- ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
- /*DiscardedValue*/ false);
- if (Result.isInvalid())
- return true;
-
- // Remember the instantiated default argument.
- Param->setDefaultArg(Result.getAs<Expr>());
- if (ASTMutationListener *L = getASTMutationListener()) {
- L->DefaultArgumentInstantiated(Param);
- }
- }
-
- // If the default argument expression is not set yet, we are building it now.
- if (!Param->hasInit()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
+ if (Param->hasUninstantiatedDefaultArg() &&
+ InstantiateDefaultArgument(CallLoc, FD, Param))
return true;
- }
+
+ assert(Param->hasInit() && "default argument but no initializer?");
// If the default expression creates temporaries, we need to
// push them to the current stack of expression temporaries so they'll
@@ -5048,6 +5612,7 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD, ParmVarDecl *Param) {
+ assert(Param->hasDefaultArg() && "can't build nonexistent default arg");
if (CheckCXXDefaultArgExpr(CallLoc, FD, Param))
return ExprError();
return CXXDefaultArgExpr::Create(Context, CallLoc, Param, CurContext);
@@ -5193,7 +5758,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
return true;
}
@@ -5238,7 +5803,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
// This deletes the extra arguments.
Call->shrinkNumArgs(NumParams);
@@ -5351,9 +5916,6 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
for (Expr *A : Args.slice(ArgIx)) {
ExprResult Arg = DefaultVariadicArgumentPromotion(A, CallType, FDecl);
Invalid |= Arg.isInvalid();
- // Copy blocks to the heap.
- if (A->getType()->isBlockPointerType())
- maybeExtendBlockObject(Arg);
AllArgs.push_back(Arg.get());
}
}
@@ -5486,7 +6048,10 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
// These are always invalid as call arguments and should be reported.
case BuiltinType::BoundMember:
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return true;
}
@@ -5609,7 +6174,8 @@ static void checkDirectCallValidity(Sema &S, const Expr *Fn,
if (Callee->getMinRequiredArguments() > ArgExprs.size())
return;
- if (const EnableIfAttr *Attr = S.CheckEnableIf(Callee, ArgExprs, true)) {
+ if (const EnableIfAttr *Attr =
+ S.CheckEnableIf(Callee, Fn->getBeginLoc(), ArgExprs, true)) {
S.Diag(Fn->getBeginLoc(),
isa<CXXMethodDecl>(Callee)
? diag::err_ovl_no_viable_member_function_in_call
@@ -5716,13 +6282,17 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (auto *ULE = dyn_cast<UnresolvedLookupExpr>(Fn)) {
if (ULE->hasExplicitTemplateArgs() &&
ULE->decls_begin() == ULE->decls_end()) {
- Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus2a
+ Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_adl_only_template_id
: diag::ext_adl_only_template_id)
<< ULE->getName();
}
}
+ if (LangOpts.OpenMP)
+ Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig);
+
return Call;
}
@@ -6133,6 +6703,18 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (NDecl)
DiagnoseSentinelCalls(NDecl, LParenLoc, Args);
+  // Warn about unions passed across a security boundary (CMSE).
+ if (FuncT != nullptr && FuncT->getCmseNSCallAttr()) {
+ for (unsigned i = 0, e = Args.size(); i != e; i++) {
+ if (const auto *RT =
+ dyn_cast<RecordType>(Args[i]->getType().getCanonicalType())) {
+ if (RT->getDecl()->isOrContainsUnion())
+ Diag(Args[i]->getBeginLoc(), diag::warn_cmse_nonsecure_union)
+ << 0 << i;
+ }
+ }
+ }
+
// Do special checking on direct calls to functions.
if (FDecl) {
if (CheckFunctionCall(FDecl, TheCall, Proto))
@@ -6150,7 +6732,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
return ExprError();
}
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), FDecl);
}
ExprResult
@@ -6173,10 +6755,10 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
QualType literalType = TInfo->getType();
if (literalType->isArrayType()) {
- if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
- diag::err_illegal_decl_array_incomplete_type,
- SourceRange(LParenLoc,
- LiteralExpr->getSourceRange().getEnd())))
+ if (RequireCompleteSizedType(
+ LParenLoc, Context.getBaseElementType(literalType),
+ diag::err_array_incomplete_or_sizeless_type,
+ SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
if (literalType->isVariableArrayType())
return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
@@ -6250,14 +6832,24 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
return ExprError();
}
- // Compound literals that have automatic storage duration are destroyed at
- // the end of the scope. Emit diagnostics if it is or contains a C union type
- // that is non-trivial to destruct.
- if (!isFileScope)
+ if (!isFileScope && !getLangOpts().CPlusPlus) {
+ // Compound literals that have automatic storage duration are destroyed at
+ // the end of the scope in C; in C++, they're just temporaries.
+
+ // Emit diagnostics if it is or contains a C union type that is non-trivial
+ // to destruct.
if (E->getType().hasNonTrivialToPrimitiveDestructCUnion())
checkNonTrivialCUnion(E->getType(), E->getExprLoc(),
NTCUC_CompoundLiteral, NTCUK_Destruct);
+ // Diagnose jumps that enter or exit the lifetime of the compound literal.
+ if (literalType.isDestructedType()) {
+ Cleanup.setExprNeedsCleanups(true);
+ ExprCleanupObjects.push_back(E);
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+ }
+
if (E->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
E->getType().hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnionInInitializer(E->getInitializer(),
@@ -6323,7 +6915,7 @@ Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
// already diagnose use of (non-C++20) C99 designator syntax.
if (getLangOpts().CPlusPlus && !DiagnosedArrayDesignator &&
!DiagnosedNestedDesignator && !DiagnosedMixedDesignator) {
- Diag(FirstDesignator, getLangOpts().CPlusPlus2a
+ Diag(FirstDesignator, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_designated_init
: diag::ext_cxx_designated_init);
} else if (!getLangOpts().CPlusPlus && !getLangOpts().C99) {
@@ -7469,7 +8061,8 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// The OpenCL operator with a vector condition is sufficiently
// different to merit its own checker.
- if (getLangOpts().OpenCL && Cond.get()->getType()->isVectorType())
+ if ((getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) ||
+ Cond.get()->getType()->isExtVectorType())
return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc);
// First, check the condition.
@@ -7519,6 +8112,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return ResTy;
}
+ // And if they're both bfloat (which isn't arithmetic), that's fine too.
+ if (LHSTy->isBFloat16Type() && RHSTy->isBFloat16Type()) {
+ return LHSTy;
+ }
+
// If both operands are the same structure or union type, the result is that
// type.
if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
@@ -7569,6 +8167,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
/*IsIntFirstExpr=*/false))
return LHSTy;
+ // Allow ?: operations in which both operands have the same
+ // built-in sizeless type.
+ if (LHSTy->isSizelessBuiltinType() && LHSTy == RHSTy)
+ return LHSTy;
+
// Emit a better diagnostic if one of the expressions is a null pointer
// constant and the other is not a pointer type. In this case, the user most
// likely forgot to take the address of the other expression.
@@ -8012,6 +8615,24 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
ColonLoc, result, VK, OK);
}
+// Check if we have a conversion between incompatible cmse function pointer
+// types, that is, a conversion between a function pointer with the
+// cmse_nonsecure_call attribute and one without.
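+//
+// For illustration (a sketch; the typedef names are made up):
+//   typedef void (*fn_t)(void);
+//   typedef void (*ns_fn_t)(void) __attribute__((cmse_nonsecure_call));
+// Converting between fn_t and ns_fn_t in either direction is reported as an
+// incompatible function pointer conversion.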
+static bool IsInvalidCmseNSCallConversion(Sema &S, QualType FromType,
+ QualType ToType) {
+ if (const auto *ToFn =
+ dyn_cast<FunctionType>(S.Context.getCanonicalType(ToType))) {
+ if (const auto *FromFn =
+ dyn_cast<FunctionType>(S.Context.getCanonicalType(FromType))) {
+ FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo();
+ FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo();
+
+ return ToEInfo.getCmseNSCall() != FromEInfo.getCmseNSCall();
+ }
+ }
+ return false;
+}
+
// checkPointerTypesForAssignment - This is a very tricky routine (despite
// being closely modeled after the C99 spec:-). The odd characteristic of this
// routine is that it effectively ignores the qualifiers on the top-level pointee.
@@ -8143,11 +8764,15 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
}
// General pointer incompatibility takes priority over qualifiers.
+ if (RHSType->isFunctionPointerType() && LHSType->isFunctionPointerType())
+ return Sema::IncompatibleFunctionPointer;
return Sema::IncompatiblePointer;
}
if (!S.getLangOpts().CPlusPlus &&
S.IsFunctionConversion(ltrans, rtrans, ltrans))
- return Sema::IncompatiblePointer;
+ return Sema::IncompatibleFunctionPointer;
+ if (IsInvalidCmseNSCallConversion(S, ltrans, rtrans))
+ return Sema::IncompatibleFunctionPointer;
return ConvTy;
}
@@ -8258,7 +8883,7 @@ Sema::CheckAssignmentConstraints(SourceLocation Loc,
/// type ElementType.
static bool isVector(QualType QT, QualType ElementType) {
if (const VectorType *VT = QT->getAs<VectorType>())
- return VT->getElementType() == ElementType;
+ return VT->getElementType().getCanonicalType() == ElementType;
return false;
}
@@ -8701,7 +9326,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
ImplicitConversionSequence ICS =
TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false);
@@ -8796,7 +9421,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (getLangOpts().ObjC &&
(CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
E->getType(), E, Diagnose) ||
- ConversionToObjCStringLiteralCheck(LHSType, E, Diagnose))) {
+ CheckConversionToObjCLiteral(LHSType, E, Diagnose))) {
if (!Diagnose)
return Incompatible;
// Replace the expression with a corrected version and continue so we
@@ -9093,7 +9718,13 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
// Reject cases where the scalar type is not a constant and has a higher
// Order than the vector element type.
llvm::APFloat Result(0.0);
- bool CstScalar = Scalar->get()->EvaluateAsFloat(Result, S.Context);
+
+ // Determine whether this is a constant scalar. In the event that the
+ // value is dependent (and thus cannot be evaluated by the constant
+ // evaluator), skip the evaluation. This will then diagnose once the
+ // expression is instantiated.
+ bool CstScalar = Scalar->get()->isValueDependent() ||
+ Scalar->get()->EvaluateAsFloat(Result, S.Context);
int Order = S.Context.getFloatingTypeOrder(VectorEltTy, ScalarTy);
if (!CstScalar && Order < 0)
return true;
@@ -9116,7 +9747,8 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
ScalarCast = CK_IntegralToFloating;
} else
return true;
- }
+ } else if (ScalarTy->isEnumeralType())
+ return true;
// Adjust scalar if desired.
if (Scalar) {
@@ -9405,6 +10037,9 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
/*AllowBothBool*/getLangOpts().AltiVec,
/*AllowBoolConversions*/false);
+ if (!IsDiv && (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()))
+ return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign);
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, IsCompAssign ? ACK_CompAssign : ACK_Arithmetic);
@@ -9519,9 +10154,10 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
assert(ResType->isAnyPointerType() && !ResType->isDependentType());
QualType PointeeTy = ResType->getPointeeType();
- return S.RequireCompleteType(Loc, PointeeTy,
- diag::err_typecheck_arithmetic_incomplete_type,
- PointeeTy, Operand->getSourceRange());
+ return S.RequireCompleteSizedType(
+ Loc, PointeeTy,
+ diag::err_typecheck_arithmetic_incomplete_or_sizeless_type,
+ Operand->getSourceRange());
}
/// Check the validity of an arithmetic pointer operand.
@@ -9575,10 +10211,8 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
// if both are pointers check if operation is valid wrt address spaces
- if (S.getLangOpts().OpenCL && isLHSPointer && isRHSPointer) {
- const PointerType *lhsPtr = LHSExpr->getType()->castAs<PointerType>();
- const PointerType *rhsPtr = RHSExpr->getType()->castAs<PointerType>();
- if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) {
+ if (isLHSPointer && isRHSPointer) {
+ if (!LHSPointeeTy.isAddressSpaceOverlapping(RHSPointeeTy)) {
S.Diag(Loc,
diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
<< LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/
@@ -9725,6 +10359,11 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
return compType;
}
+ if (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()) {
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ }
+
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -9820,6 +10459,11 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
return compType;
}
+ if (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()) {
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ }
+
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -9943,14 +10587,19 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
<< RHS.get()->getSourceRange());
return;
}
- llvm::APInt LeftBits(Right.getBitWidth(),
- S.Context.getTypeSize(LHS.get()->getType()));
+
+ QualType LHSExprType = LHS.get()->getType();
+ uint64_t LeftSize = LHSExprType->isExtIntType()
+ ? S.Context.getIntWidth(LHSExprType)
+ : S.Context.getTypeSize(LHSExprType);
+ llvm::APInt LeftBits(Right.getBitWidth(), LeftSize);
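+  // For example (a sketch): for an _ExtInt(12) left operand the width used
+  // here is 12, so a shift amount of 12 or more is diagnosed even though the
+  // value may be stored in a wider unit.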
if (Right.uge(LeftBits)) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_shift_gt_typewidth)
<< RHS.get()->getSourceRange());
return;
}
+
if (Opc != BO_Shl)
return;
@@ -9970,7 +10619,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// If LHS does not have a signed type and non-negative value
// then, the behavior is undefined before C++2a. Warn about it.
if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() &&
- !S.getLangOpts().CPlusPlus2a) {
+ !S.getLangOpts().CPlusPlus20) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
<< LHS.get()->getSourceRange());
@@ -10450,7 +11099,7 @@ static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
// C++2a [depr.array.comp]:
// Equality and relational comparisons ([expr.eq], [expr.rel]) between two
// operands of array type are deprecated.
- if (S.getLangOpts().CPlusPlus2a && LHSStripped->getType()->isArrayType() &&
+ if (S.getLangOpts().CPlusPlus20 && LHSStripped->getType()->isArrayType() &&
RHSStripped->getType()->isArrayType()) {
S.Diag(Loc, diag::warn_depr_array_comparison)
<< LHS->getSourceRange() << RHS->getSourceRange()
@@ -10907,11 +11556,22 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
// C99 6.5.9p2 and C99 6.5.8p2
if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
RCanPointeeTy.getUnqualifiedType())) {
- // Valid unless a relational comparison of function pointers
- if (IsRelational && LCanPointeeTy->isFunctionType()) {
- Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
- << LHSType << RHSType << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
+ if (IsRelational) {
+      // Both pointers need to point to complete types, or both to incomplete
+      // types.
+ if ((LCanPointeeTy->isIncompleteType() !=
+ RCanPointeeTy->isIncompleteType()) &&
+ !getLangOpts().C11) {
+ Diag(Loc, diag::ext_typecheck_compare_complete_incomplete_pointers)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange()
+ << LHSType << RHSType << LCanPointeeTy->isIncompleteType()
+ << RCanPointeeTy->isIncompleteType();
+ }
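+      // For example (a sketch, C before C11):
+      //   int (*a)[];    // pointer to an incomplete array type
+      //   int (*b)[10];  // pointer to a compatible, complete array type
+      //   a < b;         // ordered comparison of complete and incomplete
+      //                  // pointer types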
+ if (LCanPointeeTy->isFunctionType()) {
+ // Valid unless a relational comparison of function pointers
+ Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
}
} else if (!IsRelational &&
(LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
@@ -10927,8 +11587,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (LCanPointeeTy != RCanPointeeTy) {
// Treat NULL constant as a special case in OpenCL.
if (getLangOpts().OpenCL && !LHSIsNull && !RHSIsNull) {
- const PointerType *LHSPtr = LHSType->castAs<PointerType>();
- if (!LHSPtr->isAddressSpaceOverlapping(*RHSType->castAs<PointerType>())) {
+ if (!LCanPointeeTy.isAddressSpaceOverlapping(RCanPointeeTy)) {
Diag(Loc,
diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
<< LHSType << RHSType << 0 /* comparison */
@@ -11326,12 +11985,12 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
if (XorStr == "xor")
return;
- std::string LHSStr = Lexer::getSourceText(
+ std::string LHSStr = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(LHSInt->getSourceRange()),
- S.getSourceManager(), S.getLangOpts());
- std::string RHSStr = Lexer::getSourceText(
+ S.getSourceManager(), S.getLangOpts()));
+ std::string RHSStr = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(RHSInt->getSourceRange()),
- S.getSourceManager(), S.getLangOpts());
+ S.getSourceManager(), S.getLangOpts()));
if (Negative) {
RightSideValue = -RightSideValue;
@@ -11411,6 +12070,83 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
return GetSignedVectorType(LHS.get()->getType());
}
+QualType Sema::CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+
+ const MatrixType *LHSMatType = LHSType->getAs<MatrixType>();
+ const MatrixType *RHSMatType = RHSType->getAs<MatrixType>();
+ assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix");
+
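+  // For illustration (a sketch): for "m + 1.0f", where m has a matrix type,
+  // the scalar operand is converted to the matrix element type and the result
+  // has the type of the matrix operand.
+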
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
+  // Type conversion may change LHS/RHS. Keep copies of the original results,
+  // in case we have to return InvalidOperands.
+ ExprResult OriginalLHS = LHS;
+ ExprResult OriginalRHS = RHS;
+ if (LHSMatType && !RHSMatType) {
+ RHS = tryConvertExprToType(RHS.get(), LHSMatType->getElementType());
+ if (!RHS.isInvalid())
+ return LHSType;
+
+ return InvalidOperands(Loc, OriginalLHS, OriginalRHS);
+ }
+
+ if (!LHSMatType && RHSMatType) {
+ LHS = tryConvertExprToType(LHS.get(), RHSMatType->getElementType());
+ if (!LHS.isInvalid())
+ return RHSType;
+ return InvalidOperands(Loc, OriginalLHS, OriginalRHS);
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
+QualType Sema::CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ auto *LHSMatType = LHS.get()->getType()->getAs<ConstantMatrixType>();
+ auto *RHSMatType = RHS.get()->getType()->getAs<ConstantMatrixType>();
+ assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix");
+
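+  // For illustration: an M x N matrix can only be multiplied by an N x K
+  // matrix, giving an M x K result (a sketch; the typedef names are made up):
+  //   typedef float m2x3 __attribute__((matrix_type(2, 3)));
+  //   typedef float m3x4 __attribute__((matrix_type(3, 4)));
+  //   typedef float m2x4 __attribute__((matrix_type(2, 4)));
+  //   m2x4 r = a * b;   // a has type m2x3, b has type m3x4
+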
+ if (LHSMatType && RHSMatType) {
+ if (LHSMatType->getNumColumns() != RHSMatType->getNumRows())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ if (!Context.hasSameType(LHSMatType->getElementType(),
+ RHSMatType->getElementType()))
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return Context.getConstantMatrixType(LHSMatType->getElementType(),
+ LHSMatType->getNumRows(),
+ RHSMatType->getNumColumns());
+ }
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign);
+}
+
inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
@@ -12096,7 +12832,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
CheckForNullPointerDereference(*this, LHSExpr);
- if (getLangOpts().CPlusPlus2a && LHSType.isVolatileQualified()) {
+ if (getLangOpts().CPlusPlus20 && LHSType.isVolatileQualified()) {
if (CompoundType.isNull()) {
// C++2a [expr.ass]p5:
// A simple-assignment whose left operand is of a volatile-qualified
@@ -12142,8 +12878,8 @@ static bool IgnoreCommaOperand(const Expr *E) {
}
// Look for instances where it is likely the comma operator is confused with
-// another operator. There is a whitelist of acceptable expressions for the
-// left hand side of the comma operator, otherwise emit a warning.
+// another operator. There is an explicit list of acceptable expressions for
+// the left hand side of the comma operator, otherwise emit a warning.
void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
// No warnings in macros
if (Loc.isMacroID())
@@ -12153,10 +12889,10 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
if (inTemplateInstantiation())
return;
- // Scope isn't fine-grained enough to whitelist the specific cases, so
+ // Scope isn't fine-grained enough to explicitly list the specific cases, so
// instead, skip more than needed, then call back into here with the
// CommaVisitor in SemaStmt.cpp.
- // The whitelisted locations are the initialization and increment portions
+ // The listed locations are the initialization and increment portions
// of a for loop. The additional checks are on the condition of
// if statements, do/while loops, and for loops.
// Differences in scope flags for C89 mode requires the extra logic.
@@ -12299,7 +13035,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
// Now make sure the operand is a modifiable lvalue.
if (CheckForModifiableLvalue(Op, OpLoc, S))
return QualType();
- if (S.getLangOpts().CPlusPlus2a && ResType.isVolatileQualified()) {
+ if (S.getLangOpts().CPlusPlus20 && ResType.isVolatileQualified()) {
// C++2a [expr.pre.inc]p1, [expr.post.inc]p1:
// An operand with volatile-qualified type is deprecated
S.Diag(OpLoc, diag::warn_deprecated_increment_decrement_volatile)
@@ -12331,6 +13067,9 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
/// - *(x + 1) -> x, if x is an array
/// - &"123"[2] -> 0
/// - & __real__ x -> x
+///
+/// FIXME: We don't recurse to the RHS of a comma, nor handle pointers to
+/// members.
static ValueDecl *getPrimaryDecl(Expr *E) {
switch (E->getStmtClass()) {
case Stmt::DeclRefExprClass:
@@ -12371,19 +13110,22 @@ static ValueDecl *getPrimaryDecl(Expr *E) {
// If the result of an implicit cast is an l-value, we care about
// the sub-expression; otherwise, the result here doesn't matter.
return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr());
+ case Stmt::CXXUuidofExprClass:
+ return cast<CXXUuidofExpr>(E)->getGuidDecl();
default:
return nullptr;
}
}
namespace {
- enum {
- AO_Bit_Field = 0,
- AO_Vector_Element = 1,
- AO_Property_Expansion = 2,
- AO_Register_Variable = 3,
- AO_No_Error = 4
- };
+enum {
+ AO_Bit_Field = 0,
+ AO_Vector_Element = 1,
+ AO_Property_Expansion = 2,
+ AO_Register_Variable = 3,
+ AO_Matrix_Element = 4,
+ AO_No_Error = 5
+};
}
/// Diagnose invalid operand for address of operations.
///
@@ -12550,6 +13292,9 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
} else if (op->getObjectKind() == OK_VectorComponent) {
// The operand cannot be an element of a vector
AddressOfError = AO_Vector_Element;
+ } else if (op->getObjectKind() == OK_MatrixComponent) {
+ // The operand cannot be an element of a matrix.
+ AddressOfError = AO_Matrix_Element;
} else if (dcl) { // C99 6.5.3.2p1
// We have an lvalue with a decl. Make sure the decl is not declared
// with the register storage-class specifier.
@@ -12591,7 +13336,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
}
}
} else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl) &&
- !isa<BindingDecl>(dcl))
+ !isa<BindingDecl>(dcl) && !isa<MSGuidDecl>(dcl))
llvm_unreachable("Unknown/unexpected decl type");
}
@@ -12855,7 +13600,7 @@ static ExprResult convertHalfVecBinOp(Sema &S, ExprResult LHS, ExprResult RHS,
BinaryOperatorKind Opc, QualType ResultTy,
ExprValueKind VK, ExprObjectKind OK,
bool IsCompAssign, SourceLocation OpLoc,
- FPOptions FPFeatures) {
+ FPOptionsOverride FPFeatures) {
auto &Context = S.getASTContext();
assert((isVector(ResultTy, Context.HalfTy) ||
isVector(ResultTy, Context.ShortTy)) &&
@@ -12873,13 +13618,13 @@ static ExprResult convertHalfVecBinOp(Sema &S, ExprResult LHS, ExprResult RHS,
BinOpResTy = S.GetSignedVectorType(BinOpResTy);
if (IsCompAssign)
- return new (Context) CompoundAssignOperator(
- LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, BinOpResTy, BinOpResTy,
- OpLoc, FPFeatures);
+ return CompoundAssignOperator::Create(Context, LHS.get(), RHS.get(), Opc,
+ ResultTy, VK, OK, OpLoc, FPFeatures,
+ BinOpResTy, BinOpResTy);
LHS = convertVector(LHS.get(), Context.FloatTy, S);
- auto *BO = new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, BinOpResTy,
- VK, OK, OpLoc, FPFeatures);
+ auto *BO = BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc,
+ BinOpResTy, VK, OK, OpLoc, FPFeatures);
return convertVector(BO, ResultTy->castAs<VectorType>()->getElementType(), S);
}
@@ -12892,13 +13637,15 @@ CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr,
// doesn't handle dependent types properly, so make sure any TypoExprs have
// been dealt with before checking the operands.
LHS = S.CorrectDelayedTyposInExpr(LHS);
- RHS = S.CorrectDelayedTyposInExpr(RHS, [Opc, LHS](Expr *E) {
- if (Opc != BO_Assign)
- return ExprResult(E);
- // Avoid correcting the RHS to the same Expr as the LHS.
- Decl *D = getDeclFromExpr(E);
- return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E;
- });
+ RHS = S.CorrectDelayedTyposInExpr(
+ RHS, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
+ [Opc, LHS](Expr *E) {
+ if (Opc != BO_Assign)
+ return ExprResult(E);
+ // Avoid correcting the RHS to the same Expr as the LHS.
+ Decl *D = getDeclFromExpr(E);
+ return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E;
+ });
}
return std::make_pair(LHS, RHS);
}
@@ -12906,10 +13653,27 @@ CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr,
/// Returns true if conversion between vectors of halfs and vectors of floats
/// is needed.
static bool needsConversionOfHalfVec(bool OpRequiresConversion, ASTContext &Ctx,
- QualType SrcType) {
- return OpRequiresConversion && !Ctx.getLangOpts().NativeHalfType &&
- !Ctx.getTargetInfo().useFP16ConversionIntrinsics() &&
- isVector(SrcType, Ctx.HalfTy);
+ Expr *E0, Expr *E1 = nullptr) {
+ if (!OpRequiresConversion || Ctx.getLangOpts().NativeHalfType ||
+ Ctx.getTargetInfo().useFP16ConversionIntrinsics())
+ return false;
+
+ auto HasVectorOfHalfType = [&Ctx](Expr *E) {
+ QualType Ty = E->IgnoreImplicit()->getType();
+
+ // Don't promote half precision neon vectors like float16x4_t in arm_neon.h
+ // to vectors of floats. Although the element type of the vectors is __fp16,
+ // the vectors shouldn't be treated as storage-only types. See the
+ // discussion here: https://reviews.llvm.org/rG825235c140e7
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorType::NeonVector)
+ return false;
+ return VT->getElementType().getCanonicalType() == Ctx.HalfTy;
+ }
+ return false;
+ };
+
+ return HasVectorOfHalfType(E0) && (!E1 || HasVectorOfHalfType(E1));
}
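A hedged user-level illustration of what needsConversionOfHalfVec now checks (target-dependent; assumes __fp16 is storage-only and the native-half-type option is off): arithmetic on generic __fp16 vectors is promoted to float vectors and truncated back, while NEON half vectors such as float16x4_t are excluded.

    typedef __fp16 half4 __attribute__((ext_vector_type(4)));
    half4 add(half4 a, half4 b) {
      // Promoted to a float vector for the add, then truncated back to half4
      // by convertHalfVecBinOp above; a NEON float16x4_t would be left alone.
      return a + b;
    }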
/// CreateBuiltinBinOp - Creates a new built-in binary operation with
@@ -12974,14 +13738,6 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
}
}
- // Diagnose operations on the unsupported types for OpenMP device compilation.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
- if (Opc != BO_Assign && Opc != BO_Comma) {
- checkOpenMPDeviceExpr(LHSExpr);
- checkOpenMPDeviceExpr(RHSExpr);
- }
- }
-
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
@@ -13129,14 +13885,6 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
return ExprError();
- if (ResultTy->isRealFloatingType() &&
- (getLangOpts().getFPRoundingMode() != LangOptions::FPR_ToNearest ||
- getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore))
- // Mark the current function as usng floating point constrained intrinsics
- if (FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext)) {
- F->setUsesFPIntrin(true);
- }
-
// Some of the binary operations require promoting operands of half vector to
// float vectors and truncating the result back to half vector. For now, we do
// this only when HalfArgsAndReturn is set (that is, when the target is arm or
@@ -13144,8 +13892,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
assert(isVector(RHS.get()->getType(), Context.HalfTy) ==
isVector(LHS.get()->getType(), Context.HalfTy) &&
"both sides are half vectors or neither sides are");
- ConvertHalfVec = needsConversionOfHalfVec(ConvertHalfVec, Context,
- LHS.get()->getType());
+ ConvertHalfVec =
+ needsConversionOfHalfVec(ConvertHalfVec, Context, LHS.get(), RHS.get());
// Check for array bounds violations for both sides of the BinaryOperator
CheckArrayAccess(LHS.get());
@@ -13175,9 +13923,9 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (CompResultTy.isNull()) {
if (ConvertHalfVec)
return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, false,
- OpLoc, FPFeatures);
- return new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, ResultTy, VK,
- OK, OpLoc, FPFeatures);
+ OpLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc, ResultTy,
+ VK, OK, OpLoc, CurFPFeatureOverrides());
}
// Handle compound assignments.
@@ -13187,13 +13935,19 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
OK = LHS.get()->getObjectKind();
}
+ // The LHS is not converted to the result type for fixed-point compound
+ // assignment as the common type is computed on demand. Reset the CompLHSTy
+ // to the LHS type we would have gotten after unary conversions.
+ if (CompResultTy->isFixedPointType())
+ CompLHSTy = UsualUnaryConversions(LHS.get()).get()->getType();
+
if (ConvertHalfVec)
return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, true,
- OpLoc, FPFeatures);
+ OpLoc, CurFPFeatureOverrides());
- return new (Context) CompoundAssignOperator(
- LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, CompLHSTy, CompResultTy,
- OpLoc, FPFeatures);
+ return CompoundAssignOperator::Create(
+ Context, LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc,
+ CurFPFeatureOverrides(), CompLHSTy, CompResultTy);
}
/// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison
@@ -13446,7 +14200,7 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
RHS->getType(), Functions);
// In C++20 onwards, we may have a second operator to look up.
- if (S.getLangOpts().CPlusPlus2a) {
+ if (S.getLangOpts().CPlusPlus20) {
if (OverloadedOperatorKind ExtraOp = getRewrittenOverloadedOperator(OverOp))
S.LookupOverloadedOperatorName(ExtraOp, Sc, LHS->getType(),
RHS->getType(), Functions);
@@ -13596,12 +14350,6 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
<< Input.get()->getSourceRange());
}
}
- // Diagnose operations on the unsupported types for OpenMP device compilation.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
- if (UnaryOperator::isIncrementDecrementOp(Opc) ||
- UnaryOperator::isArithmeticOp(Opc))
- checkOpenMPDeviceExpr(InputExpr);
- }
switch (Opc) {
case UO_PreInc:
@@ -13637,8 +14385,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// float vector and truncating the result back to a half vector. For now, we
// do this only when HalfArgsAndReturns is set (that is, when the target is
// arm or arm64).
- ConvertHalfVec =
- needsConversionOfHalfVec(true, Context, Input.get()->getType());
+ ConvertHalfVec = needsConversionOfHalfVec(true, Context, Input.get());
// If the operand is a half vector, promote it to a float vector.
if (ConvertHalfVec)
@@ -13732,9 +14479,16 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// Vector logical not returns the signed variant of the operand type.
resultType = GetSignedVectorType(resultType);
break;
+ } else if (Context.getLangOpts().CPlusPlus && resultType->isVectorType()) {
+ const VectorType *VTy = resultType->castAs<VectorType>();
+ if (VTy->getVectorKind() != VectorType::GenericVector)
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
} else {
- // FIXME: GCC's vector extension permits the usage of '!' with a vector
- // type in C++. We should allow that here too.
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
}
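This resolves the FIXME removed above: as in GCC's vector extension, '!' applied to a generic vector is now accepted in C++ as well. A small illustration (assumes the usual vector_size extension):

    typedef int v4si __attribute__((vector_size(16)));
    v4si not_v(v4si v) {
      // Elementwise logical not: -1 where v[i] == 0, 0 otherwise, in the
      // signed variant of the operand type.
      return !v;
    }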
@@ -13781,8 +14535,9 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
if (Opc != UO_AddrOf && Opc != UO_Deref)
CheckArrayAccess(Input.get());
- auto *UO = new (Context)
- UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow);
+ auto *UO =
+ UnaryOperator::Create(Context, Input.get(), Opc, resultType, VK, OK,
+ OpLoc, CanOverflow, CurFPFeatureOverrides());
if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) &&
!isa<ArrayType>(UO->getType().getDesugaredType(Context)))
@@ -14174,11 +14929,9 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
QualType resType;
- bool ValueDependent = false;
bool CondIsTrue = false;
if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
resType = Context.DependentTy;
- ValueDependent = true;
} else {
// The conditional expression is required to be a constant expression.
llvm::APSInt condEval(32);
@@ -14194,14 +14947,12 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *ActiveExpr = CondIsTrue ? LHSExpr : RHSExpr;
resType = ActiveExpr->getType();
- ValueDependent = ActiveExpr->isValueDependent();
VK = ActiveExpr->getValueKind();
OK = ActiveExpr->getObjectKind();
}
- return new (Context)
- ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr, resType, VK, OK, RPLoc,
- CondIsTrue, resType->isDependentType(), ValueDependent);
+ return new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
+ resType, VK, OK, RPLoc, CondIsTrue);
}
//===----------------------------------------------------------------------===//
@@ -14312,11 +15063,12 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
if (ExplicitSignature) {
for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) {
ParmVarDecl *Param = ExplicitSignature.getParam(I);
- if (Param->getIdentifier() == nullptr &&
- !Param->isImplicit() &&
- !Param->isInvalidDecl() &&
- !getLangOpts().CPlusPlus)
- Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ if (Param->getIdentifier() == nullptr && !Param->isImplicit() &&
+ !Param->isInvalidDecl() && !getLangOpts().CPlusPlus) {
+ // Diagnose this as an extension in C17 and earlier.
+ if (!getLangOpts().C2x)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ }
Params.push_back(Param);
}
@@ -14705,21 +15457,15 @@ ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext);
}
-bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
- bool Diagnose) {
+bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
+ bool Diagnose) {
if (!getLangOpts().ObjC)
return false;
const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
-
- if (!PT->isObjCIdType()) {
- // Check if the destination is the 'NSString' interface.
- const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
- if (!ID || !ID->getIdentifier()->isStr("NSString"))
- return false;
- }
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
// Ignore any parens, implicit casts (should only be
// array-to-pointer decays), and not-so-opaque values. The last is
@@ -14729,15 +15475,41 @@ bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
if (OV->getSourceExpr())
SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
- StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
- if (!SL || !SL->isAscii())
- return false;
- if (Diagnose) {
- Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
- << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
- Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
+ if (auto *SL = dyn_cast<StringLiteral>(SrcExpr)) {
+ if (!PT->isObjCIdType() &&
+ !(ID && ID->getIdentifier()->isStr("NSString")))
+ return false;
+ if (!SL->isAscii())
+ return false;
+
+ if (Diagnose) {
+ Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*string*/0 << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
+ Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
+ }
+ return true;
}
- return true;
+
+ if ((isa<IntegerLiteral>(SrcExpr) || isa<CharacterLiteral>(SrcExpr) ||
+ isa<FloatingLiteral>(SrcExpr) || isa<ObjCBoolLiteralExpr>(SrcExpr) ||
+ isa<CXXBoolLiteralExpr>(SrcExpr)) &&
+ !SrcExpr->isNullPointerConstant(
+ getASTContext(), Expr::NPC_NeverValueDependent)) {
+ if (!ID || !ID->getIdentifier()->isStr("NSNumber"))
+ return false;
+ if (Diagnose) {
+ Diag(SrcExpr->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*number*/1
+ << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "@");
+ Expr *NumLit =
+ BuildObjCNumericLiteral(SrcExpr->getBeginLoc(), SrcExpr).get();
+ if (NumLit)
+ Exp = NumLit;
+ }
+ return true;
+ }
+
+ return false;
}
static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
@@ -14784,24 +15556,44 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
return false;
case PointerToInt:
- DiagKind = diag::ext_typecheck_convert_pointer_int;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_pointer_int;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_pointer_int;
+ }
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
break;
case IntToPointer:
- DiagKind = diag::ext_typecheck_convert_int_pointer;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_int_pointer;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_int_pointer;
+ }
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
+ case IncompatibleFunctionPointer:
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_function_pointer;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer;
+ }
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
break;
case IncompatiblePointer:
- if (Action == AA_Passing_CFAudited)
+ if (Action == AA_Passing_CFAudited) {
DiagKind = diag::err_arc_typecheck_convert_incompatible_pointer;
- else if (SrcType->isFunctionPointerType() &&
- DstType->isFunctionPointerType())
- DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer;
- else
+ } else if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_pointer;
+ isInvalid = true;
+ } else {
DiagKind = diag::ext_typecheck_convert_incompatible_pointer;
-
+ }
CheckInferredResultType = DstType->isObjCObjectPointerType() &&
SrcType->isObjCObjectPointerType();
if (Hint.isNull() && !CheckInferredResultType) {
@@ -14814,15 +15606,27 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
MayHaveConvFixit = true;
break;
case IncompatiblePointerSign:
- DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_pointer_sign;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ }
break;
case FunctionVoidPointer:
- DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_pointer_void_func;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ }
break;
case IncompatiblePointerDiscardsQualifiers: {
// Perform array-to-pointer decay if necessary.
if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType);
+ isInvalid = true;
+
Qualifiers lhq = SrcType->getPointeeType().getQualifiers();
Qualifiers rhq = DstType->getPointeeType().getQualifiers();
if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
@@ -14850,19 +15654,33 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (getLangOpts().CPlusPlus &&
IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
return false;
- DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_discards_qualifiers;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ }
+
break;
case IncompatibleNestedPointerQualifiers:
- DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ if (getLangOpts().CPlusPlus) {
+ isInvalid = true;
+ DiagKind = diag::err_nested_pointer_qualifier_mismatch;
+ } else {
+ DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ }
break;
case IncompatibleNestedPointerAddressSpaceMismatch:
DiagKind = diag::err_typecheck_incompatible_nested_address_space;
+ isInvalid = true;
break;
case IntToBlockPointer:
DiagKind = diag::err_int_to_block_pointer;
+ isInvalid = true;
break;
case IncompatibleBlockPointer:
DiagKind = diag::err_typecheck_convert_incompatible_block_pointer;
+ isInvalid = true;
break;
case IncompatibleObjCQualifiedId: {
if (SrcType->isObjCQualifiedIdType()) {
@@ -14887,14 +15705,25 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
SrcType->castAs<ObjCObjectPointerType>()->getInterfaceType())
IFace = IFaceT->getDecl();
}
- DiagKind = diag::warn_incompatible_qualified_id;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_incompatible_qualified_id;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::warn_incompatible_qualified_id;
+ }
break;
}
case IncompatibleVectors:
- DiagKind = diag::warn_incompatible_vectors;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_incompatible_vectors;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::warn_incompatible_vectors;
+ }
break;
case IncompatibleObjCWeakRef:
DiagKind = diag::err_arc_weak_unavailable_assign;
+ isInvalid = true;
break;
case Incompatible:
if (maybeDiagnoseAssignmentToFunction(*this, DstType, SrcExpr)) {
@@ -14952,9 +15781,10 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
HandleFunctionTypeMismatch(FDiag, SecondType, FirstType);
Diag(Loc, FDiag);
- if (DiagKind == diag::warn_incompatible_qualified_id &&
+ if ((DiagKind == diag::warn_incompatible_qualified_id ||
+ DiagKind == diag::err_incompatible_qualified_id) &&
PDecl && IFace && !IFace->hasDefinition())
- Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
+ Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
<< IFace << PDecl;
if (SecondType == Context.OverloadTy)
@@ -15079,6 +15909,12 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
+ ExprResult RValueExpr = DefaultLvalueConversion(E);
+ if (RValueExpr.isInvalid())
+ return ExprError();
+
+ E = RValueExpr.get();
+
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
@@ -15266,7 +16102,7 @@ void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) {
/// and if so, remove it from the list of volatile-qualified assignments that
/// we are going to warn are deprecated.
void Sema::CheckUnusedVolatileAssignment(Expr *E) {
- if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus2a)
+ if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus20)
return;
// Note: ignoring parens here is not justified by the standard rules, but
@@ -15281,6 +16117,186 @@ void Sema::CheckUnusedVolatileAssignment(Expr *E) {
}
}
+ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
+ if (!E.isUsable() || !Decl || !Decl->isConsteval() || isConstantEvaluated() ||
+ RebuildingImmediateInvocation)
+ return E;
+
+ /// Opportunistically remove the callee from ReferencesToConsteval if we can.
+ /// It's OK if this fails; we'll also remove this in
+ /// HandleImmediateInvocations, but catching it here allows us to avoid
+ /// walking the AST looking for it in simple cases.
+ if (auto *Call = dyn_cast<CallExpr>(E.get()->IgnoreImplicit()))
+ if (auto *DeclRef =
+ dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
+ ExprEvalContexts.back().ReferenceToConsteval.erase(DeclRef);
+
+ E = MaybeCreateExprWithCleanups(E);
+
+ ConstantExpr *Res = ConstantExpr::Create(
+ getASTContext(), E.get(),
+ ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
+ getASTContext()),
+ /*IsImmediateInvocation*/ true);
+ ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
+ return Res;
+}
+
+static void EvaluateAndDiagnoseImmediateInvocation(
+ Sema &SemaRef, Sema::ImmediateInvocationCandidate Candidate) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ ConstantExpr *CE = Candidate.getPointer();
+ bool Result = CE->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
+ SemaRef.getASTContext(), true);
+ if (!Result || !Notes.empty()) {
+ Expr *InnerExpr = CE->getSubExpr()->IgnoreImplicit();
+ if (auto *FunctionalCast = dyn_cast<CXXFunctionalCastExpr>(InnerExpr))
+ InnerExpr = FunctionalCast->getSubExpr();
+ FunctionDecl *FD = nullptr;
+ if (auto *Call = dyn_cast<CallExpr>(InnerExpr))
+ FD = cast<FunctionDecl>(Call->getCalleeDecl());
+ else if (auto *Call = dyn_cast<CXXConstructExpr>(InnerExpr))
+ FD = Call->getConstructor();
+ else
+ llvm_unreachable("unhandled decl kind");
+ assert(FD->isConsteval());
+ SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call) << FD;
+ for (auto &Note : Notes)
+ SemaRef.Diag(Note.first, Note.second);
+ return;
+ }
+ CE->MoveIntoResult(Eval.Val, SemaRef.getASTContext());
+}
+
+static void RemoveNestedImmediateInvocation(
+ Sema &SemaRef, Sema::ExpressionEvaluationContextRecord &Rec,
+ SmallVector<Sema::ImmediateInvocationCandidate, 4>::reverse_iterator It) {
+ struct ComplexRemove : TreeTransform<ComplexRemove> {
+ using Base = TreeTransform<ComplexRemove>;
+ llvm::SmallPtrSetImpl<DeclRefExpr *> &DRSet;
+ SmallVector<Sema::ImmediateInvocationCandidate, 4> &IISet;
+ SmallVector<Sema::ImmediateInvocationCandidate, 4>::reverse_iterator
+ CurrentII;
+ ComplexRemove(Sema &SemaRef, llvm::SmallPtrSetImpl<DeclRefExpr *> &DR,
+ SmallVector<Sema::ImmediateInvocationCandidate, 4> &II,
+ SmallVector<Sema::ImmediateInvocationCandidate,
+ 4>::reverse_iterator Current)
+ : Base(SemaRef), DRSet(DR), IISet(II), CurrentII(Current) {}
+ void RemoveImmediateInvocation(ConstantExpr* E) {
+ auto It = std::find_if(CurrentII, IISet.rend(),
+ [E](Sema::ImmediateInvocationCandidate Elem) {
+ return Elem.getPointer() == E;
+ });
+ assert(It != IISet.rend() &&
+ "ConstantExpr marked IsImmediateInvocation should "
+ "be present");
+ It->setInt(1); // Mark as deleted
+ }
+ ExprResult TransformConstantExpr(ConstantExpr *E) {
+ if (!E->isImmediateInvocation())
+ return Base::TransformConstantExpr(E);
+ RemoveImmediateInvocation(E);
+ return Base::TransformExpr(E->getSubExpr());
+ }
+    /// Base::TransformCXXOperatorCallExpr doesn't traverse the callee, so
+    /// we need to remove its DeclRefExpr from the DRSet.
+ ExprResult TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ DRSet.erase(cast<DeclRefExpr>(E->getCallee()->IgnoreImplicit()));
+ return Base::TransformCXXOperatorCallExpr(E);
+ }
+    /// Base::TransformInitializer skips ConstantExprs, so we need to visit
+    /// them here.
+ ExprResult TransformInitializer(Expr *Init, bool NotCopyInit) {
+ if (!Init)
+ return Init;
+      /// ConstantExprs are the first layer of implicit nodes to be removed,
+      /// so if Init isn't a ConstantExpr, no ConstantExpr will be skipped.
+ if (auto *CE = dyn_cast<ConstantExpr>(Init))
+ if (CE->isImmediateInvocation())
+ RemoveImmediateInvocation(CE);
+ return Base::TransformInitializer(Init, NotCopyInit);
+ }
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ DRSet.erase(E);
+ return E;
+ }
+ bool AlwaysRebuild() { return false; }
+ bool ReplacingOriginal() { return true; }
+ bool AllowSkippingCXXConstructExpr() {
+ bool Res = AllowSkippingFirstCXXConstructExpr;
+ AllowSkippingFirstCXXConstructExpr = true;
+ return Res;
+ }
+ bool AllowSkippingFirstCXXConstructExpr = true;
+ } Transformer(SemaRef, Rec.ReferenceToConsteval,
+ Rec.ImmediateInvocationCandidates, It);
+
+  /// CXXConstructExprs with a single argument can get skipped by
+  /// TreeTransform in some situations because they could be implicit. This
+  /// can only occur for the top-level CXXConstructExpr because it is used
+  /// nowhere else in the expression being transformed and therefore will not
+  /// be rebuilt. Setting AllowSkippingFirstCXXConstructExpr to false prevents
+  /// skipping the first CXXConstructExpr.
+ if (isa<CXXConstructExpr>(It->getPointer()->IgnoreImplicit()))
+ Transformer.AllowSkippingFirstCXXConstructExpr = false;
+
+ ExprResult Res = Transformer.TransformExpr(It->getPointer()->getSubExpr());
+ assert(Res.isUsable());
+ Res = SemaRef.MaybeCreateExprWithCleanups(Res);
+ It->getPointer()->setSubExpr(Res.get());
+}
+
+static void
+HandleImmediateInvocations(Sema &SemaRef,
+ Sema::ExpressionEvaluationContextRecord &Rec) {
+ if ((Rec.ImmediateInvocationCandidates.size() == 0 &&
+ Rec.ReferenceToConsteval.size() == 0) ||
+ SemaRef.RebuildingImmediateInvocation)
+ return;
+
+  /// When we have more than one ImmediateInvocationCandidate we need to check
+  /// for nested ImmediateInvocationCandidates. When we have only one, we only
+  /// need to remove the ReferenceToConsteval entries in that invocation.
+ if (Rec.ImmediateInvocationCandidates.size() > 1) {
+
+ /// Prevent sema calls during the tree transform from adding pointers that
+ /// are already in the sets.
+ llvm::SaveAndRestore<bool> DisableIITracking(
+ SemaRef.RebuildingImmediateInvocation, true);
+
+    /// Prevent diagnostics during the tree transform as they are duplicates.
+ Sema::TentativeAnalysisScope DisableDiag(SemaRef);
+
+ for (auto It = Rec.ImmediateInvocationCandidates.rbegin();
+ It != Rec.ImmediateInvocationCandidates.rend(); It++)
+ if (!It->getInt())
+ RemoveNestedImmediateInvocation(SemaRef, Rec, It);
+ } else if (Rec.ImmediateInvocationCandidates.size() == 1 &&
+ Rec.ReferenceToConsteval.size()) {
+ struct SimpleRemove : RecursiveASTVisitor<SimpleRemove> {
+ llvm::SmallPtrSetImpl<DeclRefExpr *> &DRSet;
+ SimpleRemove(llvm::SmallPtrSetImpl<DeclRefExpr *> &S) : DRSet(S) {}
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ DRSet.erase(E);
+ return DRSet.size();
+ }
+ } Visitor(Rec.ReferenceToConsteval);
+ Visitor.TraverseStmt(
+ Rec.ImmediateInvocationCandidates.front().getPointer()->getSubExpr());
+ }
+ for (auto CE : Rec.ImmediateInvocationCandidates)
+ if (!CE.getInt())
+ EvaluateAndDiagnoseImmediateInvocation(SemaRef, CE);
+ for (auto DR : Rec.ReferenceToConsteval) {
+ auto *FD = cast<FunctionDecl>(DR->getDecl());
+ SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
+ << FD;
+ SemaRef.Diag(FD->getLocation(), diag::note_declared_at);
+ }
+}
+
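A hedged C++20 illustration of the consteval handling these helpers implement (declarations are illustrative, not from the patch): successful immediate invocations are folded into ConstantExprs, failed ones produce err_invalid_consteval_call, and taking the address of a consteval function outside an immediate context produces err_invalid_consteval_take_address.

    consteval int sqr(int n) { return n * n; }

    constexpr int ok = sqr(10);   // immediate invocation, folded to 100
    int at_runtime(int n) {
      return sqr(n);              // error: call is not a constant expression
    }
    auto *fp = &sqr;              // error: cannot take the address of a consteval function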
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
unsigned NumTypos = Rec.NumTypos;
@@ -15314,6 +16330,7 @@ void Sema::PopExpressionEvaluationContext() {
}
WarnOnPendingNoDerefs(Rec);
+ HandleImmediateInvocations(*this, Rec);
// Warn on any volatile-qualified simple-assignments that are not discarded-
// value expressions nor unevaluated operands (those cases get removed from
@@ -15599,6 +16616,9 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (getLangOpts().CUDA)
CheckCUDACall(Loc, Func);
+ if (getLangOpts().SYCLIsDevice)
+ checkSYCLDeviceFunction(Loc, Func);
+
// If we need a definition, try to create one.
if (NeedDefinition && !Func->getBody()) {
runWithSufficientStackSpace(Loc, [&] {
@@ -15737,15 +16757,21 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (funcHasParameterSizeMangling(*this, Func))
CheckCompleteParameterTypesForMangler(*this, Func, Loc);
- Func->markUsed(Context);
- }
+ // In the MS C++ ABI, the compiler emits destructor variants where they are
+ // used. If the destructor is used here but defined elsewhere, mark the
+ // virtual base destructors referenced. If those virtual base destructors
+ // are inline, this will ensure they are defined when emitting the complete
+ // destructor variant. This checking may be redundant if the destructor is
+ // provided later in this TU.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (auto *Dtor = dyn_cast<CXXDestructorDecl>(Func)) {
+ CXXRecordDecl *Parent = Dtor->getParent();
+ if (Parent->getNumVBases() > 0 && !Dtor->getBody())
+ CheckCompleteDestructorVariant(Loc, Dtor);
+ }
+ }
- if (LangOpts.OpenMP) {
- markOpenMPDeclareVariantFuncsReferenced(Loc, Func, MightBeOdrUse);
- if (LangOpts.OpenMPIsDevice)
- checkOpenMPDeviceFunction(Loc, Func);
- else
- checkOpenMPHostFunction(Loc, Func);
+ Func->markUsed(Context);
}
}
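A hedged illustration of the Microsoft-ABI case the new block handles: referencing the destructor of a class with virtual bases also marks the virtual base destructors referenced, so the complete destructor variant can be emitted even when the referenced destructor is defined elsewhere.

    struct VBase { ~VBase() {} };
    struct Derived : virtual VBase { ~Derived(); };
    // Under the MS C++ ABI, using ~Derived() here also references ~VBase()
    // so its definition is available for the complete destructor variant.
    void destroy(Derived *d) { d->~Derived(); }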
@@ -16037,6 +17063,10 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
if (HasConst)
DeclRefType.addConst();
}
+ // Do not capture firstprivates in tasks.
+ if (S.isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel) !=
+ OMPC_unknown)
+ return true;
ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel,
RSI->OpenMPCaptureLevel);
}
@@ -16121,9 +17151,10 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// Make sure that by-copy captures are of a complete and non-abstract type.
if (!Invalid && BuildAndDiagnose) {
if (!CaptureType->isDependentType() &&
- S.RequireCompleteType(Loc, CaptureType,
- diag::err_capture_of_incomplete_type,
- Var->getDeclName()))
+ S.RequireCompleteSizedType(
+ Loc, CaptureType,
+ diag::err_capture_of_incomplete_or_sizeless_type,
+ Var->getDeclName()))
Invalid = true;
else if (S.RequireNonAbstractType(Loc, CaptureType,
diag::err_capture_of_abstract_type))
@@ -16265,12 +17296,14 @@ bool Sema::tryCaptureVariable(
// just break here. Similarly, global variables that are captured in a
// target region should not be captured outside the scope of the region.
if (RSI->CapRegionKind == CR_OpenMP) {
- bool IsOpenMPPrivateDecl = isOpenMPPrivateDecl(Var, RSI->OpenMPLevel);
+ OpenMPClauseKind IsOpenMPPrivateDecl = isOpenMPPrivateDecl(
+ Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel);
// If the variable is private (i.e. not captured) and has variably
// modified type, we still need to capture the type for correct
// codegen in all regions, associated with the construct. Currently,
// it is captured in the innermost captured region only.
- if (IsOpenMPPrivateDecl && Var->getType()->isVariablyModifiedType()) {
+ if (IsOpenMPPrivateDecl != OMPC_unknown &&
+ Var->getType()->isVariablyModifiedType()) {
QualType QTy = Var->getType();
if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
QTy = PVD->getOriginalType();
@@ -16284,15 +17317,23 @@ bool Sema::tryCaptureVariable(
captureVariablyModifiedType(Context, QTy, OuterRSI);
}
}
- bool IsTargetCap = !IsOpenMPPrivateDecl &&
- isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel);
+ bool IsTargetCap =
+ IsOpenMPPrivateDecl != OMPC_private &&
+ isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
+ // Do not capture global if it is not privatized in outer regions.
+ bool IsGlobalCap =
+ IsGlobal && isOpenMPGlobalCapturedDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
+
// When we detect target captures we are looking from inside the
// target region, therefore we need to propagate the capture from the
// enclosing region. Therefore, the capture is not initially nested.
if (IsTargetCap)
adjustOpenMPTargetScopeIndex(FunctionScopesIndex, RSI->OpenMPLevel);
- if (IsTargetCap || IsOpenMPPrivateDecl) {
+ if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private ||
+ (IsGlobal && !IsGlobalCap)) {
Nested = !IsTargetCap;
DeclRefType = DeclRefType.getUnqualifiedType();
CaptureType = Context.getLValueReferenceType(DeclRefType);
@@ -16508,7 +17549,7 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
// Mark that this expression does not constitute an odr-use.
auto MarkNotOdrUsed = [&] {
- S.MaybeODRUseExprs.erase(E);
+ S.MaybeODRUseExprs.remove(E);
if (LambdaScopeInfo *LSI = S.getCurLambda())
LSI->markVariableExprAsNonODRUsed(E);
};
@@ -17040,6 +18081,11 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
if (Method->isVirtual() &&
!Method->getDevirtualizedMethod(Base, getLangOpts().AppleKext))
OdrUse = false;
+
+ if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ if (!isConstantEvaluated() && FD->isConsteval() &&
+ !RebuildingImmediateInvocation)
+ ExprEvalContexts.back().ReferenceToConsteval.insert(E);
MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse);
}
@@ -17131,71 +18177,36 @@ void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
}
namespace {
- /// Helper class that marks all of the declarations referenced by
- /// potentially-evaluated subexpressions as "referenced".
- class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
- Sema &S;
- bool SkipLocalVariables;
-
- public:
- typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
-
- EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
- : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { }
-
- void VisitDeclRefExpr(DeclRefExpr *E) {
- // If we were asked not to visit local variables, don't.
- if (SkipLocalVariables) {
- if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
- if (VD->hasLocalStorage())
- return;
- }
-
- S.MarkDeclRefReferenced(E);
- }
-
- void VisitMemberExpr(MemberExpr *E) {
- S.MarkMemberReferenced(E);
- Inherited::VisitMemberExpr(E);
- }
-
- void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- S.MarkFunctionReferenced(
- E->getBeginLoc(),
- const_cast<CXXDestructorDecl *>(E->getTemporary()->getDestructor()));
- Visit(E->getSubExpr());
- }
-
- void VisitCXXNewExpr(CXXNewExpr *E) {
- if (E->getOperatorNew())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorNew());
- if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
- Inherited::VisitCXXNewExpr(E);
- }
+/// Helper class that marks all of the declarations referenced by
+/// potentially-evaluated subexpressions as "referenced".
+class EvaluatedExprMarker : public UsedDeclVisitor<EvaluatedExprMarker> {
+public:
+ typedef UsedDeclVisitor<EvaluatedExprMarker> Inherited;
+ bool SkipLocalVariables;
- void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
- QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- S.MarkFunctionReferenced(E->getBeginLoc(), S.LookupDestructor(Record));
- }
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
+ : Inherited(S), SkipLocalVariables(SkipLocalVariables) {}
- Inherited::VisitCXXDeleteExpr(E);
- }
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ S.MarkFunctionReferenced(Loc, cast<FunctionDecl>(D));
+ }
- void VisitCXXConstructExpr(CXXConstructExpr *E) {
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getConstructor());
- Inherited::VisitCXXConstructExpr(E);
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // If we were asked not to visit local variables, don't.
+ if (SkipLocalVariables) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (VD->hasLocalStorage())
+ return;
}
+ S.MarkDeclRefReferenced(E);
+ }
- void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- Visit(E->getExpr());
- }
- };
-}
+ void VisitMemberExpr(MemberExpr *E) {
+ S.MarkMemberReferenced(E);
+ Visit(E->getBase());
+ }
+};
+} // namespace
/// Mark any declarations that appear within this expression or any
/// potentially-evaluated subexpressions as "referenced".
@@ -18075,11 +19086,25 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
return ExprError();
}
+ case BuiltinType::IncompleteMatrixIdx:
+ Diag(cast<MatrixSubscriptExpr>(E->IgnoreParens())
+ ->getRowIdx()
+ ->getBeginLoc(),
+ diag::err_matrix_incomplete_index);
+ return ExprError();
+
// Expressions of unknown type.
case BuiltinType::OMPArraySection:
Diag(E->getBeginLoc(), diag::err_omp_array_section_use);
return ExprError();
+ // Expressions of unknown type.
+ case BuiltinType::OMPArrayShaping:
+ return ExprError(Diag(E->getBeginLoc(), diag::err_omp_array_shaping_use));
+
+ case BuiltinType::OMPIterator:
+ return ExprError(Diag(E->getBeginLoc(), diag::err_omp_iterator_use));
+
// Everything else should be impossible.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
@@ -18153,7 +19178,16 @@ ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
}
-bool Sema::IsDependentFunctionNameExpr(Expr *E) {
- assert(E->isTypeDependent());
- return isa<UnresolvedLookupExpr>(E);
+ExprResult Sema::CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
+ ArrayRef<Expr *> SubExprs, QualType T) {
+ if (!Context.getLangOpts().RecoveryAST)
+ return ExprError();
+
+ if (isSFINAEContext())
+ return ExprError();
+
+ if (T.isNull() || !Context.getLangOpts().RecoveryASTType)
+ // We don't know the concrete type, fallback to dependent type.
+ T = Context.DependentTy;
+ return RecoveryExpr::Create(Context, T, Begin, End, SubExprs);
}
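A hypothetical caller sketch (names and context are illustrative, not from the patch) showing how CreateRecoveryExpr might be used to preserve the subexpressions of a broken call when the RecoveryAST language option is enabled:

    ExprResult recoverBrokenCall(Sema &S, Expr *Fn, ArrayRef<Expr *> Args,
                                 SourceLocation RParenLoc) {
      SmallVector<Expr *, 8> SubExprs = {Fn};
      SubExprs.append(Args.begin(), Args.end());
      // The result type is unknown here, so fall back to the dependent type.
      return S.CreateRecoveryExpr(Fn->getBeginLoc(), RParenLoc, SubExprs,
                                  /*T=*/QualType());
    }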
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index 9e2957fc8545..d885920b6c14 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -156,196 +156,203 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// }
//
// See also PR6358 and PR6359.
- // For this reason, we're currently only doing the C++03 version of this
- // code; the C++0x version has to wait until we get a proper spec.
- QualType SearchType;
- DeclContext *LookupCtx = nullptr;
- bool isDependent = false;
- bool LookInScope = false;
+ //
+ // For now, we accept all the cases in which the name given could plausibly
+ // be interpreted as a correct destructor name, issuing off-by-default
+ // extension diagnostics on the cases that don't strictly conform to the
+ // C++20 rules. This basically means we always consider looking in the
+ // nested-name-specifier prefix, the complete nested-name-specifier, and
+ // the scope, and accept if we find the expected type in any of the three
+ // places.
if (SS.isInvalid())
return nullptr;
+ // Whether we've failed with a diagnostic already.
+ bool Failed = false;
+
+ llvm::SmallVector<NamedDecl*, 8> FoundDecls;
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;
+
// If we have an object type, it's because we are in a
// pseudo-destructor-expression or a member access expression, and
// we know what type we're looking for.
- if (ObjectTypePtr)
- SearchType = GetTypeFromParser(ObjectTypePtr);
+ QualType SearchType =
+ ObjectTypePtr ? GetTypeFromParser(ObjectTypePtr) : QualType();
- if (SS.isSet()) {
- NestedNameSpecifier *NNS = SS.getScopeRep();
-
- bool AlreadySearched = false;
- bool LookAtPrefix = true;
- // C++11 [basic.lookup.qual]p6:
- // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
- // the type-names are looked up as types in the scope designated by the
- // nested-name-specifier. Similarly, in a qualified-id of the form:
- //
- // nested-name-specifier[opt] class-name :: ~ class-name
- //
- // the second class-name is looked up in the same scope as the first.
- //
- // Here, we determine whether the code below is permitted to look at the
- // prefix of the nested-name-specifier.
- DeclContext *DC = computeDeclContext(SS, EnteringContext);
- if (DC && DC->isFileContext()) {
- AlreadySearched = true;
- LookupCtx = DC;
- isDependent = false;
- } else if (DC && isa<CXXRecordDecl>(DC)) {
- LookAtPrefix = false;
- LookInScope = true;
- }
-
- // The second case from the C++03 rules quoted further above.
- NestedNameSpecifier *Prefix = nullptr;
- if (AlreadySearched) {
- // Nothing left to do.
- } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) {
- CXXScopeSpec PrefixSS;
- PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
- LookupCtx = computeDeclContext(PrefixSS, EnteringContext);
- isDependent = isDependentScopeSpecifier(PrefixSS);
- } else if (ObjectTypePtr) {
- LookupCtx = computeDeclContext(SearchType);
- isDependent = SearchType->isDependentType();
- } else {
- LookupCtx = computeDeclContext(SS, EnteringContext);
- isDependent = LookupCtx && LookupCtx->isDependentContext();
- }
- } else if (ObjectTypePtr) {
- // C++ [basic.lookup.classref]p3:
- // If the unqualified-id is ~type-name, the type-name is looked up
- // in the context of the entire postfix-expression. If the type T
- // of the object expression is of a class type C, the type-name is
- // also looked up in the scope of class C. At least one of the
- // lookups shall find a name that refers to (possibly
- // cv-qualified) T.
- LookupCtx = computeDeclContext(SearchType);
- isDependent = SearchType->isDependentType();
- assert((isDependent || !SearchType->isIncompleteType()) &&
- "Caller should have completed object type");
-
- LookInScope = true;
- } else {
- // Perform lookup into the current scope (only).
- LookInScope = true;
- }
-
- TypeDecl *NonMatchingTypeDecl = nullptr;
- LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName);
- for (unsigned Step = 0; Step != 2; ++Step) {
- // Look for the name first in the computed lookup context (if we
- // have one) and, if that fails to find a match, in the scope (if
- // we're allowed to look there).
- Found.clear();
- if (Step == 0 && LookupCtx) {
- if (RequireCompleteDeclContext(SS, LookupCtx))
- return nullptr;
- LookupQualifiedName(Found, LookupCtx);
- } else if (Step == 1 && LookInScope && S) {
- LookupName(Found, S);
- } else {
- continue;
- }
+ auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
+ auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
+ auto *Type = dyn_cast<TypeDecl>(D->getUnderlyingDecl());
+ if (!Type)
+ return false;
- // FIXME: Should we be suppressing ambiguities here?
- if (Found.isAmbiguous())
- return nullptr;
+ if (SearchType.isNull() || SearchType->isDependentType())
+ return true;
- if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
QualType T = Context.getTypeDeclType(Type);
- MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
+ return Context.hasSameUnqualifiedType(T, SearchType);
+ };
- if (SearchType.isNull() || SearchType->isDependentType() ||
- Context.hasSameUnqualifiedType(T, SearchType)) {
- // We found our type!
+ unsigned NumAcceptableResults = 0;
+ for (NamedDecl *D : Found) {
+ if (IsAcceptableResult(D))
+ ++NumAcceptableResults;
+
+ // Don't list a class twice in the lookup failure diagnostic if it's
+ // found by both its injected-class-name and by the name in the enclosing
+ // scope.
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ if (RD->isInjectedClassName())
+ D = cast<NamedDecl>(RD->getParent());
+
+ if (FoundDeclSet.insert(D).second)
+ FoundDecls.push_back(D);
+ }
+
+ // As an extension, attempt to "fix" an ambiguity by erasing all non-type
+ // results, and all non-matching results if we have a search type. It's not
+ // clear what the right behavior is if destructor lookup hits an ambiguity,
+ // but other compilers do generally accept at least some kinds of
+ // ambiguity.
+ if (Found.isAmbiguous() && NumAcceptableResults == 1) {
+ Diag(NameLoc, diag::ext_dtor_name_ambiguous);
+ LookupResult::Filter F = Found.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
+ Diag(D->getLocation(), diag::note_destructor_type_here)
+ << Context.getTypeDeclType(TD);
+ else
+ Diag(D->getLocation(), diag::note_destructor_nontype_here);
+ if (!IsAcceptableResult(D))
+ F.erase();
+ }
+ F.done();
+ }
+
+ if (Found.isAmbiguous())
+ Failed = true;
+
+ if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
+ if (IsAcceptableResult(Type)) {
+ QualType T = Context.getTypeDeclType(Type);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
return CreateParsedType(T,
Context.getTrivialTypeSourceInfo(T, NameLoc));
}
+ }
- if (!SearchType.isNull())
- NonMatchingTypeDecl = Type;
- }
-
- // If the name that we found is a class template name, and it is
- // the same name as the template name in the last part of the
- // nested-name-specifier (if present) or the object type, then
- // this is the destructor for that class.
- // FIXME: This is a workaround until we get real drafting for core
- // issue 399, for which there isn't even an obvious direction.
- if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) {
- QualType MemberOfType;
- if (SS.isSet()) {
- if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) {
- // Figure out the type of the context, if it has one.
- if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx))
- MemberOfType = Context.getTypeDeclType(Record);
- }
- }
- if (MemberOfType.isNull())
- MemberOfType = SearchType;
+ return nullptr;
+ };
- if (MemberOfType.isNull())
- continue;
+ bool IsDependent = false;
- // We're referring into a class template specialization. If the
- // class template we found is the same as the template being
- // specialized, we found what we are looking for.
- if (const RecordType *Record = MemberOfType->getAs<RecordType>()) {
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
- if (Spec->getSpecializedTemplate()->getCanonicalDecl() ==
- Template->getCanonicalDecl())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
- }
+ auto LookupInObjectType = [&]() -> ParsedType {
+ if (Failed || SearchType.isNull())
+ return nullptr;
- continue;
- }
+ IsDependent |= SearchType->isDependentType();
- // We're referring to an unresolved class template
- // specialization. Determine whether we class template we found
- // is the same as the template being specialized or, if we don't
- // know which template is being specialized, that it at least
- // has the same name.
- if (const TemplateSpecializationType *SpecType
- = MemberOfType->getAs<TemplateSpecializationType>()) {
- TemplateName SpecName = SpecType->getTemplateName();
-
- // The class template we found is the same template being
- // specialized.
- if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) {
- if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ DeclContext *LookupCtx = computeDeclContext(SearchType);
+ if (!LookupCtx)
+ return nullptr;
+ LookupQualifiedName(Found, LookupCtx);
+ return CheckLookupResult(Found);
+ };
- continue;
- }
+ auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
+ if (Failed)
+ return nullptr;
- // The class template we found has the same name as the
- // (dependent) template name being specialized.
- if (DependentTemplateName *DepTemplate
- = SpecName.getAsDependentTemplateName()) {
- if (DepTemplate->isIdentifier() &&
- DepTemplate->getIdentifier() == Template->getIdentifier())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+ IsDependent |= isDependentScopeSpecifier(LookupSS);
+ DeclContext *LookupCtx = computeDeclContext(LookupSS, EnteringContext);
+ if (!LookupCtx)
+ return nullptr;
- continue;
- }
- }
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ if (RequireCompleteDeclContext(LookupSS, LookupCtx)) {
+ Failed = true;
+ return nullptr;
}
+ LookupQualifiedName(Found, LookupCtx);
+ return CheckLookupResult(Found);
+ };
+
+ auto LookupInScope = [&]() -> ParsedType {
+ if (Failed || !S)
+ return nullptr;
+
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ LookupName(Found, S);
+ return CheckLookupResult(Found);
+ };
+
+ // C++2a [basic.lookup.qual]p6:
+ // In a qualified-id of the form
+ //
+ // nested-name-specifier[opt] type-name :: ~ type-name
+ //
+ // the second type-name is looked up in the same scope as the first.
+ //
+ // We interpret this as meaning that if you do a dual-scope lookup for the
+ // first name, you also do a dual-scope lookup for the second name, per
+ // C++ [basic.lookup.classref]p4:
+ //
+ // If the id-expression in a class member access is a qualified-id of the
+ // form
+ //
+ // class-name-or-namespace-name :: ...
+ //
+ // the class-name-or-namespace-name following the . or -> is first looked
+ // up in the class of the object expression and the name, if found, is used.
+ // Otherwise, it is looked up in the context of the entire
+ // postfix-expression.
+ //
+ // This looks in the same scopes as for an unqualified destructor name:
+ //
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~ type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T
+ // of the object expression is of a class type C, the type-name is
+ // also looked up in the scope of class C. At least one of the
+ // lookups shall find a name that refers to cv T.
+ //
+ // FIXME: The intent is unclear here. Should type-name::~type-name look in
+ // the scope anyway if it finds a non-matching name declared in the class?
+ // If both lookups succeed and find a dependent result, which result should
+ // we retain? (Same question for p->~type-name().)
+
+ if (NestedNameSpecifier *Prefix =
+ SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
+ // This is
+ //
+ // nested-name-specifier type-name :: ~ type-name
+ //
+ // Look for the second type-name in the nested-name-specifier.
+ CXXScopeSpec PrefixSS;
+ PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
+ return T;
+ } else {
+ // This is one of
+ //
+ // type-name :: ~ type-name
+ // ~ type-name
+ //
+ // Look in the scope and (if any) the object type.
+ if (ParsedType T = LookupInScope())
+ return T;
+ if (ParsedType T = LookupInObjectType())
+ return T;
}
- if (isDependent) {
- // We didn't find our type, but that's okay: it's dependent
- // anyway.
+ if (Failed)
+ return nullptr;
+
+ if (IsDependent) {
+ // We didn't find our type, but that's OK: it's dependent anyway.
// FIXME: What if we have no nested-name-specifier?
QualType T = CheckTypenameType(ETK_None, SourceLocation(),
@@ -354,26 +361,98 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
return ParsedType::make(T);
}
- if (NonMatchingTypeDecl) {
- QualType T = Context.getTypeDeclType(NonMatchingTypeDecl);
- Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
- << T << SearchType;
- Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here)
- << T;
- } else if (ObjectTypePtr)
- Diag(NameLoc, diag::err_ident_in_dtor_not_a_type)
- << &II;
- else {
- SemaDiagnosticBuilder DtorDiag = Diag(NameLoc,
- diag::err_destructor_class_name);
- if (S) {
- const DeclContext *Ctx = S->getEntity();
- if (const CXXRecordDecl *Class = dyn_cast_or_null<CXXRecordDecl>(Ctx))
- DtorDiag << FixItHint::CreateReplacement(SourceRange(NameLoc),
- Class->getNameAsString());
+ // The remaining cases are all non-standard extensions imitating the behavior
+ // of various other compilers.
+ unsigned NumNonExtensionDecls = FoundDecls.size();
+
+ if (SS.isSet()) {
+ // For compatibility with older broken C++ rules and existing code,
+ //
+ // nested-name-specifier :: ~ type-name
+ //
+ // also looks for type-name within the nested-name-specifier.
+ if (ParsedType T = LookupInNestedNameSpec(SS)) {
+ Diag(SS.getEndLoc(), diag::ext_dtor_named_in_wrong_scope)
+ << SS.getRange()
+ << FixItHint::CreateInsertion(SS.getEndLoc(),
+ ("::" + II.getName()).str());
+ return T;
+ }
+
+ // For compatibility with other compilers and older versions of Clang,
+ //
+ // nested-name-specifier type-name :: ~ type-name
+ //
+ // also looks for type-name in the scope. Unfortunately, we can't
+ // reasonably apply this fallback for dependent nested-name-specifiers.
+ if (SS.getScopeRep()->getPrefix()) {
+ if (ParsedType T = LookupInScope()) {
+ Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
+ << FixItHint::CreateRemoval(SS.getRange());
+ Diag(FoundDecls.back()->getLocation(), diag::note_destructor_type_here)
+ << GetTypeFromParser(T);
+ return T;
+ }
}
}
+ // We didn't find anything matching; tell the user what we did find (if
+ // anything).
+
+ // Don't tell the user about declarations we shouldn't have found.
+ FoundDecls.resize(NumNonExtensionDecls);
+
+ // List types before non-types.
+ std::stable_sort(FoundDecls.begin(), FoundDecls.end(),
+ [](NamedDecl *A, NamedDecl *B) {
+ return isa<TypeDecl>(A->getUnderlyingDecl()) >
+ isa<TypeDecl>(B->getUnderlyingDecl());
+ });
+
+ // Suggest a fixit to properly name the destroyed type.
+ auto MakeFixItHint = [&]{
+ const CXXRecordDecl *Destroyed = nullptr;
+ // FIXME: If we have a scope specifier, suggest its last component?
+ if (!SearchType.isNull())
+ Destroyed = SearchType->getAsCXXRecordDecl();
+ else if (S)
+ Destroyed = dyn_cast_or_null<CXXRecordDecl>(S->getEntity());
+ if (Destroyed)
+ return FixItHint::CreateReplacement(SourceRange(NameLoc),
+ Destroyed->getNameAsString());
+ return FixItHint();
+ };
+
+ if (FoundDecls.empty()) {
+ // FIXME: Attempt typo-correction?
+ Diag(NameLoc, diag::err_undeclared_destructor_name)
+ << &II << MakeFixItHint();
+ } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
+ if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
+ assert(!SearchType.isNull() &&
+ "should only reject a type result if we have a search type");
+ QualType T = Context.getTypeDeclType(TD);
+ Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
+ << T << SearchType << MakeFixItHint();
+ } else {
+ Diag(NameLoc, diag::err_destructor_expr_nontype)
+ << &II << MakeFixItHint();
+ }
+ } else {
+ Diag(NameLoc, SearchType.isNull() ? diag::err_destructor_name_nontype
+ : diag::err_destructor_expr_mismatch)
+ << &II << SearchType << MakeFixItHint();
+ }
+
+ for (NamedDecl *FoundD : FoundDecls) {
+ if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
+ Diag(FoundD->getLocation(), diag::note_destructor_type_here)
+ << Context.getTypeDeclType(TD);
+ else
+ Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
+ << FoundD;
+ }
+
return nullptr;
}
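A hedged illustration of the destructor-name lookup this rewrite accepts and diagnoses:

    struct A {};
    struct B {};
    namespace N { using T = A; }

    void destroy(A *a) {
      a->~A();        // OK
      a->N::T::~T();  // OK: the second type-name is found in the same scope as the first
      a->~B();        // error: destructor type 'B' does not match object type 'A'
    }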
@@ -625,11 +704,11 @@ getUuidAttrOfType(Sema &SemaRef, QualType QT,
}
/// Build a Microsoft __uuidof expression with a type operand.
-ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ExprResult Sema::BuildCXXUuidof(QualType Type,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc) {
- StringRef UuidStr;
+ MSGuidDecl *Guid = nullptr;
if (!Operand->getType()->isDependentType()) {
llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
getUuidAttrOfType(*this, Operand->getType(), UuidAttrs);
@@ -637,22 +716,21 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
if (UuidAttrs.size() > 1)
return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
- UuidStr = UuidAttrs.back()->getGuid();
+ Guid = UuidAttrs.back()->getGuidDecl();
}
- return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), Operand, UuidStr,
- SourceRange(TypeidLoc, RParenLoc));
+ return new (Context)
+ CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
}
/// Build a Microsoft __uuidof expression with an expression operand.
-ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- Expr *E,
- SourceLocation RParenLoc) {
- StringRef UuidStr;
+ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
+ Expr *E, SourceLocation RParenLoc) {
+ MSGuidDecl *Guid = nullptr;
if (!E->getType()->isDependentType()) {
if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- UuidStr = "00000000-0000-0000-0000-000000000000";
+ // A null pointer results in {00000000-0000-0000-0000-000000000000}.
+ Guid = Context.getMSGuidDecl(MSGuidDecl::Parts{});
} else {
llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
getUuidAttrOfType(*this, E->getType(), UuidAttrs);
@@ -660,29 +738,20 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
if (UuidAttrs.size() > 1)
return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
- UuidStr = UuidAttrs.back()->getGuid();
+ Guid = UuidAttrs.back()->getGuidDecl();
}
}
- return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), E, UuidStr,
- SourceRange(TypeidLoc, RParenLoc));
+ return new (Context)
+ CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
}
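A hedged illustration (requires Microsoft extensions): __uuidof now yields a reference to a compiler-synthesized GUID object (an MSGuidDecl) instead of carrying the GUID as a string, and a null pointer constant yields the all-zero GUID.

    struct __declspec(uuid("12345678-1234-1234-1234-123456789abc")) S {};

    auto &g = __uuidof(S);   // lvalue denoting the synthesized GUID object for S
    auto &z = __uuidof(0);   // null pointer constant => {00000000-0000-0000-0000-000000000000}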
/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
ExprResult
Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
- // If MSVCGuidDecl has not been cached, do the lookup.
- if (!MSVCGuidDecl) {
- IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID");
- LookupResult R(*this, GuidII, SourceLocation(), LookupTagName);
- LookupQualifiedName(R, Context.getTranslationUnitDecl());
- MSVCGuidDecl = R.getAsSingle<RecordDecl>();
- if (!MSVCGuidDecl)
- return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof));
- }
-
- QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl);
+ QualType GuidType = Context.getMSGuidType();
+ GuidType.addConst();
if (isType) {
// The operand is a type; handle it as such.
@@ -877,6 +946,11 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
E->getSourceRange()))
return true;
+ if (!isPointer && Ty->isSizelessType()) {
+ Diag(ThrowLoc, diag::err_throw_sizeless) << Ty << E->getSourceRange();
+ return true;
+ }
+
if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
diag::err_throw_abstract_type, E))
return true;
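A hedged illustration of the new err_throw_sizeless check (AArch64 with SVE only; assumes arm_sve.h): sizeless types cannot be thrown because the exception object needs a known size.

    #include <arm_sve.h>
    void rethrow(svint8_t v) {
      throw v;  // error: cannot throw an object of sizeless type
    }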
@@ -1743,8 +1817,9 @@ Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
return false;
if (FD.isDefined())
return false;
- bool IsAligned = false;
- if (FD.isReplaceableGlobalAllocationFunction(&IsAligned) && IsAligned)
+ Optional<unsigned> AlignmentParam;
+ if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
+ AlignmentParam.hasValue())
return true;
return false;
}
@@ -2062,8 +2137,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SmallVector<Expr *, 8> AllPlaceArgs;
if (OperatorNew) {
- const FunctionProtoType *Proto =
- OperatorNew->getType()->getAs<FunctionProtoType>();
+ auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
: VariadicDoesNotApply;
@@ -2071,18 +2145,80 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// arguments. Skip the first parameter because we don't have a corresponding
// argument. Skip the second parameter too if we're passing in the
// alignment; we've already filled it in.
+ unsigned NumImplicitArgs = PassAlignment ? 2 : 1;
if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
- PassAlignment ? 2 : 1, PlacementArgs,
- AllPlaceArgs, CallType))
+ NumImplicitArgs, PlacementArgs, AllPlaceArgs,
+ CallType))
return ExprError();
if (!AllPlaceArgs.empty())
PlacementArgs = AllPlaceArgs;
- // FIXME: This is wrong: PlacementArgs misses out the first (size) argument.
- DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs);
-
- // FIXME: Missing call to CheckFunctionCall or equivalent
+ // We would like to perform some checking on the given `operator new` call,
+ // but PlacementArgs does not contain the implicit arguments, namely the
+ // allocation size and possibly the allocation alignment, so we need to
+ // conjure them.
+
+ QualType SizeTy = Context.getSizeType();
+ unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
+
+ llvm::APInt SingleEltSize(
+ SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
+
+ // How many bytes do we want to allocate here?
+ llvm::Optional<llvm::APInt> AllocationSize;
+ if (!ArraySize.hasValue() && !AllocType->isDependentType()) {
+ // For non-array operator new, we only want to allocate one element.
+ AllocationSize = SingleEltSize;
+ } else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) {
+ // For array operator new, only deal with static array size case.
+ bool Overflow;
+ AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
+ .umul_ov(SingleEltSize, Overflow);
+ (void)Overflow;
+ assert(
+ !Overflow &&
+ "Expected that all the overflows would have been handled already.");
+ }
+
+ IntegerLiteral AllocationSizeLiteral(
+ Context,
+ AllocationSize.getValueOr(llvm::APInt::getNullValue(SizeTyWidth)),
+ SizeTy, SourceLocation());
+ // Otherwise, if we failed to constant-fold the allocation size, we'll
+ // just give up and pass in something opaque that isn't a null pointer.
+ OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_RValue,
+ OK_Ordinary, /*SourceExpr=*/nullptr);
+
+ // Let's synthesize the alignment argument in case we will need it.
+ // Since we *really* want to allocate these on the stack, this is slightly ugly
+ // because there might not be a `std::align_val_t` type.
+ EnumDecl *StdAlignValT = getStdAlignValT();
+ QualType AlignValT =
+ StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
+ IntegerLiteral AlignmentLiteral(
+ Context,
+ llvm::APInt(Context.getTypeSize(SizeTy),
+ Alignment / Context.getCharWidth()),
+ SizeTy, SourceLocation());
+ ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
+ CK_IntegralCast, &AlignmentLiteral,
+ VK_RValue);
+
+ // Adjust placement args by prepending conjured size and alignment exprs.
+ llvm::SmallVector<Expr *, 8> CallArgs;
+ CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
+ CallArgs.emplace_back(AllocationSize.hasValue()
+ ? static_cast<Expr *>(&AllocationSizeLiteral)
+ : &OpaqueAllocationSize);
+ if (PassAlignment)
+ CallArgs.emplace_back(&DesiredAlignment);
+ CallArgs.insert(CallArgs.end(), PlacementArgs.begin(), PlacementArgs.end());
+
+ DiagnoseSentinelCalls(OperatorNew, PlacementLParen, CallArgs);
+
+ checkCall(OperatorNew, Proto, /*ThisArg=*/nullptr, CallArgs,
+ /*IsMemberFunction=*/false, StartLoc, Range, CallType);
// Warn if the type is over-aligned and is being allocated by (unaligned)
// global operator new.
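// A sketch of what the conjured arguments buy us; 'Arena' and the placement
// form below are hypothetical. The written call is now checked against the
// full prototype, with the implicit size (and, for aligned allocation, the
// alignment) argument placed in front of the placement arguments.
#include <cstddef>

struct Arena { char *cur; };
void *operator new(std::size_t size, Arena &a); // user-declared placement form

struct Node { int value; };

Node *make(Arena &a) {
  return new (a) Node; // checked as a call to operator new(sizeof(Node), a)
}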
@@ -2194,7 +2330,8 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
return Diag(Loc, diag::err_bad_new_type)
<< AllocType << 1 << R;
else if (!AllocType->isDependentType() &&
- RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R))
+ RequireCompleteSizedType(
+ Loc, AllocType, diag::err_new_incomplete_or_sizeless_type, R))
return true;
else if (RequireNonAbstractType(Loc, AllocType,
diag::err_allocation_of_abstract_type))
@@ -2516,8 +2653,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// for template argument deduction and for comparison purposes.
QualType ExpectedFunctionType;
{
- const FunctionProtoType *Proto
- = OperatorNew->getType()->getAs<FunctionProtoType>();
+ auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
SmallVector<QualType, 4> ArgTypes;
ArgTypes.push_back(Context.VoidPtrTy);
@@ -2836,6 +2972,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
Alloc->setParams(ParamDecls);
if (ExtraAttr)
Alloc->addAttr(ExtraAttr);
+ AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(Alloc);
Context.getTranslationUnitDecl()->addDecl(Alloc);
IdResolver.tryAddTopLevelDecl(Alloc, Name);
};
@@ -3320,7 +3457,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// this, so we treat it as a warning unless we're in a SFINAE context.
Diag(StartLoc, diag::ext_delete_void_ptr_operand)
<< Type << Ex.get()->getSourceRange();
- } else if (Pointee->isFunctionType() || Pointee->isVoidType()) {
+ } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
+ Pointee->isSizelessType()) {
return ExprError(Diag(StartLoc, diag::err_delete_operand)
<< Type << Ex.get()->getSourceRange());
} else if (!Pointee->isDependentType()) {
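// Combined sketch of the sizeless-type checks for new and delete above; again
// assumes an SVE target so that svint8_t is a sizeless builtin type.
#include <arm_sve.h>

void allocate_sizeless(svint8_t *p) {
  svint8_t *q = new svint8_t; // err_new_incomplete_or_sizeless_type
  delete p;                   // err_delete_operand: pointee is sizeless
  (void)q;
}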
@@ -3866,15 +4004,17 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
PDiag(diag::err_typecheck_ambiguous_condition)
<< From->getSourceRange());
- return ExprError();
+ return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
llvm_unreachable("Cannot perform an ellipsis conversion");
case ImplicitConversionSequence::BadConversion:
- bool Diagnosed =
- DiagnoseAssignmentResult(Incompatible, From->getExprLoc(), ToType,
- From->getType(), From, Action);
+ Sema::AssignConvertType ConvTy =
+ CheckAssignmentConstraints(From->getExprLoc(), ToType, From->getType());
+ bool Diagnosed = DiagnoseAssignmentResult(
+ ConvTy == Compatible ? Incompatible : ConvTy, From->getExprLoc(),
+ ToType, From->getType(), From, Action);
assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
return ExprError();
}
@@ -4214,9 +4354,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Case 2. _Complex x -> y
} else {
- const ComplexType *FromComplex = From->getType()->getAs<ComplexType>();
- assert(FromComplex);
-
+ auto *FromComplex = From->getType()->castAs<ComplexType>();
QualType ElType = FromComplex->getElementType();
bool isFloatingComplex = ElType->isRealFloatingType();
@@ -4345,6 +4483,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
VK_RValue, nullptr, CCK).get();
}
+ // Materialize a temporary if we're implicitly converting to a reference
+ // type. This is not required by the C++ rules but is necessary to maintain
+ // AST invariants.
+ if (ToType->isReferenceType() && From->isRValue()) {
+ ExprResult Res = TemporaryMaterializationConversion(From);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.get();
+ }
+
// If this conversion sequence succeeded and involved implicitly converting a
// _Nullable type to a _Nonnull one, complain.
if (!isCast(CCK))
@@ -4501,8 +4649,7 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
if((Operator->*IsDesiredOp)()) {
FoundOperator = true;
- const FunctionProtoType *CPT =
- Operator->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Operator->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT || !CPT->isNothrow())
return false;
@@ -4531,7 +4678,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsArray:
return T->isArrayType();
case UTT_IsPointer:
- return T->isPointerType();
+ return T->isAnyPointerType();
case UTT_IsLvalueReference:
return T->isLValueReferenceType();
case UTT_IsRvalueReference:
@@ -4751,8 +4898,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (C.getLangOpts().AccessControl && Destructor->getAccess() != AS_public)
return false;
if (UTT == UTT_IsNothrowDestructible) {
- const FunctionProtoType *CPT =
- Destructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Destructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT || !CPT->isNothrow())
return false;
@@ -4840,8 +4986,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isCopyConstructor(FoundTQs)) {
FoundConstructor = true;
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Constructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
@@ -4879,8 +5024,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isDefaultConstructor()) {
FoundConstructor = true;
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Constructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
@@ -4973,20 +5117,19 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (RD && RD->isAbstract())
return false;
- SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs;
+ llvm::BumpPtrAllocator OpaqueExprAllocator;
SmallVector<Expr *, 2> ArgExprs;
ArgExprs.reserve(Args.size() - 1);
for (unsigned I = 1, N = Args.size(); I != N; ++I) {
QualType ArgTy = Args[I]->getType();
if (ArgTy->isObjectType() || ArgTy->isFunctionType())
ArgTy = S.Context.getRValueReferenceType(ArgTy);
- OpaqueArgExprs.push_back(
- OpaqueValueExpr(Args[I]->getTypeLoc().getBeginLoc(),
- ArgTy.getNonLValueExprType(S.Context),
- Expr::getValueKindForType(ArgTy)));
+ ArgExprs.push_back(
+ new (OpaqueExprAllocator.Allocate<OpaqueValueExpr>())
+ OpaqueValueExpr(Args[I]->getTypeLoc().getBeginLoc(),
+ ArgTy.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(ArgTy)));
}
- for (Expr &E : OpaqueArgExprs)
- ArgExprs.push_back(&E);
// Perform the initialization in an unevaluated context within a SFINAE
// trap at translation unit scope.
@@ -5536,7 +5679,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
// C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
// is (exactly) 'const'.
if (Proto->isConst() && !Proto->isVolatile())
- Diag(Loc, getLangOpts().CPlusPlus2a
+ Diag(Loc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
: diag::ext_pointer_to_const_ref_member_on_rvalue);
else
@@ -5765,7 +5908,7 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
QualType CondType = Cond.get()->getType();
- const auto *CondVT = CondType->getAs<VectorType>();
+ const auto *CondVT = CondType->castAs<VectorType>();
QualType CondElementTy = CondVT->getElementType();
unsigned CondElementCount = CondVT->getNumElements();
QualType LHSType = LHS.get()->getType();
@@ -5821,7 +5964,7 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return {};
}
ResultType = Context.getVectorType(
- ResultElementTy, CondType->getAs<VectorType>()->getNumElements(),
+ ResultElementTy, CondType->castAs<VectorType>()->getNumElements(),
VectorType::GenericVector);
LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
@@ -5830,9 +5973,9 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
assert(!ResultType.isNull() && ResultType->isVectorType() &&
"Result should have been a vector type");
- QualType ResultElementTy = ResultType->getAs<VectorType>()->getElementType();
- unsigned ResultElementCount =
- ResultType->getAs<VectorType>()->getNumElements();
+ auto *ResultVectorTy = ResultType->castAs<VectorType>();
+ QualType ResultElementTy = ResultVectorTy->getElementType();
+ unsigned ResultElementCount = ResultVectorTy->getNumElements();
if (ResultElementCount != CondElementCount) {
Diag(QuestionLoc, diag::err_conditional_vector_size) << CondType
@@ -6629,8 +6772,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
T = MemPtr->getPointeeType();
- const FunctionType *FTy = T->getAs<FunctionType>();
- assert(FTy && "call to value not of function type?");
+ auto *FTy = T->castAs<FunctionType>();
ReturnsRetained = FTy->getExtInfo().getProducesResult();
// ActOnStmtExpr arranges things so that StmtExprs of retainable
@@ -6694,6 +6836,9 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
VK_RValue);
}
+ if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ Cleanup.setExprNeedsCleanups(true);
+
if (!getLangOpts().CPlusPlus)
return E;
@@ -6841,9 +6986,10 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
return ExprError();
if (RHS.get() == BO->getRHS())
return E;
- return new (Context) BinaryOperator(
- BO->getLHS(), RHS.get(), BO_Comma, BO->getType(), BO->getValueKind(),
- BO->getObjectKind(), BO->getOperatorLoc(), BO->getFPFeatures());
+ return BinaryOperator::Create(Context, BO->getLHS(), RHS.get(), BO_Comma,
+ BO->getType(), BO->getValueKind(),
+ BO->getObjectKind(), BO->getOperatorLoc(),
+ BO->getFPFeatures(getLangOpts()));
}
}
@@ -7445,13 +7591,13 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
// a difference in ARC, but outside of ARC the resulting block literal
// follows the normal lifetime rules for block literals instead of being
// autoreleased.
- DiagnosticErrorTrap Trap(Diags);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
ExprResult BlockExp = BuildBlockForLambdaConversion(
Exp.get()->getExprLoc(), Exp.get()->getExprLoc(), Method, Exp.get());
PopExpressionEvaluationContext();
+ // FIXME: This note should be produced by a CodeSynthesisContext.
if (BlockExp.isInvalid())
Diag(Exp.get()->getExprLoc(), diag::note_lambda_to_block_conv);
return BlockExp;
@@ -7510,61 +7656,6 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
-static bool IsSpecialDiscardedValue(Expr *E) {
- // In C++11, discarded-value expressions of a certain form are special,
- // according to [expr]p10:
- // The lvalue-to-rvalue conversion (4.1) is applied only if the
- // expression is an lvalue of volatile-qualified type and it has
- // one of the following forms:
- E = E->IgnoreParens();
-
- // - id-expression (5.1.1),
- if (isa<DeclRefExpr>(E))
- return true;
-
- // - subscripting (5.2.1),
- if (isa<ArraySubscriptExpr>(E))
- return true;
-
- // - class member access (5.2.5),
- if (isa<MemberExpr>(E))
- return true;
-
- // - indirection (5.3.1),
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
- if (UO->getOpcode() == UO_Deref)
- return true;
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- // - pointer-to-member operation (5.5),
- if (BO->isPtrMemOp())
- return true;
-
- // - comma expression (5.18) where the right operand is one of the above.
- if (BO->getOpcode() == BO_Comma)
- return IsSpecialDiscardedValue(BO->getRHS());
- }
-
- // - conditional expression (5.16) where both the second and the third
- // operands are one of the above, or
- if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
- return IsSpecialDiscardedValue(CO->getTrueExpr()) &&
- IsSpecialDiscardedValue(CO->getFalseExpr());
- // The related edge case of "*x ?: *x".
- if (BinaryConditionalOperator *BCO =
- dyn_cast<BinaryConditionalOperator>(E)) {
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
- return IsSpecialDiscardedValue(OVE->getSourceExpr()) &&
- IsSpecialDiscardedValue(BCO->getFalseExpr());
- }
-
- // Objective-C++ extensions to the rule.
- if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
- return true;
-
- return false;
-}
-
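// The removed helper's logic now lives in Expr::isReadIfDiscardedInCPlusPlus11
// (used below). A minimal reminder of the forms it covers, for a volatile
// glvalue used as a discarded-value expression:
void poll(volatile int *flag) {
  *flag;   // indirection: the volatile load is still performed
  flag[0]; // subscripting: likewise
}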
/// Perform the conversions required for an expression used in a
/// context that ignores the result.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
@@ -7589,23 +7680,20 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
return E;
}
- if (getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// The C++11 standard defines the notion of a discarded-value expression;
// normally, we don't need to do anything to handle it, but if it is a
// volatile lvalue with a special form, we perform an lvalue-to-rvalue
// conversion.
- if (getLangOpts().CPlusPlus11 && E->isGLValue() &&
- E->getType().isVolatileQualified()) {
- if (IsSpecialDiscardedValue(E)) {
- ExprResult Res = DefaultLvalueConversion(E);
- if (Res.isInvalid())
- return E;
- E = Res.get();
- } else {
- // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
- // it occurs as a discarded-value expression.
- CheckUnusedVolatileAssignment(E);
- }
+ if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
+ ExprResult Res = DefaultLvalueConversion(E);
+ if (Res.isInvalid())
+ return E;
+ E = Res.get();
+ } else {
+ // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
+ // it occurs as a discarded-value expression.
+ CheckUnusedVolatileAssignment(E);
}
// C++1z:
@@ -8161,6 +8249,7 @@ public:
ExprResult
Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
+ bool RecoverUncorrectedTypos,
llvm::function_ref<ExprResult(Expr *)> Filter) {
// If the current evaluation context indicates there are uncorrected typos
// and the current expression isn't guaranteed to not have typos, try to
@@ -8173,6 +8262,16 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
TyposResolved -= DelayedTypos.size();
if (Result.isInvalid() || Result.get() != E) {
ExprEvalContexts.back().NumTypos -= TyposResolved;
+ if (Result.isInvalid() && RecoverUncorrectedTypos) {
+ struct TyposReplace : TreeTransform<TyposReplace> {
+ TyposReplace(Sema &SemaRef) : TreeTransform(SemaRef) {}
+ ExprResult TransformTypoExpr(clang::TypoExpr *E) {
+ return this->SemaRef.CreateRecoveryExpr(E->getBeginLoc(),
+ E->getEndLoc(), {});
+ }
+ } TT(*this);
+ return TT.TransformExpr(E);
+ }
return Result;
}
assert(TyposResolved == 0 && "Corrected typo but got same Expr back?");
@@ -8211,7 +8310,8 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
DiagnoseUnusedExprResult(FullExpr.get());
}
- FullExpr = CorrectDelayedTyposInExpr(FullExpr.get());
+ FullExpr = CorrectDelayedTyposInExpr(FullExpr.get(), /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/true);
if (FullExpr.isInvalid())
return ExprError();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index c61b13cf5980..228a1ec3ba1f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -894,6 +894,62 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
ArrayWithObjectsMethod, SR));
}
+/// Check for duplicate keys in an ObjC dictionary literal. For instance:
+/// NSDictionary *nd = @{ @"foo" : @"bar", @"foo" : @"baz" };
+static void
+CheckObjCDictionaryLiteralDuplicateKeys(Sema &S,
+ ObjCDictionaryLiteral *Literal) {
+ if (Literal->isValueDependent() || Literal->isTypeDependent())
+ return;
+
+ // NSNumber has quite relaxed equality semantics (for instance, @YES is
+ // considered equal to @1.0). For now, ignore floating points and just do a
+ // bit-width and sign agnostic integer compare.
+ struct APSIntCompare {
+ bool operator()(const llvm::APSInt &LHS, const llvm::APSInt &RHS) const {
+ return llvm::APSInt::compareValues(LHS, RHS) < 0;
+ }
+ };
+
+ llvm::DenseMap<StringRef, SourceLocation> StringKeys;
+ std::map<llvm::APSInt, SourceLocation, APSIntCompare> IntegralKeys;
+
+ auto checkOneKey = [&](auto &Map, const auto &Key, SourceLocation Loc) {
+ auto Pair = Map.insert({Key, Loc});
+ if (!Pair.second) {
+ S.Diag(Loc, diag::warn_nsdictionary_duplicate_key);
+ S.Diag(Pair.first->second, diag::note_nsdictionary_duplicate_key_here);
+ }
+ };
+
+ for (unsigned Idx = 0, End = Literal->getNumElements(); Idx != End; ++Idx) {
+ Expr *Key = Literal->getKeyValueElement(Idx).Key->IgnoreParenImpCasts();
+
+ if (auto *StrLit = dyn_cast<ObjCStringLiteral>(Key)) {
+ StringRef Bytes = StrLit->getString()->getBytes();
+ SourceLocation Loc = StrLit->getExprLoc();
+ checkOneKey(StringKeys, Bytes, Loc);
+ }
+
+ if (auto *BE = dyn_cast<ObjCBoxedExpr>(Key)) {
+ Expr *Boxed = BE->getSubExpr();
+ SourceLocation Loc = BE->getExprLoc();
+
+ // Check for @("foo").
+ if (auto *Str = dyn_cast<StringLiteral>(Boxed->IgnoreParenImpCasts())) {
+ checkOneKey(StringKeys, Str->getBytes(), Loc);
+ continue;
+ }
+
+ Expr::EvalResult Result;
+ if (Boxed->EvaluateAsInt(Result, S.getASTContext(),
+ Expr::SE_AllowSideEffects)) {
+ checkOneKey(IntegralKeys, Result.Val.getInt(), Loc);
+ }
+ }
+ }
+}
+
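// An example dictionary literal the new check flags; Foundation types assumed,
// keys and values invented:
NSDictionary *dict = @{
  @"name"   : @1,
  @("name") : @2, // warn_nsdictionary_duplicate_key: boxed string equals @"name"
  @7        : @3,
  @7        : @4, // warn_nsdictionary_duplicate_key: same boxed integer value
};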
ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements) {
SourceLocation Loc = SR.getBegin();
@@ -1061,12 +1117,14 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
HasPackExpansions = true;
}
- QualType Ty
- = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(NSDictionaryDecl));
- return MaybeBindToTemporary(ObjCDictionaryLiteral::Create(
- Context, Elements, HasPackExpansions, Ty,
- DictionaryWithObjectsMethod, SR));
+ QualType Ty = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSDictionaryDecl));
+
+ auto *Literal =
+ ObjCDictionaryLiteral::Create(Context, Elements, HasPackExpansions, Ty,
+ DictionaryWithObjectsMethod, SR);
+ CheckObjCDictionaryLiteralDuplicateKeys(*this, Literal);
+ return MaybeBindToTemporary(Literal);
}
ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
@@ -1170,33 +1228,66 @@ static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
}
}
-static void HelperToDiagnoseDirectSelectorsExpr(Sema &S, SourceLocation AtLoc,
- Selector Sel,
- ObjCMethodList &MethList,
- bool &onlyDirect) {
+static ObjCMethodDecl *LookupDirectMethodInMethodList(Sema &S, Selector Sel,
+ ObjCMethodList &MethList,
+ bool &onlyDirect,
+ bool &anyDirect) {
+ (void)Sel;
ObjCMethodList *M = &MethList;
- for (M = M->getNext(); M; M = M->getNext()) {
+ ObjCMethodDecl *DirectMethod = nullptr;
+ for (; M; M = M->getNext()) {
ObjCMethodDecl *Method = M->getMethod();
- if (Method->getSelector() != Sel)
+ if (!Method)
continue;
- if (!Method->isDirectMethod())
+ assert(Method->getSelector() == Sel &&
+ "Method with wrong selector in method list");
+ if (Method->isDirectMethod()) {
+ anyDirect = true;
+ DirectMethod = Method;
+ } else
onlyDirect = false;
}
+
+ return DirectMethod;
}
-static void DiagnoseDirectSelectorsExpr(Sema &S, SourceLocation AtLoc,
- Selector Sel, bool &onlyDirect) {
- for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
- e = S.MethodPool.end(); b != e; b++) {
- // first, instance methods
- ObjCMethodList &InstMethList = b->second.first;
- HelperToDiagnoseDirectSelectorsExpr(S, AtLoc, Sel, InstMethList,
- onlyDirect);
+// Search the global pool for (potentially) direct methods matching the given
+ // selector. If a non-direct method is found, set \p onlyDirect to false. If
+ // a direct method is found, set \p anyDirect to true. Returns a direct
+// method, if any.
+static ObjCMethodDecl *LookupDirectMethodInGlobalPool(Sema &S, Selector Sel,
+ bool &onlyDirect,
+ bool &anyDirect) {
+ auto Iter = S.MethodPool.find(Sel);
+ if (Iter == S.MethodPool.end())
+ return nullptr;
- // second, class methods
- ObjCMethodList &ClsMethList = b->second.second;
- HelperToDiagnoseDirectSelectorsExpr(S, AtLoc, Sel, ClsMethList, onlyDirect);
- }
+ ObjCMethodDecl *DirectInstance = LookupDirectMethodInMethodList(
+ S, Sel, Iter->second.first, onlyDirect, anyDirect);
+ ObjCMethodDecl *DirectClass = LookupDirectMethodInMethodList(
+ S, Sel, Iter->second.second, onlyDirect, anyDirect);
+
+ return DirectInstance ? DirectInstance : DirectClass;
+}
+
+static ObjCMethodDecl *findMethodInCurrentClass(Sema &S, Selector Sel) {
+ auto *CurMD = S.getCurMethodDecl();
+ if (!CurMD)
+ return nullptr;
+ ObjCInterfaceDecl *IFace = CurMD->getClassInterface();
+
+ // The language enforces that only one direct method is present in a given
+ // class, so we just need to find one method in the current class to know
+ // whether Sel is potentially direct in this context.
+ if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/true))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/true))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/false))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/false))
+ return MD;
+
+ return nullptr;
}
ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
@@ -1222,15 +1313,38 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
} else
Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
} else {
- bool onlyDirect = Method->isDirectMethod();
- DiagnoseDirectSelectorsExpr(*this, AtLoc, Sel, onlyDirect);
DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc,
WarnMultipleSelectors);
+
+ bool onlyDirect = true;
+ bool anyDirect = false;
+ ObjCMethodDecl *GlobalDirectMethod =
+ LookupDirectMethodInGlobalPool(*this, Sel, onlyDirect, anyDirect);
+
if (onlyDirect) {
Diag(AtLoc, diag::err_direct_selector_expression)
<< Method->getSelector();
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
+ } else if (anyDirect) {
+ // If we saw any direct methods, see if we see a direct member of the
+ // current class. If so, the @selector will likely be used to refer to
+ // this direct method.
+ ObjCMethodDecl *LikelyTargetMethod = findMethodInCurrentClass(*this, Sel);
+ if (LikelyTargetMethod && LikelyTargetMethod->isDirectMethod()) {
+ Diag(AtLoc, diag::warn_potentially_direct_selector_expression) << Sel;
+ Diag(LikelyTargetMethod->getLocation(),
+ diag::note_direct_method_declared_at)
+ << LikelyTargetMethod->getDeclName();
+ } else if (!LikelyTargetMethod) {
+ // Otherwise, emit the "strict" variant of this diagnostic, unless
+ // LikelyTargetMethod is non-direct.
+ Diag(AtLoc, diag::warn_strict_potentially_direct_selector_expression)
+ << Sel;
+ Diag(GlobalDirectMethod->getLocation(),
+ diag::note_direct_method_declared_at)
+ << GlobalDirectMethod->getDeclName();
+ }
}
}
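// Sketch of the new "potentially direct" @selector diagnostics; Foundation is
// assumed for NSObject, and the class and selector names are invented.
@interface A : NSObject
- (void)ping __attribute__((objc_direct));
@end

@interface B : NSObject
- (void)ping; // a non-direct method with the same selector also exists
@end

@implementation A
- (void)ping {}
- (void)use {
  SEL s = @selector(ping); // warn_potentially_direct_selector_expression,
  (void)s;                 // since the current class has a direct -ping
}
@end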
@@ -1953,7 +2067,8 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (const ObjCPropertyDecl *PDecl = Setter->findPropertyDecl()) {
// Do not warn if user is using property-dot syntax to make call to
// user named setter.
- if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter))
+ if (!(PDecl->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_setter))
Diag(MemberLoc,
diag::warn_property_access_suggest)
<< MemberName << QualType(OPT, 0) << PDecl->getName()
@@ -2570,6 +2685,16 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
diag::err_illegal_message_expr_incomplete_type))
return ExprError();
+ if (Method && Method->isDirectMethod() && SuperLoc.isValid()) {
+ Diag(SuperLoc, diag::err_messaging_super_with_direct_method)
+ << FixItHint::CreateReplacement(
+ SuperLoc, getLangOpts().ObjCAutoRefCount
+ ? "self"
+ : Method->getClassInterface()->getName());
+ Diag(Method->getLocation(), diag::note_direct_method_declared_at)
+ << Method->getDeclName();
+ }
+
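// Sketch of the new fix-it for class messages to 'super' that resolve to a
// direct method; class names invented, Foundation assumed.
__attribute__((objc_direct_members))
@interface Base : NSObject
+ (void)setup;
@end

@interface Derived : Base
@end

@implementation Derived
+ (void)configure {
  [super setup]; // err_messaging_super_with_direct_method; the fix-it suggests
                 // 'Base' (or 'self' under ARC) in place of 'super'
}
@end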
// Warn about explicit call of +initialize on its own class. But not on 'super'.
if (Method && Method->getMethodFamily() == OMF_initialize) {
if (!SuperLoc.isValid()) {
@@ -2774,9 +2899,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
ReceiverType->isIntegerType())) {
// Implicitly convert integers and pointers to 'id' but emit a warning.
// But not in ARC.
- Diag(Loc, diag::warn_bad_receiver_type)
- << ReceiverType
- << Receiver->getSourceRange();
+ Diag(Loc, diag::warn_bad_receiver_type) << ReceiverType << RecRange;
if (ReceiverType->isPointerType()) {
Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
CK_CPointerToObjCPointerCast).get();
@@ -2927,11 +3050,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// definition is found in a module that's not visible.
const ObjCInterfaceDecl *forwardClass = nullptr;
if (RequireCompleteType(Loc, OCIType->getPointeeType(),
- getLangOpts().ObjCAutoRefCount
- ? diag::err_arc_receiver_forward_instance
- : diag::warn_receiver_forward_instance,
- Receiver? Receiver->getSourceRange()
- : SourceRange(SuperLoc))) {
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_instance
+ : diag::warn_receiver_forward_instance,
+ RecRange)) {
if (getLangOpts().ObjCAutoRefCount)
return ExprError();
@@ -2993,8 +3115,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
return ExprError();
} else {
// Reject other random receiver types (e.g. structs).
- Diag(Loc, diag::err_bad_receiver_type)
- << ReceiverType << Receiver->getSourceRange();
+ Diag(Loc, diag::err_bad_receiver_type) << ReceiverType << RecRange;
return ExprError();
}
}
@@ -3012,15 +3133,35 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
<< Method->getDeclName();
}
- if (ReceiverType->isObjCClassType() && !isImplicit) {
- Diag(Receiver->getExprLoc(),
- diag::err_messaging_class_with_direct_method);
+ // Under ARC, self can't be assigned, so a direct call on `self` when it's a
+ // Class is safe. In other cases we can't trust that `self` is what we think
+ // it is, so we reject it.
+ if (ReceiverType->isObjCClassType() && !isImplicit &&
+ !(Receiver->isObjCSelfExpr() && getLangOpts().ObjCAutoRefCount)) {
+ {
+ DiagnosticBuilder Builder =
+ Diag(Receiver->getExprLoc(),
+ diag::err_messaging_class_with_direct_method);
+ if (Receiver->isObjCSelfExpr()) {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(
+ RecRange, Method->getClassInterface()->getName()));
+ }
+ }
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
}
if (SuperLoc.isValid()) {
- Diag(SuperLoc, diag::err_messaging_super_with_direct_method);
+ {
+ DiagnosticBuilder Builder =
+ Diag(SuperLoc, diag::err_messaging_super_with_direct_method);
+ if (ReceiverType->isObjCClassType()) {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(
+ SuperLoc, Method->getClassInterface()->getName()));
+ } else {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(SuperLoc, "self"));
+ }
+ }
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
}
@@ -3232,7 +3373,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!isImplicit && Method) {
if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) {
bool IsWeak =
- Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
+ Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak;
if (!IsWeak && Sel.isUnarySelector())
IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
if (IsWeak && !isUnevaluatedContext() &&
@@ -4337,7 +4478,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// to 'NSString *', instead of falling through to report a "bridge cast"
// diagnostic.
if (castACTC == ACTC_retainable && exprACTC == ACTC_none &&
- ConversionToObjCStringLiteralCheck(castType, castExpr, Diagnose))
+ CheckConversionToObjCLiteral(castType, castExpr, Diagnose))
return ACR_error;
// Do not issue "bridge cast" diagnostic when implicit casting
@@ -4400,9 +4541,10 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
} else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
assert(uo->getOpcode() == UO_Extension);
Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
- return new (Context)
- UnaryOperator(sub, UO_Extension, sub->getType(), sub->getValueKind(),
- sub->getObjectKind(), uo->getOperatorLoc(), false);
+ return UnaryOperator::Create(Context, sub, UO_Extension, sub->getType(),
+ sub->getValueKind(), sub->getObjectKind(),
+ uo->getOperatorLoc(), false,
+ CurFPFeatureOverrides());
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
index 785637761e71..eb07de65d266 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Initialization.h"
@@ -1092,7 +1093,7 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
auto *CXXRD = T->getAsCXXRecordDecl();
if (!VerifyOnly && CXXRD && CXXRD->hasUserDeclaredConstructor()) {
SemaRef.Diag(StructuredSubobjectInitList->getBeginLoc(),
- diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ diag::warn_cxx20_compat_aggregate_init_with_ctors)
<< StructuredSubobjectInitList->getSourceRange() << T;
}
}
@@ -1118,14 +1119,14 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_Parameter_CF_Audited:
case InitializedEntity::EK_Result:
// Extra braces here are suspicious.
- DiagID = diag::warn_braces_around_scalar_init;
+ DiagID = diag::warn_braces_around_init;
break;
case InitializedEntity::EK_Member:
// Warn on aggregate initialization but not on ctor init list or
// default member initializer.
if (Entity.getParent())
- DiagID = diag::warn_braces_around_scalar_init;
+ DiagID = diag::warn_braces_around_init;
break;
case InitializedEntity::EK_Variable:
@@ -1156,9 +1157,9 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
if (DiagID) {
S.Diag(Braces.getBegin(), DiagID)
- << Braces
- << FixItHint::CreateRemoval(Braces.getBegin())
- << FixItHint::CreateRemoval(Braces.getEnd());
+ << Entity.getType()->isSizelessBuiltinType() << Braces
+ << FixItHint::CreateRemoval(Braces.getBegin())
+ << FixItHint::CreateRemoval(Braces.getEnd());
}
}
@@ -1202,6 +1203,12 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
: diag::ext_excess_initializers_in_char_array_initializer;
SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
<< IList->getInit(Index)->getSourceRange();
+ } else if (T->isSizelessBuiltinType()) {
+ unsigned DK = ExtraInitsIsError
+ ? diag::err_excess_initializers_for_sizeless_type
+ : diag::ext_excess_initializers_for_sizeless_type;
+ SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
+ << T << IList->getInit(Index)->getSourceRange();
} else {
int initKind = T->isArrayType() ? 0 :
T->isVectorType() ? 1 :
@@ -1235,7 +1242,7 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
if (!HasEquivCtor) {
SemaRef.Diag(IList->getBeginLoc(),
- diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ diag::warn_cxx20_compat_aggregate_init_with_ctors)
<< IList->getSourceRange() << T;
}
}
@@ -1294,7 +1301,8 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_init_objc_class) << DeclType;
hadError = true;
- } else if (DeclType->isOCLIntelSubgroupAVCType()) {
+ } else if (DeclType->isOCLIntelSubgroupAVCType() ||
+ DeclType->isSizelessBuiltinType()) {
// Checks for scalar type are sufficient for these types too.
CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
@@ -1507,12 +1515,20 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
- if (!VerifyOnly)
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_scalar_initializer
- : diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
+ if (!VerifyOnly) {
+ if (DeclType->isSizelessBuiltinType())
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_sizeless_initializer
+ : diag::err_empty_sizeless_initializer)
+ << DeclType << IList->getSourceRange();
+ else
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_scalar_initializer
+ : diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ }
hadError = !SemaRef.getLangOpts().CPlusPlus11;
++Index;
++StructuredIndex;
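// How the sizeless-type branches above surface in source; assumes an SVE
// target where svint8_t is a sizeless builtin type.
#include <arm_sve.h>

void braced_init(svint8_t v) {
  svint8_t a = {};     // err_empty_sizeless_initializer before C++11,
                       // a C++98-compat warning afterwards
  svint8_t b = {v};    // checked like a scalar initializer
  svint8_t c = {v, v}; // err/ext_excess_initializers_for_sizeless_type
  (void)a; (void)b; (void)c;
}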
@@ -1524,17 +1540,18 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
// FIXME: This is invalid, and accepting it causes overload resolution
// to pick the wrong overload in some corner cases.
if (!VerifyOnly)
- SemaRef.Diag(SubIList->getBeginLoc(),
- diag::ext_many_braces_around_scalar_init)
- << SubIList->getSourceRange();
+ SemaRef.Diag(SubIList->getBeginLoc(), diag::ext_many_braces_around_init)
+ << DeclType->isSizelessBuiltinType() << SubIList->getSourceRange();
CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
StructuredIndex);
return;
} else if (isa<DesignatedInitExpr>(expr)) {
if (!VerifyOnly)
- SemaRef.Diag(expr->getBeginLoc(), diag::err_designator_for_scalar_init)
- << DeclType << expr->getSourceRange();
+ SemaRef.Diag(expr->getBeginLoc(),
+ diag::err_designator_for_scalar_or_sizeless_init)
+ << DeclType->isSizelessBuiltinType() << DeclType
+ << expr->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1621,7 +1638,7 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
expr = Result.getAs<Expr>();
// FIXME: Why are we updating the syntactic init list?
- if (!VerifyOnly)
+ if (!VerifyOnly && expr)
IList->setInit(Index, expr);
if (hadError)
@@ -3477,6 +3494,7 @@ bool InitializationSequence::isAmbiguous() const {
case FK_NonConstLValueReferenceBindingToTemporary:
case FK_NonConstLValueReferenceBindingToBitfield:
case FK_NonConstLValueReferenceBindingToVectorElement:
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
case FK_NonConstLValueReferenceBindingToUnrelated:
case FK_RValueReferenceBindingToLValue:
case FK_ReferenceAddrspaceMismatchTemporary:
@@ -4420,16 +4438,20 @@ static void TryListInitialization(Sema &S,
// direct-list-initialization and copy-initialization otherwise.
// We can't use InitListChecker for this, because it always performs
// copy-initialization. This only matters if we might use an 'explicit'
- // conversion operator, so we only need to handle the cases where the source
- // is of record type.
- if (InitList->getInit(0)->getType()->isRecordType()) {
+ // conversion operator, or for the special case conversion of nullptr_t to
+ // bool, so we only need to handle those cases.
+ //
+ // FIXME: Why not do this in all cases?
+ Expr *Init = InitList->getInit(0);
+ if (Init->getType()->isRecordType() ||
+ (Init->getType()->isNullPtrType() && DestType->isBooleanType())) {
InitializationKind SubKind =
Kind.getKind() == InitializationKind::IK_DirectList
? InitializationKind::CreateDirect(Kind.getLocation(),
InitList->getLBraceLoc(),
InitList->getRBraceLoc())
: Kind;
- Expr *SubInit[1] = { InitList->getInit(0) };
+ Expr *SubInit[1] = { Init };
Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
/*TopLevelOfInitList*/true,
TreatUnavailableAsInvalid);
@@ -4666,10 +4688,14 @@ static void TryReferenceInitialization(Sema &S,
/// which a reference can never bind). Attempting to bind a reference to
/// such a glvalue will always create a temporary.
static bool isNonReferenceableGLValue(Expr *E) {
- return E->refersToBitField() || E->refersToVectorElement();
+ return E->refersToBitField() || E->refersToVectorElement() ||
+ E->refersToMatrixElement();
}
/// Reference initialization without resolving overloaded functions.
+///
+/// We can also get here in C when calling a builtin that is declared as
+/// a function with a parameter of reference type (such as __builtin_va_end()).
static void TryReferenceInitializationCore(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4746,15 +4772,20 @@ static void TryReferenceInitializationCore(Sema &S,
// an rvalue. DR1287 removed the "implicitly" here.
if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
(isLValueRef || InitCategory.isRValue())) {
- ConvOvlResult = TryRefInitWithConversionFunction(
- S, Entity, Kind, Initializer, /*AllowRValues*/ isRValueRef,
- /*IsLValueRef*/ isLValueRef, Sequence);
- if (ConvOvlResult == OR_Success)
- return;
- if (ConvOvlResult != OR_No_Viable_Function)
- Sequence.SetOverloadFailure(
- InitializationSequence::FK_ReferenceInitOverloadFailed,
- ConvOvlResult);
+ if (S.getLangOpts().CPlusPlus) {
+ // Try conversion functions only for C++.
+ ConvOvlResult = TryRefInitWithConversionFunction(
+ S, Entity, Kind, Initializer, /*AllowRValues*/ isRValueRef,
+ /*IsLValueRef*/ isLValueRef, Sequence);
+ if (ConvOvlResult == OR_Success)
+ return;
+ if (ConvOvlResult != OR_No_Viable_Function)
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ } else {
+ ConvOvlResult = OR_No_Viable_Function;
+ }
}
}
@@ -4787,6 +4818,9 @@ static void TryReferenceInitializationCore(Sema &S,
else if (Initializer->refersToVectorElement())
FK = InitializationSequence::
FK_NonConstLValueReferenceBindingToVectorElement;
+ else if (Initializer->refersToMatrixElement())
+ FK = InitializationSequence::
+ FK_NonConstLValueReferenceBindingToMatrixElement;
else
llvm_unreachable("unexpected kind of compatible initializer");
break;
@@ -4924,7 +4958,7 @@ static void TryReferenceInitializationCore(Sema &S,
ImplicitConversionSequence ICS
= S.TryImplicitConversion(Initializer, TempEntity.getType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ Sema::AllowedExplicit::None,
/*FIXME:InOverloadResolution=*/false,
/*CStyle=*/Kind.isCStyleOrFunctionalCast(),
/*AllowObjCWritebackConversion=*/false);
@@ -5620,7 +5654,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (S.CheckObjCBridgeRelatedConversions(Initializer->getBeginLoc(),
DestType, Initializer->getType(),
Initializer) ||
- S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
+ S.CheckConversionToObjCLiteral(DestType, Initializer))
Args[0] = Initializer;
}
if (!isa<InitListExpr>(Initializer))
@@ -5854,6 +5888,19 @@ void InitializationSequence::InitializeFrom(Sema &S,
return;
}
+ // - Otherwise, if the initialization is direct-initialization, the source
+ // type is std::nullptr_t, and the destination type is bool, the initial
+ // value of the object being initialized is false.
+ if (!SourceType.isNull() && SourceType->isNullPtrType() &&
+ DestType->isBooleanType() &&
+ Kind.getKind() == InitializationKind::IK_Direct) {
+ AddConversionSequenceStep(
+ ImplicitConversionSequence::getNullptrToBool(SourceType, DestType,
+ Initializer->isGLValue()),
+ DestType);
+ return;
+ }
+
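// The nullptr_t-to-bool special case in source form (direct-initialization
// only):
bool flag_braced{nullptr}; // OK: direct-list-initialization, value is false
bool flag_parens(nullptr); // OK: direct-initialization, likewise false
bool flag_copy = nullptr;  // still ill-formed: copy-initialization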
// - Otherwise, the initial value of the object being initialized is the
// (possibly converted) value of the initializer expression. Standard
// conversions (Clause 4) will be used, if necessary, to convert the
@@ -5863,7 +5910,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
ImplicitConversionSequence ICS
= S.TryImplicitConversion(Initializer, DestType,
/*SuppressUserConversions*/true,
- /*AllowExplicitConversions*/ false,
+ Sema::AllowedExplicit::None,
/*InOverloadResolution*/ false,
/*CStyle=*/Kind.isCStyleOrFunctionalCast(),
allowObjCWritebackConversion);
@@ -6416,12 +6463,14 @@ PerformConstructorInitialization(Sema &S,
}
S.MarkFunctionReferenced(Loc, Constructor);
- CurInit = CXXTemporaryObjectExpr::Create(
- S.Context, Constructor,
- Entity.getType().getNonLValueExprType(S.Context), TSInfo,
- ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
- IsListInitialization, IsStdInitListInitialization,
- ConstructorInitRequiresZeroInit);
+ CurInit = S.CheckForImmediateInvocation(
+ CXXTemporaryObjectExpr::Create(
+ S.Context, Constructor,
+ Entity.getType().getNonLValueExprType(S.Context), TSInfo,
+ ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
+ IsListInitialization, IsStdInitListInitialization,
+ ConstructorInitRequiresZeroInit),
+ Constructor);
} else {
CXXConstructExpr::ConstructionKind ConstructKind =
CXXConstructExpr::CK_Complete;
@@ -8159,9 +8208,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (const auto *ToPtrType = Step->Type->getAs<PointerType>()) {
if (FromPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
!ToPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
- S.Diag(CurInit.get()->getExprLoc(),
- diag::warn_noderef_to_dereferenceable_pointer)
- << CurInit.get()->getSourceRange();
+ // Do not check static casts here because they are checked earlier
+ // in Sema::ActOnCXXNamedCast()
+ if (!Kind.isStaticCast()) {
+ S.Diag(CurInit.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << CurInit.get()->getSourceRange();
+ }
}
}
}
@@ -8762,7 +8815,7 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_UTF8StringIntoPlainChar:
S.Diag(Kind.getLocation(),
diag::err_array_init_utf8_string_into_char)
- << S.getLangOpts().CPlusPlus2a;
+ << S.getLangOpts().CPlusPlus20;
break;
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
@@ -8889,6 +8942,11 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_matrix_element)
+ << DestType.isVolatileQualified() << Args[0]->getSourceRange();
+ break;
+
case FK_RValueReferenceBindingToLValue:
S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
<< DestType.getNonReferenceType() << OnlyArg->getType()
@@ -9234,6 +9292,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "non-const lvalue reference bound to vector element";
break;
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
+ OS << "non-const lvalue reference bound to matrix element";
+ break;
+
case FK_NonConstLValueReferenceBindingToUnrelated:
OS << "non-const lvalue reference bound to unrelated type";
break;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
index ae89b146c409..657ed13f207a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
@@ -800,7 +800,7 @@ QualType Sema::buildLambdaInitCaptureInitialization(
}
if (EllipsisLoc.isValid()) {
if (Init->containsUnexpandedParameterPack()) {
- Diag(EllipsisLoc, getLangOpts().CPlusPlus2a
+ Diag(EllipsisLoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_init_capture_pack
: diag::ext_init_capture_pack);
DeductType = Context.getPackExpansionType(DeductType, NumExpansions);
@@ -990,8 +990,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Attributes on the lambda apply to the method.
ProcessDeclAttributes(CurScope, Method, ParamInfo);
- // CUDA lambdas get implicit attributes based on the scope in which they're
- // declared.
+ // CUDA lambdas get implicit host and device attributes.
if (getLangOpts().CUDA)
CUDASetLambdaAttrs(Method);
@@ -1053,8 +1052,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// "&identifier", "this", or "* this". [ Note: The form [&,this] is
// redundant but accepted for compatibility with ISO C++14. --end note ]
if (Intro.Default == LCD_ByCopy && C->Kind != LCK_StarThis)
- Diag(C->Loc, !getLangOpts().CPlusPlus2a
- ? diag::ext_equals_this_lambda_capture_cxx2a
+ Diag(C->Loc, !getLangOpts().CPlusPlus20
+ ? diag::ext_equals_this_lambda_capture_cxx20
: diag::warn_cxx17_compat_equals_this_lambda_capture);
// C++11 [expr.prim.lambda]p12:
@@ -1234,7 +1233,9 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
+ LSI->CallOperator->isConsteval()
+ ? ExpressionEvaluationContext::ConstantEvaluated
+ : ExpressionEvaluationContext::PotentiallyEvaluated);
}
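// Two of the lambda changes above, illustrated; C++20 mode assumed, names
// invented.
struct Widget {
  int n = 0;
  void touch() {
    auto g = [=, this] { (void)n; }; // valid in C++20; an extension
    (void)g;                         // (ext_equals_this_lambda_capture_cxx20)
  }                                  // in earlier language modes
};

auto square = [](int i) consteval { return i * i; }; // the body is now entered
constexpr int nine = square(3);                      // in a constant-evaluated
                                                     // context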
void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
@@ -1627,7 +1628,8 @@ FieldDecl *Sema::BuildCaptureField(RecordDecl *RD,
// If the variable being captured has an invalid type, mark the class as
// invalid as well.
if (!FieldType->isDependentType()) {
- if (RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
+ if (RequireCompleteSizedType(Loc, FieldType,
+ diag::err_field_incomplete_or_sizeless)) {
RD->setInvalidDecl();
Field->setInvalidDecl();
} else {
@@ -1745,7 +1747,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// Capturing 'this' implicitly with a default of '[=]' is deprecated,
// because it results in a reference capture. Don't warn prior to
// C++2a; there's nothing that can be done about it before then.
- if (getLangOpts().CPlusPlus2a && IsImplicit &&
+ if (getLangOpts().CPlusPlus20 && IsImplicit &&
CaptureDefault == LCD_ByCopy) {
Diag(From.getLocation(), diag::warn_deprecated_this_capture);
Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
@@ -1777,8 +1779,13 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
BuildCaptureField(Class, From);
Captures.push_back(Capture);
CaptureInits.push_back(Init.get());
+
+ if (LangOpts.CUDA)
+ CUDACheckLambdaCapture(CallOperator, From);
}
+ Class->setCaptures(Captures);
+
// C++11 [expr.prim.lambda]p6:
// The closure type for a lambda-expression with no lambda-capture
// has a public non-virtual non-explicit const conversion function
@@ -1808,7 +1815,6 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange,
CaptureDefault, CaptureDefaultLoc,
- Captures,
ExplicitParams, ExplicitResultType,
CaptureInits, EndLoc,
ContainsUnexpandedParameterPack);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 8d96404a5c27..5757eaf3fac0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -215,6 +215,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
case Sema::LookupOrdinaryName:
case Sema::LookupRedeclarationWithLinkage:
case Sema::LookupLocalFriendName:
+ case Sema::LookupDestructorName:
IDNS = Decl::IDNS_Ordinary;
if (CPlusPlus) {
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace;
@@ -378,11 +379,14 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// type), per a generous reading of C++ [dcl.typedef]p3 and p4. The typedef
// might carry additional semantic information, such as an alignment override.
// However, per C++ [dcl.typedef]p5, when looking up a tag name, prefer a tag
- // declaration over a typedef.
+ // declaration over a typedef. Also prefer a tag over a typedef for
+ // destructor name lookup because in some contexts we only accept a
+ // class-name in a destructor declaration.
if (DUnderlying->getCanonicalDecl() != EUnderlying->getCanonicalDecl()) {
assert(isa<TypeDecl>(DUnderlying) && isa<TypeDecl>(EUnderlying));
bool HaveTag = isa<TagDecl>(EUnderlying);
- bool WantTag = Kind == Sema::LookupTagName;
+ bool WantTag =
+ Kind == Sema::LookupTagName || Kind == Sema::LookupDestructorName;
return HaveTag != WantTag;
}
@@ -1149,73 +1153,14 @@ static bool isNamespaceOrTranslationUnitScope(Scope *S) {
return false;
}
-// Find the next outer declaration context from this scope. This
-// routine actually returns the semantic outer context, which may
-// differ from the lexical context (encoded directly in the Scope
-// stack) when we are parsing a member of a class template. In this
-// case, the second element of the pair will be true, to indicate that
-// name lookup should continue searching in this semantic context when
-// it leaves the current template parameter scope.
-static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
- DeclContext *DC = S->getEntity();
- DeclContext *Lexical = nullptr;
- for (Scope *OuterS = S->getParent(); OuterS;
- OuterS = OuterS->getParent()) {
- if (OuterS->getEntity()) {
- Lexical = OuterS->getEntity();
- break;
- }
- }
-
- // C++ [temp.local]p8:
- // In the definition of a member of a class template that appears
- // outside of the namespace containing the class template
- // definition, the name of a template-parameter hides the name of
- // a member of this namespace.
- //
- // Example:
- //
- // namespace N {
- // class C { };
- //
- // template<class T> class B {
- // void f(T);
- // };
- // }
- //
- // template<class C> void N::B<C>::f(C) {
- // C b; // C is the template parameter, not N::C
- // }
- //
- // In this example, the lexical context we return is the
- // TranslationUnit, while the semantic context is the namespace N.
- if (!Lexical || !DC || !S->getParent() ||
- !S->getParent()->isTemplateParamScope())
- return std::make_pair(Lexical, false);
-
- // Find the outermost template parameter scope.
- // For the example, this is the scope for the template parameters of
- // template<class C>.
- Scope *OutermostTemplateScope = S->getParent();
- while (OutermostTemplateScope->getParent() &&
- OutermostTemplateScope->getParent()->isTemplateParamScope())
- OutermostTemplateScope = OutermostTemplateScope->getParent();
-
- // Find the namespace context in which the original scope occurs. In
- // the example, this is namespace N.
- DeclContext *Semantic = DC;
- while (!Semantic->isFileContext())
- Semantic = Semantic->getParent();
-
- // Find the declaration context just outside of the template
- // parameter scope. This is the context in which the template is
- // being lexically declaration (a namespace context). In the
- // example, this is the global scope.
- if (Lexical->isFileContext() && !Lexical->Equals(Semantic) &&
- Lexical->Encloses(Semantic))
- return std::make_pair(Semantic, true);
-
- return std::make_pair(Lexical, false);
+/// Find the outer declaration context from this scope. This indicates the
+/// context that we should search up to (exclusive) before considering the
+/// parent of the specified scope.
+static DeclContext *findOuterContext(Scope *S) {
+ for (Scope *OuterS = S->getParent(); OuterS; OuterS = OuterS->getParent())
+ if (DeclContext *DC = OuterS->getLookupEntity())
+ return DC;
+ return nullptr;
}
namespace {
@@ -1282,13 +1227,11 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
UnqualUsingDirectiveSet UDirs(*this);
bool VisitedUsingDirectives = false;
bool LeftStartingScope = false;
- DeclContext *OutsideOfTemplateParamDC = nullptr;
// When performing a scope lookup, we want to find local extern decls.
FindLocalExternScope FindLocals(R);
for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) {
- DeclContext *Ctx = S->getEntity();
bool SearchNamespaceScope = true;
// Check whether the IdResolver has anything in this scope.
for (; I != IEnd && S->isDeclScope(*I); ++I) {
@@ -1320,7 +1263,8 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!SearchNamespaceScope) {
R.resolveKind();
if (S->isClassScope())
- if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(Ctx))
+ if (CXXRecordDecl *Record =
+ dyn_cast_or_null<CXXRecordDecl>(S->getEntity()))
R.setNamingClass(Record);
return true;
}
@@ -1334,24 +1278,8 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return false;
}
- if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
- S->getParent() && !S->getParent()->isTemplateParamScope()) {
- // We've just searched the last template parameter scope and
- // found nothing, so look into the contexts between the
- // lexical and semantic declaration contexts returned by
- // findOuterContext(). This implements the name lookup behavior
- // of C++ [temp.local]p8.
- Ctx = OutsideOfTemplateParamDC;
- OutsideOfTemplateParamDC = nullptr;
- }
-
- if (Ctx) {
- DeclContext *OuterCtx;
- bool SearchAfterTemplateScope;
- std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
- if (SearchAfterTemplateScope)
- OutsideOfTemplateParamDC = OuterCtx;
-
+ if (DeclContext *Ctx = S->getLookupEntity()) {
+ DeclContext *OuterCtx = findOuterContext(S);
for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
// We do not directly look into transparent contexts, since
// those entities will be found in the nearest enclosing
@@ -1476,25 +1404,9 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return true;
}
- DeclContext *Ctx = S->getEntity();
- if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
- S->getParent() && !S->getParent()->isTemplateParamScope()) {
- // We've just searched the last template parameter scope and
- // found nothing, so look into the contexts between the
- // lexical and semantic declaration contexts returned by
- // findOuterContext(). This implements the name lookup behavior
- // of C++ [temp.local]p8.
- Ctx = OutsideOfTemplateParamDC;
- OutsideOfTemplateParamDC = nullptr;
- }
-
+ DeclContext *Ctx = S->getLookupEntity();
if (Ctx) {
- DeclContext *OuterCtx;
- bool SearchAfterTemplateScope;
- std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
- if (SearchAfterTemplateScope)
- OutsideOfTemplateParamDC = OuterCtx;
-
+ DeclContext *OuterCtx = findOuterContext(S);
for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
// We do not directly look into transparent contexts, since
// those entities will be found in the nearest enclosing
@@ -1706,7 +1618,8 @@ bool Sema::hasVisibleMemberSpecialization(
/// path (by instantiating a template, you allow it to see the declarations that
/// your module can see, including those later on in your module).
bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
- assert(D->isHidden() && "should not call this: not in slow case");
+ assert(!D->isUnconditionallyVisible() &&
+ "should not call this: not in slow case");
Module *DeclModule = SemaRef.getOwningModule(D);
assert(DeclModule && "hidden decl has no owning module");
@@ -2297,6 +2210,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
case LookupMemberName:
case LookupRedeclarationWithLinkage:
case LookupLocalFriendName:
+ case LookupDestructorName:
BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
break;
@@ -2961,7 +2875,9 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// These are fundamental types.
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
+ case Type::ExtInt:
break;
// Non-deduced auto types only get here for error cases.
@@ -3987,14 +3903,12 @@ private:
}
}
- // FIXME: C++ [temp.local]p8
- DeclContext *Entity = nullptr;
- if (S->getEntity()) {
+ DeclContext *Entity = S->getLookupEntity();
+ if (Entity) {
// Look into this scope's declaration context, along with any of its
// parent lookup contexts (e.g., enclosing classes), up to the point
// where we hit the context stored in the next outer scope.
- Entity = S->getEntity();
- DeclContext *OuterCtx = findOuterContext(S).first; // FIXME
+ DeclContext *OuterCtx = findOuterContext(S);
for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
Ctx = Ctx->getLookupParent()) {
@@ -5160,9 +5074,9 @@ TypoExpr *Sema::CorrectTypoDelayed(
IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
if (!ExternalTypo && ED > 0 && Typo->getName().size() / ED < 3)
return nullptr;
-
ExprEvalContexts.back().NumTypos++;
- return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC));
+ return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC),
+ TypoName.getLoc());
}
void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
@@ -5344,9 +5258,8 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
/// Get a "quoted.h" or <angled.h> include path to use in a diagnostic
/// suggesting the addition of a #include of the specified file.
-static std::string getIncludeStringForHeader(Preprocessor &PP,
- const FileEntry *E,
- llvm::StringRef IncludingFile) {
+static std::string getHeaderNameForHeader(Preprocessor &PP, const FileEntry *E,
+ llvm::StringRef IncludingFile) {
bool IsSystem = false;
auto Path = PP.getHeaderSearchInfo().suggestPathToFileForDiagnostics(
E, IncludingFile, &IsSystem);
@@ -5360,25 +5273,10 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
assert(!Modules.empty());
auto NotePrevious = [&] {
- unsigned DiagID;
- switch (MIK) {
- case MissingImportKind::Declaration:
- DiagID = diag::note_previous_declaration;
- break;
- case MissingImportKind::Definition:
- DiagID = diag::note_previous_definition;
- break;
- case MissingImportKind::DefaultArgument:
- DiagID = diag::note_default_argument_declared_here;
- break;
- case MissingImportKind::ExplicitSpecialization:
- DiagID = diag::note_explicit_specialization_declared_here;
- break;
- case MissingImportKind::PartialSpecialization:
- DiagID = diag::note_partial_specialization_declared_here;
- break;
- }
- Diag(DeclLoc, DiagID);
+ // FIXME: Suppress the note backtrace even under
+ // -fdiagnostics-show-note-include-stack. We don't care how this
+ // declaration was previously reached.
+ Diag(DeclLoc, diag::note_unreachable_entity) << (int)MIK;
};
// Weed out duplicates from module list.
@@ -5391,26 +5289,24 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
UniqueModules.push_back(M);
}
- llvm::StringRef IncludingFile;
- if (const FileEntry *FE =
- SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
- IncludingFile = FE->tryGetRealPathName();
+ // Try to find a suitable header-name to #include.
+ std::string HeaderName;
+ if (const FileEntry *Header =
+ PP.getHeaderToIncludeForDiagnostics(UseLoc, DeclLoc)) {
+ if (const FileEntry *FE =
+ SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
+ HeaderName = getHeaderNameForHeader(PP, Header, FE->tryGetRealPathName());
+ }
- if (UniqueModules.empty()) {
- // All candidates were global module fragments. Try to suggest a #include.
- const FileEntry *E =
- PP.getModuleHeaderToIncludeForDiagnostics(UseLoc, Modules[0], DeclLoc);
+ // If we have a #include we should suggest, or if all definition locations
+ // were in global module fragments, don't suggest an import.
+ if (!HeaderName.empty() || UniqueModules.empty()) {
// FIXME: Find a smart place to suggest inserting a #include, and add
// a FixItHint there.
- Diag(UseLoc, diag::err_module_unimported_use_global_module_fragment)
- << (int)MIK << Decl << !!E
- << (E ? getIncludeStringForHeader(PP, E, IncludingFile) : "");
- // Produce a "previous" note if it will point to a header rather than some
- // random global module fragment.
- // FIXME: Suppress the note backtrace even under
- // -fdiagnostics-show-note-include-stack.
- if (E)
- NotePrevious();
+ Diag(UseLoc, diag::err_module_unimported_use_header)
+ << (int)MIK << Decl << !HeaderName.empty() << HeaderName;
+ // Produce a note showing where the entity was declared.
+ NotePrevious();
if (Recover)
createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
return;
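The two earlier suggestion paths (the global-module-fragment case here and the per-module getModuleHeaderToIncludeForDiagnostics() branch removed further down) are merged: the candidate header is computed once via Preprocessor::getHeaderToIncludeForDiagnostics(), and err_module_unimported_use_header now takes a has-header flag and the header name rather than a module name. A condensed, illustrative paraphrase of the new control flow (not patch text):

    if (!HeaderName.empty() || UniqueModules.empty()) {
      // Suggest including HeaderName if we found one; otherwise just report
      // the unimported use without naming a module.
      Diag(UseLoc, diag::err_module_unimported_use_header)
          << (int)MIK << Decl << !HeaderName.empty() << HeaderName;
      NotePrevious(); // always note where the entity was declared
      if (Recover)
        createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
      return;
    }
    // Otherwise fall through and suggest importing one of UniqueModules.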
@@ -5432,16 +5328,6 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
Diag(UseLoc, diag::err_module_unimported_use_multiple)
<< (int)MIK << Decl << ModuleList;
- } else if (const FileEntry *E = PP.getModuleHeaderToIncludeForDiagnostics(
- UseLoc, Modules[0], DeclLoc)) {
- // The right way to make the declaration visible is to include a header;
- // suggest doing so.
- //
- // FIXME: Find a smart place to suggest inserting a #include, and add
- // a FixItHint there.
- Diag(UseLoc, diag::err_module_unimported_use_header)
- << (int)MIK << Decl << Modules[0]->getFullModuleName()
- << getIncludeStringForHeader(PP, E, IncludingFile);
} else {
// FIXME: Add a FixItHint that imports the corresponding module.
Diag(UseLoc, diag::err_module_unimported_use)
@@ -5502,9 +5388,10 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
- TypoRecoveryCallback TRC) {
+ TypoRecoveryCallback TRC,
+ SourceLocation TypoLoc) {
assert(TCC && "createDelayedTypo requires a valid TypoCorrectionConsumer");
- auto TE = new (Context) TypoExpr(Context.DependentTy);
+ auto TE = new (Context) TypoExpr(Context.DependentTy, TypoLoc);
auto &State = DelayedTypos[TE];
State.Consumer = std::move(TCC);
State.DiagHandler = std::move(TDG);
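CorrectTypoDelayed() now threads the typo's source location through createDelayedTypo() into the TypoExpr constructor, so the dependent placeholder can carry a real location. Restated in one place from the hunks above (no new code beyond what the patch shows):

    // In Sema::CorrectTypoDelayed():
    return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC),
                             TypoName.getLoc());

    // In Sema::createDelayedTypo():
    auto *TE = new (Context) TypoExpr(Context.DependentTy, TypoLoc);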
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
index f6717f4cbe5e..e301c62dd2c0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
@@ -35,24 +35,23 @@ using namespace clang;
///
/// Returns OCL_None if the attributes as stated do not imply an ownership.
/// Never returns OCL_Autoreleasing.
-static Qualifiers::ObjCLifetime getImpliedARCOwnership(
- ObjCPropertyDecl::PropertyAttributeKind attrs,
- QualType type) {
+static Qualifiers::ObjCLifetime
+getImpliedARCOwnership(ObjCPropertyAttribute::Kind attrs, QualType type) {
// retain, strong, copy, weak, and unsafe_unretained are only legal
// on properties of retainable pointer type.
- if (attrs & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_copy)) {
+ if (attrs &
+ (ObjCPropertyAttribute::kind_retain | ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_copy)) {
return Qualifiers::OCL_Strong;
- } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) {
+ } else if (attrs & ObjCPropertyAttribute::kind_weak) {
return Qualifiers::OCL_Weak;
- } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ } else if (attrs & ObjCPropertyAttribute::kind_unsafe_unretained) {
return Qualifiers::OCL_ExplicitNone;
}
// assign can appear on other types, so we have to check the
// property type.
- if (attrs & ObjCPropertyDecl::OBJC_PR_assign &&
+ if (attrs & ObjCPropertyAttribute::kind_assign &&
type->isObjCRetainableType()) {
return Qualifiers::OCL_ExplicitNone;
}
@@ -66,8 +65,7 @@ static void checkPropertyDeclWithOwnership(Sema &S,
ObjCPropertyDecl *property) {
if (property->isInvalidDecl()) return;
- ObjCPropertyDecl::PropertyAttributeKind propertyKind
- = property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind propertyKind = property->getPropertyAttributes();
Qualifiers::ObjCLifetime propertyLifetime
= property->getType().getObjCLifetime();
@@ -80,14 +78,14 @@ static void checkPropertyDeclWithOwnership(Sema &S,
// attribute. That's okay, but restore reasonable invariants by
// setting the property attribute according to the lifetime
// qualifier.
- ObjCPropertyDecl::PropertyAttributeKind attr;
+ ObjCPropertyAttribute::Kind attr;
if (propertyLifetime == Qualifiers::OCL_Strong) {
- attr = ObjCPropertyDecl::OBJC_PR_strong;
+ attr = ObjCPropertyAttribute::kind_strong;
} else if (propertyLifetime == Qualifiers::OCL_Weak) {
- attr = ObjCPropertyDecl::OBJC_PR_weak;
+ attr = ObjCPropertyAttribute::kind_weak;
} else {
assert(propertyLifetime == Qualifiers::OCL_ExplicitNone);
- attr = ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ attr = ObjCPropertyAttribute::kind_unsafe_unretained;
}
property->setPropertyAttributes(attr);
return;
@@ -130,18 +128,19 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
// In GC mode, just look for the __weak qualifier.
if (S.getLangOpts().getGC() != LangOptions::NonGC) {
- if (T.isObjCGCWeak()) return ObjCDeclSpec::DQ_PR_weak;
+ if (T.isObjCGCWeak())
+ return ObjCPropertyAttribute::kind_weak;
- // In ARC/MRC, look for an explicit ownership qualifier.
- // For some reason, this only applies to __weak.
+ // In ARC/MRC, look for an explicit ownership qualifier.
+ // For some reason, this only applies to __weak.
} else if (auto ownership = T.getObjCLifetime()) {
switch (ownership) {
case Qualifiers::OCL_Weak:
- return ObjCDeclSpec::DQ_PR_weak;
+ return ObjCPropertyAttribute::kind_weak;
case Qualifiers::OCL_Strong:
- return ObjCDeclSpec::DQ_PR_strong;
+ return ObjCPropertyAttribute::kind_strong;
case Qualifiers::OCL_ExplicitNone:
- return ObjCDeclSpec::DQ_PR_unsafe_unretained;
+ return ObjCPropertyAttribute::kind_unsafe_unretained;
case Qualifiers::OCL_Autoreleasing:
case Qualifiers::OCL_None:
return 0;
@@ -153,22 +152,20 @@ static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
}
static const unsigned OwnershipMask =
- (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_weak |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ (ObjCPropertyAttribute::kind_assign | ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy | ObjCPropertyAttribute::kind_weak |
+ ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_unsafe_unretained);
static unsigned getOwnershipRule(unsigned attr) {
unsigned result = attr & OwnershipMask;
// From an ownership perspective, assign and unsafe_unretained are
// identical; make sure one also implies the other.
- if (result & (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained)) {
- result |= ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (result & (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained)) {
+ result |= ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained;
}
return result;
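The renamed enumerators remain independent bit flags, so property attributes are still combined and queried with bitwise operators, and getOwnershipRule() still collapses assign and unsafe_unretained into a single ownership class. A small fragment illustrating that behavior (assumes the surrounding Sema context; only the identifiers come from the patch):

    unsigned Attrs = ObjCPropertyAttribute::kind_readwrite |
                     ObjCPropertyAttribute::kind_nonatomic |
                     ObjCPropertyAttribute::kind_assign;
    unsigned Ownership = getOwnershipRule(Attrs);
    // Non-ownership bits are masked off, and 'assign' implies
    // 'unsafe_unretained' (and vice versa) after normalization.
    assert((Ownership & ObjCPropertyAttribute::kind_assign) != 0);
    assert((Ownership & ObjCPropertyAttribute::kind_unsafe_unretained) != 0);
    assert((Ownership & ObjCPropertyAttribute::kind_readwrite) == 0);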
@@ -183,15 +180,16 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
- FD.D.setObjCWeakProperty((Attributes & ObjCDeclSpec::DQ_PR_weak) != 0);
+ FD.D.setObjCWeakProperty((Attributes & ObjCPropertyAttribute::kind_weak) !=
+ 0);
TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
QualType T = TSI->getType();
if (!getOwnershipRule(Attributes)) {
Attributes |= deducePropertyOwnershipFromType(*this, T);
}
- bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
+ bool isReadWrite = ((Attributes & ObjCPropertyAttribute::kind_readwrite) ||
// default is readwrite!
- !(Attributes & ObjCDeclSpec::DQ_PR_readonly));
+ !(Attributes & ObjCPropertyAttribute::kind_readonly));
// Proceed with constructing the ObjCPropertyDecls.
ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
@@ -277,39 +275,39 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
return Res;
}
-static ObjCPropertyDecl::PropertyAttributeKind
+static ObjCPropertyAttribute::Kind
makePropertyAttributesAsWritten(unsigned Attributes) {
unsigned attributesAsWritten = 0;
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
- if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
- if (Attributes & ObjCDeclSpec::DQ_PR_getter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
- if (Attributes & ObjCDeclSpec::DQ_PR_setter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
- if (Attributes & ObjCDeclSpec::DQ_PR_assign)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
- if (Attributes & ObjCDeclSpec::DQ_PR_weak)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
- if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
- if (Attributes & ObjCDeclSpec::DQ_PR_class)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_class;
- if (Attributes & ObjCDeclSpec::DQ_PR_direct)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_direct;
-
- return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
+ if (Attributes & ObjCPropertyAttribute::kind_readonly)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_readonly;
+ if (Attributes & ObjCPropertyAttribute::kind_readwrite)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_readwrite;
+ if (Attributes & ObjCPropertyAttribute::kind_getter)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_getter;
+ if (Attributes & ObjCPropertyAttribute::kind_setter)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_setter;
+ if (Attributes & ObjCPropertyAttribute::kind_assign)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_assign;
+ if (Attributes & ObjCPropertyAttribute::kind_retain)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_retain;
+ if (Attributes & ObjCPropertyAttribute::kind_strong)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_strong;
+ if (Attributes & ObjCPropertyAttribute::kind_weak)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_weak;
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_copy;
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_unsafe_unretained;
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_nonatomic;
+ if (Attributes & ObjCPropertyAttribute::kind_atomic)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_atomic;
+ if (Attributes & ObjCPropertyAttribute::kind_class)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_class;
+ if (Attributes & ObjCPropertyAttribute::kind_direct)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_direct;
+
+ return (ObjCPropertyAttribute::Kind)attributesAsWritten;
}
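With the parser and the AST now sharing the single ObjCPropertyAttribute::Kind enumeration, each branch above maps a bit onto itself, so the function reduces to masking the attributes that can be written in source. A hypothetical condensation, equivalent in effect to the per-bit checks (not how the patch spells it):

    const unsigned WrittenMask =
        ObjCPropertyAttribute::kind_readonly | ObjCPropertyAttribute::kind_readwrite |
        ObjCPropertyAttribute::kind_getter | ObjCPropertyAttribute::kind_setter |
        ObjCPropertyAttribute::kind_assign | ObjCPropertyAttribute::kind_retain |
        ObjCPropertyAttribute::kind_strong | ObjCPropertyAttribute::kind_weak |
        ObjCPropertyAttribute::kind_copy | ObjCPropertyAttribute::kind_unsafe_unretained |
        ObjCPropertyAttribute::kind_nonatomic | ObjCPropertyAttribute::kind_atomic |
        ObjCPropertyAttribute::kind_class | ObjCPropertyAttribute::kind_direct;
    return (ObjCPropertyAttribute::Kind)(Attributes & WrittenMask);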
static bool LocPropertyAttribute( ASTContext &Context, const char *attrName,
@@ -347,12 +345,10 @@ static void checkAtomicPropertyMismatch(Sema &S,
ObjCPropertyDecl *NewProperty,
bool PropagateAtomicity) {
// If the atomicity of both matches, we're done.
- bool OldIsAtomic =
- (OldProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
- == 0;
- bool NewIsAtomic =
- (NewProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
- == 0;
+ bool OldIsAtomic = (OldProperty->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nonatomic) == 0;
+ bool NewIsAtomic = (NewProperty->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nonatomic) == 0;
if (OldIsAtomic == NewIsAtomic) return;
// Determine whether the given property is readonly and implicitly
@@ -360,14 +356,16 @@ static void checkAtomicPropertyMismatch(Sema &S,
auto isImplicitlyReadonlyAtomic = [](ObjCPropertyDecl *Property) -> bool {
// Is it readonly?
auto Attrs = Property->getPropertyAttributes();
- if ((Attrs & ObjCPropertyDecl::OBJC_PR_readonly) == 0) return false;
+ if ((Attrs & ObjCPropertyAttribute::kind_readonly) == 0)
+ return false;
// Is it nonatomic?
- if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) return false;
+ if (Attrs & ObjCPropertyAttribute::kind_nonatomic)
+ return false;
// Was 'atomic' specified directly?
if (Property->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_atomic)
+ ObjCPropertyAttribute::kind_atomic)
return false;
return true;
@@ -375,16 +373,16 @@ static void checkAtomicPropertyMismatch(Sema &S,
// If we're allowed to propagate atomicity, and the new property did
// not specify atomicity at all, propagate.
- const unsigned AtomicityMask =
- (ObjCPropertyDecl::OBJC_PR_atomic | ObjCPropertyDecl::OBJC_PR_nonatomic);
+ const unsigned AtomicityMask = (ObjCPropertyAttribute::kind_atomic |
+ ObjCPropertyAttribute::kind_nonatomic);
if (PropagateAtomicity &&
((NewProperty->getPropertyAttributesAsWritten() & AtomicityMask) == 0)) {
unsigned Attrs = NewProperty->getPropertyAttributes();
Attrs = Attrs & ~AtomicityMask;
if (OldIsAtomic)
- Attrs |= ObjCPropertyDecl::OBJC_PR_atomic;
+ Attrs |= ObjCPropertyAttribute::kind_atomic;
else
- Attrs |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ Attrs |= ObjCPropertyAttribute::kind_nonatomic;
NewProperty->overwritePropertyAttributes(Attrs);
return;
@@ -438,8 +436,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
return nullptr;
}
- bool isClassProperty = (AttributesAsWritten & ObjCDeclSpec::DQ_PR_class) ||
- (Attributes & ObjCDeclSpec::DQ_PR_class);
+ bool isClassProperty =
+ (AttributesAsWritten & ObjCPropertyAttribute::kind_class) ||
+ (Attributes & ObjCPropertyAttribute::kind_class);
// Find the property in the extended class's primary class or
// extensions.
@@ -464,11 +463,11 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// This is a common error where the user often intended the original
// declaration to be readonly.
unsigned diag =
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
- (PIDecl->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_readwrite)
- ? diag::err_use_continuation_class_redeclaration_readwrite
- : diag::err_use_continuation_class;
+ (Attributes & ObjCPropertyAttribute::kind_readwrite) &&
+ (PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
Diag(AtLoc, diag)
<< CCPrimary->getDeclName();
Diag(PIDecl->getLocation(), diag::note_property_declare);
@@ -478,15 +477,15 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// Check for consistency of getters.
if (PIDecl->getGetterName() != GetterSel) {
// If the getter was written explicitly, complain.
- if (AttributesAsWritten & ObjCDeclSpec::DQ_PR_getter) {
- Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
- << PIDecl->getGetterName() << GetterSel;
- Diag(PIDecl->getLocation(), diag::note_property_declare);
- }
+ if (AttributesAsWritten & ObjCPropertyAttribute::kind_getter) {
+ Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
+ << PIDecl->getGetterName() << GetterSel;
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
// Always adopt the getter from the original declaration.
GetterSel = PIDecl->getGetterName();
- Attributes |= ObjCDeclSpec::DQ_PR_getter;
+ Attributes |= ObjCPropertyAttribute::kind_getter;
}
// Check consistency of ownership.
@@ -505,9 +504,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
}
// If the redeclaration is 'weak' but the original property is not,
- if ((Attributes & ObjCPropertyDecl::OBJC_PR_weak) &&
- !(PIDecl->getPropertyAttributesAsWritten()
- & ObjCPropertyDecl::OBJC_PR_weak) &&
+ if ((Attributes & ObjCPropertyAttribute::kind_weak) &&
+ !(PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_weak) &&
PIDecl->getType()->getAs<ObjCObjectPointerType>() &&
PIDecl->getType().getObjCLifetime() == Qualifiers::OCL_None) {
Diag(AtLoc, diag::warn_property_implicitly_mismatched);
@@ -584,8 +583,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// Property defaults to 'assign' if it is readwrite, unless this is ARC
// and the type is retainable.
bool isAssign;
- if (Attributes & (ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_unsafe_unretained)) {
+ if (Attributes & (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained)) {
isAssign = true;
} else if (getOwnershipRule(Attributes) || !isReadWrite) {
isAssign = false;
@@ -596,8 +595,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// Issue a warning if property is 'assign' as default and its
// object, which is gc'able conforms to NSCopying protocol
- if (getLangOpts().getGC() != LangOptions::NonGC &&
- isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign)) {
+ if (getLangOpts().getGC() != LangOptions::NonGC && isAssign &&
+ !(Attributes & ObjCPropertyAttribute::kind_assign)) {
if (const ObjCObjectPointerType *ObjPtrTy =
T->getAs<ObjCObjectPointerType>()) {
ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
@@ -625,8 +624,9 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PropertyId, AtLoc,
LParenLoc, T, TInfo);
- bool isClassProperty = (AttributesAsWritten & ObjCDeclSpec::DQ_PR_class) ||
- (Attributes & ObjCDeclSpec::DQ_PR_class);
+ bool isClassProperty =
+ (AttributesAsWritten & ObjCPropertyAttribute::kind_class) ||
+ (Attributes & ObjCPropertyAttribute::kind_class);
// Class property and instance property can have the same name.
if (ObjCPropertyDecl *prevDecl = ObjCPropertyDecl::findPropertyDecl(
DC, PropertyId, ObjCPropertyDecl::getQueryKind(isClassProperty))) {
@@ -654,68 +654,68 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setPropertyAttributesAsWritten(
makePropertyAttributesAsWritten(AttributesAsWritten));
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+ if (Attributes & ObjCPropertyAttribute::kind_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
- if (Attributes & ObjCDeclSpec::DQ_PR_getter)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter);
+ if (Attributes & ObjCPropertyAttribute::kind_getter)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_getter);
- if (Attributes & ObjCDeclSpec::DQ_PR_setter)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter);
+ if (Attributes & ObjCPropertyAttribute::kind_setter)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_setter);
if (isReadWrite)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readwrite);
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCPropertyAttribute::kind_retain)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_retain);
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ if (Attributes & ObjCPropertyAttribute::kind_strong)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
- if (Attributes & ObjCDeclSpec::DQ_PR_weak)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ if (Attributes & ObjCPropertyAttribute::kind_weak)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_copy);
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
if (isAssign)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
// In the semantic attributes, one of nonatomic or atomic is always set.
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_nonatomic);
else
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_atomic);
// 'unsafe_unretained' is alias for 'assign'.
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
if (isAssign)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
if (MethodImplKind == tok::objc_required)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
else if (MethodImplKind == tok::objc_optional)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional);
- if (Attributes & ObjCDeclSpec::DQ_PR_nullability)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability);
+ if (Attributes & ObjCPropertyAttribute::kind_nullability)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
- if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable);
+ if (Attributes & ObjCPropertyAttribute::kind_null_resettable)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_null_resettable);
- if (Attributes & ObjCDeclSpec::DQ_PR_class)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_class);
+ if (Attributes & ObjCPropertyAttribute::kind_class)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_class);
- if ((Attributes & ObjCDeclSpec::DQ_PR_direct) ||
+ if ((Attributes & ObjCPropertyAttribute::kind_direct) ||
CDecl->hasAttr<ObjCDirectMembersAttr>()) {
if (isa<ObjCProtocolDecl>(CDecl)) {
Diag(PDecl->getLocation(), diag::err_objc_direct_on_protocol) << true;
} else if (getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_direct);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_direct);
} else {
Diag(PDecl->getLocation(), diag::warn_objc_direct_property_ignored)
<< PDecl->getDeclName();
@@ -781,10 +781,9 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
case Qualifiers::OCL_ExplicitNone:
S.Diag(ivar->getLocation(), diag::err_arc_assign_property_ownership)
- << property->getDeclName()
- << ivar->getDeclName()
- << ((property->getPropertyAttributesAsWritten()
- & ObjCPropertyDecl::OBJC_PR_assign) != 0);
+ << property->getDeclName() << ivar->getDeclName()
+ << ((property->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_assign) != 0);
break;
case Qualifiers::OCL_Autoreleasing:
@@ -815,21 +814,20 @@ static void setImpliedPropertyAttributeForReadOnlyProperty(
if (!ivar) {
// if no backing ivar, make property 'strong'.
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
return;
}
// property assumes owenership of backing ivar.
QualType ivarType = ivar->getType();
Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
if (ivarLifetime == Qualifiers::OCL_Strong)
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
else if (ivarLifetime == Qualifiers::OCL_Weak)
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
}
-static bool
-isIncompatiblePropertyAttribute(unsigned Attr1, unsigned Attr2,
- ObjCPropertyDecl::PropertyAttributeKind Kind) {
+static bool isIncompatiblePropertyAttribute(unsigned Attr1, unsigned Attr2,
+ ObjCPropertyAttribute::Kind Kind) {
return (Attr1 & Kind) != (Attr2 & Kind);
}
@@ -912,30 +910,31 @@ SelectPropertyForSynthesisFromProtocols(Sema &S, SourceLocation AtLoc,
};
// The ownership might be incompatible unless the property has no explicit
// ownership.
- bool HasOwnership = (Attr & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
- ObjCPropertyDecl::OBJC_PR_weak)) != 0;
+ bool HasOwnership =
+ (Attr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_weak)) != 0;
if (HasOwnership &&
isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_copy)) {
- Diag(OriginalAttributes & ObjCPropertyDecl::OBJC_PR_copy, "copy");
+ ObjCPropertyAttribute::kind_copy)) {
+ Diag(OriginalAttributes & ObjCPropertyAttribute::kind_copy, "copy");
continue;
}
if (HasOwnership && areIncompatiblePropertyAttributes(
OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong)) {
- Diag(OriginalAttributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong),
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong)) {
+ Diag(OriginalAttributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong),
"retain (or strong)");
continue;
}
if (isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_atomic)) {
- Diag(OriginalAttributes & ObjCPropertyDecl::OBJC_PR_atomic, "atomic");
+ ObjCPropertyAttribute::kind_atomic)) {
+ Diag(OriginalAttributes & ObjCPropertyAttribute::kind_atomic, "atomic");
continue;
}
}
@@ -1126,8 +1125,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return nullptr;
}
unsigned PIkind = property->getPropertyAttributesAsWritten();
- if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic |
- ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) {
+ if ((PIkind & (ObjCPropertyAttribute::kind_atomic |
+ ObjCPropertyAttribute::kind_nonatomic)) == 0) {
if (AtLoc.isValid())
Diag(AtLoc, diag::warn_implicit_atomic_property);
else
@@ -1143,10 +1142,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return nullptr;
}
}
- if (Synthesize&&
- (PIkind & ObjCPropertyDecl::OBJC_PR_readonly) &&
- property->hasAttr<IBOutletAttr>() &&
- !AtLoc.isValid()) {
+ if (Synthesize && (PIkind & ObjCPropertyAttribute::kind_readonly) &&
+ property->hasAttr<IBOutletAttr>() && !AtLoc.isValid()) {
bool ReadWriteProperty = false;
// Search into the class extensions and see if 'readonly property is
// redeclared 'readwrite', then no warning is to be issued.
@@ -1155,7 +1152,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!R.empty())
if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) {
PIkind = ExtProp->getPropertyAttributesAsWritten();
- if (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite) {
+ if (PIkind & ObjCPropertyAttribute::kind_readwrite) {
ReadWriteProperty = true;
break;
}
@@ -1232,16 +1229,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (getLangOpts().ObjCAutoRefCount &&
(property->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_readonly) &&
+ ObjCPropertyAttribute::kind_readonly) &&
PropertyIvarType->isObjCRetainableType()) {
setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar);
}
- ObjCPropertyDecl::PropertyAttributeKind kind
- = property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind kind = property->getPropertyAttributes();
bool isARCWeak = false;
- if (kind & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (kind & ObjCPropertyAttribute::kind_weak) {
// Add GC __weak to the ivar type if the property is weak.
if (getLangOpts().getGC() != LangOptions::NonGC) {
assert(!getLangOpts().ObjCAutoRefCount);
@@ -1312,7 +1308,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// It's an error if we have to do this and the user didn't
// explicitly write an ownership attribute on the property.
if (!hasWrittenStorageAttribute(property, QueryKind) &&
- !(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
+ !(kind & ObjCPropertyAttribute::kind_strong)) {
Diag(PropertyDiagLoc,
diag::err_arc_objc_property_default_assign_on_object);
Diag(property->getLocation(), diag::note_property_declare);
@@ -1456,7 +1452,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyLoc);
PIDecl->setGetterMethodDecl(OMD);
}
-
+
if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
Ivar->getType()->isRecordType()) {
// For Objective-C++, need to synthesize the AST for the IVAR object to be
@@ -1551,7 +1547,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
BO_Assign, lhs, rhs);
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_atomic) {
+ ObjCPropertyAttribute::kind_atomic) {
Expr *callExpr = Res.getAs<Expr>();
if (const CXXOperatorCallExpr *CXXCE =
dyn_cast_or_null<CXXOperatorCallExpr>(callExpr))
@@ -1627,6 +1623,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
CatImplClass->addPropertyImplementation(PIDecl);
}
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic &&
+ PIDecl->getPropertyDecl() &&
+ PIDecl->getPropertyDecl()->isDirectProperty()) {
+ Diag(PropertyLoc, diag::err_objc_direct_dynamic_property);
+ Diag(PIDecl->getPropertyDecl()->getLocation(),
+ diag::note_previous_declaration);
+ return nullptr;
+ }
+
return PIDecl;
}
@@ -1642,10 +1647,8 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *inheritedName,
bool OverridingProtocolProperty) {
- ObjCPropertyDecl::PropertyAttributeKind CAttr =
- Property->getPropertyAttributes();
- ObjCPropertyDecl::PropertyAttributeKind SAttr =
- SuperProperty->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind CAttr = Property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind SAttr = SuperProperty->getPropertyAttributes();
// We allow readonly properties without an explicit ownership
// (assign/unsafe_unretained/weak/retain/strong/copy) in super class
@@ -1654,21 +1657,19 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
!getOwnershipRule(SAttr) && getOwnershipRule(CAttr))
;
else {
- if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly)
- && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite))
+ if ((CAttr & ObjCPropertyAttribute::kind_readonly) &&
+ (SAttr & ObjCPropertyAttribute::kind_readwrite))
Diag(Property->getLocation(), diag::warn_readonly_property)
<< Property->getDeclName() << inheritedName;
- if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy)
- != (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
+ if ((CAttr & ObjCPropertyAttribute::kind_copy) !=
+ (SAttr & ObjCPropertyAttribute::kind_copy))
Diag(Property->getLocation(), diag::warn_property_attribute)
<< Property->getDeclName() << "copy" << inheritedName;
- else if (!(SAttr & ObjCPropertyDecl::OBJC_PR_readonly)){
- unsigned CAttrRetain =
- (CAttr &
- (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
- unsigned SAttrRetain =
- (SAttr &
- (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ else if (!(SAttr & ObjCPropertyAttribute::kind_readonly)) {
+ unsigned CAttrRetain = (CAttr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong));
+ unsigned SAttrRetain = (SAttr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong));
bool CStrong = (CAttrRetain != 0);
bool SStrong = (SAttrRetain != 0);
if (CStrong != SStrong)
@@ -1876,7 +1877,7 @@ static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
ObjCPropertyDecl *Prop) {
bool SuperClassImplementsGetter = false;
bool SuperClassImplementsSetter = false;
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly)
SuperClassImplementsSetter = true;
while (IDecl->getSuperClass()) {
@@ -1919,7 +1920,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
continue;
ObjCMethodDecl *ImpMethod = IMPDecl->getInstanceMethod(Prop->getGetterName());
if (ImpMethod && !ImpMethod->getBody()) {
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly)
continue;
ImpMethod = IMPDecl->getInstanceMethod(Prop->getSetterName());
if (ImpMethod && !ImpMethod->getBody())
@@ -1956,16 +1957,16 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
}
// If property to be implemented in the super class, ignore.
if (PropInSuperClass) {
- if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) &&
+ if ((Prop->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readwrite) &&
(PropInSuperClass->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) &&
+ ObjCPropertyAttribute::kind_readonly) &&
!IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
!IDecl->HasUserDeclaredSetterMethod(Prop)) {
Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
<< Prop->getIdentifier();
Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
- }
- else {
+ } else {
Diag(Prop->getLocation(), diag::warn_autosynthesis_property_in_superclass)
<< Prop->getIdentifier();
Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
@@ -2152,12 +2153,11 @@ void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl)
const auto *property = propertyImpl->getPropertyDecl();
// Warn about null_resettable properties with synthesized setters,
// because the setter won't properly handle nil.
- if (propertyImpl->getPropertyImplementation()
- == ObjCPropertyImplDecl::Synthesize &&
+ if (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize &&
(property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) &&
- property->getGetterMethodDecl() &&
- property->getSetterMethodDecl()) {
+ ObjCPropertyAttribute::kind_null_resettable) &&
+ property->getGetterMethodDecl() && property->getSetterMethodDecl()) {
auto *getterImpl = propertyImpl->getGetterMethodDecl();
auto *setterImpl = propertyImpl->getSetterMethodDecl();
if ((!getterImpl || getterImpl->isSynthesizedAccessorStub()) &&
@@ -2195,8 +2195,8 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
unsigned Attributes = Property->getPropertyAttributes();
unsigned AttributesAsWritten = Property->getPropertyAttributesAsWritten();
- if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic) &&
- !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
+ if (!(AttributesAsWritten & ObjCPropertyAttribute::kind_atomic) &&
+ !(AttributesAsWritten & ObjCPropertyAttribute::kind_nonatomic)) {
GetterMethod = Property->isClassProperty() ?
IMPDecl->getClassMethod(Property->getGetterName()) :
IMPDecl->getInstanceMethod(Property->getGetterName());
@@ -2222,8 +2222,8 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
// We only care about readwrite atomic property.
- if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) ||
- !(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_nonatomic) ||
+ !(Attributes & ObjCPropertyAttribute::kind_readwrite))
continue;
if (const ObjCPropertyImplDecl *PIDecl = IMPDecl->FindPropertyImplDecl(
Property->getIdentifier(), Property->getQueryKind())) {
@@ -2244,7 +2244,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
<< (SetterMethod != nullptr);
// fixit stuff.
if (Property->getLParenLoc().isValid() &&
- !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ !(AttributesAsWritten & ObjCPropertyAttribute::kind_atomic)) {
// @property () ... case.
SourceLocation AfterLParen =
getLocForEndOfToken(Property->getLParenLoc());
@@ -2260,8 +2260,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
Diag(Property->getLocation(),
diag::note_atomic_property_fixup_suggest)
<< FixItHint::CreateInsertion(startLoc, "(nonatomic) ");
- }
- else
+ } else
Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
Diag(Property->getLocation(), diag::note_property_declare);
}
@@ -2421,6 +2420,40 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
DiagnosePropertyAccessorMismatch(property, GetterMethod,
property->getLocation());
+ // synthesizing accessors must not result in a direct method that is not
+ // monomorphic
+ if (!GetterMethod) {
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD)) {
+ auto *ExistingGetter = CatDecl->getClassInterface()->lookupMethod(
+ property->getGetterName(), !IsClassProperty, true, false, CatDecl);
+ if (ExistingGetter) {
+ if (ExistingGetter->isDirectMethod() || property->isDirectProperty()) {
+ Diag(property->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << property->isDirectProperty() << 1 /* property */
+ << ExistingGetter->isDirectMethod()
+ << ExistingGetter->getDeclName();
+ Diag(ExistingGetter->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ }
+ }
+
+ if (!property->isReadOnly() && !SetterMethod) {
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD)) {
+ auto *ExistingSetter = CatDecl->getClassInterface()->lookupMethod(
+ property->getSetterName(), !IsClassProperty, true, false, CatDecl);
+ if (ExistingSetter) {
+ if (ExistingSetter->isDirectMethod() || property->isDirectProperty()) {
+ Diag(property->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << property->isDirectProperty() << 1 /* property */
+ << ExistingSetter->isDirectMethod()
+ << ExistingSetter->getDeclName();
+ Diag(ExistingSetter->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ }
+ }
+
if (!property->isReadOnly() && SetterMethod) {
if (Context.getCanonicalType(SetterMethod->getReturnType()) !=
Context.VoidTy)
@@ -2455,7 +2488,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
// If the property is null_resettable, the getter returns nonnull.
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ ObjCPropertyAttribute::kind_null_resettable) {
QualType modifiedTy = resultTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)) {
if (*nullability == NullabilityKind::Unspecified)
@@ -2534,7 +2567,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
// If the property is null_resettable, the setter accepts a
// nullable value.
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ ObjCPropertyAttribute::kind_null_resettable) {
QualType modifiedTy = paramTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)){
if (*nullability == NullabilityKind::Unspecified)
@@ -2622,8 +2655,8 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (!PDecl || PDecl->isInvalidDecl())
return;
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_readwrite))
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "readonly" << "readwrite";
@@ -2631,104 +2664,109 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
QualType PropertyTy = PropertyDecl->getType();
// Check for copy or retain on non-object types.
- if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
+ if ((Attributes &
+ (ObjCPropertyAttribute::kind_weak | ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong)) &&
!PropertyTy->isObjCRetainableType() &&
!PropertyDecl->hasAttr<ObjCNSObjectAttr>()) {
Diag(Loc, diag::err_objc_property_requires_object)
- << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
- Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
- Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
+ << (Attributes & ObjCPropertyAttribute::kind_weak
+ ? "weak"
+ : Attributes & ObjCPropertyAttribute::kind_copy
+ ? "copy"
+ : "retain (or strong)");
+ Attributes &=
+ ~(ObjCPropertyAttribute::kind_weak | ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong);
PropertyDecl->setInvalidDecl();
}
// Check for assign on object types.
- if ((Attributes & ObjCDeclSpec::DQ_PR_assign) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
+ if ((Attributes & ObjCPropertyAttribute::kind_assign) &&
+ !(Attributes & ObjCPropertyAttribute::kind_unsafe_unretained) &&
PropertyTy->isObjCRetainableType() &&
!PropertyTy->isObjCARCImplicitlyUnretainedType()) {
Diag(Loc, diag::warn_objc_property_assign_on_object);
}
// Check for more than one of { assign, copy, retain }.
- if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
- if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ if (Attributes & ObjCPropertyAttribute::kind_assign) {
+ if (Attributes & ObjCPropertyAttribute::kind_copy) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "copy";
- Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ Attributes &= ~ObjCPropertyAttribute::kind_copy;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (getLangOpts().ObjCAutoRefCount &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
if (PropertyDecl->hasAttr<IBOutletCollectionAttr>())
Diag(Loc, diag::warn_iboutletcollection_property_assign);
- } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
- if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained) {
+ if (Attributes & ObjCPropertyAttribute::kind_copy) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "copy";
- Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ Attributes &= ~ObjCPropertyAttribute::kind_copy;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (getLangOpts().ObjCAutoRefCount &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_copy) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ if (Attributes & ObjCPropertyAttribute::kind_weak) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- }
- else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "retain" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
- }
- else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "strong" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ } else if ((Attributes & ObjCPropertyAttribute::kind_retain) &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "retain"
+ << "weak";
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
+ } else if ((Attributes & ObjCPropertyAttribute::kind_strong) &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "strong"
+ << "weak";
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ if (Attributes & ObjCPropertyAttribute::kind_weak) {
// 'weak' and 'nonnull' are mutually exclusive.
if (auto nullability = PropertyTy->getNullability(Context)) {
if (*nullability == NullabilityKind::NonNull)
@@ -2737,41 +2775,40 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
}
}
- if ((Attributes & ObjCDeclSpec::DQ_PR_atomic) &&
- (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "atomic" << "nonatomic";
- Attributes &= ~ObjCDeclSpec::DQ_PR_atomic;
+ if ((Attributes & ObjCPropertyAttribute::kind_atomic) &&
+ (Attributes & ObjCPropertyAttribute::kind_nonatomic)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "atomic"
+ << "nonatomic";
+ Attributes &= ~ObjCPropertyAttribute::kind_atomic;
}
// Warn if user supplied no assignment attribute, property is
// readwrite, and this is an object type.
if (!getOwnershipRule(Attributes) && PropertyTy->isObjCRetainableType()) {
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
+ if (Attributes & ObjCPropertyAttribute::kind_readonly) {
// do nothing
} else if (getLangOpts().ObjCAutoRefCount) {
// With arc, @property definitions should default to strong when
// not specified.
- PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ PropertyDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
} else if (PropertyTy->isObjCObjectPointerType()) {
- bool isAnyClassTy =
- (PropertyTy->isObjCClassType() ||
- PropertyTy->isObjCQualifiedClassType());
- // In non-gc, non-arc mode, 'Class' is treated as a 'void *' no need to
- // issue any warning.
- if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
- ;
- else if (propertyInPrimaryClass) {
- // Don't issue warning on property with no life time in class
- // extension as it is inherited from property in primary class.
- // Skip this warning in gc-only mode.
- if (getLangOpts().getGC() != LangOptions::GCOnly)
- Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
-
- // If non-gc code warn that this is likely inappropriate.
- if (getLangOpts().getGC() == LangOptions::NonGC)
- Diag(Loc, diag::warn_objc_property_default_assign_on_object);
- }
+ bool isAnyClassTy = (PropertyTy->isObjCClassType() ||
+ PropertyTy->isObjCQualifiedClassType());
+ // In non-gc, non-arc mode, 'Class' is treated as a 'void *' no need to
+ // issue any warning.
+ if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
+ ;
+ else if (propertyInPrimaryClass) {
+ // Don't issue warning on property with no life time in class
+ // extension as it is inherited from property in primary class.
+ // Skip this warning in gc-only mode.
+ if (getLangOpts().getGC() != LangOptions::GCOnly)
+ Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
+
+ // If non-gc code warn that this is likely inappropriate.
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+ }
}
// FIXME: Implement warning dependent on NSCopying being
@@ -2780,18 +2817,18 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
// (please trim this list while you are at it).
}
- if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
- &&!(Attributes & ObjCDeclSpec::DQ_PR_readonly)
- && getLangOpts().getGC() == LangOptions::GCOnly
- && PropertyTy->isBlockPointerType())
+ if (!(Attributes & ObjCPropertyAttribute::kind_copy) &&
+ !(Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ getLangOpts().getGC() == LangOptions::GCOnly &&
+ PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
- else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ else if ((Attributes & ObjCPropertyAttribute::kind_retain) &&
+ !(Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ !(Attributes & ObjCPropertyAttribute::kind_strong) &&
PropertyTy->isBlockPointerType())
- Diag(Loc, diag::warn_objc_property_retain_of_block);
+ Diag(Loc, diag::warn_objc_property_retain_of_block);
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_setter))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_setter))
Diag(Loc, diag::warn_objc_readonly_property_has_setter);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index 3fce0e27e9b3..920463da4027 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -18,12 +18,15 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -31,7 +34,10 @@
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include <set>
+
using namespace clang;
using namespace llvm::omp;
@@ -47,9 +53,10 @@ static const Expr *checkMapClauseExpressionBase(
namespace {
/// Default data sharing attributes, which can be applied to directive.
enum DefaultDataSharingAttributes {
- DSA_unspecified = 0, /// Data sharing attribute not specified.
- DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
- DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
+ DSA_unspecified = 0, /// Data sharing attribute not specified.
+ DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
+ DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
+ DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
};
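For orientation (not part of the patch), the new DSA_firstprivate value backs the default(firstprivate) clause added in a later OpenMP revision; a minimal sketch with made-up names, assuming that semantics:

  void example() {
    int x = 10;
  #pragma omp parallel default(firstprivate)
    {
      x += 1;   // each thread increments its own copy, initialized to 10
    }
    // x is still 10 here; the private copies are discarded
  }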
/// Stack for tracking declarations used in OpenMP directives and
@@ -59,24 +66,35 @@ public:
struct DSAVarData {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
+ unsigned Modifier = 0;
const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
const Expr *RefExpr, DeclRefExpr *PrivateCopy,
- SourceLocation ImplicitDSALoc)
- : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
+ SourceLocation ImplicitDSALoc, unsigned Modifier)
+ : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
+  /// Kind of the declaration used in uses_allocators clauses.
+ enum class UsesAllocatorsDeclKind {
+ /// Predefined allocator
+ PredefinedAllocator,
+ /// User-defined allocator
+ UserDefinedAllocator,
+    /// The declaration that represents an allocator trait.
+ AllocatorTrait,
+ };
private:
struct DSAInfo {
OpenMPClauseKind Attributes = OMPC_unknown;
+ unsigned Modifier = 0;
/// Pointer to a reference expression and a flag which shows that the
/// variable is marked as lastprivate(true) or not (false).
llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
@@ -151,13 +169,21 @@ private:
bool CancelRegion = false;
bool LoopStart = false;
bool BodyComplete = false;
+ SourceLocation PrevScanLocation;
+ SourceLocation PrevOrderedLocation;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
llvm::DenseSet<QualType> MappedClassesQualTypes;
+ SmallVector<Expr *, 4> InnerUsedAllocators;
+ llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
/// List of globals marked as declare target link in this target region
/// (isOpenMPTargetExecutionDirective(Directive) == true).
llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
+ /// List of decls used in inclusive/exclusive clauses of the scan directive.
+ llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
+ llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
+ UsesAllocatorsDecls;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -263,11 +289,18 @@ private:
SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
/// omp_allocator_handle_t type.
QualType OMPAllocatorHandleT;
+ /// omp_depend_t type.
+ QualType OMPDependT;
+ /// omp_event_handle_t type.
+ QualType OMPEventHandleT;
+ /// omp_alloctrait_t type.
+ QualType OMPAlloctraitT;
/// Expression for the predefined allocators.
Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
nullptr};
/// Vector of previously encountered target directives
SmallVector<SourceLocation, 2> TargetLocations;
+ SourceLocation AtomicLocation;
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
@@ -276,6 +309,10 @@ public:
void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
/// Gets omp_allocator_handle_t type.
QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
+ /// Sets omp_alloctrait_t type.
+ void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
+ /// Gets omp_alloctrait_t type.
+ QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
/// Sets the given default allocator.
void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
Expr *Allocator) {
@@ -285,6 +322,15 @@ public:
Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
return OMPPredefinedAllocators[AllocatorKind];
}
+ /// Sets omp_depend_t type.
+ void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
+ /// Gets omp_depend_t type.
+ QualType getOMPDependT() const { return OMPDependT; }
+
+ /// Sets omp_event_handle_t type.
+ void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
+ /// Gets omp_event_handle_t type.
+ QualType getOMPEventHandleT() const { return OMPEventHandleT; }
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
@@ -439,13 +485,32 @@ public:
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
+ /// Check if the specified variable is a loop control variable for
+  /// the current region or an enclosing region up to the given level.
+ /// \return The index of the loop control variable in the list of associated
+ /// for-loops (from outer to inner).
+ const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
+ unsigned Level) const;
/// Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
const ValueDecl *getParentLoopControlVariable(unsigned I) const;
+ /// Marks the specified decl \p D as used in scan directive.
+ void markDeclAsUsedInScanDirective(ValueDecl *D) {
+ if (SharingMapTy *Stack = getSecondOnStackOrNull())
+ Stack->UsedInScanDirective.insert(D);
+ }
+
+ /// Checks if the specified declaration was used in the inner scan directive.
+ bool isUsedInScanDirective(ValueDecl *D) const {
+ if (const SharingMapTy *Stack = getTopOfStackOrNull())
+ return Stack->UsedInScanDirective.count(D) > 0;
+ return false;
+ }
+
/// Adds explicit data sharing attribute to the specified declaration.
void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy = nullptr);
+ DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
@@ -467,11 +532,15 @@ public:
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const;
- /// Return reduction reference expression for the current taskgroup.
+ /// Return reduction reference expression for the current taskgroup or
+ /// parallel/worksharing directives with task reductions.
Expr *getTaskgroupReductionRef() const {
- assert(getTopOfStack().Directive == OMPD_taskgroup &&
- "taskgroup reference expression requested for non taskgroup "
- "directive.");
+ assert((getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
+ "taskgroup reference expression requested for non taskgroup or "
+ "parallel/worksharing directive.");
return getTopOfStack().TaskgroupReductionRef;
}
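The relaxed assertion reflects that task reductions may now be attached to parallel and worksharing directives via the task reduction-modifier, not only to taskgroup. An illustrative sketch (hypothetical names, OpenMP 5.0 semantics assumed):

  int sum(const int *a, int n) {
    int res = 0;
  #pragma omp parallel reduction(task, + : res)
  #pragma omp single
    for (int i = 0; i < n; ++i)
  #pragma omp task in_reduction(+ : res)
      res += a[i];    // tasks contribute to the parallel-level task reduction
    return res;
  }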
/// Checks if the given \p VD declaration is actually a taskgroup reduction
@@ -487,6 +556,8 @@ public:
const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
/// Returns data-sharing attributes for the specified declaration.
const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
+ /// Returns data-sharing attributes for the specified declaration.
+ const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
/// Checks if the specified variables has data-sharing attributes which
/// match specified \a CPred predicate in any directive which matches \a DPred
/// predicate.
@@ -552,7 +623,7 @@ public:
/// Checks if the defined 'requires' directive has specified type of clause.
template <typename ClauseType>
- bool hasRequiresDeclWithClause() {
+ bool hasRequiresDeclWithClause() const {
return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
return isa<ClauseType>(C);
@@ -587,6 +658,18 @@ public:
TargetLocations.push_back(LocStart);
}
+  /// Add the location of the first encountered atomic directive.
+ void addAtomicDirectiveLoc(SourceLocation Loc) {
+ if (AtomicLocation.isInvalid())
+ AtomicLocation = Loc;
+ }
+
+ /// Returns the location of the first encountered atomic directive in the
+ /// module.
+ SourceLocation getAtomicDirectiveLoc() const {
+ return AtomicLocation;
+ }
+
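The recorded location feeds the new "requires before use" diagnostic: a requires directive with atomic_default_mem_order must lexically precede any atomic construct it would affect. A hedged sketch of the rejected pattern (hypothetical code):

  void bump(int &x) {
  #pragma omp atomic
    x++;
  }

  // rejected: an atomic construct was already encountered above
  #pragma omp requires atomic_default_mem_order(seq_cst)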
// Return previously encountered target region locations.
ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
return TargetLocations;
@@ -602,6 +685,11 @@ public:
getTopOfStack().DefaultAttr = DSA_shared;
getTopOfStack().DefaultAttrLoc = Loc;
}
+ /// Set default data sharing attribute to firstprivate.
+ void setDefaultDSAFirstPrivate(SourceLocation Loc) {
+ getTopOfStack().DefaultAttr = DSA_firstprivate;
+ getTopOfStack().DefaultAttrLoc = Loc;
+ }
/// Set default data mapping attribute to Modifier:Kind
void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
OpenMPDefaultmapClauseKind Kind,
@@ -612,10 +700,24 @@ public:
}
/// Check whether the implicit-behavior has been set in defaultmap
bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
+ if (VariableCategory == OMPC_DEFAULTMAP_unknown)
+ return getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
+ getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
+ getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
OMPC_DEFAULTMAP_MODIFIER_unknown;
}
+ DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
+ return getStackSize() <= Level ? DSA_unspecified
+ : getStackElemAtLevel(Level).DefaultAttr;
+ }
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
: getTopOfStack().DefaultAttr;
@@ -738,6 +840,37 @@ public:
return Top ? Top->CancelRegion : false;
}
+ /// Mark that parent region already has scan directive.
+ void setParentHasScanDirective(SourceLocation Loc) {
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->PrevScanLocation = Loc;
+ }
+  /// Return true if the parent region already has a scan directive.
+ bool doesParentHasScanDirective() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevScanLocation.isValid() : false;
+ }
+  /// Return the location of the scan directive in the parent region, if any.
+ SourceLocation getParentScanDirectiveLoc() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevScanLocation : SourceLocation();
+ }
+ /// Mark that parent region already has ordered directive.
+ void setParentHasOrderedDirective(SourceLocation Loc) {
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->PrevOrderedLocation = Loc;
+ }
+  /// Return true if the parent region already has an ordered directive.
+ bool doesParentHasOrderedDirective() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevOrderedLocation.isValid() : false;
+ }
+ /// Returns the location of the previously specified ordered directive.
+ SourceLocation getParentOrderedDirectiveLoc() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevOrderedLocation : SourceLocation();
+ }
+
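This parent-region bookkeeping is consulted when validating scan and ordered constructs nested inside loop regions. For context, an ordinary (valid) use of an ordered region that the tracking covers looks roughly like this (illustrative only):

  void scale_and_accumulate(float *a, int n) {
  #pragma omp parallel for ordered
    for (int i = 1; i < n; ++i) {
      a[i] *= 0.5f;                 // may run out of order
  #pragma omp ordered
      a[i] += a[i - 1];             // executed in iteration order
    }
  }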
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
getTopOfStack().AssociatedLoops = Val;
@@ -899,6 +1032,46 @@ public:
"Expected target executable directive.");
return getTopOfStack().DeclareTargetLinkVarDecls;
}
+
+  /// Adds an allocator expression to the list of used allocators.
+ void addInnerAllocatorExpr(Expr *E) {
+ getTopOfStack().InnerUsedAllocators.push_back(E);
+ }
+ /// Return list of used allocators.
+ ArrayRef<Expr *> getInnerAllocators() const {
+ return getTopOfStack().InnerUsedAllocators;
+ }
+  /// Marks the declaration as implicitly firstprivate in the task-based
+ /// regions.
+ void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
+ getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
+ }
+ /// Checks if the decl is implicitly firstprivate in the task-based region.
+ bool isImplicitTaskFirstprivate(Decl *D) const {
+ return getTopOfStack().ImplicitTaskFirstprivates.count(D) > 0;
+ }
+
+ /// Marks decl as used in uses_allocators clause as the allocator.
+ void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
+ getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
+ }
+  /// Checks if the specified decl is used in a uses_allocators clause as the
+ /// allocator.
+ Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
+ const Decl *D) const {
+ const SharingMapTy &StackElem = getTopOfStack();
+ auto I = StackElem.UsesAllocatorsDecls.find(D);
+ if (I == StackElem.UsesAllocatorsDecls.end())
+ return None;
+ return I->getSecond();
+ }
+ Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
+ const SharingMapTy &StackElem = getTopOfStack();
+ auto I = StackElem.UsesAllocatorsDecls.find(D);
+ if (I == StackElem.UsesAllocatorsDecls.end())
+ return None;
+ return I->getSecond();
+ }
};
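The uses_allocators bookkeeping added above supports clauses of the following shape. A hedged sketch assuming OpenMP 5.0 semantics; my_traits and my_alloc are invented names, while omp_default_mem_alloc and the omp.h entry points are the standard ones:

  #include <omp.h>

  // traits array for a user-defined allocator (illustration only)
  omp_alloctrait_t my_traits[] = {{omp_atk_alignment, 64}};

  void device_work(int n) {
    omp_allocator_handle_t my_alloc = omp_null_allocator;
  #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
    {
      int *buf = (int *)omp_alloc(n * sizeof(int), my_alloc);
      // ... use buf on the device ...
      omp_free(buf, my_alloc);
    }
  }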
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -1001,6 +1174,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ DVar.Modifier = Data.Modifier;
return DVar;
}
@@ -1015,6 +1189,15 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
return DVar;
case DSA_none:
return DVar;
+ case DSA_firstprivate:
+ if (VD->getStorageDuration() == SD_Static &&
+ VD->getDeclContext()->isFileContext()) {
+ DVar.CKind = OMPC_unknown;
+ } else {
+ DVar.CKind = OMPC_firstprivate;
+ }
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
@@ -1113,6 +1296,19 @@ DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
}
const DSAStackTy::LCDeclInfo
+DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
+ assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
+ D = getCanonicalDecl(D);
+ for (unsigned I = Level + 1; I > 0; --I) {
+ const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
+ auto It = StackElem.LCVMap.find(D);
+ if (It != StackElem.LCVMap.end())
+ return It->second;
+ }
+ return {0, nullptr};
+}
+
+const DSAStackTy::LCDeclInfo
DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
const SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && "Data-sharing attributes stack is empty");
@@ -1135,19 +1331,21 @@ const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
}
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy) {
+ DeclRefExpr *PrivateCopy, unsigned Modifier) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
DSAInfo &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
+ Data.Modifier = Modifier;
} else {
DSAInfo &Data = getTopOfStack().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
(isLoopControlVariable(D).first && A == OMPC_private));
+ Data.Modifier = Modifier;
if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
Data.RefExpr.setInt(/*IntVal=*/true);
return;
@@ -1159,6 +1357,7 @@ void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
+ Data.Modifier = Modifier;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
@@ -1207,7 +1406,10 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- getTopOfStack().Directive == OMPD_taskgroup &&
+ (getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
@@ -1230,7 +1432,10 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- getTopOfStack().Directive == OMPD_taskgroup &&
+ (getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
@@ -1251,7 +1456,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
- if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
+ if (Data.Attributes != OMPC_reduction ||
+ Data.Modifier != OMPC_REDUCTION_task)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
@@ -1263,8 +1469,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
- return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc);
+ return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
}
return DSAVarData();
}
@@ -1276,7 +1482,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
- if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
+ if (Data.Attributes != OMPC_reduction ||
+ Data.Modifier != OMPC_REDUCTION_task)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
@@ -1288,8 +1495,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
- return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc);
+ return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
}
return DSAVarData();
}
@@ -1364,6 +1571,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
+ DVar.Modifier = TI->getSecond().Modifier;
return DVar;
}
if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
@@ -1447,15 +1655,18 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
- auto It = I->SharingMap.find(D);
- if (It != I->SharingMap.end()) {
- const DSAInfo &Data = It->getSecond();
- DVar.RefExpr = Data.RefExpr.getPointer();
- DVar.PrivateCopy = Data.PrivateCopy;
- DVar.CKind = Data.Attributes;
- DVar.ImplicitDSALoc = I->DefaultAttrLoc;
- DVar.DKind = I->Directive;
- return DVar;
+ if (I != EndI) {
+ auto It = I->SharingMap.find(D);
+ if (It != I->SharingMap.end()) {
+ const DSAInfo &Data = It->getSecond();
+ DVar.RefExpr = Data.RefExpr.getPointer();
+ DVar.PrivateCopy = Data.PrivateCopy;
+ DVar.CKind = Data.Attributes;
+ DVar.ImplicitDSALoc = I->DefaultAttrLoc;
+ DVar.DKind = I->Directive;
+ DVar.Modifier = Data.Modifier;
+ return DVar;
+ }
}
DVar.CKind = OMPC_shared;
@@ -1493,6 +1704,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
+ if (I == EndI)
+ return DVar;
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
@@ -1501,6 +1714,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
+ DVar.Modifier = Data.Modifier;
}
return DVar;
@@ -1520,6 +1734,15 @@ const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
return getDSA(StartI, D);
}
+const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
+ unsigned Level) const {
+ if (getStackSize() <= Level)
+ return DSAVarData();
+ D = getCanonicalDecl(D);
+ const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
+ return getDSA(StartI, D);
+}
+
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
@@ -1640,23 +1863,28 @@ Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
- FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
+
+ FunctionDecl *FD = getCurFunctionDecl();
DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
- switch (FES) {
- case FunctionEmissionStatus::Emitted:
- Kind = DeviceDiagBuilder::K_Immediate;
- break;
- case FunctionEmissionStatus::Unknown:
- Kind = isOpenMPDeviceDelayedContext(*this) ? DeviceDiagBuilder::K_Deferred
- : DeviceDiagBuilder::K_Immediate;
- break;
- case FunctionEmissionStatus::TemplateDiscarded:
- case FunctionEmissionStatus::OMPDiscarded:
- Kind = DeviceDiagBuilder::K_Nop;
- break;
- case FunctionEmissionStatus::CUDADiscarded:
- llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
- break;
+ if (FD) {
+ FunctionEmissionStatus FES = getEmissionStatus(FD);
+ switch (FES) {
+ case FunctionEmissionStatus::Emitted:
+ Kind = DeviceDiagBuilder::K_Immediate;
+ break;
+ case FunctionEmissionStatus::Unknown:
+ Kind = isOpenMPDeviceDelayedContext(*this)
+ ? DeviceDiagBuilder::K_Deferred
+ : DeviceDiagBuilder::K_Immediate;
+ break;
+ case FunctionEmissionStatus::TemplateDiscarded:
+ case FunctionEmissionStatus::OMPDiscarded:
+ Kind = DeviceDiagBuilder::K_Nop;
+ break;
+ case FunctionEmissionStatus::CUDADiscarded:
+ llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
+ break;
+ }
}
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
@@ -1685,107 +1913,6 @@ Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
-void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckForDelayedContext) {
- assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
- "Expected OpenMP device compilation.");
- assert(Callee && "Callee may not be null.");
- Callee = Callee->getMostRecentDecl();
- FunctionDecl *Caller = getCurFunctionDecl();
-
- // host only function are not available on the device.
- if (Caller) {
- FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
- FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
- assert(CallerS != FunctionEmissionStatus::CUDADiscarded &&
- CalleeS != FunctionEmissionStatus::CUDADiscarded &&
- "CUDADiscarded unexpected in OpenMP device function check");
- if ((CallerS == FunctionEmissionStatus::Emitted ||
- (!isOpenMPDeviceDelayedContext(*this) &&
- CallerS == FunctionEmissionStatus::Unknown)) &&
- CalleeS == FunctionEmissionStatus::OMPDiscarded) {
- StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_host);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
- Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << HostDevTy;
- return;
- }
- }
- // If the caller is known-emitted, mark the callee as known-emitted.
- // Otherwise, mark the call in our call graph so we can traverse it later.
- if ((CheckForDelayedContext && !isOpenMPDeviceDelayedContext(*this)) ||
- (!Caller && !CheckForDelayedContext) ||
- (Caller && getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
- markKnownEmitted(*this, Caller, Callee, Loc,
- [CheckForDelayedContext](Sema &S, FunctionDecl *FD) {
- return CheckForDelayedContext &&
- S.getEmissionStatus(FD) ==
- FunctionEmissionStatus::Emitted;
- });
- else if (Caller)
- DeviceCallGraph[Caller].insert({Callee, Loc});
-}
-
-void Sema::checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckCaller) {
- assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
- "Expected OpenMP host compilation.");
- assert(Callee && "Callee may not be null.");
- Callee = Callee->getMostRecentDecl();
- FunctionDecl *Caller = getCurFunctionDecl();
-
- // device only function are not available on the host.
- if (Caller) {
- FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
- FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
- assert(
- (LangOpts.CUDA || (CallerS != FunctionEmissionStatus::CUDADiscarded &&
- CalleeS != FunctionEmissionStatus::CUDADiscarded)) &&
- "CUDADiscarded unexpected in OpenMP host function check");
- if (CallerS == FunctionEmissionStatus::Emitted &&
- CalleeS == FunctionEmissionStatus::OMPDiscarded) {
- StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
- Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << NoHostDevTy;
- return;
- }
- }
- // If the caller is known-emitted, mark the callee as known-emitted.
- // Otherwise, mark the call in our call graph so we can traverse it later.
- if (!shouldIgnoreInHostDeviceCheck(Callee)) {
- if ((!CheckCaller && !Caller) ||
- (Caller &&
- getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
- markKnownEmitted(
- *this, Caller, Callee, Loc, [CheckCaller](Sema &S, FunctionDecl *FD) {
- return CheckCaller &&
- S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
- });
- else if (Caller)
- DeviceCallGraph[Caller].insert({Callee, Loc});
- }
-}
-
-void Sema::checkOpenMPDeviceExpr(const Expr *E) {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- "OpenMP device compilation mode is expected.");
- QualType Ty = E->getType();
- if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
- ((Ty->isFloat128Type() ||
- (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
- !Context.getTargetInfo().hasFloat128Type()) ||
- (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
- !Context.getTargetInfo().hasInt128Type()))
- targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
- << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
- << Context.getTargetInfo().getTriple().str() << E->getSourceRange();
-}
-
static OpenMPDefaultmapClauseKind
getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
if (LO.OpenMP <= 45) {
@@ -1901,7 +2028,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
- isa<MemberExpr>(EI->getAssociatedExpression())) {
+ isa<MemberExpr>(EI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
@@ -1935,14 +2063,23 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
((IsVariableUsedInMapClause &&
DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
OMPD_target) ||
- !DSAStack->hasExplicitDSA(
- D,
- [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true)) &&
+ !(DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool {
+ return K == OMPC_firstprivate;
+ },
+ Level, /*NotLastprivate=*/true) ||
+ DSAStack->isUsesAllocatorsDecl(Level, D))) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
- !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
+ !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
+ // If the variable is implicitly firstprivate and scalar - capture by
+ // copy
+ !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
+ !DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K != OMPC_unknown; }, Level) &&
+ !DSAStack->isLoopControlVariable(D, Level).first);
}
// When passing data by copy, we need to make sure it fits the uintptr size
@@ -2010,7 +2147,23 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
//
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return nullptr;
- return VD;
+ CapturedRegionScopeInfo *CSI = nullptr;
+ for (FunctionScopeInfo *FSI : llvm::drop_begin(
+ llvm::reverse(FunctionScopes),
+ CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
+ if (!isa<CapturingScopeInfo>(FSI))
+ return nullptr;
+ if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
+ if (RSI->CapRegionKind == CR_OpenMP) {
+ CSI = RSI;
+ break;
+ }
+ }
+ SmallVector<OpenMPDirectiveKind, 4> Regions;
+ getOpenMPCaptureRegions(Regions,
+ DSAStack->getDirective(CSI->OpenMPLevel));
+ if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
+ return VD;
}
}
@@ -2039,20 +2192,27 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
- DSAStackTy::DSAVarData DVarPrivate =
+ DSAStackTy::DSAVarData DVarTop =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
- if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
- return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
+ if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind))
+ return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
// Threadprivate variables must not be captured.
- if (isOpenMPThreadPrivate(DVarPrivate.CKind))
+ if (isOpenMPThreadPrivate(DVarTop.CKind))
return nullptr;
// The variable is not private or it is the variable in the directive with
// default(none) clause and not used in any clause.
- DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
- [](OpenMPDirectiveKind) { return true; },
- DSAStack->isClauseParsingMode());
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D, isOpenMPPrivate, [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ // Global shared must not be captured.
+ if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
+ ((DSAStack->getDefaultDSA() != DSA_none &&
+ DSAStack->getDefaultDSA() != DSA_firstprivate) ||
+ DVarTop.CKind == OMPC_shared))
+ return nullptr;
if (DVarPrivate.CKind != OMPC_unknown ||
- (VD && DSAStack->getDefaultDSA() == DSA_none))
+ (VD && (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate)))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
@@ -2060,9 +2220,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const {
- SmallVector<OpenMPDirectiveKind, 4> Regions;
- getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
- FunctionScopesIndex -= Regions.size();
+ FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
}
void Sema::startOpenMPLoop() {
@@ -2079,39 +2237,77 @@ void Sema::startOpenMPCXXRangeFor() {
}
}
-bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
+OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
+ unsigned CapLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (DSAStack->hasExplicitDirective(
+ [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
+ Level)) {
+ bool IsTriviallyCopyable =
+ D->getType().getNonReferenceType().isTriviallyCopyableType(Context);
+ OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, DKind);
+ if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
+ (IsTriviallyCopyable ||
+ !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
+ if (DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true))
+ return OMPC_firstprivate;
+ DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
+ if (DVar.CKind != OMPC_shared &&
+ !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
+ DSAStack->addImplicitTaskFirstprivate(Level, D);
+ return OMPC_firstprivate;
+ }
+ }
+ }
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
if (DSAStack->getAssociatedLoops() > 0 &&
!DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
- return true;
+ return OMPC_private;
}
if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
DSAStack->isLoopControlVariable(D).first) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
!isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
- return true;
+ return OMPC_private;
}
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
DSAStack->isForceVarCapturing() &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
- return true;
- }
- return DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
- (DSAStack->isClauseParsingMode() &&
- DSAStack->getClauseParsingMode() == OMPC_private) ||
- // Consider taskgroup reduction descriptor variable a private to avoid
- // possible capture in the region.
- (DSAStack->hasExplicitDirective(
- [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
- Level) &&
- DSAStack->isTaskgroupReductionRef(D, Level));
+ return OMPC_private;
+ }
+ // User-defined allocators are private since they must be defined in the
+  // context of a target region.
+ if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
+ DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
+ return OMPC_private;
+ return (DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
+ (DSAStack->isClauseParsingMode() &&
+ DSAStack->getClauseParsingMode() == OMPC_private) ||
+ // Consider taskgroup reduction descriptor variable a private
+ // to avoid possible capture in the region.
+ (DSAStack->hasExplicitDirective(
+ [](OpenMPDirectiveKind K) {
+ return K == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(K) ||
+ isOpenMPWorksharingDirective(K)) &&
+ !isOpenMPSimdDirective(K));
+ },
+ Level) &&
+ DSAStack->isTaskgroupReductionRef(D, Level)))
+ ? OMPC_private
+ : OMPC_unknown;
}
void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
@@ -2148,68 +2344,101 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
}
}
if (OMPC != OMPC_unknown)
- FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
+ FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
}
-bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
- unsigned Level) const {
+bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true if the current level is no longer enclosed in a target region.
+ SmallVector<OpenMPDirectiveKind, 4> Regions;
+ getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
const auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
- Level);
+ Level) &&
+ Regions[CaptureLevel] != OMPD_task;
+}
+
+bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+  // Return true if the global variable must be captured at the given level.
+
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->hasLocalStorage()) {
+ DSAStackTy::DSAVarData TopDVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
+ unsigned NumLevels =
+ getOpenMPCaptureLevels(DSAStack->getDirective(Level));
+ if (Level == 0)
+ return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
+ DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level - 1);
+ return DVar.CKind != OMPC_shared ||
+ isOpenMPGlobalCapturedDecl(
+ D, Level - 1,
+ getOpenMPCaptureLevels(DSAStack->getDirective(Level - 1)) - 1);
+ }
+ }
+ return true;
}
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
-void Sema::finalizeOpenMPDelayedAnalysis() {
+void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
+ OMPTraitInfo &TI) {
+ if (!OMPDeclareVariantScopes.empty()) {
+ Diag(Loc, diag::warn_nested_declare_variant);
+ return;
+ }
+ OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
+}
+
+void Sema::ActOnOpenMPEndDeclareVariant() {
+ assert(isInOpenMPDeclareVariantScope() &&
+ "Not in OpenMP declare variant scope!");
+
+ OMPDeclareVariantScopes.pop_back();
+}
+
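These entry points back the new begin/end form of declare variant. A minimal sketch of the source pattern they manage (the match selector is just an example):

  int foo(void) { return 0; }              // base version

  #pragma omp begin declare variant match(device = {kind(gpu)})
  int foo(void) { return 1; }              // selected when compiling for a GPU
  #pragma omp end declare variant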
+void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ SourceLocation Loc) {
assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
- // Diagnose implicit declare target functions and their callees.
- for (const auto &CallerCallees : DeviceCallGraph) {
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(
- CallerCallees.getFirst()->getMostRecentDecl());
- // Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
- continue;
- // Ignore nohost functions during host analyzis.
- if (!LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
- continue;
- for (const std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation>
- &Callee : CallerCallees.getSecond()) {
- const FunctionDecl *FD = Callee.first->getMostRecentDecl();
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
- // Diagnose host function called during device codegen.
- StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_host);
- Diag(Callee.second, diag::err_omp_wrong_device_function_call)
- << HostDevTy << 0;
- Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << HostDevTy;
- continue;
- }
+ Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
+  // Ignore host functions during device analysis.
+ if (LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ return;
+  // Ignore nohost functions during host analysis.
+ if (!LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
+ return;
+ const FunctionDecl *FD = Callee->getMostRecentDecl();
+ DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
+ if (LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
+ // Diagnose host function called during device codegen.
+ StringRef HostDevTy =
+ getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
+ Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
+ diag::note_omp_marked_device_type_here)
+ << HostDevTy;
+ return;
+ }
if (!LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// Diagnose nohost function called during host codegen.
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Callee.second, diag::err_omp_wrong_device_function_call)
- << NoHostDevTy << 1;
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< NoHostDevTy;
- continue;
}
- }
- }
}
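The analysis now runs per call edge instead of walking a deferred device call graph. The situation it diagnoses looks roughly like this (hypothetical code, assuming the usual declare target semantics):

  void host_helper(void);
  #pragma omp declare target to(host_helper) device_type(host)

  void device_fn(void);
  #pragma omp declare target to(device_fn)

  void device_fn(void) {
    host_helper();   // flagged during device compilation: the callee is
                     // device_type(host) but the caller is emitted for the device
  }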
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -2228,14 +2457,86 @@ void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
-static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
- ArrayRef<OMPClause *> Clauses);
static std::pair<ValueDecl *, bool>
getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
SourceRange &ERange, bool AllowArraySection = false);
+
+/// Check consistency of the reduction clauses.
+static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses) {
+ bool InscanFound = false;
+ SourceLocation InscanLoc;
+ // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
+ // A reduction clause without the inscan reduction-modifier may not appear on
+ // a construct on which a reduction clause with the inscan reduction-modifier
+ // appears.
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() != OMPC_reduction)
+ continue;
+ auto *RC = cast<OMPReductionClause>(C);
+ if (RC->getModifier() == OMPC_REDUCTION_inscan) {
+ InscanFound = true;
+ InscanLoc = RC->getModifierLoc();
+ continue;
+ }
+ if (RC->getModifier() == OMPC_REDUCTION_task) {
+ // OpenMP 5.0, 2.19.5.4 reduction Clause.
+ // A reduction clause with the task reduction-modifier may only appear on
+ // a parallel construct, a worksharing construct or a combined or
+ // composite construct for which any of the aforementioned constructs is a
+ // constituent construct and simd or loop are not constituent constructs.
+ OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
+ if (!(isOpenMPParallelDirective(CurDir) ||
+ isOpenMPWorksharingDirective(CurDir)) ||
+ isOpenMPSimdDirective(CurDir))
+ S.Diag(RC->getModifierLoc(),
+ diag::err_omp_reduction_task_not_parallel_or_worksharing);
+ continue;
+ }
+ }
+ if (InscanFound) {
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() != OMPC_reduction)
+ continue;
+ auto *RC = cast<OMPReductionClause>(C);
+ if (RC->getModifier() != OMPC_REDUCTION_inscan) {
+ S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
+ ? RC->getBeginLoc()
+ : RC->getModifierLoc(),
+ diag::err_omp_inscan_reduction_expected);
+ S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
+ continue;
+ }
+ for (Expr *Ref : RC->varlists()) {
+        assert(Ref && "NULL expr in OpenMP reduction clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = Ref;
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+ if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
+ S.Diag(Ref->getExprLoc(),
+ diag::err_omp_reduction_not_inclusive_exclusive)
+ << Ref->getSourceRange();
+ }
+ }
+ }
+ }
+}
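The rules enforced here pair inscan reductions with a scan directive in the loop body. A minimal sketch of the accepted pattern (names invented, OpenMP 5.0 inclusive-scan semantics assumed):

  void inclusive_prefix_sum(const int *a, int *b, int n) {
    int x = 0;
  #pragma omp parallel for reduction(inscan, + : x)
    for (int i = 0; i < n; ++i) {
      x += a[i];
  #pragma omp scan inclusive(x)
      b[i] = x;
    }
  }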
+
+static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses);
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit);
+static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
+ const ValueDecl *D,
+ const DSAStackTy::DSAVarData &DVar,
+ bool IsLoopIterVar = false);
+
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
@@ -2305,10 +2606,56 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
Clause->setPrivateRefs(PrivateRefs);
continue;
}
+ if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
+ for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
+ auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ ValueDecl *VD = DRE->getDecl();
+ if (!VD || !isa<VarDecl>(VD))
+ continue;
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ // OpenMP [2.12.5, target Construct]
+ // Memory allocators that appear in a uses_allocators clause cannot
+ // appear in other data-sharing attribute clauses or data-mapping
+ // attribute clauses in the same construct.
+ Expr *MapExpr = nullptr;
+ if (DVar.RefExpr ||
+ DSAStack->checkMappableExprComponentListsForDecl(
+ VD, /*CurrentRegionOnly=*/true,
+ [VD, &MapExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef
+ MapExprComponents,
+ OpenMPClauseKind C) {
+ auto MI = MapExprComponents.rbegin();
+ auto ME = MapExprComponents.rend();
+ if (MI != ME &&
+ MI->getAssociatedDeclaration()->getCanonicalDecl() ==
+ VD->getCanonicalDecl()) {
+ MapExpr = MI->getAssociatedExpression();
+ return true;
+ }
+ return false;
+ })) {
+ Diag(D.Allocator->getExprLoc(),
+ diag::err_omp_allocator_used_in_clauses)
+ << D.Allocator->getSourceRange();
+ if (DVar.RefExpr)
+ reportOriginalDsa(*this, DSAStack, VD, DVar);
+ else
+ Diag(MapExpr->getExprLoc(), diag::note_used_here)
+ << MapExpr->getSourceRange();
+ }
+ }
+ continue;
+ }
}
// Check allocate clauses.
if (!CurContext->isDependentContext())
checkAllocateClauses(*this, DSAStack, D->clauses());
+ checkReductionClauses(*this, DSAStack, D->clauses());
}
DSAStack->pop();
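The uses_allocators check above rejects allocators that also appear in data-sharing or mapping clauses of the same construct. Roughly (hypothetical code):

  #include <omp.h>

  omp_alloctrait_t traits[] = {{omp_atk_alignment, 64}};   // illustration only

  void bad(void) {
    omp_allocator_handle_t a = omp_null_allocator;
    // rejected: 'a' appears both in a map clause and in uses_allocators
    // on the same target construct
  #pragma omp target map(tofrom : a) uses_allocators(a(traits))
    { }
  }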
@@ -2618,15 +2965,14 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
static OMPAllocateDeclAttr::AllocatorTypeTy
getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
if (!Allocator)
- return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
+ return OMPAllocateDeclAttr::OMPNullMemAlloc;
if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
Allocator->isInstantiationDependent() ||
Allocator->containsUnexpandedParameterPack())
return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
const Expr *AE = Allocator->IgnoreParenImpCasts();
- for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
- I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
llvm::FoldingSetNodeID AEId, DAEId;
@@ -2799,18 +3145,26 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
/// current compilation unit.
ArrayRef<SourceLocation> TargetLocations =
DSAStack->getEncounteredTargetLocs();
- if (!TargetLocations.empty()) {
+ SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
+ if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
for (const OMPClause *CNew : ClauseList) {
// Check if any of the requires clauses affect target regions.
if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
isa<OMPUnifiedAddressClause>(CNew) ||
isa<OMPReverseOffloadClause>(CNew) ||
isa<OMPDynamicAllocatorsClause>(CNew)) {
- Diag(Loc, diag::err_omp_target_before_requires)
- << getOpenMPClauseName(CNew->getClauseKind());
+ Diag(Loc, diag::err_omp_directive_before_requires)
+ << "target" << getOpenMPClauseName(CNew->getClauseKind());
for (SourceLocation TargetLoc : TargetLocations) {
- Diag(TargetLoc, diag::note_omp_requires_encountered_target);
+ Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
+ << "target";
}
+ } else if (!AtomicLoc.isInvalid() &&
+ isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
+ Diag(Loc, diag::err_omp_directive_before_requires)
+ << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
+ Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
+ << "atomic";
}
}
}
@@ -2824,7 +3178,7 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
const ValueDecl *D,
const DSAStackTy::DSAVarData &DVar,
- bool IsLoopIterVar = false) {
+ bool IsLoopIterVar) {
if (DVar.RefExpr) {
SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
@@ -2944,6 +3298,16 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
}
+    // In tasks, firstprivates are no longer captured implicitly, so they need
+    // to be analyzed explicitly.
+ if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
+ !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
+ for (OMPClause *C : S->clauses())
+ if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
+ for (Expr *Ref : FC->varlists())
+ Visit(Ref);
+ }
+ }
}
public:
@@ -2966,7 +3330,11 @@ public:
return;
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
- if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
+ if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
+ !Stack->isImplicitTaskFirstprivate(VD))
+ return;
+ // Skip allocators in uses_allocators clauses.
+ if (Stack->isUsesAllocatorsDecl(VD).hasValue())
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
@@ -2979,7 +3347,8 @@ public:
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
(Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
- !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
+ !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
+ !Stack->isImplicitTaskFirstprivate(VD))
return;
SourceLocation ELoc = E->getExprLoc();
@@ -2988,10 +3357,19 @@ public:
// in the construct, and does not have a predetermined data-sharing
// attribute, must have its data-sharing attribute explicitly determined
// by being listed in a data-sharing attribute clause.
- if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
+ if (DVar.CKind == OMPC_unknown &&
+ (Stack->getDefaultDSA() == DSA_none ||
+ Stack->getDefaultDSA() == DSA_firstprivate) &&
isImplicitOrExplicitTaskingRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
- VarsWithInheritedDSA[VD] = E;
+ bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
+ if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
+ DSAStackTy::DSAVarData DVar =
+ Stack->getImplicitDSA(VD, /*FromParent=*/false);
+ InheritedDSA = DVar.CKind == OMPC_unknown;
+ }
+ if (InheritedDSA)
+ VarsWithInheritedDSA[VD] = E;
return;
}
@@ -3036,7 +3414,7 @@ public:
StackComponents,
OpenMPClauseKind) {
// Variable is used if it has been marked as an array, array
- // section or the variable iself.
+                  // section, array shaping, or the variable itself.
return StackComponents.size() == 1 ||
std::all_of(
std::next(StackComponents.rbegin()),
@@ -3047,6 +3425,8 @@ public:
nullptr &&
(isa<OMPArraySectionExpr>(
MC.getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(
+ MC.getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(
MC.getAssociatedExpression()));
});
@@ -3091,7 +3471,9 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
- if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
+ if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
+ (Stack->getDefaultDSA() == DSA_firstprivate &&
+ DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
!Stack->isLoopControlVariable(VD).first) {
ImplicitFirstprivate.push_back(E);
return;
@@ -3112,7 +3494,7 @@ public:
return;
auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
- if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
+ if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
if (!FD)
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
@@ -3204,8 +3586,10 @@ public:
// Do both expressions have the same kind?
if (CCI->getAssociatedExpression()->getStmtClass() !=
SC.getAssociatedExpression()->getStmtClass())
- if (!(isa<OMPArraySectionExpr>(
- SC.getAssociatedExpression()) &&
+ if (!((isa<OMPArraySectionExpr>(
+ SC.getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(
+ SC.getAssociatedExpression())) &&
isa<ArraySubscriptExpr>(
CCI->getAssociatedExpression())))
return false;
@@ -3516,7 +3900,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
};
// Start a captured region for 'parallel'.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsParallel, /*OpenMPCaptureLevel=*/1);
+ ParamsParallel, /*OpenMPCaptureLevel=*/0);
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
@@ -3537,7 +3921,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/2);
+ Params, /*OpenMPCaptureLevel=*/1);
// Mark this captured region as inlined, because we don't use outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
@@ -3688,6 +4072,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
@@ -3695,8 +4081,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_end_declare_target:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
}
@@ -3841,6 +4230,36 @@ void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
}
}
+static bool checkOrderedOrderSpecified(Sema &S,
+ const ArrayRef<OMPClause *> Clauses) {
+ const OMPOrderedClause *Ordered = nullptr;
+ const OMPOrderClause *Order = nullptr;
+
+ for (const OMPClause *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_ordered)
+ Ordered = cast<OMPOrderedClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_order) {
+ Order = cast<OMPOrderClause>(Clause);
+ if (Order->getKind() != OMPC_ORDER_concurrent)
+ Order = nullptr;
+ }
+ if (Ordered && Order)
+ break;
+ }
+
+ if (Ordered && Order) {
+ S.Diag(Order->getKindKwLoc(),
+ diag::err_omp_simple_clause_incompatible_with_ordered)
+ << getOpenMPClauseName(OMPC_order)
+ << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
+ << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
+ S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
+ << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
+ return true;
+ }
+ return false;
+}
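A minimal sketch of the clause combination this helper rejects, assuming OpenMP 5.0 syntax (illustrative only, not part of the patch; 'work' and 'n' are placeholder names):

  // error: 'order(concurrent)' may not be combined with 'ordered'
  #pragma omp for ordered order(concurrent)
  for (int i = 0; i < n; ++i)
    work(i);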
+
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
bool ErrorFound = false;
@@ -3859,7 +4278,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
SmallVector<const OMPClauseWithPreInit *, 4> PICs;
// This is required for proper codegen.
for (OMPClause *Clause : Clauses) {
- if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ if (!LangOpts.OpenMPSimd &&
+ isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
@@ -3897,6 +4317,9 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
else if (Clause->getClauseKind() == OMPC_linear)
LCs.push_back(cast<OMPLinearClause>(Clause));
}
+ // Capture allocator expressions if used.
+ for (Expr *E : DSAStack->getInnerAllocators())
+ MarkDeclarationsReferencedInExpr(E);
// OpenMP, 2.7.1 Loop Construct, Restrictions
// The nonmonotonic modifier cannot be specified if an ordered clause is
// specified.
@@ -3908,10 +4331,18 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
? SC->getFirstScheduleModifierLoc()
: SC->getSecondScheduleModifierLoc(),
- diag::err_omp_schedule_nonmonotonic_ordered)
+ diag::err_omp_simple_clause_incompatible_with_ordered)
+ << getOpenMPClauseName(OMPC_schedule)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ OMPC_SCHEDULE_MODIFIER_nonmonotonic)
<< SourceRange(OC->getBeginLoc(), OC->getEndLoc());
ErrorFound = true;
}
+ // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
+ // If an order(concurrent) clause is present, an ordered clause may not appear
+ // on the same directive.
+ if (checkOrderedOrderSpecified(*this, Clauses))
+ ErrorFound = true;
if (!LCs.empty() && OC && OC->getNumForLoops()) {
for (const OMPLinearClause *C : LCs) {
Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
@@ -3952,6 +4383,21 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
}
}
+ if (ThisCaptureRegion == OMPD_target) {
+ // Capture allocator traits in the target region. They are used implicitly
+ // and, thus, are not captured by default.
+ for (OMPClause *C : Clauses) {
+ if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
+ for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
+ ++I) {
+ OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
+ if (Expr *E = D.AllocatorTraits)
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ continue;
+ }
+ }
+ }
if (++CompletedRegions == CaptureRegions.size())
DSAStack->setBodyComplete();
SR = ActOnCapturedRegionEnd(SR.get());
@@ -3991,12 +4437,14 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ShouldBeInParallelRegion,
ShouldBeInOrderedRegion,
ShouldBeInTargetRegion,
- ShouldBeInTeamsRegion
+ ShouldBeInTeamsRegion,
+ ShouldBeInLoopSimdRegion,
} Recommend = NoRecommend;
if (isOpenMPSimdDirective(ParentRegion) &&
((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
(SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
- CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic))) {
+ CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
+ CurrentRegion != OMPD_scan))) {
// OpenMP [2.16, Nesting of Regions]
// OpenMP constructs may not be nested inside a simd region.
// OpenMP [2.8.1,simd Construct, Restrictions]
@@ -4041,7 +4489,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
if (ParentRegion == OMPD_unknown &&
!isOpenMPNestingTeamsDirective(CurrentRegion) &&
CurrentRegion != OMPD_cancellation_point &&
- CurrentRegion != OMPD_cancel)
+ CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
return false;
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
@@ -4066,7 +4514,12 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ParentRegion == OMPD_distribute_parallel_for ||
ParentRegion == OMPD_teams_distribute_parallel_for ||
ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
- (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
+ (CancelRegion == OMPD_taskgroup &&
+ (ParentRegion == OMPD_task ||
+ (SemaRef.getLangOpts().OpenMP >= 50 &&
+ (ParentRegion == OMPD_taskloop ||
+ ParentRegion == OMPD_master_taskloop ||
+ ParentRegion == OMPD_parallel_master_taskloop)))) ||
(CancelRegion == OMPD_sections &&
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
ParentRegion == OMPD_parallel_sections)));
@@ -4150,6 +4603,17 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ParentRegion != OMPD_target);
OrphanSeen = ParentRegion == OMPD_unknown;
Recommend = ShouldBeInTargetRegion;
+ } else if (CurrentRegion == OMPD_scan) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A scan region may only be closely nested inside a worksharing-loop,
+ // simd, worksharing-loop SIMD, parallel worksharing-loop, or parallel
+ // worksharing-loop SIMD region.
+ NestingProhibited =
+ SemaRef.LangOpts.OpenMP < 50 ||
+ (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
+ ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
+ ParentRegion != OMPD_parallel_for_simd);
+ OrphanSeen = ParentRegion == OMPD_unknown;
+ Recommend = ShouldBeInLoopSimdRegion;
}
if (!NestingProhibited &&
!isOpenMPTargetExecutionDirective(CurrentRegion) &&
@@ -4216,7 +4680,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
bool ErrorFound = false;
unsigned NamedModifiersNumber = 0;
llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
- FoundNameModifiers.resize(unsigned(OMPD_unknown) + 1);
+ FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
SmallVector<SourceLocation, 4> NameModifierLoc;
for (const OMPClause *C : Clauses) {
if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
@@ -4353,6 +4817,30 @@ static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
}
+namespace {
+/// Checks whether an allocator referenced in a target region is listed in a
+/// uses_allocators clause and is therefore allowed to be used there.
+class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
+ DSAStackTy *S = nullptr;
+
+public:
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ return S->isUsesAllocatorsDecl(E->getDecl())
+ .getValueOr(
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
+ }
+ bool VisitStmt(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+ explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
+};
+} // namespace
+
static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
ArrayRef<OMPClause *> Clauses) {
assert(!S.CurContext->isDependentContext() &&
@@ -4421,6 +4909,22 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
}
for (OMPClause *C : AllocateRange) {
auto *AC = cast<OMPAllocateClause>(C);
+ if (S.getLangOpts().OpenMP >= 50 &&
+ !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
+ isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
+ AC->getAllocator()) {
+ Expr *Allocator = AC->getAllocator();
+ // OpenMP, 2.12.5 target Construct
+ // Memory allocators that do not appear in a uses_allocators clause cannot
+ // appear as an allocator in an allocate clause or be used in the target
+ // region unless a requires directive with the dynamic_allocators clause
+ // is present in the same compilation unit.
+ AllocatorChecker Checker(Stack);
+ if (Checker.Visit(Allocator))
+ S.Diag(Allocator->getExprLoc(),
+ diag::err_omp_allocator_not_in_uses_allocators)
+ << Allocator->getSourceRange();
+ }
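For illustration (a sketch only, not from the patch; names are made up and the treatment of predefined allocators may differ), the diagnostic above is aimed at code such as:

  omp_allocator_handle_t my_alloc =
      omp_init_allocator(omp_default_mem_space, 0, NULL);
  int x = 0;
  // error: 'my_alloc' is not listed in a uses_allocators clause
  #pragma omp target firstprivate(x) allocate(my_alloc: x)
  { x += 1; }

Listing the allocator in a uses_allocators clause on the target directive, or providing '#pragma omp requires dynamic_allocators', avoids the diagnostic.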
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
getAllocatorKind(S, Stack, AC->getAllocator());
// OpenMP, 2.11.4 allocate Clause, Restrictions.
@@ -4513,6 +5017,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (E)
ImplicitFirstprivates.emplace_back(E);
}
+ // OpenMP 5.0, 2.10.1 task Construct
+ // [detach clause]... The event-handle will be considered as if it was
+ // specified on a firstprivate clause.
+ if (auto *DC = dyn_cast<OMPDetachClause>(C))
+ ImplicitFirstprivates.push_back(DC->getEventHandler());
}
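A short example of the rule quoted above (hypothetical names, not from the patch):

  omp_event_handle_t evt;
  #pragma omp task detach(evt)   // 'evt' is captured as if it appeared in
  { do_work(); }                 // firstprivate(evt) on the task
  omp_fulfill_event(evt);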
if (!ImplicitFirstprivates.empty()) {
if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
@@ -4648,6 +5157,16 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
"No associated statement allowed for 'omp flush' directive");
Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
break;
+ case OMPD_depobj:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp depobj' directive");
+ Res = ActOnOpenMPDepobjDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
+ case OMPD_scan:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp scan' directive");
+ Res = ActOnOpenMPScanDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
case OMPD_ordered:
Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
@@ -4848,15 +5367,20 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_declare_simd:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
ErrorFound = Res.isInvalid() || ErrorFound;
- // Check variables in the clauses if default(none) was specified.
- if (DSAStack->getDefaultDSA() == DSA_none) {
+ // Check variables in the clauses if default(none) or
+ // default(firstprivate) was specified.
+ if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate) {
DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
for (OMPClause *C : Clauses) {
switch (C->getClauseKind()) {
@@ -4876,6 +5400,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
break;
continue;
case OMPC_schedule:
+ case OMPC_detach:
break;
case OMPC_grainsize:
case OMPC_num_tasks:
@@ -4915,6 +5440,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -4924,11 +5453,19 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
continue;
case OMPC_allocator:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_threadprivate:
case OMPC_uniform:
case OMPC_unknown:
@@ -4939,6 +5476,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
+ default:
llvm_unreachable("Unexpected clause");
}
for (Stmt *CC : C->children()) {
@@ -4946,14 +5484,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
DSAChecker.Visit(CC);
}
}
- for (auto &P : DSAChecker.getVarsWithInheritedDSA())
+ for (const auto &P : DSAChecker.getVarsWithInheritedDSA())
VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
}
for (const auto &P : VarsWithInheritedDSA) {
if (P.getFirst()->isImplicit() || isa<OMPCapturedExprDecl>(P.getFirst()))
continue;
ErrorFound = true;
- if (DSAStack->getDefaultDSA() == DSA_none) {
+ if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate) {
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
@@ -4973,12 +5512,6 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (ErrorFound)
return StmtError();
- if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
- Res.getAs<OMPExecutableDirective>()
- ->getStructuredBlock()
- ->setIsOMPStructuredBlock(true);
- }
-
if (!CurContext->isDependentContext() &&
isOpenMPTargetExecutionDirective(Kind) &&
!(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
@@ -5166,7 +5699,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
E->containsUnexpandedParameterPack())
continue;
(void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
- PVD->getOriginalType());
+ PVD->getOriginalType(),
+ /*IsDeclareSimd=*/true);
continue;
}
}
@@ -5186,7 +5720,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
continue;
(void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
- E->getType());
+ E->getType(), /*IsDeclareSimd=*/true);
continue;
}
Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
@@ -5264,9 +5798,170 @@ static void setPrototype(Sema &S, FunctionDecl *FD, FunctionDecl *FDWithProto,
FD->setParams(Params);
}
+Sema::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI)
+ : TI(&TI), NameSuffix(TI.getMangledName()) {}
+
+FunctionDecl *
+Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
+ Declarator &D) {
+ IdentifierInfo *BaseII = D.getIdentifier();
+ LookupResult Lookup(*this, DeclarationName(BaseII), D.getIdentifierLoc(),
+ LookupOrdinaryName);
+ LookupParsedName(Lookup, S, &D.getCXXScopeSpec());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType FType = TInfo->getType();
+
+ bool IsConstexpr = D.getDeclSpec().getConstexprSpecifier() == CSK_constexpr;
+ bool IsConsteval = D.getDeclSpec().getConstexprSpecifier() == CSK_consteval;
+
+ FunctionDecl *BaseFD = nullptr;
+ for (auto *Candidate : Lookup) {
+ auto *UDecl = dyn_cast<FunctionDecl>(Candidate->getUnderlyingDecl());
+ if (!UDecl)
+ continue;
+
+ // Don't specialize constexpr/consteval functions with
+ // non-constexpr/consteval functions.
+ if (UDecl->isConstexpr() && !IsConstexpr)
+ continue;
+ if (UDecl->isConsteval() && !IsConsteval)
+ continue;
+
+ QualType NewType = Context.mergeFunctionTypes(
+ FType, UDecl->getType(), /* OfBlockPointer */ false,
+ /* Unqualified */ false, /* AllowCXX */ true);
+ if (NewType.isNull())
+ continue;
+
+ // Found a base!
+ BaseFD = UDecl;
+ break;
+ }
+ if (!BaseFD) {
+ BaseFD = cast<FunctionDecl>(ActOnDeclarator(S, D));
+ BaseFD->setImplicit(true);
+ }
+
+ OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
+ std::string MangledName;
+ MangledName += D.getIdentifier()->getName();
+ MangledName += getOpenMPVariantManglingSeparatorStr();
+ MangledName += DVScope.NameSuffix;
+ IdentifierInfo &VariantII = Context.Idents.get(MangledName);
+
+ VariantII.setMangledOpenMPVariantName(true);
+ D.SetIdentifier(&VariantII, D.getBeginLoc());
+ return BaseFD;
+}
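A sketch of the construct this handles, assuming OpenMP 5.0 'begin/end declare variant' syntax (names are illustrative, not from the patch):

  int compute(void);                // base declaration

  #pragma omp begin declare variant match(device = {kind(cpu)})
  int compute(void) { return 1; }   // parsed under a mangled variant name here and
  #pragma omp end declare variant   // attached to the base 'compute' via an implicit
                                    // 'declare variant' attribute when the definition
                                    // is finished (see the next function)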
+
+void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+ FunctionDecl *FD, FunctionDecl *BaseFD) {
+ // Do not mark the function as used, to prevent its emission if this is the
+ // only place where it is referenced.
+ EnterExpressionEvaluationContext Unevaluated(
+ *this, Sema::ExpressionEvaluationContext::Unevaluated);
+
+ Expr *VariantFuncRef = DeclRefExpr::Create(
+ Context, NestedNameSpecifierLoc(), SourceLocation(), FD,
+ /* RefersToEnclosingVariableOrCapture */ false,
+ /* NameLoc */ FD->getLocation(), FD->getType(), ExprValueKind::VK_RValue);
+
+ OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
+ auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
+ Context, VariantFuncRef, DVScope.TI);
+ BaseFD->addAttr(OMPDeclareVariantA);
+}
+
+ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
+ SourceLocation LParenLoc,
+ MultiExprArg ArgExprs,
+ SourceLocation RParenLoc, Expr *ExecConfig) {
+ // The common case is a regular call we do not want to specialize at all. Try
+ // to make that case fast by bailing early.
+ CallExpr *CE = dyn_cast<CallExpr>(Call.get());
+ if (!CE)
+ return Call;
+
+ FunctionDecl *CalleeFnDecl = CE->getDirectCallee();
+ if (!CalleeFnDecl)
+ return Call;
+
+ if (!CalleeFnDecl->hasAttr<OMPDeclareVariantAttr>())
+ return Call;
+
+ ASTContext &Context = getASTContext();
+ OMPContext OMPCtx(getLangOpts().OpenMPIsDevice,
+ Context.getTargetInfo().getTriple());
+
+ SmallVector<Expr *, 4> Exprs;
+ SmallVector<VariantMatchInfo, 4> VMIs;
+ while (CalleeFnDecl) {
+ for (OMPDeclareVariantAttr *A :
+ CalleeFnDecl->specific_attrs<OMPDeclareVariantAttr>()) {
+ Expr *VariantRef = A->getVariantFuncRef();
+
+ VariantMatchInfo VMI;
+ OMPTraitInfo &TI = A->getTraitInfo();
+ TI.getAsVariantMatchInfo(Context, VMI);
+ if (!isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ false))
+ continue;
+
+ VMIs.push_back(VMI);
+ Exprs.push_back(VariantRef);
+ }
+
+ CalleeFnDecl = CalleeFnDecl->getPreviousDecl();
+ }
+
+ ExprResult NewCall;
+ do {
+ int BestIdx = getBestVariantMatchForContext(VMIs, OMPCtx);
+ if (BestIdx < 0)
+ return Call;
+ Expr *BestExpr = cast<DeclRefExpr>(Exprs[BestIdx]);
+ Decl *BestDecl = cast<DeclRefExpr>(BestExpr)->getDecl();
+
+ {
+ // Try to build a (member) call expression for the current best applicable
+ // variant expression. We allow this to fail in which case we continue
+ // with the next best variant expression. The fail case is part of the
+ // implementation-defined behavior in the OpenMP standard where it describes
+ // the allowed differences in the function prototypes: "Any differences
+ // that the specific OpenMP context requires in the prototype of the
+ // variant from the base function prototype are implementation defined."
+ // This wording is there to allow the specialized variant to have a
+ // different type than the base function. This is intended and OK, but if
+ // we cannot create a call, the difference is not within the "implementation
+ // defined range" we allow.
+ Sema::TentativeAnalysisScope Trap(*this);
+
+ if (auto *SpecializedMethod = dyn_cast<CXXMethodDecl>(BestDecl)) {
+ auto *MemberCall = dyn_cast<CXXMemberCallExpr>(CE);
+ BestExpr = MemberExpr::CreateImplicit(
+ Context, MemberCall->getImplicitObjectArgument(),
+ /* IsArrow */ false, SpecializedMethod, Context.BoundMemberTy,
+ MemberCall->getValueKind(), MemberCall->getObjectKind());
+ }
+ NewCall = BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig);
+ if (NewCall.isUsable())
+ break;
+ }
+
+ VMIs.erase(VMIs.begin() + BestIdx);
+ Exprs.erase(Exprs.begin() + BestIdx);
+ } while (!VMIs.empty());
+
+ if (!NewCall.isUsable())
+ return Call;
+ return PseudoObjectExpr::Create(Context, CE, {NewCall.get()}, 0);
+}
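For illustration (not from the patch; function names are made up), the kind of call this rewrites:

  int base_gpu(int x);
  #pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
  int base(int x);

  int use(void) { return base(1); }   // the call is wrapped in a PseudoObjectExpr
                                      // whose semantic form invokes base_gpu when
                                      // the device context matches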
+
Optional<std::pair<FunctionDecl *, Expr *>>
Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
- Expr *VariantRef, SourceRange SR) {
+ Expr *VariantRef, OMPTraitInfo &TI,
+ SourceRange SR) {
if (!DG || DG.get().isNull())
return None;
@@ -5319,12 +6014,41 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return None;
}
+ auto ShouldDelayChecks = [](Expr *&E, bool) {
+ return E && (E->isTypeDependent() || E->isValueDependent() ||
+ E->containsUnexpandedParameterPack() ||
+ E->isInstantiationDependent());
+ };
// Do not check templates, wait until instantiation.
- if (VariantRef->isTypeDependent() || VariantRef->isValueDependent() ||
- VariantRef->containsUnexpandedParameterPack() ||
- VariantRef->isInstantiationDependent() || FD->isDependentContext())
+ if (FD->isDependentContext() || ShouldDelayChecks(VariantRef, false) ||
+ TI.anyScoreOrCondition(ShouldDelayChecks))
return std::make_pair(FD, VariantRef);
+ // Deal with non-constant score and user condition expressions.
+ auto HandleNonConstantScoresAndConditions = [this](Expr *&E,
+ bool IsScore) -> bool {
+ llvm::APSInt Result;
+ if (!E || E->isIntegerConstantExpr(Result, Context))
+ return false;
+
+ if (IsScore) {
+ // We warn on non-constant scores and pretend they were not present.
+ Diag(E->getExprLoc(), diag::warn_omp_declare_variant_score_not_constant)
+ << E;
+ E = nullptr;
+ } else {
+ // We could replace a non-constant user condition with "false" but we
+ // will soon need to handle these anyway for the dynamic version of
+ // OpenMP context selectors.
+ Diag(E->getExprLoc(),
+ diag::err_omp_declare_variant_user_condition_not_constant)
+ << E;
+ }
+ return true;
+ };
+ if (TI.anyScoreOrCondition(HandleNonConstantScoresAndConditions))
+ return None;
+
// Convert VariantRef expression to the type of the original function to
// resolve possible conflicts.
ExprResult VariantRefCast;
@@ -5355,7 +6079,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
ImplicitConversionSequence ICS =
TryImplicitConversion(VariantRef, FnPtrType.getUnqualifiedType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false);
@@ -5497,94 +6221,13 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return std::make_pair(FD, cast<Expr>(DRE));
}
-void Sema::ActOnOpenMPDeclareVariantDirective(
- FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
- ArrayRef<OMPCtxSelectorData> Data) {
- if (Data.empty())
- return;
- SmallVector<Expr *, 4> CtxScores;
- SmallVector<unsigned, 4> CtxSets;
- SmallVector<unsigned, 4> Ctxs;
- SmallVector<StringRef, 4> ImplVendors, DeviceKinds;
- bool IsError = false;
- for (const OMPCtxSelectorData &D : Data) {
- OpenMPContextSelectorSetKind CtxSet = D.CtxSet;
- OpenMPContextSelectorKind Ctx = D.Ctx;
- if (CtxSet == OMP_CTX_SET_unknown || Ctx == OMP_CTX_unknown)
- return;
- Expr *Score = nullptr;
- if (D.Score.isUsable()) {
- Score = D.Score.get();
- if (!Score->isTypeDependent() && !Score->isValueDependent() &&
- !Score->isInstantiationDependent() &&
- !Score->containsUnexpandedParameterPack()) {
- Score =
- PerformOpenMPImplicitIntegerConversion(Score->getExprLoc(), Score)
- .get();
- if (Score)
- Score = VerifyIntegerConstantExpression(Score).get();
- }
- } else {
- // OpenMP 5.0, 2.3.3 Matching and Scoring Context Selectors.
- // The kind, arch, and isa selectors are given the values 2^l, 2^(l+1) and
- // 2^(l+2), respectively, where l is the number of traits in the construct
- // set.
- // TODO: implement correct logic for isa and arch traits.
- // TODO: take the construct context set into account when it is
- // implemented.
- int L = 0; // Currently set the number of traits in construct set to 0,
- // since the construct trait set in not supported yet.
- if (CtxSet == OMP_CTX_SET_device && Ctx == OMP_CTX_kind)
- Score = ActOnIntegerConstant(SourceLocation(), std::pow(2, L)).get();
- else
- Score = ActOnIntegerConstant(SourceLocation(), 0).get();
- }
- switch (Ctx) {
- case OMP_CTX_vendor:
- assert(CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- ImplVendors.append(D.Names.begin(), D.Names.end());
- break;
- case OMP_CTX_kind:
- assert(CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- DeviceKinds.append(D.Names.begin(), D.Names.end());
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- IsError = IsError || !Score;
- CtxSets.push_back(CtxSet);
- Ctxs.push_back(Ctx);
- CtxScores.push_back(Score);
- }
- if (!IsError) {
- auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
- Context, VariantRef, CtxScores.begin(), CtxScores.size(),
- CtxSets.begin(), CtxSets.size(), Ctxs.begin(), Ctxs.size(),
- ImplVendors.begin(), ImplVendors.size(), DeviceKinds.begin(),
- DeviceKinds.size(), SR);
- FD->addAttr(NewAttr);
- }
-}
-
-void Sema::markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
- FunctionDecl *Func,
- bool MightBeOdrUse) {
- assert(LangOpts.OpenMP && "Expected OpenMP mode.");
-
- if (!Func->isDependentContext() && Func->hasAttrs()) {
- for (OMPDeclareVariantAttr *A :
- Func->specific_attrs<OMPDeclareVariantAttr>()) {
- // TODO: add checks for active OpenMP context where possible.
- Expr *VariantRef = A->getVariantFuncRef();
- auto *DRE = cast<DeclRefExpr>(VariantRef->IgnoreParenImpCasts());
- auto *F = cast<FunctionDecl>(DRE->getDecl());
- if (!F->isDefined() && F->isTemplateInstantiation())
- InstantiateFunctionDefinition(Loc, F->getFirstDecl());
- MarkFunctionReferenced(Loc, F, MightBeOdrUse);
- }
- }
+void Sema::ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD,
+ Expr *VariantRef,
+ OMPTraitInfo &TI,
+ SourceRange SR) {
+ auto *NewAttr =
+ OMPDeclareVariantAttr::CreateImplicit(Context, VariantRef, &TI, SR);
+ FD->addAttr(NewAttr);
}
StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
@@ -5605,6 +6248,7 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(),
DSAStack->isCancelRegion());
}
@@ -6300,8 +6944,8 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
static ExprResult
tryBuildCapture(Sema &SemaRef, Expr *Capture,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
- if (SemaRef.CurContext->isDependentContext())
- return ExprResult(Capture);
+ if (SemaRef.CurContext->isDependentContext() || Capture->containsErrors())
+ return Capture;
if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
return SemaRef.PerformImplicitConversion(
Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
@@ -6315,221 +6959,344 @@ tryBuildCapture(Sema &SemaRef, Expr *Capture,
return Res;
}
-/// Build the expression to calculate the number of iterations.
-Expr *OpenMPIterationSpaceChecker::buildNumIterations(
- Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
- llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
- ExprResult Diff;
- QualType VarType = LCDecl->getType().getNonReferenceType();
- if (VarType->isIntegerType() || VarType->isPointerType() ||
- SemaRef.getLangOpts().CPlusPlus) {
- Expr *LBVal = LB;
- Expr *UBVal = UB;
- // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
- // max(LB(MinVal), LB(MaxVal))
- if (InitDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
- if (!IS.MinValue || !IS.MaxValue)
- return nullptr;
- // OuterVar = Min
- ExprResult MinValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
- if (!MinValue.isUsable())
- return nullptr;
-
- ExprResult LBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MinValue.get());
- if (!LBMinVal.isUsable())
- return nullptr;
- // OuterVar = Min, LBVal
- LBMinVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMinVal.get(), LBVal);
- if (!LBMinVal.isUsable())
- return nullptr;
- // (OuterVar = Min, LBVal)
- LBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMinVal.get());
- if (!LBMinVal.isUsable())
- return nullptr;
-
- // OuterVar = Max
- ExprResult MaxValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
- if (!MaxValue.isUsable())
- return nullptr;
-
- ExprResult LBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MaxValue.get());
- if (!LBMaxVal.isUsable())
- return nullptr;
- // OuterVar = Max, LBVal
- LBMaxVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMaxVal.get(), LBVal);
- if (!LBMaxVal.isUsable())
- return nullptr;
- // (OuterVar = Max, LBVal)
- LBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMaxVal.get());
- if (!LBMaxVal.isUsable())
- return nullptr;
-
- Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
- Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
- if (!LBMin || !LBMax)
- return nullptr;
- // LB(MinVal) < LB(MaxVal)
- ExprResult MinLessMaxRes =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_LT, LBMin, LBMax);
- if (!MinLessMaxRes.isUsable())
- return nullptr;
- Expr *MinLessMax =
- tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
- if (!MinLessMax)
- return nullptr;
- if (TestIsLessOp.getValue()) {
- // LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
- // LB(MaxVal))
- ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
- MinLessMax, LBMin, LBMax);
- if (!MinLB.isUsable())
- return nullptr;
- LBVal = MinLB.get();
- } else {
- // LB(MinVal) < LB(MaxVal) ? LB(MaxVal) : LB(MinVal) - max(LB(MinVal),
- // LB(MaxVal))
- ExprResult MaxLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
- MinLessMax, LBMax, LBMin);
- if (!MaxLB.isUsable())
- return nullptr;
- LBVal = MaxLB.get();
- }
+/// Calculate the number of iterations, converting to unsigned if the number
+/// of iterations may not fit in the original (signed) type.
+static Expr *
+calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
+ Expr *Lower, Expr *Upper, Expr *Step, QualType LCTy,
+ bool TestIsStrictOp, bool RoundToStep,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ if (!NewStep.isUsable())
+ return nullptr;
+ llvm::APSInt LRes, URes, SRes;
+ bool IsLowerConst = Lower->isIntegerConstantExpr(LRes, SemaRef.Context);
+ bool IsStepConst = Step->isIntegerConstantExpr(SRes, SemaRef.Context);
+ bool NoNeedToConvert = IsLowerConst && !RoundToStep &&
+ ((!TestIsStrictOp && LRes.isNonNegative()) ||
+ (TestIsStrictOp && LRes.isStrictlyPositive()));
+ bool NeedToReorganize = false;
+ // Check if any subexpressions in Lower - Step [+ 1] lead to overflow.
+ if (!NoNeedToConvert && IsLowerConst &&
+ (TestIsStrictOp || (RoundToStep && IsStepConst))) {
+ NoNeedToConvert = true;
+ if (RoundToStep) {
+ unsigned BW = LRes.getBitWidth() > SRes.getBitWidth()
+ ? LRes.getBitWidth()
+ : SRes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ SRes = SRes.extend(BW + 1);
+ SRes.setIsSigned(true);
+ LRes -= SRes;
+ NoNeedToConvert = LRes.trunc(BW).extend(BW + 1) == LRes;
+ LRes = LRes.trunc(BW);
+ }
+ if (TestIsStrictOp) {
+ unsigned BW = LRes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ ++LRes;
+ NoNeedToConvert =
+ NoNeedToConvert && LRes.trunc(BW).extend(BW + 1) == LRes;
+ // truncate to the original bitwidth.
+ LRes = LRes.trunc(BW);
+ }
+ NeedToReorganize = NoNeedToConvert;
+ }
+ bool IsUpperConst = Upper->isIntegerConstantExpr(URes, SemaRef.Context);
+ if (NoNeedToConvert && IsLowerConst && IsUpperConst &&
+ (!RoundToStep || IsStepConst)) {
+ unsigned BW = LRes.getBitWidth() > URes.getBitWidth() ? LRes.getBitWidth()
+ : URes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ URes = URes.extend(BW + 1);
+ URes.setIsSigned(true);
+ URes -= LRes;
+ NoNeedToConvert = URes.trunc(BW).extend(BW + 1) == URes;
+ NeedToReorganize = NoNeedToConvert;
+ }
+ // If the bounds are not constant, or (Lower - Step [+ 1]) is not constant or
+ // is negative (so Upper - (Lower - Step [+ 1]) may overflow), promote the
+ // calculation to unsigned.
+ if ((!NoNeedToConvert || (LRes.isNegative() && !IsUpperConst)) &&
+ !LCTy->isDependentType() && LCTy->isIntegerType()) {
+ QualType LowerTy = Lower->getType();
+ QualType UpperTy = Upper->getType();
+ uint64_t LowerSize = SemaRef.Context.getTypeSize(LowerTy);
+ uint64_t UpperSize = SemaRef.Context.getTypeSize(UpperTy);
+ if ((LowerSize <= UpperSize && UpperTy->hasSignedIntegerRepresentation()) ||
+ (LowerSize > UpperSize && LowerTy->hasSignedIntegerRepresentation())) {
+ QualType CastType = SemaRef.Context.getIntTypeForBitwidth(
+ LowerSize > UpperSize ? LowerSize : UpperSize, /*Signed=*/0);
+ Upper =
+ SemaRef
+ .PerformImplicitConversion(
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Upper).get(),
+ CastType, Sema::AA_Converting)
+ .get();
+ Lower = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Lower).get();
+ NewStep = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, NewStep.get());
}
- // UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
- // min(UB(MinVal), UB(MaxVal))
- if (CondDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
- if (!IS.MinValue || !IS.MaxValue)
- return nullptr;
- // OuterVar = Min
- ExprResult MinValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
- if (!MinValue.isUsable())
- return nullptr;
-
- ExprResult UBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MinValue.get());
- if (!UBMinVal.isUsable())
- return nullptr;
- // OuterVar = Min, UBVal
- UBMinVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMinVal.get(), UBVal);
- if (!UBMinVal.isUsable())
- return nullptr;
- // (OuterVar = Min, UBVal)
- UBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMinVal.get());
- if (!UBMinVal.isUsable())
- return nullptr;
+ }
+ if (!Lower || !Upper || NewStep.isInvalid())
+ return nullptr;
- // OuterVar = Max
- ExprResult MaxValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
- if (!MaxValue.isUsable())
+ ExprResult Diff;
+ // If reorganization is needed, calculate in the form
+ // Upper - (Lower - Step [+ 1]).
+ if (NeedToReorganize) {
+ Diff = Lower;
+
+ if (RoundToStep) {
+ // Lower - Step
+ Diff =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
return nullptr;
+ }
- ExprResult UBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MaxValue.get());
- if (!UBMaxVal.isUsable())
- return nullptr;
- // OuterVar = Max, UBVal
- UBMaxVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMaxVal.get(), UBVal);
- if (!UBMaxVal.isUsable())
- return nullptr;
- // (OuterVar = Max, UBVal)
- UBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMaxVal.get());
- if (!UBMaxVal.isUsable())
- return nullptr;
+ // Lower - Step [+ 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Add, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
- Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
- Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
- if (!UBMin || !UBMax)
- return nullptr;
- // UB(MinVal) > UB(MaxVal)
- ExprResult MinGreaterMaxRes =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
- if (!MinGreaterMaxRes.isUsable())
- return nullptr;
- Expr *MinGreaterMax =
- tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
- if (!MinGreaterMax)
- return nullptr;
- if (TestIsLessOp.getValue()) {
- // UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
- // UB(MaxVal))
- ExprResult MaxUB = SemaRef.ActOnConditionalOp(
- DefaultLoc, DefaultLoc, MinGreaterMax, UBMin, UBMax);
- if (!MaxUB.isUsable())
- return nullptr;
- UBVal = MaxUB.get();
- } else {
- // UB(MinVal) > UB(MaxVal) ? UB(MaxVal) : UB(MinVal) - min(UB(MinVal),
- // UB(MaxVal))
- ExprResult MinUB = SemaRef.ActOnConditionalOp(
- DefaultLoc, DefaultLoc, MinGreaterMax, UBMax, UBMin);
- if (!MinUB.isUsable())
- return nullptr;
- UBVal = MinUB.get();
- }
- }
- // Upper - Lower
- Expr *UBExpr = TestIsLessOp.getValue() ? UBVal : LBVal;
- Expr *LBExpr = TestIsLessOp.getValue() ? LBVal : UBVal;
- Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
- Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
- if (!Upper || !Lower)
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
+ if (!Diff.isUsable())
return nullptr;
+ // Upper - (Lower - Step [+ 1]).
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Diff.get());
+ if (!Diff.isUsable())
+ return nullptr;
+ } else {
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
- if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
+ if (!Diff.isUsable() && LCTy->getAsCXXRecordDecl()) {
// BuildBinOp already emitted error, this one is to point user to upper
// and lower bound, and to tell what is passed to 'operator-'.
SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
<< Upper->getSourceRange() << Lower->getSourceRange();
return nullptr;
}
+
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ if (RoundToStep) {
+ // Upper - Lower [- 1] + Step
+ Diff =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
+ return nullptr;
+ }
}
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return nullptr;
- // Upper - Lower [- 1]
- if (TestIsStrictOp)
- Diff = SemaRef.BuildBinOp(
- S, DefaultLoc, BO_Sub, Diff.get(),
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ // (Upper - Lower [- 1] + Step) / Step or (Upper - Lower) / Step
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return nullptr;
- // Upper - Lower [- 1] + Step
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
- if (!NewStep.isUsable())
- return nullptr;
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
- if (!Diff.isUsable())
+ return Diff.get();
+}
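As a worked illustration of the promotion above (not from the patch):

  // for (int i = INT_MIN; i < INT_MAX; ++i)
  //   trip count = INT_MAX - INT_MIN = 2^32 - 1, which does not fit in 'int';
  //   the upper bound is therefore cast to 'unsigned int' so that
  //   Upper - Lower is evaluated in unsigned arithmetic.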
+
+/// Build the expression to calculate the number of iterations.
+Expr *OpenMPIterationSpaceChecker::buildNumIterations(
+ Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
+ QualType VarType = LCDecl->getType().getNonReferenceType();
+ if (!VarType->isIntegerType() && !VarType->isPointerType() &&
+ !SemaRef.getLangOpts().CPlusPlus)
return nullptr;
+ Expr *LBVal = LB;
+ Expr *UBVal = UB;
+ // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
+ // max(LB(MinVal), LB(MaxVal))
+ if (InitDependOnLC) {
+ const LoopIterationSpace &IS =
+ ResultIterSpaces[ResultIterSpaces.size() - 1 -
+ InitDependOnLC.getValueOr(
+ CondDependOnLC.getValueOr(0))];
+ if (!IS.MinValue || !IS.MaxValue)
+ return nullptr;
+ // OuterVar = Min
+ ExprResult MinValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
+ if (!MinValue.isUsable())
+ return nullptr;
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
+ ExprResult LBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MinValue.get());
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ // OuterVar = Min, LBVal
+ LBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMinVal.get(), LBVal);
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ // (OuterVar = Min, LBVal)
+ LBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMinVal.get());
+ if (!LBMinVal.isUsable())
+ return nullptr;
+
+ // OuterVar = Max
+ ExprResult MaxValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
+ if (!MaxValue.isUsable())
+ return nullptr;
+
+ ExprResult LBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MaxValue.get());
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+ // OuterVar = Max, LBVal
+ LBMaxVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMaxVal.get(), LBVal);
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+ // (OuterVar = Max, LBVal)
+ LBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMaxVal.get());
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+
+ Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
+ Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
+ if (!LBMin || !LBMax)
+ return nullptr;
+ // LB(MinVal) < LB(MaxVal)
+ ExprResult MinLessMaxRes =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_LT, LBMin, LBMax);
+ if (!MinLessMaxRes.isUsable())
+ return nullptr;
+ Expr *MinLessMax =
+ tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
+ if (!MinLessMax)
+ return nullptr;
+ if (TestIsLessOp.getValue()) {
+ // LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
+ // LB(MaxVal))
+ ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
+ MinLessMax, LBMin, LBMax);
+ if (!MinLB.isUsable())
+ return nullptr;
+ LBVal = MinLB.get();
+ } else {
+ // LB(MinVal) < LB(MaxVal) ? LB(MaxVal) : LB(MinVal) - max(LB(MinVal),
+ // LB(MaxVal))
+ ExprResult MaxLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
+ MinLessMax, LBMax, LBMin);
+ if (!MaxLB.isUsable())
+ return nullptr;
+ LBVal = MaxLB.get();
+ }
+ }
+ // UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
+ // min(UB(MinVal), UB(MaxVal))
+ if (CondDependOnLC) {
+ const LoopIterationSpace &IS =
+ ResultIterSpaces[ResultIterSpaces.size() - 1 -
+ InitDependOnLC.getValueOr(
+ CondDependOnLC.getValueOr(0))];
+ if (!IS.MinValue || !IS.MaxValue)
+ return nullptr;
+ // OuterVar = Min
+ ExprResult MinValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
+ if (!MinValue.isUsable())
+ return nullptr;
+
+ ExprResult UBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MinValue.get());
+ if (!UBMinVal.isUsable())
+ return nullptr;
+ // OuterVar = Min, UBVal
+ UBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMinVal.get(), UBVal);
+ if (!UBMinVal.isUsable())
+ return nullptr;
+ // (OuterVar = Min, UBVal)
+ UBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMinVal.get());
+ if (!UBMinVal.isUsable())
+ return nullptr;
+
+ // OuterVar = Max
+ ExprResult MaxValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
+ if (!MaxValue.isUsable())
+ return nullptr;
+
+ ExprResult UBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MaxValue.get());
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+ // OuterVar = Max, UBVal
+ UBMaxVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMaxVal.get(), UBVal);
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+ // (OuterVar = Max, UBVal)
+ UBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMaxVal.get());
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+
+ Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
+ Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
+ if (!UBMin || !UBMax)
+ return nullptr;
+ // UB(MinVal) > UB(MaxVal)
+ ExprResult MinGreaterMaxRes =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
+ if (!MinGreaterMaxRes.isUsable())
+ return nullptr;
+ Expr *MinGreaterMax =
+ tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
+ if (!MinGreaterMax)
+ return nullptr;
+ if (TestIsLessOp.getValue()) {
+ // UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
+ // UB(MaxVal))
+ ExprResult MaxUB = SemaRef.ActOnConditionalOp(
+ DefaultLoc, DefaultLoc, MinGreaterMax, UBMin, UBMax);
+ if (!MaxUB.isUsable())
+ return nullptr;
+ UBVal = MaxUB.get();
+ } else {
+ // UB(MinVal) > UB(MaxVal) ? UB(MaxVal) : UB(MinVal) - min(UB(MinVal),
+ // UB(MaxVal))
+ ExprResult MinUB = SemaRef.ActOnConditionalOp(
+ DefaultLoc, DefaultLoc, MinGreaterMax, UBMax, UBMin);
+ if (!MinUB.isUsable())
+ return nullptr;
+ UBVal = MinUB.get();
+ }
+ }
+ Expr *UBExpr = TestIsLessOp.getValue() ? UBVal : LBVal;
+ Expr *LBExpr = TestIsLessOp.getValue() ? LBVal : UBVal;
+ Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
+ Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
+ if (!Upper || !Lower)
return nullptr;
- // (Upper - Lower [- 1] + Step) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ ExprResult Diff =
+ calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
+ TestIsStrictOp, /*RoundToStep=*/true, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -6603,55 +7370,37 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
MaxExpr = Upper;
// Build minimum/maximum value based on number of iterations.
- ExprResult Diff;
QualType VarType = LCDecl->getType().getNonReferenceType();
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
+ ExprResult Diff =
+ calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
+ TestIsStrictOp, /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Upper - Lower [- 1]
- if (TestIsStrictOp)
- Diff = SemaRef.BuildBinOp(
- S, DefaultLoc, BO_Sub, Diff.get(),
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ // ((Upper - Lower [- 1]) / Step) * Step
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Upper - Lower [- 1] + Step
ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
if (!NewStep.isUsable())
return std::make_pair(nullptr, nullptr);
-
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
- // (Upper - Lower [- 1]) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // ((Upper - Lower [- 1]) / Step) * Step
// Parentheses (for dumping/debugging purposes only).
Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
- // Convert to the original type or ptrdiff_t, if original type is pointer.
- if (!VarType->isAnyPointerType() &&
- !SemaRef.Context.hasSameType(Diff.get()->getType(), VarType)) {
- Diff = SemaRef.PerformImplicitConversion(
- Diff.get(), VarType, Sema::AA_Converting, /*AllowExplicit=*/true);
- } else if (VarType->isAnyPointerType() &&
- !SemaRef.Context.hasSameType(
- Diff.get()->getType(),
- SemaRef.Context.getUnsignedPointerDiffType())) {
+ // Convert to the ptrdiff_t, if original type is pointer.
+ if (VarType->isAnyPointerType() &&
+ !SemaRef.Context.hasSameType(
+ Diff.get()->getType(),
+ SemaRef.Context.getUnsignedPointerDiffType())) {
Diff = SemaRef.PerformImplicitConversion(
Diff.get(), SemaRef.Context.getUnsignedPointerDiffType(),
Sema::AA_Converting, /*AllowExplicit=*/true);
@@ -6659,33 +7408,43 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
if (TestIsLessOp.getValue()) {
// MinExpr = Lower;
// MaxExpr = Lower + (((Upper - Lower [- 1]) / Step) * Step)
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Lower, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue*/ false);
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Add,
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Lower).get(),
+ Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- MaxExpr = Diff.get();
} else {
// MaxExpr = Upper;
// MinExpr = Upper - (((Upper - Lower [- 1]) / Step) * Step)
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue*/ false);
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub,
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Upper).get(),
+ Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- MinExpr = Diff.get();
}
+ // Convert to the original type.
+ if (SemaRef.Context.hasSameType(Diff.get()->getType(), VarType))
+ Diff = SemaRef.PerformImplicitConversion(Diff.get(), VarType,
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (!Diff.isUsable())
+ return std::make_pair(nullptr, nullptr);
+
+ Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue=*/false);
+ if (!Diff.isUsable())
+ return std::make_pair(nullptr, nullptr);
+
+ if (TestIsLessOp.getValue())
+ MaxExpr = Diff.get();
+ else
+ MinExpr = Diff.get();
+
return std::make_pair(MinExpr, MaxExpr);
}
@@ -6791,44 +7550,23 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
if (!Cnt)
return nullptr;
}
- ExprResult Diff;
QualType VarType = LCDecl->getType().getNonReferenceType();
- if (VarType->isIntegerType() || VarType->isPointerType() ||
- SemaRef.getLangOpts().CPlusPlus) {
- // Upper - Lower
- Expr *Upper = TestIsLessOp.getValue()
- ? Cnt
- : tryBuildCapture(SemaRef, UB, Captures).get();
- Expr *Lower = TestIsLessOp.getValue()
- ? tryBuildCapture(SemaRef, LB, Captures).get()
- : Cnt;
- if (!Upper || !Lower)
- return nullptr;
-
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
-
- if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
- // BuildBinOp already emitted error, this one is to point user to upper
- // and lower bound, and to tell what is passed to 'operator-'.
- SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
- << Upper->getSourceRange() << Lower->getSourceRange();
- return nullptr;
- }
- }
-
- if (!Diff.isUsable())
+ if (!VarType->isIntegerType() && !VarType->isPointerType() &&
+ !SemaRef.getLangOpts().CPlusPlus)
return nullptr;
-
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
+ // Upper - Lower
+ Expr *Upper = TestIsLessOp.getValue()
+ ? Cnt
+ : tryBuildCapture(SemaRef, LB, Captures).get();
+ Expr *Lower = TestIsLessOp.getValue()
+ ? tryBuildCapture(SemaRef, LB, Captures).get()
+ : Cnt;
+ if (!Upper || !Lower)
return nullptr;
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
- if (!NewStep.isUsable())
- return nullptr;
- // (Upper - Lower) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ ExprResult Diff = calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper,
+ Step, VarType, /*TestIsStrictOp=*/false,
+ /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -8088,8 +8826,9 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
}
setFunctionHasBranchProtectedScope();
- return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt, B, DSAStack->isCancelRegion());
+ return OMPForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPForSimdDirective(
@@ -8166,6 +8905,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(),
DSAStack->isCancelRegion());
}
@@ -8326,9 +9066,9 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
}
setFunctionHasBranchProtectedScope();
- return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ return OMPParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
@@ -8392,8 +9132,9 @@ Sema::ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
- return OMPParallelMasterDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPParallelMasterDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef());
}
StmtResult
@@ -8432,7 +9173,31 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPParallelSectionsDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt, DSAStack->isCancelRegion());
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
+}
+
+/// The detach and mergeable clauses are mutually exclusive; check for that.
+static bool checkDetachMergeableClauses(Sema &S,
+ ArrayRef<OMPClause *> Clauses) {
+ const OMPClause *PrevClause = nullptr;
+ bool ErrorFound = false;
+ for (const OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_detach ||
+ C->getClauseKind() == OMPC_mergeable) {
+ if (!PrevClause) {
+ PrevClause = C;
+ } else if (PrevClause->getClauseKind() != C->getClauseKind()) {
+ S.Diag(C->getBeginLoc(), diag::err_omp_clauses_mutually_exclusive)
+ << getOpenMPClauseName(C->getClauseKind())
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ S.Diag(PrevClause->getBeginLoc(), diag::note_omp_previous_clause)
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ ErrorFound = true;
+ }
+ }
+ }
+ return ErrorFound;
}
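An example of the rejected combination (illustrative, not from the patch):

  omp_event_handle_t evt;
  #pragma omp task detach(evt) mergeable   // error: 'mergeable' and 'detach' are
  { do_work(); }                           // mutually exclusive (OpenMP 5.0, 2.10.1)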
StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
@@ -8441,6 +9206,12 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
+ // OpenMP 5.0, 2.10.1 task Construct
+ // If a detach clause appears on the directive, then a mergeable clause cannot
+ // appear on the same directive.
+ if (checkDetachMergeableClauses(*this, Clauses))
+ return StmtError();
+
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -8489,10 +9260,94 @@ StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(Clauses.size() <= 1 && "Extra clauses in flush directive");
+ OMPFlushClause *FC = nullptr;
+ OMPClause *OrderClause = nullptr;
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_flush)
+ FC = cast<OMPFlushClause>(C);
+ else
+ OrderClause = C;
+ }
+ OpenMPClauseKind MemOrderKind = OMPC_unknown;
+ SourceLocation MemOrderLoc;
+ for (const OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_acq_rel ||
+ C->getClauseKind() == OMPC_acquire ||
+ C->getClauseKind() == OMPC_release) {
+ if (MemOrderKind != OMPC_unknown) {
+ Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
+ << getOpenMPDirectiveName(OMPD_flush) << 1
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
+ } else {
+ MemOrderKind = C->getClauseKind();
+ MemOrderLoc = C->getBeginLoc();
+ }
+ }
+ }
+ if (FC && OrderClause) {
+ Diag(FC->getLParenLoc(), diag::err_omp_flush_order_clause_and_list)
+ << getOpenMPClauseName(OrderClause->getClauseKind());
+ Diag(OrderClause->getBeginLoc(), diag::note_omp_flush_order_clause_here)
+ << getOpenMPClauseName(OrderClause->getClauseKind());
+ return StmtError();
+ }
return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
}
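A sketch of what the new checks accept and reject, assuming OpenMP 5.0 flush syntax (not from the patch; 'a' and 'b' are placeholders):

  #pragma omp flush acq_rel       // OK: memory-order clause, no flush list
  #pragma omp flush (a, b)        // OK: flush list, no memory-order clause
  #pragma omp flush release (a)   // error: a flush list may not be combined
                                  // with a memory-order clause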
+StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (Clauses.empty()) {
+ Diag(StartLoc, diag::err_omp_depobj_expected);
+ return StmtError();
+ } else if (Clauses[0]->getClauseKind() != OMPC_depobj) {
+ Diag(Clauses[0]->getBeginLoc(), diag::err_omp_depobj_expected);
+ return StmtError();
+ }
+ // Only the depobj expression and one other single clause are allowed.
+ if (Clauses.size() > 2) {
+ Diag(Clauses[2]->getBeginLoc(),
+ diag::err_omp_depobj_single_clause_expected);
+ return StmtError();
+ } else if (Clauses.size() < 2) {
+ Diag(Clauses[0]->getEndLoc(), diag::err_omp_depobj_single_clause_expected);
+ return StmtError();
+ }
+ return OMPDepobjDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
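For illustration (not from the patch; 'obj' and 'x' are placeholders):

  omp_depend_t obj;
  #pragma omp depobj(obj) depend(inout: x)   // exactly two clauses: the depobj
                                             // expression plus one other clause
  #pragma omp depobj(obj) destroy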
+
+StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Check that exactly one clause is specified.
+ if (Clauses.size() != 1) {
+ Diag(Clauses.empty() ? EndLoc : Clauses[1]->getBeginLoc(),
+ diag::err_omp_scan_single_clause_expected);
+ return StmtError();
+ }
+ // Check that the scan directive is used in the scope of the OpenMP loop body.
+ if (Scope *S = DSAStack->getCurScope()) {
+ Scope *ParentS = S->getParent();
+ if (!ParentS || ParentS->getParent() != ParentS->getBreakParent() ||
+ !ParentS->getBreakParent()->isOpenMPLoopScope())
+ return StmtError(Diag(StartLoc, diag::err_omp_orphaned_device_directive)
+ << getOpenMPDirectiveName(OMPD_scan) << 5);
+ }
+ // Check that only one scan directive is used in the same outer region.
+ if (DSAStack->doesParentHasScanDirective()) {
+ Diag(StartLoc, diag::err_omp_several_directives_in_region) << "scan";
+ Diag(DSAStack->getParentScanDirectiveLoc(),
+ diag::note_omp_previous_directive)
+ << "scan";
+ return StmtError();
+ }
+ DSAStack->setParentHasScanDirective(StartLoc);
+ return OMPScanDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
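An inclusive prefix-sum sketch (hypothetical code, OpenMP 5.0) showing where a scan directive must appear: directly in the body of a loop that carries an inscan reduction, with exactly one clause and at most one scan per region:

    void prefix_sums(const int *in, int *out, int n) {
      int sum = 0;
      #pragma omp parallel for reduction(inscan, + : sum)
      for (int i = 0; i < n; ++i) {
        sum += in[i];
        #pragma omp scan inclusive(sum)  // single clause; one scan per loop body
        out[i] = sum;
      }
    }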
StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -8555,13 +9410,29 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation ErrLoc = TC ? TC->getBeginLoc() : StartLoc;
Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
<< (TC != nullptr);
- Diag(Param->getBeginLoc(), diag::note_omp_ordered_param);
+ Diag(Param->getBeginLoc(), diag::note_omp_ordered_param) << 1;
ErrorFound = true;
}
}
if ((!AStmt && !DependFound) || ErrorFound)
return StmtError();
+ // OpenMP 5.0, 2.17.9, ordered Construct, Restrictions.
+ // During execution of an iteration of a worksharing-loop or a loop nest
+ // within a worksharing-loop, simd, or worksharing-loop SIMD region, a thread
+ // must not execute more than one ordered region corresponding to an ordered
+ // construct without a depend clause.
+ if (!DependFound) {
+ if (DSAStack->doesParentHasOrderedDirective()) {
+ Diag(StartLoc, diag::err_omp_several_directives_in_region) << "ordered";
+ Diag(DSAStack->getParentOrderedDirectiveLoc(),
+ diag::note_omp_previous_directive)
+ << "ordered";
+ return StmtError();
+ }
+ DSAStack->setParentHasOrderedDirective(StartLoc);
+ }
+
if (AStmt) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
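A sketch of the restriction quoted above (hypothetical code): within one iteration of an ordered worksharing loop, at most one ordered region without a depend clause may execute:

    void serialize_io(int n) {
      #pragma omp parallel for ordered
      for (int i = 0; i < n; ++i) {
        #pragma omp ordered
        { /* serialized work */ }
        // A second plain '#pragma omp ordered' in the same iteration would now
        // be reported via err_omp_several_directives_in_region.
      }
    }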
@@ -8817,6 +9688,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
+ // Register location of the first atomic directive.
+ DSAStack->addAtomicDirectiveLoc(StartLoc);
if (!AStmt)
return StmtError();
@@ -8828,6 +9701,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
OpenMPClauseKind AtomicKind = OMPC_unknown;
SourceLocation AtomicKindLoc;
+ OpenMPClauseKind MemOrderKind = OMPC_unknown;
+ SourceLocation MemOrderLoc;
for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
C->getClauseKind() == OMPC_update ||
@@ -8835,13 +9710,51 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (AtomicKind != OMPC_unknown) {
Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
- Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
+ Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
<< getOpenMPClauseName(AtomicKind);
} else {
AtomicKind = C->getClauseKind();
AtomicKindLoc = C->getBeginLoc();
}
}
+ if (C->getClauseKind() == OMPC_seq_cst ||
+ C->getClauseKind() == OMPC_acq_rel ||
+ C->getClauseKind() == OMPC_acquire ||
+ C->getClauseKind() == OMPC_release ||
+ C->getClauseKind() == OMPC_relaxed) {
+ if (MemOrderKind != OMPC_unknown) {
+ Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
+ << getOpenMPDirectiveName(OMPD_atomic) << 0
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
+ } else {
+ MemOrderKind = C->getClauseKind();
+ MemOrderLoc = C->getBeginLoc();
+ }
+ }
+ }
+ // OpenMP 5.0, 2.17.7 atomic Construct, Restrictions
+ // If atomic-clause is read then memory-order-clause must not be acq_rel or
+ // release.
+ // If atomic-clause is write then memory-order-clause must not be acq_rel or
+ // acquire.
+ // If atomic-clause is update or not present then memory-order-clause must not
+ // be acq_rel or acquire.
+ if ((AtomicKind == OMPC_read &&
+ (MemOrderKind == OMPC_acq_rel || MemOrderKind == OMPC_release)) ||
+ ((AtomicKind == OMPC_write || AtomicKind == OMPC_update ||
+ AtomicKind == OMPC_unknown) &&
+ (MemOrderKind == OMPC_acq_rel || MemOrderKind == OMPC_acquire))) {
+ SourceLocation Loc = AtomicKindLoc;
+ if (AtomicKind == OMPC_unknown)
+ Loc = StartLoc;
+ Diag(Loc, diag::err_omp_atomic_incompatible_mem_order_clause)
+ << getOpenMPClauseName(AtomicKind)
+ << (AtomicKind == OMPC_unknown ? 1 : 0)
+ << getOpenMPClauseName(MemOrderKind);
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
}
Stmt *Body = CS->getCapturedStmt();
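A sketch of the compatibility rule enforced above (hypothetical code using the OpenMP 5.0 memory-order clauses):

    void atomics(int *x, int *v) {
      #pragma omp atomic read acquire   // acquire is allowed on an atomic read
      *v = *x;
      #pragma omp atomic write release  // release is allowed on an atomic write
      *x = 1;
      // 'atomic read release', 'atomic write acquire', or an update/unspecified
      // form with acquire or acq_rel is rejected by the check above.
    }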
@@ -9338,8 +10251,9 @@ Sema::ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
- return OMPTargetParallelDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetParallelDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
@@ -9391,9 +10305,9 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
}
setFunctionHasBranchProtectedScope();
- return OMPTargetParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt,
- B, DSAStack->isCancelRegion());
+ return OMPTargetParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
/// Check for existence of a map clause in the list of clauses.
@@ -9418,12 +10332,18 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- // OpenMP [2.10.1, Restrictions, p. 97]
- // At least one map clause must appear on the directive.
- if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr)) {
+ // OpenMP [2.12.2, target data Construct, Restrictions]
+ // At least one map, use_device_addr or use_device_ptr clause must appear on
+ // the directive.
+ if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr) &&
+ (LangOpts.OpenMP < 50 || !hasClauses(Clauses, OMPC_use_device_addr))) {
+ StringRef Expected;
+ if (LangOpts.OpenMP < 50)
+ Expected = "'map' or 'use_device_ptr'";
+ else
+ Expected = "'map', 'use_device_ptr', or 'use_device_addr'";
Diag(StartLoc, diag::err_omp_no_clause_for_directive)
- << "'map' or 'use_device_ptr'"
- << getOpenMPDirectiveName(OMPD_target_data);
+ << Expected << getOpenMPDirectiveName(OMPD_target_data);
return StmtError();
}
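A sketch of the relaxed restriction (hypothetical code): target data now needs at least one map, use_device_ptr, or, from OpenMP 5.0, use_device_addr clause:

    void stage(int *p, int n) {
      #pragma omp target data map(tofrom: p[0:n])  // satisfies the restriction
      {
        #pragma omp target
        for (int i = 0; i < n; ++i)
          p[i] += 1;
      }
      // A bare '#pragma omp target data' with none of these clauses is still an error.
    }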
@@ -9604,12 +10524,10 @@ static bool checkGrainsizeNumTasksClauses(Sema &S,
if (!PrevClause)
PrevClause = C;
else if (PrevClause->getClauseKind() != C->getClauseKind()) {
- S.Diag(C->getBeginLoc(),
- diag::err_omp_grainsize_num_tasks_mutually_exclusive)
+ S.Diag(C->getBeginLoc(), diag::err_omp_clauses_mutually_exclusive)
<< getOpenMPClauseName(C->getClauseKind())
<< getOpenMPClauseName(PrevClause->getClauseKind());
- S.Diag(PrevClause->getBeginLoc(),
- diag::note_omp_previous_grainsize_num_tasks)
+ S.Diag(PrevClause->getBeginLoc(), diag::note_omp_previous_clause)
<< getOpenMPClauseName(PrevClause->getClauseKind());
ErrorFound = true;
}
@@ -9678,7 +10596,8 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
@@ -9763,7 +10682,8 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPMasterTaskLoopDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
@@ -9867,7 +10787,8 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPParallelMasterTaskLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
@@ -10004,7 +10925,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
@@ -10301,7 +11222,6 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
-
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
@@ -10446,7 +11366,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
return OMPTeamsDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
@@ -10575,7 +11495,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
@@ -10721,9 +11641,6 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_ordered:
Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Expr);
break;
- case OMPC_device:
- Res = ActOnOpenMPDeviceClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
case OMPC_num_teams:
Res = ActOnOpenMPNumTeamsClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -10742,6 +11659,13 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_hint:
Res = ActOnOpenMPHintClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_depobj:
+ Res = ActOnOpenMPDepobjClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_detach:
+ Res = ActOnOpenMPDetachClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_device:
case OMPC_if:
case OMPC_default:
case OMPC_proc_bind:
@@ -10768,6 +11692,10 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -10780,6 +11708,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -10789,6 +11718,13 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -10918,10 +11854,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_teams:
@@ -10939,6 +11879,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with if-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -10988,10 +11929,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_teams:
@@ -11013,6 +11958,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_threads-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11063,10 +12009,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11085,6 +12035,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11135,10 +12086,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11157,6 +12112,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with thread_limit-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11208,10 +12164,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11229,6 +12189,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11280,10 +12241,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11301,6 +12266,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11351,10 +12317,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11373,6 +12343,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11425,10 +12396,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11447,6 +12422,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with grainsize-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11474,11 +12450,16 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -11491,6 +12472,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -11500,6 +12482,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Unexpected OpenMP clause.");
}
return CaptureRegion;
@@ -11747,8 +12737,7 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
return true;
// Build the predefined allocator expressions.
bool ErrorFound = false;
- for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
- I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
StringRef Allocator =
OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
@@ -11775,7 +12764,8 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
Stack->setAllocator(AllocatorKind, Res.get());
}
if (ErrorFound) {
- S.Diag(Loc, diag::err_implied_omp_allocator_handle_t_not_found);
+ S.Diag(Loc, diag::err_omp_implied_type_not_found)
+ << "omp_allocator_handle_t";
return false;
}
OMPAllocatorHandleT.addConst();
@@ -11852,9 +12842,8 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_default:
- Res =
- ActOnOpenMPDefaultClause(static_cast<OpenMPDefaultClauseKind>(Argument),
- ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPDefaultClause(static_cast<DefaultKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_proc_bind:
Res = ActOnOpenMPProcBindClause(static_cast<ProcBindKind>(Argument),
@@ -11865,6 +12854,14 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Argument),
ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_order:
+ Res = ActOnOpenMPOrderClause(static_cast<OpenMPOrderClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_update:
+ Res = ActOnOpenMPUpdateClause(static_cast<OpenMPDependClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -11891,11 +12888,15 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_device:
case OMPC_threads:
@@ -11915,6 +12916,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -11923,6 +12925,13 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -11946,34 +12955,36 @@ getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
else if (I + Skipped + 1 != Last)
Out << ", ";
}
- return Out.str();
+ return std::string(Out.str());
}
-OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
+OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
SourceLocation KindKwLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- if (Kind == OMPC_DEFAULT_unknown) {
- static_assert(OMPC_DEFAULT_unknown > 0,
- "OMPC_DEFAULT_unknown not greater than 0");
+ if (Kind == OMP_DEFAULT_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_default, /*First=*/0,
- /*Last=*/OMPC_DEFAULT_unknown)
+ /*Last=*/unsigned(OMP_DEFAULT_unknown))
<< getOpenMPClauseName(OMPC_default);
return nullptr;
}
+
switch (Kind) {
- case OMPC_DEFAULT_none:
+ case OMP_DEFAULT_none:
DSAStack->setDefaultDSANone(KindKwLoc);
break;
- case OMPC_DEFAULT_shared:
+ case OMP_DEFAULT_shared:
DSAStack->setDefaultDSAShared(KindKwLoc);
break;
- case OMPC_DEFAULT_unknown:
- llvm_unreachable("Clause kind is not allowed.");
+ case OMP_DEFAULT_firstprivate:
+ DSAStack->setDefaultDSAFirstPrivate(KindKwLoc);
break;
+ default:
+ llvm_unreachable("DSA unexpected in OpenMP default clause");
}
+
return new (Context)
OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
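A sketch of the data-sharing defaults now recognized (hypothetical code; assumes a compiler that accepts the firstprivate default wired in above):

    void defaults(void) {
      int a = 0, b = 0;
      #pragma omp parallel default(firstprivate)  // each thread gets initialized copies
      {
        a += 1;  // updates the thread's firstprivate copy of 'a'
        b += 1;
      }
    }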
@@ -12010,6 +13021,43 @@ OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_ORDER_unknown) {
+ static_assert(OMPC_ORDER_unknown > 0,
+ "OMPC_ORDER_unknown not greater than 0");
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_order, /*First=*/0,
+ /*Last=*/OMPC_ORDER_unknown)
+ << getOpenMPClauseName(OMPC_order);
+ return nullptr;
+ }
+ return new (Context)
+ OMPOrderClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
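A sketch of the order clause validated above (hypothetical code, OpenMP 5.0): order(concurrent) asserts that the loop iterations may execute in any order, including concurrently:

    void scale(float *x, int n) {
      #pragma omp for order(concurrent)
      for (int i = 0; i < n; ++i)
        x[i] *= 2.0f;
    }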
+OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source ||
+ Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) {
+ unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
+ OMPC_DEPEND_depobj};
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown, Except)
+ << getOpenMPClauseName(OMPC_update);
+ return nullptr;
+ }
+ return OMPUpdateClause::Create(Context, StartLoc, LParenLoc, KindKwLoc, Kind,
+ EndLoc);
+}
+
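A sketch pairing the new update clause with a depobj directive (hypothetical code; assumes omp.h provides omp_depend_t): source, sink, and depobj dependence types are excluded here, while in, out, inout, and mutexinoutset are accepted:

    #include <omp.h>
    void retarget(omp_depend_t *d) {
      #pragma omp depobj(*d) update(out)  // switch the stored dependence type to 'out'
    }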
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -12047,6 +13095,12 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
StartLoc, LParenLoc, ArgumentLoc[Modifier], ArgumentLoc[DefaultmapKind],
EndLoc);
break;
+ case OMPC_device:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1);
+ Res = ActOnOpenMPDeviceClause(
+ static_cast<OpenMPDeviceClauseModifier>(Argument.back()), Expr,
+ StartLoc, LParenLoc, ArgumentLoc.back(), EndLoc);
+ break;
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
@@ -12073,13 +13127,17 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
- case OMPC_device:
case OMPC_threads:
case OMPC_simd:
case OMPC_map:
@@ -12095,6 +13153,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -12104,6 +13163,14 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12170,7 +13237,9 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// OpenMP, 2.7.1, Loop Construct, Restrictions
// The nonmonotonic modifier can only be specified with schedule(dynamic) or
// schedule(guided).
- if ((M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ // OpenMP 5.0 does not have this restriction.
+ if (LangOpts.OpenMP < 50 &&
+ (M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) {
Diag(M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ? M1Loc : M2Loc,
@@ -12250,6 +13319,18 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_seq_cst:
Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
break;
+ case OMPC_acq_rel:
+ Res = ActOnOpenMPAcqRelClause(StartLoc, EndLoc);
+ break;
+ case OMPC_acquire:
+ Res = ActOnOpenMPAcquireClause(StartLoc, EndLoc);
+ break;
+ case OMPC_release:
+ Res = ActOnOpenMPReleaseClause(StartLoc, EndLoc);
+ break;
+ case OMPC_relaxed:
+ Res = ActOnOpenMPRelaxedClause(StartLoc, EndLoc);
+ break;
case OMPC_threads:
Res = ActOnOpenMPThreadsClause(StartLoc, EndLoc);
break;
@@ -12271,6 +13352,9 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
Res = ActOnOpenMPDynamicAllocatorsClause(StartLoc, EndLoc);
break;
+ case OMPC_destroy:
+ Res = ActOnOpenMPDestroyClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -12295,6 +13379,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_depend:
case OMPC_device:
case OMPC_map:
@@ -12311,11 +13396,19 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12349,7 +13442,7 @@ OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
- return new (Context) OMPUpdateClause(StartLoc, EndLoc);
+ return OMPUpdateClause::Create(Context, StartLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
@@ -12362,6 +13455,26 @@ OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPAcqRelClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPAcquireClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReleaseClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPRelaxedClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
return new (Context) OMPThreadsClause(StartLoc, EndLoc);
@@ -12397,14 +13510,19 @@ OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPDestroyClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPDestroyClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
+ OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation DepLinMapLastLoc) {
+ SourceLocation ExtraModifierLoc) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
@@ -12421,15 +13539,18 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
"Unexpected lastprivate modifier.");
Res = ActOnOpenMPLastprivateClause(
VarList, static_cast<OpenMPLastprivateModifier>(ExtraModifier),
- DepLinMapLastLoc, ColonLoc, StartLoc, LParenLoc, EndLoc);
+ ExtraModifierLoc, ColonLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_shared:
Res = ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_reduction:
- Res = ActOnOpenMPReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ assert(0 <= ExtraModifier && ExtraModifier <= OMPC_REDUCTION_unknown &&
+ "Unexpected lastprivate modifier.");
+ Res = ActOnOpenMPReductionClause(
+ VarList, static_cast<OpenMPReductionClauseModifier>(ExtraModifier),
+ StartLoc, LParenLoc, ExtraModifierLoc, ColonLoc, EndLoc,
+ ReductionOrMapperIdScopeSpec, ReductionOrMapperId);
break;
case OMPC_task_reduction:
Res = ActOnOpenMPTaskReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
@@ -12445,13 +13566,13 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
Res = ActOnOpenMPLinearClause(
- VarList, TailExpr, StartLoc, LParenLoc,
- static_cast<OpenMPLinearClauseKind>(ExtraModifier), DepLinMapLastLoc,
+ VarList, DepModOrTailExpr, StartLoc, LParenLoc,
+ static_cast<OpenMPLinearClauseKind>(ExtraModifier), ExtraModifierLoc,
ColonLoc, EndLoc);
break;
case OMPC_aligned:
- Res = ActOnOpenMPAlignedClause(VarList, TailExpr, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ Res = ActOnOpenMPAlignedClause(VarList, DepModOrTailExpr, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_copyin:
Res = ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc, EndLoc);
@@ -12466,8 +13587,8 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_DEPEND_unknown &&
"Unexpected depend modifier.");
Res = ActOnOpenMPDependClause(
- static_cast<OpenMPDependClauseKind>(ExtraModifier), DepLinMapLastLoc,
- ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ DepModOrTailExpr, static_cast<OpenMPDependClauseKind>(ExtraModifier),
+ ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_MAP_unknown &&
@@ -12475,7 +13596,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPMapClause(
MapTypeModifiers, MapTypeModifiersLoc, ReductionOrMapperIdScopeSpec,
ReductionOrMapperId, static_cast<OpenMPMapClauseKind>(ExtraModifier),
- IsMapTypeImplicit, DepLinMapLastLoc, ColonLoc, VarList, Locs);
+ IsMapTypeImplicit, ExtraModifierLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
Res = ActOnOpenMPToClause(VarList, ReductionOrMapperIdScopeSpec,
@@ -12488,17 +13609,31 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_use_device_ptr:
Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
break;
+ case OMPC_use_device_addr:
+ Res = ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
+ break;
case OMPC_is_device_ptr:
Res = ActOnOpenMPIsDevicePtrClause(VarList, Locs);
break;
case OMPC_allocate:
- Res = ActOnOpenMPAllocateClause(TailExpr, VarList, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ Res = ActOnOpenMPAllocateClause(DepModOrTailExpr, VarList, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_nontemporal:
Res = ActOnOpenMPNontemporalClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_inclusive:
+ Res = ActOnOpenMPInclusiveClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_exclusive:
+ Res = ActOnOpenMPExclusiveClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_affinity:
+ Res = ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc,
+ DepModOrTailExpr, VarList);
+ break;
case OMPC_if:
+ case OMPC_depobj:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
@@ -12518,6 +13653,10 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_device:
case OMPC_threads:
case OMPC_simd:
@@ -12539,6 +13678,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_uses_allocators:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12985,7 +14129,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
ExprCaptures.push_back(Ref->getDecl());
}
}
- DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+ if (!IsImplicitClause)
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
Vars.push_back((VD || CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
@@ -13518,6 +14663,12 @@ struct ReductionData {
SmallVector<Expr *, 8> RHSs;
/// Reduction operation expression.
SmallVector<Expr *, 8> ReductionOps;
+ /// inscan copy operation expressions.
+ SmallVector<Expr *, 8> InscanCopyOps;
+ /// inscan copy temp array expressions for prefix sums.
+ SmallVector<Expr *, 8> InscanCopyArrayTemps;
+ /// inscan copy temp array element expressions for prefix sums.
+ SmallVector<Expr *, 8> InscanCopyArrayElems;
/// Taskgroup descriptors for the corresponding reduction items in
/// in_reduction clauses.
SmallVector<Expr *, 8> TaskgroupDescriptors;
@@ -13525,14 +14676,21 @@ struct ReductionData {
SmallVector<Decl *, 4> ExprCaptures;
/// List of postupdate expressions.
SmallVector<Expr *, 4> ExprPostUpdates;
+ /// Reduction modifier.
+ unsigned RedModifier = 0;
ReductionData() = delete;
/// Reserves required memory for the reduction data.
- ReductionData(unsigned Size) {
+ ReductionData(unsigned Size, unsigned Modifier = 0) : RedModifier(Modifier) {
Vars.reserve(Size);
Privates.reserve(Size);
LHSs.reserve(Size);
RHSs.reserve(Size);
ReductionOps.reserve(Size);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.reserve(Size);
+ InscanCopyArrayTemps.reserve(Size);
+ InscanCopyArrayElems.reserve(Size);
+ }
TaskgroupDescriptors.reserve(Size);
ExprCaptures.reserve(Size);
ExprPostUpdates.reserve(Size);
@@ -13546,16 +14704,31 @@ struct ReductionData {
RHSs.emplace_back(nullptr);
ReductionOps.emplace_back(ReductionOp);
TaskgroupDescriptors.emplace_back(nullptr);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.push_back(nullptr);
+ InscanCopyArrayTemps.push_back(nullptr);
+ InscanCopyArrayElems.push_back(nullptr);
+ }
}
/// Stores reduction data.
void push(Expr *Item, Expr *Private, Expr *LHS, Expr *RHS, Expr *ReductionOp,
- Expr *TaskgroupDescriptor) {
+ Expr *TaskgroupDescriptor, Expr *CopyOp, Expr *CopyArrayTemp,
+ Expr *CopyArrayElem) {
Vars.emplace_back(Item);
Privates.emplace_back(Private);
LHSs.emplace_back(LHS);
RHSs.emplace_back(RHS);
ReductionOps.emplace_back(ReductionOp);
TaskgroupDescriptors.emplace_back(TaskgroupDescriptor);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.push_back(CopyOp);
+ InscanCopyArrayTemps.push_back(CopyArrayTemp);
+ InscanCopyArrayElems.push_back(CopyArrayElem);
+ } else {
+ assert(CopyOp == nullptr && CopyArrayTemp == nullptr &&
+ CopyArrayElem == nullptr &&
+ "Copy operation must be used for inscan reductions only.");
+ }
}
};
} // namespace
@@ -13567,7 +14740,7 @@ static bool checkOMPArraySectionConstantForReduction(
if (Length == nullptr) {
// For array sections of the form [1:] or [:], we would need to analyze
// the lower bound...
- if (OASE->getColonLoc().isValid())
+ if (OASE->getColonLocFirst().isValid())
return false;
// This is an array subscript which has implicit length 1!
@@ -13593,7 +14766,7 @@ static bool checkOMPArraySectionConstantForReduction(
if (Length == nullptr) {
// For array sections of the form [1:] or [:], we would need to analyze
// the lower bound...
- if (OASE->getColonLoc().isValid())
+ if (OASE->getColonLocFirst().isValid())
return false;
// This is an array subscript which has implicit length 1!
@@ -13948,11 +15121,11 @@ static bool actOnOMPReductionKindClause(
if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective())) {
S.Diag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
S.Diag(ELoc, diag::note_vla_unsupported);
+ continue;
} else {
S.targetDiag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
S.targetDiag(ELoc, diag::note_vla_unsupported);
}
- continue;
}
// For arrays/array sections only:
// Create pseudo array type for private copy. The size for this array will
@@ -14007,9 +15180,9 @@ static bool actOnOMPReductionKindClause(
if (auto *ComplexTy = OrigType->getAs<ComplexType>())
Type = ComplexTy->getElementType();
if (Type->isRealFloatingType()) {
- llvm::APFloat InitValue =
- llvm::APFloat::getAllOnesValue(Context.getTypeSize(Type),
- /*isIEEE=*/true);
+ llvm::APFloat InitValue = llvm::APFloat::getAllOnesValue(
+ Context.getFloatTypeSemantics(Type),
+ Context.getTypeSize(Type));
Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
Type, ELoc);
} else if (Type->isScalarType()) {
@@ -14157,6 +15330,53 @@ static bool actOnOMPReductionKindClause(
continue;
}
+ // Add copy operations for inscan reductions.
+ // LHS = RHS;
+ ExprResult CopyOpRes, TempArrayRes, TempArrayElem;
+ if (ClauseKind == OMPC_reduction &&
+ RD.RedModifier == OMPC_REDUCTION_inscan) {
+ ExprResult RHS = S.DefaultLvalueConversion(RHSDRE);
+ CopyOpRes = S.BuildBinOp(Stack->getCurScope(), ELoc, BO_Assign, LHSDRE,
+ RHS.get());
+ if (!CopyOpRes.isUsable())
+ continue;
+ CopyOpRes =
+ S.ActOnFinishFullExpr(CopyOpRes.get(), /*DiscardedValue=*/true);
+ if (!CopyOpRes.isUsable())
+ continue;
+ // For the simd directive, and for simd-based directives in simd-only mode,
+ // there is no need to build a temp array; a single temp element suffices.
+ if (Stack->getCurrentDirective() == OMPD_simd ||
+ (S.getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(Stack->getCurrentDirective()))) {
+ VarDecl *TempArrayVD =
+ buildVarDecl(S, ELoc, PrivateTy, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ // Add a constructor to the temp decl.
+ S.ActOnUninitializedDecl(TempArrayVD);
+ TempArrayRes = buildDeclRefExpr(S, TempArrayVD, PrivateTy, ELoc);
+ } else {
+ // Build temp array for prefix sum.
+ auto *Dim = new (S.Context)
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ QualType ArrayTy =
+ S.Context.getVariableArrayType(PrivateTy, Dim, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, {ELoc, ELoc});
+ VarDecl *TempArrayVD =
+ buildVarDecl(S, ELoc, ArrayTy, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ // Add a constructor to the temp decl.
+ S.ActOnUninitializedDecl(TempArrayVD);
+ TempArrayRes = buildDeclRefExpr(S, TempArrayVD, ArrayTy, ELoc);
+ TempArrayElem =
+ S.DefaultFunctionArrayLvalueConversion(TempArrayRes.get());
+ auto *Idx = new (S.Context)
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ TempArrayElem = S.CreateBuiltinArraySubscriptExpr(TempArrayElem.get(),
+ ELoc, Idx, ELoc);
+ }
+ }
+
// OpenMP [2.15.4.6, Restrictions, p.2]
// A list item that appears in an in_reduction clause of a task construct
// must appear in a task_reduction clause of a construct associated with a
@@ -14167,8 +15387,8 @@ static bool actOnOMPReductionKindClause(
if (ClauseKind == OMPC_in_reduction) {
SourceRange ParentSR;
BinaryOperatorKind ParentBOK;
- const Expr *ParentReductionOp;
- Expr *ParentBOKTD, *ParentReductionOpTD;
+ const Expr *ParentReductionOp = nullptr;
+ Expr *ParentBOKTD = nullptr, *ParentReductionOpTD = nullptr;
DSAStackTy::DSAVarData ParentBOKDSA =
Stack->getTopMostTaskgroupReductionData(D, ParentSR, ParentBOK,
ParentBOKTD);
@@ -14177,13 +15397,9 @@ static bool actOnOMPReductionKindClause(
D, ParentSR, ParentReductionOp, ParentReductionOpTD);
bool IsParentBOK = ParentBOKDSA.DKind != OMPD_unknown;
bool IsParentReductionOp = ParentReductionOpDSA.DKind != OMPD_unknown;
- if (!IsParentBOK && !IsParentReductionOp) {
- S.Diag(ELoc, diag::err_omp_in_reduction_not_task_reduction);
- continue;
- }
if ((DeclareReductionRef.isUnset() && IsParentReductionOp) ||
- (DeclareReductionRef.isUsable() && IsParentBOK) || BOK != ParentBOK ||
- IsParentReductionOp) {
+ (DeclareReductionRef.isUsable() && IsParentBOK) ||
+ (IsParentBOK && BOK != ParentBOK) || IsParentReductionOp) {
bool EmitError = true;
if (IsParentReductionOp && DeclareReductionRef.isUsable()) {
llvm::FoldingSetNodeID RedId, ParentRedId;
@@ -14206,7 +15422,6 @@ static bool actOnOMPReductionKindClause(
}
}
TaskgroupDescriptor = IsParentBOK ? ParentBOKTD : ParentReductionOpTD;
- assert(TaskgroupDescriptor && "Taskgroup descriptor must be defined.");
}
DeclRefExpr *Ref = nullptr;
@@ -14245,8 +15460,17 @@ static bool actOnOMPReductionKindClause(
}
// All reduction items are still marked as reduction (to do not increase
// code base size).
- Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref);
- if (CurrDir == OMPD_taskgroup) {
+ unsigned Modifier = RD.RedModifier;
+ // Treat task_reduction clauses as reductions with the task modifier; this is
+ // required for correct analysis of in_reduction clauses.
+ if (CurrDir == OMPD_taskgroup && ClauseKind == OMPC_task_reduction)
+ Modifier = OMPC_REDUCTION_task;
+ Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref, Modifier);
+ if (Modifier == OMPC_REDUCTION_task &&
+ (CurrDir == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(CurrDir) ||
+ isOpenMPWorksharingDirective(CurrDir)) &&
+ !isOpenMPSimdDirective(CurrDir)))) {
if (DeclareReductionRef.isUsable())
Stack->addTaskgroupReductionData(D, ReductionIdRange,
DeclareReductionRef.get());
@@ -14254,17 +15478,41 @@ static bool actOnOMPReductionKindClause(
Stack->addTaskgroupReductionData(D, ReductionIdRange, BOK);
}
RD.push(VarsExpr, PrivateDRE, LHSDRE, RHSDRE, ReductionOp.get(),
- TaskgroupDescriptor);
+ TaskgroupDescriptor, CopyOpRes.get(), TempArrayRes.get(),
+ TempArrayElem.get());
}
return RD.Vars.empty();
}
OMPClause *Sema::ActOnOpenMPReductionClause(
- ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
- ReductionData RD(VarList.size());
+ if (ModifierLoc.isValid() && Modifier == OMPC_REDUCTION_unknown) {
+ Diag(LParenLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_reduction, /*First=*/0,
+ /*Last=*/OMPC_REDUCTION_unknown)
+ << getOpenMPClauseName(OMPC_reduction);
+ return nullptr;
+ }
+ // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions
+ // A reduction clause with the inscan reduction-modifier may only appear on a
+ // worksharing-loop construct, a worksharing-loop SIMD construct, a simd
+ // construct, a parallel worksharing-loop construct or a parallel
+ // worksharing-loop SIMD construct.
+ if (Modifier == OMPC_REDUCTION_inscan &&
+ (DSAStack->getCurrentDirective() != OMPD_for &&
+ DSAStack->getCurrentDirective() != OMPD_for_simd &&
+ DSAStack->getCurrentDirective() != OMPD_simd &&
+ DSAStack->getCurrentDirective() != OMPD_parallel_for &&
+ DSAStack->getCurrentDirective() != OMPD_parallel_for_simd)) {
+ Diag(ModifierLoc, diag::err_omp_wrong_inscan_reduction);
+ return nullptr;
+ }
+
+ ReductionData RD(VarList.size(), Modifier);
if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
@@ -14272,9 +15520,10 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
return nullptr;
return OMPReductionClause::Create(
- Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
- ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
- RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps,
+ Context, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc, Modifier,
+ RD.Vars, ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
+ RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.InscanCopyOps,
+ RD.InscanCopyArrayTemps, RD.InscanCopyArrayElems,
buildPreInits(Context, RD.ExprCaptures),
buildPostUpdate(*this, RD.ExprPostUpdates));
}
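A sketch of the task reduction-modifier this signature now carries (hypothetical code, OpenMP 5.0); explicit tasks join the reduction through in_reduction, which the surrounding analysis ties back to the enclosing construct:

    void sum_tasks(const int *a, int n, int *result) {
      int s = 0;
      #pragma omp parallel for reduction(task, + : s)
      for (int i = 0; i < n; ++i) {
        #pragma omp task in_reduction(+ : s)
        s += a[i];
      }
      *result = s;
    }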
@@ -14330,8 +15579,8 @@ bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
}
bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
- OpenMPLinearClauseKind LinKind,
- QualType Type) {
+ OpenMPLinearClauseKind LinKind, QualType Type,
+ bool IsDeclareSimd) {
const auto *VD = dyn_cast_or_null<VarDecl>(D);
// A variable must not have an incomplete type or a reference type.
if (RequireCompleteType(ELoc, Type, diag::err_omp_linear_incomplete_type))
@@ -14347,8 +15596,10 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
// OpenMP 5.0 [2.19.3, List Item Privatization, Restrictions]
// A variable that is privatized must not have a const-qualified type
// unless it is of class type with a mutable member. This restriction does
- // not apply to the firstprivate clause.
- if (rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
+ // not apply to the firstprivate clause, nor to the linear clause on
+ // declarative directives (like declare simd).
+ if (!IsDeclareSimd &&
+ rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
return true;
// A list item must be of integral or pointer type.
@@ -14900,8 +16151,53 @@ OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList);
}
+/// Tries to find the omp_depend_t type.
+static bool findOMPDependT(Sema &S, SourceLocation Loc, DSAStackTy *Stack,
+ bool Diagnose = true) {
+ QualType OMPDependT = Stack->getOMPDependT();
+ if (!OMPDependT.isNull())
+ return true;
+ IdentifierInfo *II = &S.PP.getIdentifierTable().get("omp_depend_t");
+ ParsedType PT = S.getTypeName(*II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ if (Diagnose)
+ S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_depend_t";
+ return false;
+ }
+ Stack->setOMPDependT(PT.get());
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (!Depobj)
+ return nullptr;
+
+ bool OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack);
+
+ // OpenMP 5.0, 2.17.10.1 depobj Construct
+ // depobj is an lvalue expression of type omp_depend_t.
+ if (!Depobj->isTypeDependent() && !Depobj->isValueDependent() &&
+ !Depobj->isInstantiationDependent() &&
+ !Depobj->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.typesAreCompatible(DSAStack->getOMPDependT(), Depobj->getType(),
+ /*CompareUnqualified=*/true))) {
+ Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << Depobj->getType() << Depobj->getSourceRange();
+ }
+
+ if (!Depobj->isLValue()) {
+ Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << Depobj->getSourceRange();
+ }
+
+ return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj);
+}
+
OMPClause *
-Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
+Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -14911,16 +16207,38 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
<< "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
return nullptr;
}
- if (DSAStack->getCurrentDirective() != OMPD_ordered &&
+ if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
(DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
- DepKind == OMPC_DEPEND_sink)) {
- unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink};
+ DepKind == OMPC_DEPEND_sink ||
+ ((LangOpts.OpenMP < 50 ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ DepKind == OMPC_DEPEND_depobj))) {
+ SmallVector<unsigned, 3> Except;
+ Except.push_back(OMPC_DEPEND_source);
+ Except.push_back(OMPC_DEPEND_sink);
+ if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
+ Except.push_back(OMPC_DEPEND_depobj);
+ std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
+ ? "depend modifier(iterator) or "
+ : "";
Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << getListOfPossibleValues(OMPC_depend, /*First=*/0,
- /*Last=*/OMPC_DEPEND_unknown, Except)
+ << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown,
+ Except)
<< getOpenMPClauseName(OMPC_depend);
return nullptr;
}
+ if (DepModifier &&
+ (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
+ Diag(DepModifier->getExprLoc(),
+ diag::err_omp_depend_sink_source_with_modifier);
+ return nullptr;
+ }
+ if (DepModifier &&
+ !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
+ Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+
SmallVector<Expr *, 8> Vars;
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
@@ -15021,42 +16339,97 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
}
OpsOffs.emplace_back(RHS, OOK);
} else {
- // OpenMP 5.0 [2.17.11, Restrictions]
- // List items used in depend clauses cannot be zero-length array sections.
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
- if (OASE) {
- const Expr *Length = OASE->getLength();
- Expr::EvalResult Result;
- if (Length && !Length->isValueDependent() &&
- Length->EvaluateAsInt(Result, Context) &&
- Result.Val.getInt().isNullValue()) {
- Diag(ELoc,
- diag::err_omp_depend_zero_length_array_section_not_allowed)
- << SimpleExpr->getSourceRange();
+ bool OMPDependTFound = LangOpts.OpenMP >= 50;
+ if (OMPDependTFound)
+ OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
+ DepKind == OMPC_DEPEND_depobj);
+ if (DepKind == OMPC_DEPEND_depobj) {
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the depobj dependence type
+ // must be expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
+ RefExpr->getType()))) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << RefExpr->getType() << RefExpr->getSourceRange();
continue;
}
- }
+ if (!RefExpr->isLValue()) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << RefExpr->getType() << RefExpr->getSourceRange();
+ continue;
+ }
+ } else {
+ // OpenMP 5.0 [2.17.11, Restrictions]
+ // List items used in depend clauses cannot be zero-length array
+ // sections.
+ QualType ExprTy = RefExpr->getType().getNonReferenceType();
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (OASE) {
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ ExprTy = ATy->getElementType();
+ else
+ ExprTy = BaseType->getPointeeType();
+ ExprTy = ExprTy.getNonReferenceType();
+ const Expr *Length = OASE->getLength();
+ Expr::EvalResult Result;
+ if (Length && !Length->isValueDependent() &&
+ Length->EvaluateAsInt(Result, Context) &&
+ Result.Val.getInt().isNullValue()) {
+ Diag(ELoc,
+ diag::err_omp_depend_zero_length_array_section_not_allowed)
+ << SimpleExpr->getSourceRange();
+ continue;
+ }
+ }
- auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (ASE &&
- !ASE->getBase()->getType().getNonReferenceType()->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
- continue;
- }
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the in, out, inout or
+ // mutexinoutset dependence types cannot be expressions of the
+ // omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr())) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << 1
+ << RefExpr->getSourceRange();
+ continue;
+ }
- ExprResult Res;
- {
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
- }
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr)) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
- continue;
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (ASE && !ASE->getBase()->isTypeDependent() &&
+ !ASE->getBase()
+ ->getType()
+ .getNonReferenceType()
+ ->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ ExprResult Res;
+ {
+ Sema::TentativeAnalysisScope Trap(*this);
+ Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
+ RefExpr->IgnoreParenImpCasts());
+ }
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
}
}
Vars.push_back(RefExpr->IgnoreParenImpCasts());
@@ -15074,24 +16447,40 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
return nullptr;
auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepKind, DepLoc, ColonLoc, Vars,
- TotalDepCount.getZExtValue());
+ DepModifier, DepKind, DepLoc, ColonLoc,
+ Vars, TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
return C;
}
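A sketch of the depend forms the extended clause handling above now accepts (hypothetical code, OpenMP 5.0; assumes omp.h provides omp_depend_t):

    #include <omp.h>
    void deps(omp_depend_t d, int *p, int n) {
      // 'd' is assumed to have been initialized with a depobj construct.
      #pragma omp task depend(depobj: d)  // list item must have type omp_depend_t
      { /* ... */ }
      #pragma omp task depend(iterator(i = 0:n), in: p[i])  // iterator modifier
      { /* ... */ }
    }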
-OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
+ assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 50) &&
+ "Unexpected device modifier in OpenMP < 50.");
+
+ bool ErrorFound = false;
+ if (ModifierLoc.isValid() && Modifier == OMPC_DEVICE_unknown) {
+ std::string Values =
+ getListOfPossibleValues(OMPC_device, /*First=*/0, OMPC_DEVICE_unknown);
+ Diag(ModifierLoc, diag::err_omp_unexpected_clause_value)
+ << Values << getOpenMPClauseName(OMPC_device);
+ ErrorFound = true;
+ }
+
Expr *ValExpr = Device;
Stmt *HelperValStmt = nullptr;
// OpenMP [2.9.1, Restrictions]
// The device expression must evaluate to a non-negative integer value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
- /*StrictlyPositive=*/false))
+ ErrorFound = !isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ /*StrictlyPositive=*/false) ||
+ ErrorFound;
+ if (ErrorFound)
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
@@ -15104,8 +16493,9 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
HelperValStmt = buildPreInits(Context, Captures);
}
- return new (Context) OMPDeviceClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (Context)
+ OMPDeviceClause(Modifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
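
A sketch of the device-clause forms the new modifier handling above accepts, assuming -fopenmp-version=50; device_num and ancestor are the two modifiers OpenMP 5.0 defines, and the variable names are illustrative.

  void device_examples(int dev) {
    // OpenMP 4.5 form: a plain non-negative device number.
    #pragma omp target device(dev)
    {}
    // OpenMP 5.0 form with an explicit modifier.
    #pragma omp target device(device_num : dev)
    {}
    // The ancestor form additionally needs a 'requires reverse_offload'
    // directive per the 5.0 spec.
    #pragma omp target device(ancestor : 1)
    {}
  }
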
static bool checkTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
@@ -15133,7 +16523,8 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// If this is an array subscript, it refers to the whole size if the size of
// the dimension is constant and equals 1. Also, an array section assumes the
// format of an array subscript if no colon is used.
- if (isa<ArraySubscriptExpr>(E) || (OASE && OASE->getColonLoc().isInvalid())) {
+ if (isa<ArraySubscriptExpr>(E) ||
+ (OASE && OASE->getColonLocFirst().isInvalid())) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
// Size can't be evaluated statically.
@@ -15189,7 +16580,8 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
 // An array subscript always refers to a single element. Also, an array section
// assumes the format of an array subscript if no colon is used.
- if (isa<ArraySubscriptExpr>(E) || (OASE && OASE->getColonLoc().isInvalid()))
+ if (isa<ArraySubscriptExpr>(E) ||
+ (OASE && OASE->getColonLocFirst().isInvalid()))
return false;
assert(OASE && "Expecting array section if not an array subscript.");
@@ -15214,256 +16606,338 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
return ConstLength.getSExtValue() != 1;
}
-// Return the expression of the base of the mappable expression or null if it
-// cannot be determined and do all the necessary checks to see if the expression
-// is valid as a standalone mappable expression. In the process, record all the
-// components of the expression.
-static const Expr *checkMapClauseExpressionBase(
- Sema &SemaRef, Expr *E,
- OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
- OpenMPClauseKind CKind, bool NoDiagnose) {
- SourceLocation ELoc = E->getExprLoc();
- SourceRange ERange = E->getSourceRange();
-
- // The base of elements of list in a map clause have to be either:
- // - a reference to variable or field.
- // - a member expression.
- // - an array expression.
- //
- // E.g. if we have the expression 'r.S.Arr[:12]', we want to retrieve the
- // reference to 'r'.
- //
- // If we have:
- //
- // struct SS {
- // Bla S;
- // foo() {
- // #pragma omp target map (S.Arr[:12]);
- // }
- // }
- //
- // We want to retrieve the member expression 'this->S';
+// The base of the elements of a list in a map clause has to be either:
+// - a reference to variable or field.
+// - a member expression.
+// - an array expression.
+//
+// E.g. if we have the expression 'r.S.Arr[:12]', we want to retrieve the
+// reference to 'r'.
+//
+// If we have:
+//
+// struct SS {
+// Bla S;
+// foo() {
+// #pragma omp target map (S.Arr[:12]);
+// }
+// }
+//
+// We want to retrieve the member expression 'this->S';
+// OpenMP 5.0 [2.19.7.1, map Clause, Restrictions, p.2]
+// If a list item is an array section, it must specify contiguous storage.
+//
+// For this restriction it is sufficient that we make sure only references
+// to variables or fields and array expressions are used, and that no array
+// sections exist except in the rightmost expression (unless they cover the
+// whole dimension of the array). E.g. these would be invalid:
+//
+// r.ArrS[3:5].Arr[6:7]
+//
+// r.ArrS[3:5].x
+//
+// but these would be valid:
+// r.ArrS[3].Arr[6:7]
+//
+// r.ArrS[3].x
+namespace {
+class MapBaseChecker final : public StmtVisitor<MapBaseChecker, bool> {
+ Sema &SemaRef;
+ OpenMPClauseKind CKind = OMPC_unknown;
+ OMPClauseMappableExprCommon::MappableExprComponentList &Components;
+ bool NoDiagnose = false;
const Expr *RelevantExpr = nullptr;
-
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.2]
- // If a list item is an array section, it must specify contiguous storage.
- //
- // For this restriction it is sufficient that we make sure only references
- // to variables or fields and array expressions, and that no array sections
- // exist except in the rightmost expression (unless they cover the whole
- // dimension of the array). E.g. these would be invalid:
- //
- // r.ArrS[3:5].Arr[6:7]
- //
- // r.ArrS[3:5].x
- //
- // but these would be valid:
- // r.ArrS[3].Arr[6:7]
- //
- // r.ArrS[3].x
-
bool AllowUnitySizeArraySection = true;
bool AllowWholeSizeArraySection = true;
+ SourceLocation ELoc;
+ SourceRange ERange;
- while (!RelevantExpr) {
- E = E->IgnoreParenImpCasts();
+ void emitErrorMsg() {
+ // If nothing else worked, this is not a valid map clause expression.
+ if (SemaRef.getLangOpts().OpenMP < 50) {
+ SemaRef.Diag(ELoc,
+ diag::err_omp_expected_named_var_member_or_array_expression)
+ << ERange;
+ } else {
+ SemaRef.Diag(ELoc, diag::err_omp_non_lvalue_in_map_or_motion_clauses)
+ << getOpenMPClauseName(CKind) << ERange;
+ }
+ }
- if (auto *CurE = dyn_cast<DeclRefExpr>(E)) {
- if (!isa<VarDecl>(CurE->getDecl()))
- return nullptr;
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (!isa<VarDecl>(DRE->getDecl())) {
+ emitErrorMsg();
+ return false;
+ }
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = DRE;
+ // Record the component.
+ Components.emplace_back(DRE, DRE->getDecl());
+ return true;
+ }
- RelevantExpr = CurE;
+ bool VisitMemberExpr(MemberExpr *ME) {
+ Expr *E = ME;
+ Expr *BaseE = ME->getBase()->IgnoreParenCasts();
- // If we got a reference to a declaration, we should not expect any array
- // section before that.
- AllowUnitySizeArraySection = false;
- AllowWholeSizeArraySection = false;
+ if (isa<CXXThisExpr>(BaseE)) {
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ // We found a base expression: this->Val.
+ RelevantExpr = ME;
+ } else {
+ E = BaseE;
+ }
- // Record the component.
- CurComponents.emplace_back(CurE, CurE->getDecl());
- } else if (auto *CurE = dyn_cast<MemberExpr>(E)) {
- Expr *BaseE = CurE->getBase()->IgnoreParenImpCasts();
+ if (!isa<FieldDecl>(ME->getMemberDecl())) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
+ << ME->getSourceRange();
+ return false;
+ }
+ if (RelevantExpr)
+ return false;
+ return Visit(E);
+ }
- if (isa<CXXThisExpr>(BaseE))
- // We found a base expression: this->Val.
- RelevantExpr = CurE;
- else
- E = BaseE;
+ auto *FD = cast<FieldDecl>(ME->getMemberDecl());
- if (!isa<FieldDecl>(CurE->getMemberDecl())) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
- << CurE->getSourceRange();
- return nullptr;
- }
- if (RelevantExpr)
- return nullptr;
- continue;
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
+ // A bit-field cannot appear in a map clause.
+ //
+ if (FD->isBitField()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
+ << ME->getSourceRange() << getOpenMPClauseName(CKind);
+ return false;
}
+ if (RelevantExpr)
+ return false;
+ return Visit(E);
+ }
- auto *FD = cast<FieldDecl>(CurE->getMemberDecl());
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
+ // If the type of a list item is a reference to a type T then the type
+ // will be considered to be T for all purposes of this clause.
+ QualType CurType = BaseE->getType().getNonReferenceType();
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
- // A bit-field cannot appear in a map clause.
- //
- if (FD->isBitField()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
- << CurE->getSourceRange() << getOpenMPClauseName(CKind);
- return nullptr;
- }
- if (RelevantExpr)
- return nullptr;
- continue;
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.2]
+ // A list item cannot be a variable that is a member of a structure with
+ // a union type.
+ //
+ if (CurType->isUnionType()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
+ << ME->getSourceRange();
+ return false;
}
+ return RelevantExpr || Visit(E);
+ }
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
- // If the type of a list item is a reference to a type T then the type
- // will be considered to be T for all purposes of this clause.
- QualType CurType = BaseE->getType().getNonReferenceType();
+ // If we got a member expression, we should not expect any array section
+ // before that:
+ //
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.7]
+ // If a list item is an element of a structure, only the rightmost symbol
+ // of the variable reference can be an array section.
+ //
+ AllowUnitySizeArraySection = false;
+ AllowWholeSizeArraySection = false;
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.2]
- // A list item cannot be a variable that is a member of a structure with
- // a union type.
- //
- if (CurType->isUnionType()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
- << CurE->getSourceRange();
- return nullptr;
- }
- continue;
+ // Record the component.
+ Components.emplace_back(ME, FD);
+ return RelevantExpr || Visit(E);
+ }
+
+ bool VisitArraySubscriptExpr(ArraySubscriptExpr *AE) {
+ Expr *E = AE->getBase()->IgnoreParenImpCasts();
+
+ if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << 0 << AE->getSourceRange();
+ return false;
}
+ return RelevantExpr || Visit(E);
+ }
- // If we got a member expression, we should not expect any array section
- // before that:
- //
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.7]
- // If a list item is an element of a structure, only the rightmost symbol
- // of the variable reference can be an array section.
- //
- AllowUnitySizeArraySection = false;
+    // If we got an array subscript that expresses the whole dimension, we
+    // can have any array expressions before it. If it expresses only part of
+    // the dimension, we can only have unitary-size array expressions.
+ if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE,
+ E->getType()))
AllowWholeSizeArraySection = false;
- // Record the component.
- CurComponents.emplace_back(CurE, FD);
- } else if (auto *CurE = dyn_cast<ArraySubscriptExpr>(E)) {
- E = CurE->getBase()->IgnoreParenImpCasts();
-
- if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << CurE->getSourceRange();
- return nullptr;
- }
- continue;
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E->IgnoreParenCasts())) {
+ Expr::EvalResult Result;
+ if (!AE->getIdx()->isValueDependent() &&
+ AE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext()) &&
+ !Result.Val.getInt().isNullValue()) {
+ SemaRef.Diag(AE->getIdx()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(AE->getIdx()->getExprLoc(),
+ diag::note_omp_invalid_subscript_on_this_ptr_map);
}
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = TE;
+ }
- // If we got an array subscript that express the whole dimension we
- // can have any array expressions before. If it only expressing part of
- // the dimension, we can only have unitary-size array expressions.
- if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE,
- E->getType()))
- AllowWholeSizeArraySection = false;
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(AE, nullptr);
- if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
- Expr::EvalResult Result;
- if (CurE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext())) {
- if (!Result.Val.getInt().isNullValue()) {
- SemaRef.Diag(CurE->getIdx()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getIdx()->getExprLoc(),
- diag::note_omp_invalid_subscript_on_this_ptr_map);
- }
- }
- RelevantExpr = TE;
- }
+ return RelevantExpr || Visit(E);
+ }
- // Record the component - we don't have any declaration associated.
- CurComponents.emplace_back(CurE, nullptr);
- } else if (auto *CurE = dyn_cast<OMPArraySectionExpr>(E)) {
- assert(!NoDiagnose && "Array sections cannot be implicitly mapped.");
- E = CurE->getBase()->IgnoreParenImpCasts();
+ bool VisitOMPArraySectionExpr(OMPArraySectionExpr *OASE) {
+ assert(!NoDiagnose && "Array sections cannot be implicitly mapped.");
+ Expr *E = OASE->getBase()->IgnoreParenImpCasts();
+ QualType CurType =
+ OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
- QualType CurType =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
+ // If the type of a list item is a reference to a type T then the type
+ // will be considered to be T for all purposes of this clause.
+ if (CurType->isReferenceType())
+ CurType = CurType->getPointeeType();
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
- // If the type of a list item is a reference to a type T then the type
- // will be considered to be T for all purposes of this clause.
- if (CurType->isReferenceType())
- CurType = CurType->getPointeeType();
+ bool IsPointer = CurType->isAnyPointerType();
- bool IsPointer = CurType->isAnyPointerType();
+ if (!IsPointer && !CurType->isArrayType()) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << 0 << OASE->getSourceRange();
+ return false;
+ }
- if (!IsPointer && !CurType->isArrayType()) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << CurE->getSourceRange();
- return nullptr;
- }
+ bool NotWhole =
+ checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
+ bool NotUnity =
+ checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
- bool NotWhole =
- checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE, CurType);
- bool NotUnity =
- checkArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
+ if (AllowWholeSizeArraySection) {
+ // Any array section is currently allowed. Allowing a whole size array
+ // section implies allowing a unity array section as well.
+ //
+ // If this array section refers to the whole dimension we can still
+ // accept other array sections before this one, except if the base is a
+ // pointer. Otherwise, only unitary sections are accepted.
+ if (NotWhole || IsPointer)
+ AllowWholeSizeArraySection = false;
+ } else if (AllowUnitySizeArraySection && NotUnity) {
+ // A unity or whole array section is not allowed and that is not
+ // compatible with the properties of the current array section.
+ SemaRef.Diag(
+ ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
+ << OASE->getSourceRange();
+ return false;
+ }
- if (AllowWholeSizeArraySection) {
- // Any array section is currently allowed. Allowing a whole size array
- // section implies allowing a unity array section as well.
- //
- // If this array section refers to the whole dimension we can still
- // accept other array sections before this one, except if the base is a
- // pointer. Otherwise, only unitary sections are accepted.
- if (NotWhole || IsPointer)
- AllowWholeSizeArraySection = false;
- } else if (AllowUnitySizeArraySection && NotUnity) {
- // A unity or whole array section is not allowed and that is not
- // compatible with the properties of the current array section.
- SemaRef.Diag(
- ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
- << CurE->getSourceRange();
- return nullptr;
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
+ Expr::EvalResult ResultR;
+ Expr::EvalResult ResultL;
+ if (!OASE->getLength()->isValueDependent() &&
+ OASE->getLength()->EvaluateAsInt(ResultR, SemaRef.getASTContext()) &&
+ !ResultR.Val.getInt().isOneValue()) {
+ SemaRef.Diag(OASE->getLength()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(OASE->getLength()->getExprLoc(),
+ diag::note_omp_invalid_length_on_this_ptr_mapping);
}
-
- if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
- Expr::EvalResult ResultR;
- Expr::EvalResult ResultL;
- if (CurE->getLength()->EvaluateAsInt(ResultR,
- SemaRef.getASTContext())) {
- if (!ResultR.Val.getInt().isOneValue()) {
- SemaRef.Diag(CurE->getLength()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getLength()->getExprLoc(),
- diag::note_omp_invalid_length_on_this_ptr_mapping);
- }
- }
- if (CurE->getLowerBound() && CurE->getLowerBound()->EvaluateAsInt(
- ResultL, SemaRef.getASTContext())) {
- if (!ResultL.Val.getInt().isNullValue()) {
- SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
- diag::note_omp_invalid_lower_bound_on_this_ptr_mapping);
- }
- }
- RelevantExpr = TE;
+ if (OASE->getLowerBound() && !OASE->getLowerBound()->isValueDependent() &&
+ OASE->getLowerBound()->EvaluateAsInt(ResultL,
+ SemaRef.getASTContext()) &&
+ !ResultL.Val.getInt().isNullValue()) {
+ SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
+ diag::note_omp_invalid_lower_bound_on_this_ptr_mapping);
}
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = TE;
+ }
- // Record the component - we don't have any declaration associated.
- CurComponents.emplace_back(CurE, nullptr);
- } else {
- if (!NoDiagnose) {
- // If nothing else worked, this is not a valid map clause expression.
- SemaRef.Diag(
- ELoc, diag::err_omp_expected_named_var_member_or_array_expression)
- << ERange;
- }
- return nullptr;
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(OASE, nullptr);
+ return RelevantExpr || Visit(E);
+ }
+ bool VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ Expr *Base = E->getBase();
+
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(E, nullptr);
+
+ return Visit(Base->IgnoreParenImpCasts());
+ }
+
+ bool VisitUnaryOperator(UnaryOperator *UO) {
+ if (SemaRef.getLangOpts().OpenMP < 50 || !UO->isLValue() ||
+ UO->getOpcode() != UO_Deref) {
+ emitErrorMsg();
+ return false;
}
+ if (!RelevantExpr) {
+ // Record the component if haven't found base decl.
+ Components.emplace_back(UO, nullptr);
+ }
+ return RelevantExpr || Visit(UO->getSubExpr()->IgnoreParenImpCasts());
}
+ bool VisitBinaryOperator(BinaryOperator *BO) {
+ if (SemaRef.getLangOpts().OpenMP < 50 || !BO->getType()->isPointerType()) {
+ emitErrorMsg();
+ return false;
+ }
+
+    // Pointer arithmetic is the only thing we expect to happen here, so after
+    // we make sure the binary operator is of pointer type, the only thing we
+    // need to do is visit the subtree that has the same type as the root (so
+    // that we know the other subtree is just an offset).
+ Expr *LE = BO->getLHS()->IgnoreParenImpCasts();
+ Expr *RE = BO->getRHS()->IgnoreParenImpCasts();
+ Components.emplace_back(BO, nullptr);
+ assert((LE->getType().getTypePtr() == BO->getType().getTypePtr() ||
+ RE->getType().getTypePtr() == BO->getType().getTypePtr()) &&
+ "Either LHS or RHS have base decl inside");
+ if (BO->getType().getTypePtr() == LE->getType().getTypePtr())
+ return RelevantExpr || Visit(LE);
+ return RelevantExpr || Visit(RE);
+ }
+ bool VisitCXXThisExpr(CXXThisExpr *CTE) {
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = CTE;
+ Components.emplace_back(CTE, nullptr);
+ return true;
+ }
+ bool VisitStmt(Stmt *) {
+ emitErrorMsg();
+ return false;
+ }
+ const Expr *getFoundBase() const {
+ return RelevantExpr;
+ }
+ explicit MapBaseChecker(
+ Sema &SemaRef, OpenMPClauseKind CKind,
+ OMPClauseMappableExprCommon::MappableExprComponentList &Components,
+ bool NoDiagnose, SourceLocation &ELoc, SourceRange &ERange)
+ : SemaRef(SemaRef), CKind(CKind), Components(Components),
+ NoDiagnose(NoDiagnose), ELoc(ELoc), ERange(ERange) {}
+};
+} // namespace
- return RelevantExpr;
+/// Return the expression of the base of the mappable expression or null if it
+/// cannot be determined and do all the necessary checks to see if the
+/// expression is valid as a standalone mappable expression. In the process,
+/// record all the components of the expression.
+static const Expr *checkMapClauseExpressionBase(
+ Sema &SemaRef, Expr *E,
+ OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
+ OpenMPClauseKind CKind, bool NoDiagnose) {
+ SourceLocation ELoc = E->getExprLoc();
+ SourceRange ERange = E->getSourceRange();
+ MapBaseChecker Checker(SemaRef, CKind, CurComponents, NoDiagnose, ELoc,
+ ERange);
+ if (Checker.Visit(E->IgnoreParens()))
+ return Checker.getFoundBase();
+ return nullptr;
}
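
The following sketch mirrors the shapes of list items the rewritten MapBaseChecker is meant to accept or reject; the struct layout follows the comment above and is illustrative only, and the pointer-arithmetic form relies on the new OpenMP 5.0 UnaryOperator/BinaryOperator handling.

  struct Bla { double Arr[16]; int x; };
  struct SS  { struct Bla ArrS[8]; };

  void map_examples(struct SS r, int *p) {
    // OK: only the rightmost symbol of the reference is an array section.
    #pragma omp target map(r.ArrS[3].Arr[6:7])
    {}
    // error: the section is not in the rightmost position, so the mapped
    // storage is not contiguous.
    #pragma omp target map(r.ArrS[3:5].x)
    {}
    // Intended to be accepted since OpenMP 5.0: an lvalue formed from a
    // dereference over pointer arithmetic.
    #pragma omp target map(*(p + 4))
    {}
  }
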
// Return true if expression E associated with value VD has conflicts with other
@@ -15520,9 +16994,11 @@ static bool checkMapConflicts(
// variable in map clauses of the same construct.
if (CurrentRegionOnly &&
(isa<ArraySubscriptExpr>(CI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(CI->getAssociatedExpression())) &&
+ isa<OMPArraySectionExpr>(CI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(CI->getAssociatedExpression())) &&
(isa<ArraySubscriptExpr>(SI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(SI->getAssociatedExpression()))) {
+ isa<OMPArraySectionExpr>(SI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(SI->getAssociatedExpression()))) {
SemaRef.Diag(CI->getAssociatedExpression()->getExprLoc(),
diag::err_omp_multiple_array_items_in_map_clause)
<< CI->getAssociatedExpression()->getSourceRange();
@@ -15554,6 +17030,9 @@ static bool checkMapConflicts(
const Expr *E = OASE->getBase()->IgnoreParenImpCasts();
Type =
OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ } else if (const auto *OASE = dyn_cast<OMPArrayShapingExpr>(
+ SI->getAssociatedExpression())) {
+ Type = OASE->getBase()->getType()->getPointeeType();
}
if (Type.isNull() || Type->isAnyPointerType() ||
checkArrayExpressionDoesNotReferToWholeSize(
@@ -15916,10 +17395,15 @@ static void checkMappableExpressionList(
Expr *SimpleExpr = RE->IgnoreParenCasts();
- if (!RE->IgnoreParenImpCasts()->isLValue()) {
- SemaRef.Diag(ELoc,
- diag::err_omp_expected_named_var_member_or_array_expression)
- << RE->getSourceRange();
+ if (!RE->isLValue()) {
+ if (SemaRef.getLangOpts().OpenMP < 50) {
+ SemaRef.Diag(
+ ELoc, diag::err_omp_expected_named_var_member_or_array_expression)
+ << RE->getSourceRange();
+ } else {
+ SemaRef.Diag(ELoc, diag::err_omp_non_lvalue_in_map_or_motion_clauses)
+ << getOpenMPClauseName(CKind) << RE->getSourceRange();
+ }
continue;
}
@@ -16011,6 +17495,7 @@ static void checkMappableExpressionList(
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(VE->IgnoreParens());
auto *OASE = dyn_cast<OMPArraySectionExpr>(VE->IgnoreParens());
+ auto *OAShE = dyn_cast<OMPArrayShapingExpr>(VE->IgnoreParens());
if (ASE) {
Type = ASE->getType().getNonReferenceType();
} else if (OASE) {
@@ -16021,6 +17506,8 @@ static void checkMappableExpressionList(
else
Type = BaseType->getPointeeType();
Type = Type.getNonReferenceType();
+ } else if (OAShE) {
+ Type = OAShE->getBase()->getType()->getPointeeType();
} else {
Type = VE->getType();
}
@@ -16064,6 +17551,21 @@ static void checkMappableExpressionList(
continue;
}
+ // target, target data
+ // OpenMP 5.0 [2.12.2, Restrictions, p. 163]
+ // OpenMP 5.0 [2.12.5, Restrictions, p. 174]
+ // A map-type in a map clause must be to, from, tofrom or alloc
+ if ((DKind == OMPD_target_data ||
+ isOpenMPTargetExecutionDirective(DKind)) &&
+ !(MapType == OMPC_MAP_to || MapType == OMPC_MAP_from ||
+ MapType == OMPC_MAP_tofrom || MapType == OMPC_MAP_alloc)) {
+ SemaRef.Diag(StartLoc, diag::err_omp_invalid_map_type_for_directive)
+ << (IsMapTypeImplicit ? 1 : 0)
+ << getOpenMPSimpleClauseTypeName(OMPC_map, MapType)
+ << getOpenMPDirectiveName(DKind);
+ continue;
+ }
+
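
In source form, the new map-type restriction above behaves like this sketch (names illustrative); release stays valid on the unmapping directives, which are not covered by the check.

  void map_type_examples(int *p, int n) {
    // OK: to, from, tofrom and alloc are allowed on target and target data.
    #pragma omp target data map(tofrom : p[0:n])
    {}
    // error: 'release' is not a valid map-type for a target construct.
    #pragma omp target map(release : p[0:n])
    {}
    // OK: 'release' remains valid on target exit data.
    #pragma omp target exit data map(release : p[0:n])
  }
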
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
@@ -16124,7 +17626,7 @@ OMPClause *Sema::ActOnOpenMPMapClause(
OpenMPMapModifierKind Modifiers[] = {OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown};
- SourceLocation ModifiersLoc[OMPMapClause::NumberOfModifiers];
+ SourceLocation ModifiersLoc[NumberOfOMPMapClauseModifiers];
// Process map-type-modifiers, flag errors for duplicate modifiers.
unsigned Count = 0;
@@ -16134,7 +17636,7 @@ OMPClause *Sema::ActOnOpenMPMapClause(
Diag(MapTypeModifiersLoc[I], diag::err_omp_duplicate_map_type_modifier);
continue;
}
- assert(Count < OMPMapClause::NumberOfModifiers &&
+ assert(Count < NumberOfOMPMapClauseModifiers &&
"Modifiers exceed the allowed number of map type modifiers");
Modifiers[Count] = MapTypeModifiers[I];
ModifiersLoc[Count] = MapTypeModifiersLoc[I];
@@ -16678,6 +18180,69 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc);
}
+/// Tries to find omp_event_handle_t type.
+static bool findOMPEventHandleT(Sema &S, SourceLocation Loc,
+ DSAStackTy *Stack) {
+ QualType OMPEventHandleT = Stack->getOMPEventHandleT();
+ if (!OMPEventHandleT.isNull())
+ return true;
+ IdentifierInfo *II = &S.PP.getIdentifierTable().get("omp_event_handle_t");
+ ParsedType PT = S.getTypeName(*II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_event_handle_t";
+ return false;
+ }
+ Stack->setOMPEventHandleT(PT.get());
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (!Evt->isValueDependent() && !Evt->isTypeDependent() &&
+ !Evt->isInstantiationDependent() &&
+ !Evt->containsUnexpandedParameterPack()) {
+ if (!findOMPEventHandleT(*this, Evt->getExprLoc(), DSAStack))
+ return nullptr;
+ // OpenMP 5.0, 2.10.1 task Construct.
+ // event-handle is a variable of the omp_event_handle_t type.
+ auto *Ref = dyn_cast<DeclRefExpr>(Evt->IgnoreParenImpCasts());
+ if (!Ref) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 0 << Evt->getSourceRange();
+ return nullptr;
+ }
+ auto *VD = dyn_cast_or_null<VarDecl>(Ref->getDecl());
+ if (!VD) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 0 << Evt->getSourceRange();
+ return nullptr;
+ }
+ if (!Context.hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(),
+ VD->getType()) ||
+ VD->getType().isConstant(Context)) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 1 << VD->getType()
+ << Evt->getSourceRange();
+ return nullptr;
+ }
+ // OpenMP 5.0, 2.10.1 task Construct
+ // [detach clause]... The event-handle will be considered as if it was
+ // specified on a firstprivate clause.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_firstprivate &&
+ DVar.RefExpr) {
+ Diag(Evt->getExprLoc(), diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_firstprivate);
+ reportOriginalDsa(*this, DSAStack, VD, DVar);
+ return nullptr;
+ }
+ }
+
+ return new (Context) OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+}
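
A sketch of what the detach-clause checks above accept and reject, assuming the omp_event_handle_t typedef from an OpenMP 5.0 <omp.h>; 'work' is an illustrative function, not part of the patch.

  #include <omp.h>

  void work(void);

  void detach_examples(void) {
    omp_event_handle_t ev;
    // OK: a mutable variable of omp_event_handle_t type; it is treated as if
    // it had appeared on a firstprivate clause.
    #pragma omp task detach(ev)
    work();

    int not_an_event = 0;
    // error: the event-handle must be a variable of omp_event_handle_t type.
    #pragma omp task detach(not_an_event)
    work();
  }
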
+
OMPClause *Sema::ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
@@ -16758,7 +18323,8 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
}
} else {
bool isDefaultmapModifier = (M != OMPC_DEFAULTMAP_MODIFIER_unknown);
- bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown);
+ bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown) ||
+ (LangOpts.OpenMP >= 50 && KindLoc.isInvalid());
if (!isDefaultmapKind || !isDefaultmapModifier) {
std::string ModifierValue = "'alloc', 'from', 'to', 'tofrom', "
"'firstprivate', 'none', 'default'";
@@ -16786,7 +18352,14 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
return nullptr;
}
}
- DSAStack->setDefaultDMAAttr(M, Kind, StartLoc);
+ if (Kind == OMPC_DEFAULTMAP_unknown) {
+ // Variable category is not specified - mark all categories.
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_aggregate, StartLoc);
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_scalar, StartLoc);
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_pointer, StartLoc);
+ } else {
+ DSAStack->setDefaultDMAAttr(M, Kind, StartLoc);
+ }
return new (Context)
OMPDefaultmapClause(StartLoc, LParenLoc, MLoc, KindLoc, EndLoc, Kind, M);
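
A sketch of the defaultmap forms this change enables under -fopenmp-version=50; omitting the variable-category now applies the implicit-behavior to scalar, aggregate and pointer variables alike (names illustrative).

  void defaultmap_examples(int s, int *p) {
    // OpenMP 5.0: no variable-category, applies to all categories.
    #pragma omp target defaultmap(firstprivate)
    { s += p[0]; }

    // Explicit category, the only form OpenMP 4.5 accepted.
    #pragma omp target defaultmap(tofrom : scalar)
    { s += 1; }
  }
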
@@ -16955,15 +18528,6 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
Diag(FD->getLocation(), diag::note_defined_here) << FD;
return;
}
- // Mark the function as must be emitted for the device.
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && Res.hasValue() && IdLoc.isValid() &&
- *DevTy != OMPDeclareTargetDeclAttr::DT_Host)
- checkOpenMPDeviceFunction(IdLoc, FD, /*CheckForDelayedContext=*/false);
- if (!LangOpts.OpenMPIsDevice && Res.hasValue() && IdLoc.isValid() &&
- *DevTy != OMPDeclareTargetDeclAttr::DT_NoHost)
- checkOpenMPHostFunction(IdLoc, FD, /*CheckCaller=*/false);
}
if (auto *VD = dyn_cast<ValueDecl>(D)) {
// Problem if any with var declared with incomplete type will be reported
@@ -17109,6 +18673,58 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
+OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ MappableVarListInfo MVLI(VarList);
+
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP use_device_addr clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second) {
+ // It will be analyzed later.
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+ auto *VD = dyn_cast<VarDecl>(D);
+
+ // If required, build a capture to implement the privatization initialized
+ // with the current list item value.
+ DeclRefExpr *Ref = nullptr;
+ if (!VD)
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
+
+    // We need to add a data sharing attribute for this variable to make sure
+    // it is correctly captured. A variable that shows up in a use_device_addr
+    // clause has similar properties to a firstprivate variable.
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component.
+ MVLI.VarBaseDeclarations.push_back(D);
+ MVLI.VarComponents.emplace_back();
+ Expr *Component = SimpleRefExpr;
+ if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+ isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
+ Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
+ MVLI.VarComponents.back().push_back(
+ OMPClauseMappableExprCommon::MappableComponent(Component, D));
+ }
+
+ if (MVLI.ProcessedVarList.empty())
+ return nullptr;
+
+ return OMPUseDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
+}
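
A sketch of the new use_device_addr clause handled above (OpenMP 5.0); 'launch' stands in for arbitrary code that consumes a device address and is not part of the patch.

  extern void launch(int *device_addr, int n);

  void use_device_addr_example(int n) {
    int a[1024];
    #pragma omp target data map(tofrom : a) use_device_addr(a)
    {
      // Inside the region 'a' refers to the corresponding device storage,
      // so its address can be handed to device-facing APIs directly.
      launch(&a[0], n);
    }
  }
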
+
OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
@@ -17248,6 +18864,8 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
if (Vars.empty())
return nullptr;
+ if (Allocator)
+ DSAStack->addInnerAllocatorExpr(Allocator);
return OMPAllocateClause::Create(Context, StartLoc, LParenLoc, Allocator,
ColonLoc, EndLoc, Vars);
}
@@ -17290,3 +18908,266 @@ OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
return OMPNontemporalClause::Create(Context, StartLoc, LParenLoc, EndLoc,
Vars);
}
+
+OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP inclusive clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second)
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+
+ const DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/true);
+ // OpenMP 5.0, 2.9.6, scan Directive, Restrictions.
+ // A list item that appears in the inclusive or exclusive clause must appear
+ // in a reduction clause with the inscan modifier on the enclosing
+ // worksharing-loop, worksharing-loop SIMD, or simd construct.
+ if (DVar.CKind != OMPC_reduction ||
+ DVar.Modifier != OMPC_REDUCTION_inscan)
+ Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
+ << RefExpr->getSourceRange();
+
+ if (DSAStack->getParentDirective() != OMPD_unknown)
+ DSAStack->markDeclAsUsedInScanDirective(D);
+ Vars.push_back(RefExpr);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPInclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
+
+OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *, 8> Vars;
+ for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP exclusive clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second)
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+
+ OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+ DSAStackTy::DSAVarData DVar;
+ if (ParentDirective != OMPD_unknown)
+ DVar = DSAStack->getTopDSA(D, /*FromParent=*/true);
+ // OpenMP 5.0, 2.9.6, scan Directive, Restrictions.
+ // A list item that appears in the inclusive or exclusive clause must appear
+ // in a reduction clause with the inscan modifier on the enclosing
+ // worksharing-loop, worksharing-loop SIMD, or simd construct.
+ if (ParentDirective == OMPD_unknown || DVar.CKind != OMPC_reduction ||
+ DVar.Modifier != OMPC_REDUCTION_inscan) {
+ Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
+ << RefExpr->getSourceRange();
+ } else {
+ DSAStack->markDeclAsUsedInScanDirective(D);
+ }
+ Vars.push_back(RefExpr);
+ }
+
+ if (Vars.empty())
+ return nullptr;
+
+ return OMPExclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
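
The inclusive/exclusive checks above expect the OpenMP 5.0 scan pattern, where the list item also appears in an inscan reduction on the enclosing loop; a minimal inclusive prefix-sum sketch (names illustrative):

  void prefix_sum(const int *in, int *out, int n) {
    int sum = 0;
    #pragma omp simd reduction(inscan, + : sum)
    for (int i = 0; i < n; ++i) {
      sum += in[i];
      #pragma omp scan inclusive(sum)
      out[i] = sum;
    }
  }
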
+
+/// Tries to find omp_alloctrait_t type.
+static bool findOMPAlloctraitT(Sema &S, SourceLocation Loc, DSAStackTy *Stack) {
+ QualType OMPAlloctraitT = Stack->getOMPAlloctraitT();
+ if (!OMPAlloctraitT.isNull())
+ return true;
+ IdentifierInfo &II = S.PP.getIdentifierTable().get("omp_alloctrait_t");
+ ParsedType PT = S.getTypeName(II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_alloctrait_t";
+ return false;
+ }
+ Stack->setOMPAlloctraitT(PT.get());
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<UsesAllocatorsData> Data) {
+ // OpenMP [2.12.5, target Construct]
+ // allocator is an identifier of omp_allocator_handle_t type.
+ if (!findOMPAllocatorHandleT(*this, StartLoc, DSAStack))
+ return nullptr;
+ // OpenMP [2.12.5, target Construct]
+ // allocator-traits-array is an identifier of const omp_alloctrait_t * type.
+ if (llvm::any_of(
+ Data,
+ [](const UsesAllocatorsData &D) { return D.AllocatorTraits; }) &&
+ !findOMPAlloctraitT(*this, StartLoc, DSAStack))
+ return nullptr;
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> PredefinedAllocators;
+ for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
+ StringRef Allocator =
+ OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
+ DeclarationName AllocatorName = &Context.Idents.get(Allocator);
+ PredefinedAllocators.insert(LookupSingleName(
+ TUScope, AllocatorName, StartLoc, Sema::LookupAnyName));
+ }
+
+ SmallVector<OMPUsesAllocatorsClause::Data, 4> NewData;
+ for (const UsesAllocatorsData &D : Data) {
+ Expr *AllocatorExpr = nullptr;
+ // Check allocator expression.
+ if (D.Allocator->isTypeDependent()) {
+ AllocatorExpr = D.Allocator;
+ } else {
+ // Traits were specified - need to assign new allocator to the specified
+ // allocator, so it must be an lvalue.
+ AllocatorExpr = D.Allocator->IgnoreParenImpCasts();
+ auto *DRE = dyn_cast<DeclRefExpr>(AllocatorExpr);
+ bool IsPredefinedAllocator = false;
+ if (DRE)
+ IsPredefinedAllocator = PredefinedAllocators.count(DRE->getDecl());
+ if (!DRE ||
+ !(Context.hasSameUnqualifiedType(
+ AllocatorExpr->getType(), DSAStack->getOMPAllocatorHandleT()) ||
+ Context.typesAreCompatible(AllocatorExpr->getType(),
+ DSAStack->getOMPAllocatorHandleT(),
+ /*CompareUnqualified=*/true)) ||
+ (!IsPredefinedAllocator &&
+ (AllocatorExpr->getType().isConstant(Context) ||
+ !AllocatorExpr->isLValue()))) {
+ Diag(D.Allocator->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_allocator_handle_t" << (DRE ? 1 : 0)
+ << AllocatorExpr->getType() << D.Allocator->getSourceRange();
+ continue;
+ }
+ // OpenMP [2.12.5, target Construct]
+ // Predefined allocators appearing in a uses_allocators clause cannot have
+ // traits specified.
+ if (IsPredefinedAllocator && D.AllocatorTraits) {
+ Diag(D.AllocatorTraits->getExprLoc(),
+ diag::err_omp_predefined_allocator_with_traits)
+ << D.AllocatorTraits->getSourceRange();
+ Diag(D.Allocator->getExprLoc(), diag::note_omp_predefined_allocator)
+ << cast<NamedDecl>(DRE->getDecl())->getName()
+ << D.Allocator->getSourceRange();
+ continue;
+ }
+ // OpenMP [2.12.5, target Construct]
+ // Non-predefined allocators appearing in a uses_allocators clause must
+ // have traits specified.
+ if (!IsPredefinedAllocator && !D.AllocatorTraits) {
+ Diag(D.Allocator->getExprLoc(),
+ diag::err_omp_nonpredefined_allocator_without_traits);
+ continue;
+ }
+ // No allocator traits - just convert it to rvalue.
+ if (!D.AllocatorTraits)
+ AllocatorExpr = DefaultLvalueConversion(AllocatorExpr).get();
+ DSAStack->addUsesAllocatorsDecl(
+ DRE->getDecl(),
+ IsPredefinedAllocator
+ ? DSAStackTy::UsesAllocatorsDeclKind::PredefinedAllocator
+ : DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator);
+ }
+ Expr *AllocatorTraitsExpr = nullptr;
+ if (D.AllocatorTraits) {
+ if (D.AllocatorTraits->isTypeDependent()) {
+ AllocatorTraitsExpr = D.AllocatorTraits;
+ } else {
+ // OpenMP [2.12.5, target Construct]
+ // Arrays that contain allocator traits that appear in a uses_allocators
+ // clause must be constant arrays, have constant values and be defined
+ // in the same scope as the construct in which the clause appears.
+ AllocatorTraitsExpr = D.AllocatorTraits->IgnoreParenImpCasts();
+ // Check that traits expr is a constant array.
+ QualType TraitTy;
+ if (const ArrayType *Ty =
+ AllocatorTraitsExpr->getType()->getAsArrayTypeUnsafe())
+ if (const auto *ConstArrayTy = dyn_cast<ConstantArrayType>(Ty))
+ TraitTy = ConstArrayTy->getElementType();
+ if (TraitTy.isNull() ||
+ !(Context.hasSameUnqualifiedType(TraitTy,
+ DSAStack->getOMPAlloctraitT()) ||
+ Context.typesAreCompatible(TraitTy, DSAStack->getOMPAlloctraitT(),
+ /*CompareUnqualified=*/true))) {
+ Diag(D.AllocatorTraits->getExprLoc(),
+ diag::err_omp_expected_array_alloctraits)
+ << AllocatorTraitsExpr->getType();
+ continue;
+ }
+      // Do not map allocator traits by default if it is a standalone
+      // variable.
+ if (auto *DRE = dyn_cast<DeclRefExpr>(AllocatorTraitsExpr))
+ DSAStack->addUsesAllocatorsDecl(
+ DRE->getDecl(),
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait);
+ }
+ }
+ OMPUsesAllocatorsClause::Data &NewD = NewData.emplace_back();
+ NewD.Allocator = AllocatorExpr;
+ NewD.AllocatorTraits = AllocatorTraitsExpr;
+ NewD.LParenLoc = D.LParenLoc;
+ NewD.RParenLoc = D.RParenLoc;
+ }
+ return OMPUsesAllocatorsClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ NewData);
+}
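
A sketch of the uses_allocators forms validated above, assuming the omp_allocator_handle_t, omp_alloctrait_t, omp_alloc and omp_free declarations from an OpenMP 5.0 <omp.h>; 'my_alloc' and 'my_traits' are illustrative.

  #include <omp.h>

  void uses_allocators_example(void) {
    const omp_alloctrait_t my_traits[] = {{omp_atk_alignment, 64}};
    omp_allocator_handle_t my_alloc;

    // A predefined allocator may not take traits; a user-defined allocator
    // must be given a constant omp_alloctrait_t array.
    #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
    {
      int *p = (int *)omp_alloc(64 * sizeof(int), my_alloc);
      omp_free(p, my_alloc);
    }
  }
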
+
+OMPClause *Sema::ActOnOpenMPAffinityClause(
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators) {
+ SmallVector<Expr *, 8> Vars;
+ for (Expr *RefExpr : Locators) {
+    assert(RefExpr && "NULL expr in OpenMP affinity clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr) || RefExpr->isTypeDependent()) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ Expr *SimpleExpr = RefExpr->IgnoreParenImpCasts();
+
+ if (!SimpleExpr->isLValue()) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << 1 << 0 << RefExpr->getSourceRange();
+ continue;
+ }
+
+ ExprResult Res;
+ {
+ Sema::TentativeAnalysisScope Trap(*this);
+ Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr);
+ }
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << 1 << 0 << RefExpr->getSourceRange();
+ continue;
+ }
+ Vars.push_back(SimpleExpr);
+ }
+
+ return OMPAffinityClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
+ EndLoc, Modifier, Vars);
+}
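
A sketch of the affinity clause (OpenMP 5.0) accepted by the handler above; list items must be addressable lvalues or array items, and 'process' is an illustrative function.

  extern void process(int *a, int n);

  void affinity_example(int *a, int n) {
    // Hint that the task should execute close to the storage of a[0:n].
    #pragma omp task affinity(a[0:n])
    process(a, n);
  }
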
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index 814586251bdd..8635397f4806 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Overload.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -21,9 +21,11 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
@@ -38,6 +40,8 @@
using namespace clang;
using namespace sema;
+using AllowedExplicit = Sema::AllowedExplicit;
+
static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
return llvm::any_of(FD->parameters(), [](const ParmVarDecl *P) {
return P->hasAttr<PassObjectSizeAttr>();
@@ -91,10 +95,9 @@ static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence& User,
OverloadCandidateSet& Conversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool AllowObjCConversionOnExplicit);
-
static ImplicitConversionSequence::CompareKind
CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
@@ -229,7 +232,6 @@ bool StandardConversionSequence::isPointerConversionToBool() const {
getFromType()->isMemberPointerType() ||
getFromType()->isObjCObjectPointerType() ||
getFromType()->isBlockPointerType() ||
- getFromType()->isNullPtrType() ||
First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
return true;
@@ -327,9 +329,8 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
goto FloatingIntegralConversion;
if (FromType->isIntegralOrUnscopedEnumerationType())
goto IntegralConversion;
- // Boolean conversions can be from pointers and pointers to members
- // [conv.bool], and those aren't considered narrowing conversions.
- return NK_Not_Narrowing;
+ // -- from a pointer type or pointer-to-member type to bool, or
+ return NK_Type_Narrowing;
// -- from a floating-point type to an integer type, or
//
@@ -1317,7 +1318,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
static ImplicitConversionSequence
TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
@@ -1420,7 +1421,7 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
static ImplicitConversionSequence
TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
@@ -1475,13 +1476,12 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
ImplicitConversionSequence
Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
- return ::TryImplicitConversion(*this, From, ToType,
- SuppressUserConversions, AllowExplicit,
- InOverloadResolution, CStyle,
+ return ::TryImplicitConversion(*this, From, ToType, SuppressUserConversions,
+ AllowExplicit, InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
@@ -1514,10 +1514,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From->getType(), From);
ICS = ::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
- AllowExplicit,
+ AllowExplicit ? AllowedExplicit::All
+ : AllowedExplicit::None,
/*InOverloadResolution=*/false,
- /*CStyle=*/false,
- AllowObjCWritebackConversion,
+ /*CStyle=*/false, AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
@@ -1653,9 +1653,13 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
// same size
+ // 3)the destination type does not have the ARM MVE strict-polymorphism
+ // attribute, which inhibits lax vector conversion for overload resolution
+ // only
if (ToType->isVectorType() && FromType->isVectorType()) {
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
- S.isLaxVectorConversion(FromType, ToType)) {
+ (S.isLaxVectorConversion(FromType, ToType) &&
+ !ToType->hasAttr(attr::ArmMveStrictPolymorphism))) {
ICK = ICK_Vector_Conversion;
return true;
}
@@ -1844,8 +1848,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
(FromType->isArithmeticType() ||
FromType->isAnyPointerType() ||
FromType->isBlockPointerType() ||
- FromType->isMemberPointerType() ||
- FromType->isNullPtrType())) {
+ FromType->isMemberPointerType())) {
// Boolean conversions (C++ 4.12).
SCS.Second = ICK_Boolean_Conversion;
FromType = S.Context.BoolTy;
@@ -1867,6 +1870,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// FIXME: disable conversions between long double and __float128 if
// their representation is different until there is back end support
// We of course allow this conversion if long double is really double.
+
+ // Conversions between bfloat and other floats are not permitted.
+ if (FromType == S.Context.BFloat16Ty || ToType == S.Context.BFloat16Ty)
+ return false;
if (&S.Context.getFloatTypeSemantics(FromType) !=
&S.Context.getFloatTypeSemantics(ToType)) {
bool Float128AndLongDouble = ((FromType == S.Context.Float128Ty &&
@@ -1885,6 +1892,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
ToType->isIntegralType(S.Context)) ||
(FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
+ // Conversions between bfloat and int are not permitted.
+ if (FromType->isBFloat16Type() || ToType->isBFloat16Type())
+ return false;
+
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
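
The effect of the new bfloat checks above, sketched with Clang's __bf16 spelling on targets that provide the type; the commented-out lines are the conversions these checks now refuse to form.

  __bf16 bfloat_examples(__bf16 b, float f) {
    // float g = b;   // error: no conversion between __bf16 and other floats
    // __bf16 c = f;  // error: likewise in the other direction
    // int    j = b;  // error: no floating-integral conversion for __bf16
    return b;         // OK: identity
  }
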
@@ -3000,13 +3011,13 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
// We must have a derived-to-base conversion. Check an
// ambiguous or inaccessible conversion.
unsigned InaccessibleID = 0;
- unsigned AmbigiousID = 0;
+ unsigned AmbiguousID = 0;
if (Diagnose) {
InaccessibleID = diag::err_upcast_to_inaccessible_base;
- AmbigiousID = diag::err_ambiguous_derived_to_base_conv;
+ AmbiguousID = diag::err_ambiguous_derived_to_base_conv;
}
if (CheckDerivedToBaseConversion(
- FromPointeeType, ToPointeeType, InaccessibleID, AmbigiousID,
+ FromPointeeType, ToPointeeType, InaccessibleID, AmbiguousID,
From->getExprLoc(), From->getSourceRange(), DeclarationName(),
&BasePath, IgnoreBaseAccess))
return true;
@@ -3397,9 +3408,10 @@ static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence &User,
OverloadCandidateSet &CandidateSet,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool AllowObjCConversionOnExplicit) {
- assert(AllowExplicit || !AllowObjCConversionOnExplicit);
+ assert(AllowExplicit != AllowedExplicit::None ||
+ !AllowObjCConversionOnExplicit);
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
// Whether we will only visit constructors.
@@ -3432,7 +3444,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
// But first, see if there is an init-list-constructor that will work.
OverloadingResult Result = IsInitializerListConstructorConversion(
- S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
+ S, From, ToType, ToRecordDecl, User, CandidateSet,
+ AllowExplicit == AllowedExplicit::All);
if (Result != OR_No_Viable_Function)
return Result;
// Never mind.
@@ -3471,14 +3484,16 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
+ /*PartialOverloading*/ false,
+ AllowExplicit == AllowedExplicit::All);
else
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via an static cast (c-style, etc).
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
+ /*PartialOverloading*/ false,
+ AllowExplicit == AllowedExplicit::All);
}
}
}
@@ -3511,11 +3526,12 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (ConvTemplate)
S.AddTemplateConversionCandidate(
ConvTemplate, FoundDecl, ActingContext, From, ToType,
- CandidateSet, AllowObjCConversionOnExplicit, AllowExplicit);
+ CandidateSet, AllowObjCConversionOnExplicit,
+ AllowExplicit != AllowedExplicit::None);
else
- S.AddConversionCandidate(
- Conv, FoundDecl, ActingContext, From, ToType, CandidateSet,
- AllowObjCConversionOnExplicit, AllowExplicit);
+ S.AddConversionCandidate(Conv, FoundDecl, ActingContext, From, ToType,
+ CandidateSet, AllowObjCConversionOnExplicit,
+ AllowExplicit != AllowedExplicit::None);
}
}
}
@@ -3601,7 +3617,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
OverloadCandidateSet::CSK_Normal);
OverloadingResult OvResult =
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
- CandidateSet, false, false);
+ CandidateSet, AllowedExplicit::None, false);
if (!(OvResult == OR_Ambiguous ||
(OvResult == OR_No_Viable_Function && !CandidateSet.empty())))
@@ -4862,7 +4878,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// cv-qualification is subsumed by the initialization itself
// and does not constitute a conversion.
ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
@@ -5031,7 +5047,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
if (ToType->isRecordType() && !ToType->isAggregateType()) {
// This function can deal with initializer lists.
return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
InOverloadResolution, /*CStyle=*/false,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
@@ -5183,7 +5199,7 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
InOverloadResolution,
/*CStyle=*/false,
AllowObjCWritebackConversion,
@@ -5429,9 +5445,20 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
/// expression From to bool (C++0x [conv]p3).
static ImplicitConversionSequence
TryContextuallyConvertToBool(Sema &S, Expr *From) {
+ // C++ [dcl.init]/17.8:
+ // - Otherwise, if the initialization is direct-initialization, the source
+ // type is std::nullptr_t, and the destination type is bool, the initial
+ // value of the object being initialized is false.
+ if (From->getType()->isNullPtrType())
+ return ImplicitConversionSequence::getNullptrToBool(From->getType(),
+ S.Context.BoolTy,
+ From->isGLValue());
+
+ // All other direct-initialization of bool is equivalent to an implicit
+ // conversion to bool in which explicit conversions are permitted.
return TryImplicitConversion(S, From, S.Context.BoolTy,
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/true,
+ AllowedExplicit::Conversions,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
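
A sketch of the bool conversions affected here: direct-initialization of bool from std::nullptr_t now takes the dedicated nullptr-to-bool path and yields false, while the earlier getNarrowingKind change classifies pointer-to-bool as narrowing in list-initialization. The names are illustrative.

  void bool_examples(int *p) {
    bool b1(nullptr);          // OK: direct-init from nullptr, b1 is false
    if (nullptr) {}            // contextual conversion to bool uses the same rule
    bool b2{p};                // error: narrowing conversion in list-init
    bool b3 = (p != nullptr);  // fine: compare explicitly instead
    (void)b1; (void)b2; (void)b3;
  }
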
@@ -5703,7 +5730,7 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
= TryImplicitConversion(S, From, Ty,
// FIXME: Are these flags correct?
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/true,
+ AllowedExplicit::Conversions,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
@@ -6337,7 +6364,8 @@ void Sema::AddOverloadCandidate(
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Function, CandidateSet.getLocation(), Args)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -6443,11 +6471,10 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
return nullptr;
}
-static bool
-convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
- ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap,
- bool MissingImplicitThis, Expr *&ConvertedThis,
- SmallVectorImpl<Expr *> &ConvertedArgs) {
+static bool convertArgsForAvailabilityChecks(
+ Sema &S, FunctionDecl *Function, Expr *ThisArg, SourceLocation CallLoc,
+ ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap, bool MissingImplicitThis,
+ Expr *&ConvertedThis, SmallVectorImpl<Expr *> &ConvertedArgs) {
if (ThisArg) {
CXXMethodDecl *Method = cast<CXXMethodDecl>(Function);
assert(!isa<CXXConstructorDecl>(Method) &&
@@ -6492,17 +6519,9 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
ParmVarDecl *P = Function->getParamDecl(i);
- Expr *DefArg = P->hasUninstantiatedDefaultArg()
- ? P->getUninstantiatedDefaultArg()
- : P->getDefaultArg();
- // This can only happen in code completion, i.e. when PartialOverloading
- // is true.
- if (!DefArg)
+ if (!P->hasDefaultArg())
return false;
- ExprResult R =
- S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
- S.Context, Function->getParamDecl(i)),
- SourceLocation(), DefArg);
+ ExprResult R = S.BuildCXXDefaultArgExpr(CallLoc, Function, P);
if (R.isInvalid())
return false;
ConvertedArgs.push_back(R.get());
@@ -6514,7 +6533,9 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
return true;
}
-EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
+EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function,
+ SourceLocation CallLoc,
+ ArrayRef<Expr *> Args,
bool MissingImplicitThis) {
auto EnableIfAttrs = Function->specific_attrs<EnableIfAttr>();
if (EnableIfAttrs.begin() == EnableIfAttrs.end())
@@ -6525,7 +6546,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
// FIXME: We should look into making enable_if late-parsed.
Expr *DiscardedThis;
if (!convertArgsForAvailabilityChecks(
- *this, Function, /*ThisArg=*/nullptr, Args, Trap,
+ *this, Function, /*ThisArg=*/nullptr, CallLoc, Args, Trap,
/*MissingImplicitThis=*/true, DiscardedThis, ConvertedArgs))
return *EnableIfAttrs.begin();
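A small sketch of what this path evaluates, using the documented enable_if attribute (the function names are invented); the call location matters because a defaulted parameter referenced by the condition is built as a default-argument expression at the call site.

void log_at(int level, int verbosity = 1)
    __attribute__((enable_if(level <= verbosity, "suppressed at this verbosity")));

void user() {
  log_at(1);     // the default for 'verbosity' is materialized for this call
                 // before the condition 1 <= 1 is checked
  log_at(0, 3);  // 0 <= 3 also holds
  // log_at(5);  // would not be viable: 5 <= 1 fails the enable_if condition
}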
@@ -6855,7 +6876,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Method, Args, true)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Method, CandidateSet.getLocation(), Args, true)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7308,7 +7330,8 @@ void Sema::AddConversionCandidate(
"Can only end up with a standard conversion sequence or failure");
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7478,7 +7501,8 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7668,6 +7692,10 @@ class BuiltinCandidateTypeSet {
/// candidates.
TypeSet VectorTypes;
+ /// The set of matrix types that will be used in the built-in
+ /// candidates.
+ TypeSet MatrixTypes;
+
/// A flag indicating non-record types are viable candidates
bool HasNonRecordTypes;
@@ -7725,9 +7753,11 @@ public:
/// enumeration_end - Past the last enumeration type found;
iterator enumeration_end() { return EnumerationTypes.end(); }
- iterator vector_begin() { return VectorTypes.begin(); }
- iterator vector_end() { return VectorTypes.end(); }
+ llvm::iterator_range<iterator> vector_types() { return VectorTypes; }
+
+ llvm::iterator_range<iterator> matrix_types() { return MatrixTypes; }
+ bool containsMatrixType(QualType Ty) const { return MatrixTypes.count(Ty); }
bool hasNonRecordTypes() { return HasNonRecordTypes; }
bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
bool hasNullPtrType() const { return HasNullPtrType; }
@@ -7902,6 +7932,11 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
// extension.
HasArithmeticOrEnumeralTypes = true;
VectorTypes.insert(Ty);
+ } else if (Ty->isMatrixType()) {
+ // Similar to vector types, we treat matrix types as arithmetic types in
+ // many contexts as an extension.
+ HasArithmeticOrEnumeralTypes = true;
+ MatrixTypes.insert(Ty);
} else if (Ty->isNullPtrType()) {
HasNullPtrType = true;
} else if (AllowUserConversions && TyRec) {
@@ -8130,6 +8165,13 @@ class BuiltinOperatorOverloadBuilder {
}
+ /// Helper to add an overload candidate for a binary builtin with types \p L
+ /// and \p R.
+ void AddCandidate(QualType L, QualType R) {
+ QualType LandR[2] = {L, R};
+ S.AddBuiltinCandidate(LandR, Args, CandidateSet);
+ }
+
public:
BuiltinOperatorOverloadBuilder(
Sema &S, ArrayRef<Expr *> Args,
@@ -8257,13 +8299,8 @@ public:
}
// Extension: We also add these operators for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec = CandidateTypes[0].vector_begin(),
- VecEnd = CandidateTypes[0].vector_end();
- Vec != VecEnd; ++Vec) {
- QualType VecTy = *Vec;
+ for (QualType VecTy : CandidateTypes[0].vector_types())
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
- }
}
// C++ [over.built]p8:
@@ -8297,13 +8334,8 @@ public:
}
// Extension: We also add this operator for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec = CandidateTypes[0].vector_begin(),
- VecEnd = CandidateTypes[0].vector_end();
- Vec != VecEnd; ++Vec) {
- QualType VecTy = *Vec;
+ for (QualType VecTy : CandidateTypes[0].vector_types())
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
- }
}
// C++ [over.match.oper]p16:
@@ -8383,7 +8415,7 @@ public:
// We interpret "same parameter-type-list" as applying to the
// "synthesized candidate, with the order of the two parameters
// reversed", not to the original function.
- bool Reversed = C->RewriteKind & CRK_Reversed;
+ bool Reversed = C->isReversed();
QualType FirstParamType = C->Function->getParamDecl(Reversed ? 1 : 0)
->getType()
.getUnqualifiedType();
@@ -8534,17 +8566,31 @@ public:
// Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
// conditional operator for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec1 = CandidateTypes[0].vector_begin(),
- Vec1End = CandidateTypes[0].vector_end();
- Vec1 != Vec1End; ++Vec1) {
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes[1].vector_begin(),
- Vec2End = CandidateTypes[1].vector_end();
- Vec2 != Vec2End; ++Vec2) {
- QualType LandR[2] = { *Vec1, *Vec2 };
+ for (QualType Vec1Ty : CandidateTypes[0].vector_types())
+ for (QualType Vec2Ty : CandidateTypes[1].vector_types()) {
+ QualType LandR[2] = {Vec1Ty, Vec2Ty};
S.AddBuiltinCandidate(LandR, Args, CandidateSet);
}
+ }
+
+ /// Add binary operator overloads for each candidate matrix type M1, M2:
+ /// * (M1, M1) -> M1
+ /// * (M1, M1.getElementType()) -> M1
+ /// * (M2.getElementType(), M2) -> M2
+ /// * (M2, M2) -> M2 // Only if M2 is not part of CandidateTypes[0].
+ void addMatrixBinaryArithmeticOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (QualType M1 : CandidateTypes[0].matrix_types()) {
+ AddCandidate(M1, cast<MatrixType>(M1)->getElementType());
+ AddCandidate(M1, M1);
+ }
+
+ for (QualType M2 : CandidateTypes[1].matrix_types()) {
+ AddCandidate(cast<MatrixType>(M2)->getElementType(), M2);
+ if (!CandidateTypes[0].containsMatrixType(M2))
+ AddCandidate(M2, M2);
}
}
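Roughly, these candidates correspond to element-wise matrix/matrix and matrix/scalar arithmetic under Clang's -fenable-matrix extension; a minimal sketch with an invented typedef:

typedef float m3x3 __attribute__((matrix_type(3, 3)));

m3x3 blend(m3x3 a, m3x3 b, float s) {
  m3x3 sum = a + b;   // uses the (M1, M1) candidate
  return sum * s;     // uses the (M1, M1.getElementType()) candidate
}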
@@ -8805,30 +8851,23 @@ public:
}
// Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec1 = CandidateTypes[0].vector_begin(),
- Vec1End = CandidateTypes[0].vector_end();
- Vec1 != Vec1End; ++Vec1) {
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes[1].vector_begin(),
- Vec2End = CandidateTypes[1].vector_end();
- Vec2 != Vec2End; ++Vec2) {
+ for (QualType Vec1Ty : CandidateTypes[0].vector_types())
+ for (QualType Vec2Ty : CandidateTypes[1].vector_types()) {
QualType ParamTypes[2];
- ParamTypes[1] = *Vec2;
+ ParamTypes[1] = Vec2Ty;
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
+ ParamTypes[0] = S.Context.getLValueReferenceType(Vec1Ty);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = S.Context.getVolatileType(*Vec1);
+ ParamTypes[0] = S.Context.getVolatileType(Vec1Ty);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
}
- }
}
// C++ [over.built]p22:
@@ -9121,14 +9160,17 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
} else {
OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
OpBuilder.addGenericBinaryArithmeticOverloads();
+ OpBuilder.addMatrixBinaryArithmeticOverloads();
}
break;
case OO_Star: // '*' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryStarPointerOverloads();
- else
+ else {
OpBuilder.addGenericBinaryArithmeticOverloads();
+ OpBuilder.addMatrixBinaryArithmeticOverloads();
+ }
break;
case OO_Slash:
@@ -9355,16 +9397,22 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
return Comparison::Equal;
}
-static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
- const OverloadCandidate &Cand2) {
+static Comparison
+isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2) {
if (!Cand1.Function || !Cand1.Function->isMultiVersion() || !Cand2.Function ||
!Cand2.Function->isMultiVersion())
- return false;
+ return Comparison::Equal;
- // If Cand1 is invalid, it cannot be a better match, if Cand2 is invalid, this
- // is obviously better.
- if (Cand1.Function->isInvalidDecl()) return false;
- if (Cand2.Function->isInvalidDecl()) return true;
+ // If both are invalid, they are equal. If one of them is invalid, the other
+ // is better.
+ if (Cand1.Function->isInvalidDecl()) {
+ if (Cand2.Function->isInvalidDecl())
+ return Comparison::Equal;
+ return Comparison::Worse;
+ }
+ if (Cand2.Function->isInvalidDecl())
+ return Comparison::Better;
// If this is a cpu_dispatch/cpu_specific multiversion situation, prefer
// cpu_dispatch, else arbitrarily based on the identifiers.
@@ -9374,16 +9422,18 @@ static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
const auto *Cand2CPUSpec = Cand2.Function->getAttr<CPUSpecificAttr>();
if (!Cand1CPUDisp && !Cand2CPUDisp && !Cand1CPUSpec && !Cand2CPUSpec)
- return false;
+ return Comparison::Equal;
if (Cand1CPUDisp && !Cand2CPUDisp)
- return true;
+ return Comparison::Better;
if (Cand2CPUDisp && !Cand1CPUDisp)
- return false;
+ return Comparison::Worse;
if (Cand1CPUSpec && Cand2CPUSpec) {
if (Cand1CPUSpec->cpus_size() != Cand2CPUSpec->cpus_size())
- return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size();
+ return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size()
+ ? Comparison::Better
+ : Comparison::Worse;
std::pair<CPUSpecificAttr::cpus_iterator, CPUSpecificAttr::cpus_iterator>
FirstDiff = std::mismatch(
@@ -9396,11 +9446,56 @@ static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
assert(FirstDiff.first != Cand1CPUSpec->cpus_end() &&
"Two different cpu-specific versions should not have the same "
"identifier list, otherwise they'd be the same decl!");
- return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName();
+ return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName()
+ ? Comparison::Better
+ : Comparison::Worse;
}
llvm_unreachable("No way to get here unless both had cpu_dispatch");
}
+/// Compute the type of the implicit object parameter for the given function,
+/// if any. Returns None if there is no implicit object parameter, and a null
+/// QualType if there is a 'matches anything' implicit object parameter.
+static Optional<QualType> getImplicitObjectParamType(ASTContext &Context,
+ const FunctionDecl *F) {
+ if (!isa<CXXMethodDecl>(F) || isa<CXXConstructorDecl>(F))
+ return llvm::None;
+
+ auto *M = cast<CXXMethodDecl>(F);
+ // Static member functions' object parameters match all types.
+ if (M->isStatic())
+ return QualType();
+
+ QualType T = M->getThisObjectType();
+ if (M->getRefQualifier() == RQ_RValue)
+ return Context.getRValueReferenceType(T);
+ return Context.getLValueReferenceType(T);
+}
+
+static bool haveSameParameterTypes(ASTContext &Context, const FunctionDecl *F1,
+ const FunctionDecl *F2, unsigned NumParams) {
+ if (declaresSameEntity(F1, F2))
+ return true;
+
+ auto NextParam = [&](const FunctionDecl *F, unsigned &I, bool First) {
+ if (First) {
+ if (Optional<QualType> T = getImplicitObjectParamType(Context, F))
+ return *T;
+ }
+ assert(I < F->getNumParams());
+ return F->getParamDecl(I++)->getType();
+ };
+
+ unsigned I1 = 0, I2 = 0;
+ for (unsigned I = 0; I != NumParams; ++I) {
+ QualType T1 = NextParam(F1, I1, I == 0);
+ QualType T2 = NextParam(F2, I2, I == 0);
+ if (!T1.isNull() && !T2.isNull() && !Context.hasSameUnqualifiedType(T1, T2))
+ return false;
+ }
+ return true;
+}
+
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(
@@ -9468,18 +9563,20 @@ bool clang::isBetterOverloadCandidate(
break;
case ImplicitConversionSequence::Worse:
- if (Cand1.Function && Cand1.Function == Cand2.Function &&
- (Cand2.RewriteKind & CRK_Reversed) != 0) {
+ if (Cand1.Function && Cand2.Function &&
+ Cand1.isReversed() != Cand2.isReversed() &&
+ haveSameParameterTypes(S.Context, Cand1.Function, Cand2.Function,
+ NumArgs)) {
// Work around large-scale breakage caused by considering reversed
// forms of operator== in C++20:
//
- // When comparing a function against its reversed form, if we have a
- // better conversion for one argument and a worse conversion for the
- // other, we prefer the non-reversed form.
+ // When comparing a function against a reversed function with the same
+ // parameter types, if we have a better conversion for one argument and
+ // a worse conversion for the other, the implicit conversion sequences
+ // are treated as being equally good.
//
- // This prevents a conversion function from being considered ambiguous
- // with its own reversed form in various where it's only incidentally
- // heterogeneous.
+ // This prevents a comparison function from being considered ambiguous
+ // with a reversed form that is written in the same way.
//
// We diagnose this as an extension from CreateOverloadedBinOp.
HasWorseConversion = true;
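The workaround is aimed at C++20 code of roughly this shape, where a member operator== competes with its own reversed form (illustrative):

struct S {
  bool operator==(const S &);   // note: not const
};

bool same(S a, S b) {
  // Candidates are a.operator==(b) and the reversed b.operator==(a). Each has
  // the better conversion for one argument (S& versus const S&), so the call
  // is formally ambiguous; it is accepted as an extension and the
  // non-reversed form is chosen.
  return a == b;
}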
@@ -9497,10 +9594,8 @@ bool clang::isBetterOverloadCandidate(
// -- for some argument j, ICSj(F1) is a better conversion sequence than
// ICSj(F2), or, if not that,
- if (HasBetterConversion)
+ if (HasBetterConversion && !HasWorseConversion)
return true;
- if (HasWorseConversion)
- return false;
// -- the context is an initialization by user-defined conversion
// (see 8.5, 13.3.1.5) and the standard conversion sequence
@@ -9557,14 +9652,13 @@ bool clang::isBetterOverloadCandidate(
// according to the partial ordering rules described in 14.5.5.2, or,
// if not that,
if (Cand1IsSpecialization && Cand2IsSpecialization) {
- if (FunctionTemplateDecl *BetterTemplate
- = S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
- Cand2.Function->getPrimaryTemplate(),
- Loc,
- isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
- : TPOC_Call,
- Cand1.ExplicitCallArguments,
- Cand2.ExplicitCallArguments))
+ if (FunctionTemplateDecl *BetterTemplate = S.getMoreSpecializedTemplate(
+ Cand1.Function->getPrimaryTemplate(),
+ Cand2.Function->getPrimaryTemplate(), Loc,
+ isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
+ : TPOC_Call,
+ Cand1.ExplicitCallArguments, Cand2.ExplicitCallArguments,
+ Cand1.isReversed() ^ Cand2.isReversed()))
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
}
@@ -9661,7 +9755,8 @@ bool clang::isBetterOverloadCandidate(
if (HasPS1 != HasPS2 && HasPS1)
return true;
- return isBetterMultiversionCandidate(Cand1, Cand2);
+ Comparison MV = isBetterMultiversionCandidate(Cand1, Cand2);
+ return MV == Comparison::Better;
}
/// Determine whether two declarations are "equivalent" for the purposes of
@@ -11289,7 +11384,7 @@ CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
unsigned ConvIdx = 0;
unsigned ArgIdx = 0;
ArrayRef<QualType> ParamTypes;
- bool Reversed = Cand->RewriteKind & CRK_Reversed;
+ bool Reversed = Cand->isReversed();
if (Cand->IsSurrogate) {
QualType ConvType
@@ -12702,9 +12797,7 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// base classes.
CallExpr *CE = CallExpr::Create(Context, Fn, Args, Context.DependentTy,
VK_RValue, RParenLoc);
- CE->setTypeDependent(true);
- CE->setValueDependent(true);
- CE->setInstantiationDependent(true);
+ CE->markDependentForPostponedNameLookup();
*Result = CE;
return true;
}
@@ -12717,6 +12810,42 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
return false;
}
+// Guess at what the return type for an unresolvable overload should be.
+static QualType chooseRecoveryType(OverloadCandidateSet &CS,
+ OverloadCandidateSet::iterator *Best) {
+ llvm::Optional<QualType> Result;
+ // Adjust Type after seeing a candidate.
+ auto ConsiderCandidate = [&](const OverloadCandidate &Candidate) {
+ if (!Candidate.Function)
+ return;
+ QualType T = Candidate.Function->getReturnType();
+ if (T.isNull())
+ return;
+ if (!Result)
+ Result = T;
+ else if (Result != T)
+ Result = QualType();
+ };
+
+ // Look for an unambiguous type from a progressively larger subset.
+ // e.g. if types disagree, but all *viable* overloads return int, choose int.
+ //
+ // First, consider only the best candidate.
+ if (Best && *Best != CS.end())
+ ConsiderCandidate(**Best);
+ // Next, consider only viable candidates.
+ if (!Result)
+ for (const auto &C : CS)
+ if (C.Viable)
+ ConsiderCandidate(C);
+ // Finally, consider all candidates.
+ if (!Result)
+ for (const auto &C : CS)
+ ConsiderCandidate(C);
+
+ return Result.getValueOr(QualType());
+}
+
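A sketch of the heuristic from the caller's side (the overloads are invented): when no candidate is viable but all of them agree on the return type, the recovery expression still gets that type.

int pick(int);
int pick(double);

int use() {
  // No overload accepts a string literal, so resolution fails; every
  // candidate returns int, so the recovery expression is given type int and
  // later checks (such as this return statement) can still proceed.
  return pick("not a number");
}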
/// FinishOverloadedCallExpr - given an OverloadCandidateSet, builds and returns
/// the completed call expression. If overload resolution fails, emits
/// diagnostics and returns ExprError()
@@ -12806,8 +12935,11 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
}
}
- // Overload resolution failed.
- return ExprError();
+ // Overload resolution failed, try to recover.
+ SmallVector<Expr *, 8> SubExprs = {Fn};
+ SubExprs.append(Args.begin(), Args.end());
+ return SemaRef.CreateRecoveryExpr(Fn->getBeginLoc(), RParenLoc, SubExprs,
+ chooseRecoveryType(*CandidateSet, Best));
}
static void markUnaddressableCandidatesUnviable(Sema &S,
@@ -12907,8 +13039,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Input->isTypeDependent()) {
if (Fns.empty())
- return new (Context) UnaryOperator(Input, Opc, Context.DependentTy,
- VK_RValue, OK_Ordinary, OpLoc, false);
+ return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy,
+ VK_RValue, OK_Ordinary, OpLoc, false,
+ CurFPFeatureOverrides());
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
@@ -12916,7 +13049,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
/*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end());
return CXXOperatorCallExpr::Create(Context, Op, Fn, ArgsArray,
Context.DependentTy, VK_RValue, OpLoc,
- FPOptions());
+ CurFPFeatureOverrides());
}
// Build an empty overload set.
@@ -12990,7 +13123,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Args[0] = Input;
CallExpr *TheCall = CXXOperatorCallExpr::Create(
Context, Op, FnExpr.get(), ArgsArray, ResultTy, VK, OpLoc,
- FPOptions(), Best->IsADLCandidate);
+ CurFPFeatureOverrides(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
return ExprError();
@@ -12998,8 +13131,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (CheckFunctionCall(FnDecl, TheCall,
FnDecl->getType()->castAs<FunctionProtoType>()))
return ExprError();
-
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), FnDecl);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
@@ -13148,7 +13280,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
Expr *Args[2] = { LHS, RHS };
LHS=RHS=nullptr; // Please use only Args instead of LHS/RHS couple
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
AllowRewrittenCandidates = false;
OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
@@ -13160,14 +13292,13 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// If there are no functions to store, just build a dependent
// BinaryOperator or CompoundAssignment.
if (Opc <= BO_Assign || Opc > BO_OrAssign)
- return new (Context) BinaryOperator(
- Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary,
- OpLoc, FPFeatures);
-
- return new (Context) CompoundAssignOperator(
- Args[0], Args[1], Opc, Context.DependentTy, VK_LValue, OK_Ordinary,
- Context.DependentTy, Context.DependentTy, OpLoc,
- FPFeatures);
+ return BinaryOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_RValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides());
+ return CompoundAssignOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_LValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides(), Context.DependentTy,
+ Context.DependentTy);
}
// FIXME: save results of ADL from here?
@@ -13180,7 +13311,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
/*ADL*/ PerformADL, IsOverloaded(Fns), Fns.begin(), Fns.end());
return CXXOperatorCallExpr::Create(Context, Op, Fn, Args,
Context.DependentTy, VK_RValue, OpLoc,
- FPFeatures);
+ CurFPFeatureOverrides());
}
// Always do placeholder-like conversions on the RHS.
@@ -13224,7 +13355,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// We found a built-in operator or an overloaded operator.
FunctionDecl *FnDecl = Best->Function;
- bool IsReversed = (Best->RewriteKind & CRK_Reversed);
+ bool IsReversed = Best->isReversed();
if (IsReversed)
std::swap(Args[0], Args[1]);
@@ -13241,36 +13372,56 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// resolution for an operator@, its return type shall be cv bool
if (Best->RewriteKind && ChosenOp == OO_EqualEqual &&
!FnDecl->getReturnType()->isBooleanType()) {
- Diag(OpLoc, diag::err_ovl_rewrite_equalequal_not_bool)
+ bool IsExtension =
+ FnDecl->getReturnType()->isIntegralOrUnscopedEnumerationType();
+ Diag(OpLoc, IsExtension ? diag::ext_ovl_rewrite_equalequal_not_bool
+ : diag::err_ovl_rewrite_equalequal_not_bool)
<< FnDecl->getReturnType() << BinaryOperator::getOpcodeStr(Opc)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
Diag(FnDecl->getLocation(), diag::note_declared_at);
- return ExprError();
+ if (!IsExtension)
+ return ExprError();
}
if (AllowRewrittenCandidates && !IsReversed &&
- CandidateSet.getRewriteInfo().shouldAddReversed(ChosenOp)) {
- // We could have reversed this operator, but didn't. Check if the
+ CandidateSet.getRewriteInfo().isReversible()) {
+ // We could have reversed this operator, but didn't. Check if some
// reversed form was a viable candidate, and if so, if it had a
// better conversion for either parameter. If so, this call is
// formally ambiguous, and allowing it is an extension.
+ llvm::SmallVector<FunctionDecl*, 4> AmbiguousWith;
for (OverloadCandidate &Cand : CandidateSet) {
- if (Cand.Viable && Cand.Function == FnDecl &&
- Cand.RewriteKind & CRK_Reversed) {
+ if (Cand.Viable && Cand.Function && Cand.isReversed() &&
+ haveSameParameterTypes(Context, Cand.Function, FnDecl, 2)) {
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
if (CompareImplicitConversionSequences(
*this, OpLoc, Cand.Conversions[ArgIdx],
Best->Conversions[ArgIdx]) ==
ImplicitConversionSequence::Better) {
- Diag(OpLoc, diag::ext_ovl_ambiguous_oper_binary_reversed)
- << BinaryOperator::getOpcodeStr(Opc)
- << Args[0]->getType() << Args[1]->getType()
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- Diag(FnDecl->getLocation(),
- diag::note_ovl_ambiguous_oper_binary_reversed_candidate);
+ AmbiguousWith.push_back(Cand.Function);
+ break;
}
}
- break;
+ }
+ }
+
+ if (!AmbiguousWith.empty()) {
+ bool AmbiguousWithSelf =
+ AmbiguousWith.size() == 1 &&
+ declaresSameEntity(AmbiguousWith.front(), FnDecl);
+ Diag(OpLoc, diag::ext_ovl_ambiguous_oper_binary_reversed)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType() << Args[1]->getType() << AmbiguousWithSelf
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ if (AmbiguousWithSelf) {
+ Diag(FnDecl->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_reversed_self);
+ } else {
+ Diag(FnDecl->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_selected_candidate);
+ for (auto *F : AmbiguousWith)
+ Diag(F->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_reversed_candidate);
}
}
}
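The relaxed return-type check admits pre-C++20 comparison styles such as the following sketch, which previously produced a hard error when the rewritten form was selected:

struct Legacy {
  int operator==(const Legacy &) const;   // returns int rather than bool
};

bool differs(Legacy a, Legacy b) {
  return a != b;   // C++20 rewrites this to !(a == b); because the selected
                   // operator== returns an integral type, this is now only
                   // diagnosed as an extension rather than rejected
}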
@@ -13329,7 +13480,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
Context, ChosenOp, FnExpr.get(), Args, ResultTy, VK, OpLoc,
- FPFeatures, Best->IsADLCandidate);
+ CurFPFeatureOverrides(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
FnDecl))
@@ -13355,6 +13506,10 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (R.isInvalid())
return ExprError();
+ R = CheckForImmediateInvocation(R, FnDecl);
+ if (R.isInvalid())
+ return ExprError();
+
// For a rewritten candidate, we've already reversed the arguments
// if needed. Perform the rest of the rewrite now.
if ((Best->RewriteKind & CRK_DifferentOperator) ||
@@ -13594,10 +13749,10 @@ ExprResult Sema::BuildSynthesizedThreeWayComparison(
// Build a PseudoObjectExpr to model the rewriting of an <=> operator, and to
// bind the OpaqueValueExprs before they're (repeatedly) used.
- Expr *SyntacticForm = new (Context)
- BinaryOperator(OrigLHS, OrigRHS, BO_Cmp, Result.get()->getType(),
- Result.get()->getValueKind(),
- Result.get()->getObjectKind(), OpLoc, FPFeatures);
+ Expr *SyntacticForm = BinaryOperator::Create(
+ Context, OrigLHS, OrigRHS, BO_Cmp, Result.get()->getType(),
+ Result.get()->getValueKind(), Result.get()->getObjectKind(), OpLoc,
+ CurFPFeatureOverrides());
Expr *SemanticForm[] = {LHS, RHS, Result.get()};
return PseudoObjectExpr::Create(Context, SyntacticForm, SemanticForm, 2);
}
@@ -13628,7 +13783,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn, Args,
Context.DependentTy, VK_RValue, RLoc,
- FPOptions());
+ CurFPFeatureOverrides());
}
// Handle placeholders on both operands.
@@ -13701,10 +13856,9 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- CXXOperatorCallExpr::Create(Context, OO_Subscript, FnExpr.get(),
- Args, ResultTy, VK, RLoc, FPOptions());
-
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Subscript, FnExpr.get(), Args, ResultTy, VK, RLoc,
+ CurFPFeatureOverrides());
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
@@ -14014,7 +14168,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// resolution process, we still need to handle the enable_if attribute. Do
// that here, so it will not hide previous -- and more relevant -- errors.
if (auto *MemE = dyn_cast<MemberExpr>(NakedMemExpr)) {
- if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
+ if (const EnableIfAttr *Attr =
+ CheckEnableIf(Method, LParenLoc, Args, true)) {
Diag(MemE->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< Method << Method->getSourceRange();
@@ -14053,7 +14208,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
MemExpr->getMemberLoc());
}
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall),
+ TheCall->getMethodDecl());
}
/// BuildCallToObjectOfClassType - Build a call to an object of class
@@ -14324,9 +14480,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- CXXOperatorCallExpr::Create(Context, OO_Call, NewFn.get(), MethodArgs,
- ResultTy, VK, RParenLoc, FPOptions());
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Call, NewFn.get(), MethodArgs, ResultTy, VK, RParenLoc,
+ CurFPFeatureOverrides());
if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
return true;
@@ -14334,7 +14490,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (CheckFunctionCall(Method, TheCall, Proto))
return true;
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), Method);
}
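A hedged sketch of what the added CheckForImmediateInvocation calls cover: C++20 consteval functions reached through overload resolution (including operators and member calls) are immediate invocations and must fold to constants.

struct Meters {
  int v;
  consteval Meters operator+(Meters o) const { return {v + o.v}; }
  consteval int value() const { return v; }
};

constexpr int total = (Meters{1} + Meters{2}).value();  // both calls are
                                                        // immediate invocations
static_assert(total == 3);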
/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
@@ -14442,8 +14598,9 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
- Context, OO_Arrow, FnExpr.get(), Base, ResultTy, VK, OpLoc, FPOptions());
+ CXXOperatorCallExpr *TheCall =
+ CXXOperatorCallExpr::Create(Context, OO_Arrow, FnExpr.get(), Base,
+ ResultTy, VK, OpLoc, CurFPFeatureOverrides());
if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
return ExprError();
@@ -14529,7 +14686,7 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
if (CheckFunctionCall(FD, UDL, nullptr))
return ExprError();
- return MaybeBindToTemporary(UDL);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(UDL), FD);
}
/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
@@ -14690,9 +14847,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(UnOp->getOperatorLoc(), MemPtrType);
- return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
- VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc(), false);
+ return UnaryOperator::Create(
+ Context, SubExpr, UO_AddrOf, MemPtrType, VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc(), false, CurFPFeatureOverrides());
}
}
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
@@ -14700,10 +14857,10 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (SubExpr == UnOp->getSubExpr())
return UnOp;
- return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
- Context.getPointerType(SubExpr->getType()),
- VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc(), false);
+ return UnaryOperator::Create(Context, SubExpr, UO_AddrOf,
+ Context.getPointerType(SubExpr->getType()),
+ VK_RValue, OK_Ordinary, UnOp->getOperatorLoc(),
+ false, CurFPFeatureOverrides());
}
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
index 5587e0d24c7f..d17599a6ed14 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
@@ -127,12 +127,10 @@ namespace {
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
assert(uop->getOpcode() == UO_Extension);
e = rebuild(uop->getSubExpr());
- return new (S.Context) UnaryOperator(e, uop->getOpcode(),
- uop->getType(),
- uop->getValueKind(),
- uop->getObjectKind(),
- uop->getOperatorLoc(),
- uop->canOverflow());
+ return UnaryOperator::Create(
+ S.Context, e, uop->getOpcode(), uop->getType(), uop->getValueKind(),
+ uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow(),
+ S.CurFPFeatureOverrides());
}
if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
@@ -167,16 +165,11 @@ namespace {
Expr *&rebuiltExpr = ce->isConditionTrue() ? LHS : RHS;
rebuiltExpr = rebuild(rebuiltExpr);
- return new (S.Context) ChooseExpr(ce->getBuiltinLoc(),
- ce->getCond(),
- LHS, RHS,
- rebuiltExpr->getType(),
- rebuiltExpr->getValueKind(),
- rebuiltExpr->getObjectKind(),
- ce->getRParenLoc(),
- ce->isConditionTrue(),
- rebuiltExpr->isTypeDependent(),
- rebuiltExpr->isValueDependent());
+ return new (S.Context)
+ ChooseExpr(ce->getBuiltinLoc(), ce->getCond(), LHS, RHS,
+ rebuiltExpr->getType(), rebuiltExpr->getValueKind(),
+ rebuiltExpr->getObjectKind(), ce->getRParenLoc(),
+ ce->isConditionTrue());
}
llvm_unreachable("bad expression to rebuild!");
@@ -453,11 +446,11 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
ExprResult result;
if (opcode == BO_Assign) {
result = semanticRHS;
- syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
- opcode, capturedRHS->getType(),
- capturedRHS->getValueKind(),
- OK_Ordinary, opcLoc,
- FPOptions());
+ syntactic = BinaryOperator::Create(S.Context, syntacticLHS, capturedRHS,
+ opcode, capturedRHS->getType(),
+ capturedRHS->getValueKind(), OK_Ordinary,
+ opcLoc, S.CurFPFeatureOverrides());
+
} else {
ExprResult opLHS = buildGet();
if (opLHS.isInvalid()) return ExprError();
@@ -468,14 +461,11 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
result = S.BuildBinOp(Sc, opcLoc, nonCompound, opLHS.get(), semanticRHS);
if (result.isInvalid()) return ExprError();
- syntactic =
- new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode,
- result.get()->getType(),
- result.get()->getValueKind(),
- OK_Ordinary,
- opLHS.get()->getType(),
- result.get()->getType(),
- opcLoc, FPOptions());
+ syntactic = CompoundAssignOperator::Create(
+ S.Context, syntacticLHS, capturedRHS, opcode, result.get()->getType(),
+ result.get()->getValueKind(), OK_Ordinary, opcLoc,
+ S.CurFPFeatureOverrides(), opLHS.get()->getType(),
+ result.get()->getType());
}
// The result of the assignment, if not void, is the value set into
@@ -536,12 +526,14 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
(result.get()->isTypeDependent() || CanCaptureValue(result.get())))
setResultToLastSemantic();
- UnaryOperator *syntactic = new (S.Context) UnaryOperator(
- syntacticOp, opcode, resultType, VK_LValue, OK_Ordinary, opcLoc,
- !resultType->isDependentType()
- ? S.Context.getTypeSize(resultType) >=
- S.Context.getTypeSize(S.Context.IntTy)
- : false);
+ UnaryOperator *syntactic =
+ UnaryOperator::Create(S.Context, syntacticOp, opcode, resultType,
+ VK_LValue, OK_Ordinary, opcLoc,
+ !resultType->isDependentType()
+ ? S.Context.getTypeSize(resultType) >=
+ S.Context.getTypeSize(S.Context.IntTy)
+ : false,
+ S.CurFPFeatureOverrides());
return complete(syntactic);
}
@@ -590,7 +582,7 @@ bool ObjCPropertyOpBuilder::isWeakProperty() const {
QualType T;
if (RefExpr->isExplicitProperty()) {
const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty();
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
return true;
T = Prop->getType();
@@ -1561,8 +1553,9 @@ ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
UnaryOperatorKind opcode, Expr *op) {
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
- return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc, false);
+ return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc, false,
+ CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
@@ -1591,9 +1584,9 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
Expr *LHS, Expr *RHS) {
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
- return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc,
- FPOptions());
+ return BinaryOperator::Create(Context, LHS, RHS, opcode,
+ Context.DependentTy, VK_RValue, OK_Ordinary,
+ opcLoc, CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
@@ -1646,28 +1639,30 @@ Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return new (Context) UnaryOperator(
- op, uop->getOpcode(), uop->getType(), uop->getValueKind(),
- uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow());
+ return UnaryOperator::Create(Context, op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(),
+ uop->getOperatorLoc(), uop->canOverflow(),
+ CurFPFeatureOverrides());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
- return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
- cop->getType(),
- cop->getValueKind(),
- cop->getObjectKind(),
- cop->getComputationLHSType(),
- cop->getComputationResultType(),
- cop->getOperatorLoc(),
- FPOptions());
+ return CompoundAssignOperator::Create(
+ Context, lhs, rhs, cop->getOpcode(), cop->getType(),
+ cop->getValueKind(), cop->getObjectKind(), cop->getOperatorLoc(),
+ CurFPFeatureOverrides(), cop->getComputationLHSType(),
+ cop->getComputationResultType());
+
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
- return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
- bop->getType(), bop->getValueKind(),
- bop->getObjectKind(),
- bop->getOperatorLoc(), FPOptions());
+ return BinaryOperator::Create(Context, lhs, rhs, bop->getOpcode(),
+ bop->getType(), bop->getValueKind(),
+ bop->getObjectKind(), bop->getOperatorLoc(),
+ CurFPFeatureOverrides());
+
+ } else if (isa<CallExpr>(syntax)) {
+ return syntax;
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
new file mode 100644
index 000000000000..db7603b42f7b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
@@ -0,0 +1,49 @@
+//===- SemaSYCL.cpp - Semantic Analysis for SYCL constructs ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This implements Semantic Analysis for SYCL constructs.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+
+// -----------------------------------------------------------------------------
+// SYCL device specific diagnostics implementation
+// -----------------------------------------------------------------------------
+
+Sema::DeviceDiagBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(getLangOpts().SYCLIsDevice &&
+ "Should only be called during SYCL compilation");
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
+ DeviceDiagBuilder::Kind DiagKind = [this, FD] {
+ if (!FD)
+ return DeviceDiagBuilder::K_Nop;
+ if (getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
+ return DeviceDiagBuilder::K_ImmediateWithCallStack;
+ return DeviceDiagBuilder::K_Deferred;
+ }();
+ return DeviceDiagBuilder(DiagKind, Loc, DiagID, FD, *this);
+}
+
+bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
+ assert(getLangOpts().SYCLIsDevice &&
+ "Should only be called during SYCL compilation");
+ assert(Callee && "Callee may not be null.");
+
+ // Errors in unevaluated context don't need to be generated,
+ // so we can safely skip them.
+ if (isUnevaluatedContext() || isConstantEvaluated())
+ return true;
+
+ DeviceDiagBuilder::Kind DiagKind = DeviceDiagBuilder::K_Nop;
+
+ return DiagKind != DeviceDiagBuilder::K_Immediate &&
+ DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index ff6481006280..73f3183c163f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -334,6 +334,11 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
} else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
const Expr *Source = POE->getSyntacticForm();
+ // Handle the actually selected call of an OpenMP specialized call.
+ if (LangOpts.OpenMP && isa<CallExpr>(Source) &&
+ POE->getNumSemanticExprs() == 1 &&
+ isa<CallExpr>(POE->getSemanticExpr(0)))
+ return DiagnoseUnusedExprResult(POE->getSemanticExpr(0));
if (isa<ObjCSubscriptRefExpr>(Source))
DiagID = diag::warn_unused_container_subscript_expr;
else
@@ -365,7 +370,10 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
}
- if (E->isGLValue() && E->getType().isVolatileQualified()) {
+ // Tell the user to assign it into a variable to force a volatile load if this
+ // isn't an array.
+ if (E->isGLValue() && E->getType().isVolatileQualified() &&
+ !E->getType()->isArrayType()) {
Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
return;
}
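The array exclusion corresponds to code like this sketch, where the old advice to assign into a variable to force a volatile load could never be followed:

volatile int regs[4];

void touch(void) {
  regs;   // an array glvalue cannot itself be loaded, so the unused-volatile
          // warning is no longer emitted for this statement
}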
@@ -389,6 +397,11 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
+ // Mark the current function as using floating point constrained intrinsics.
+ if (getCurFPFeatures().isFPConstrained())
+ if (FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext))
+ F->setUsesFPIntrin(true);
+
// If we're in C89 mode, check that we don't have any decls after stmts. If
// so, emit an extension diagnostic.
if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
@@ -464,7 +477,9 @@ Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) {
return ER;
};
- ExprResult Converted = CorrectDelayedTyposInExpr(Val, CheckAndFinish);
+ ExprResult Converted = CorrectDelayedTyposInExpr(
+ Val, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
+ CheckAndFinish);
if (Converted.get() == Val.get())
Converted = CheckAndFinish(Val.get());
return Converted;
@@ -730,11 +745,11 @@ StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
if (CondExpr && !CondExpr->isTypeDependent()) {
// We have already converted the expression to an integral or enumeration
- // type, when we parsed the switch condition. If we don't have an
- // appropriate type now, enter the switch scope but remember that it's
- // invalid.
- assert(CondExpr->getType()->isIntegralOrEnumerationType() &&
- "invalid condition type");
+ // type, when we parsed the switch condition. There are cases where we don't
+ // have an appropriate type, e.g. when a typo-expr condition was corrected to
+ // an expression of an inappropriate type; in that case, just return an error.
+ if (!CondExpr->getType()->isIntegralOrEnumerationType())
+ return StmtError();
if (CondExpr->isKnownToHaveBooleanValue()) {
// switch(bool_expr) {...} is often a programmer error, e.g.
// switch(n && mask) { ... } // Doh - should be "n & mask".
@@ -1313,8 +1328,9 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
}
}
-StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
- Stmt *Body) {
+StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc,
+ SourceLocation LParenLoc, ConditionResult Cond,
+ SourceLocation RParenLoc, Stmt *Body) {
if (Cond.isInvalid())
return StmtError();
@@ -1329,7 +1345,7 @@ StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
getCurCompoundScope().setHasEmptyLoopBodies();
return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body,
- WhileLoc);
+ WhileLoc, LParenLoc, RParenLoc);
}
StmtResult
@@ -1387,10 +1403,9 @@ namespace {
Simple = false;
}
- // Any Stmt not whitelisted will cause the condition to be marked complex.
- void VisitStmt(Stmt *S) {
- Simple = false;
- }
+ // Any Stmt not explicitly listed will cause the condition to be marked
+ // complex.
+ void VisitStmt(Stmt *S) { Simple = false; }
void VisitBinaryOperator(BinaryOperator *E) {
Visit(E->getLHS());
@@ -2114,18 +2129,22 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
return StmtError();
}
+ // This function is responsible for attaching an initializer to LoopVar. We
+ // must call ActOnInitializerError if we fail to do so.
Decl *LoopVar = DS->getSingleDecl();
if (LoopVar->isInvalidDecl() || !Range ||
DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
// Build the coroutine state immediately and not later during template
// instantiation
if (!CoawaitLoc.isInvalid()) {
- if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await"))
+ if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await")) {
+ ActOnInitializerError(LoopVar);
return StmtError();
+ }
}
// Build auto && __range = range-init
@@ -2137,7 +2156,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
std::string("__range") + DepthStr);
if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
diag::err_for_range_deduction_failure)) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
@@ -2146,14 +2165,20 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
BuildDeclaratorGroup(MutableArrayRef<Decl *>((Decl **)&RangeVar, 1));
StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
if (RangeDecl.isInvalid()) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
- return BuildCXXForRangeStmt(
+ StmtResult R = BuildCXXForRangeStmt(
ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(),
/*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
/*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind);
+ if (R.isInvalid()) {
+ ActOnInitializerError(LoopVar);
+ return StmtError();
+ }
+
+ return R;
}
/// Create the initialization, compare, and increment steps for
@@ -2336,22 +2361,6 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild);
}
-namespace {
-/// RAII object to automatically invalidate a declaration if an error occurs.
-struct InvalidateOnErrorScope {
- InvalidateOnErrorScope(Sema &SemaRef, Decl *D, bool Enabled)
- : Trap(SemaRef.Diags), D(D), Enabled(Enabled) {}
- ~InvalidateOnErrorScope() {
- if (Enabled && Trap.hasErrorOccurred())
- D->setInvalidDecl();
- }
-
- DiagnosticErrorTrap Trap;
- Decl *D;
- bool Enabled;
-};
-}
-
/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc, Stmt *InitStmt,
@@ -2378,11 +2387,6 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());
- // If we hit any errors, mark the loop variable as invalid if its type
- // contains 'auto'.
- InvalidateOnErrorScope Invalidate(*this, LoopVar,
- LoopVar->getType()->isUndeducedType());
-
StmtResult BeginDeclStmt = Begin;
StmtResult EndDeclStmt = End;
ExprResult NotEqExpr = Cond, IncrExpr = Inc;
@@ -2664,7 +2668,8 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
// trying to determine whether this would be a valid range.
if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false);
- if (LoopVar->isInvalidDecl())
+ if (LoopVar->isInvalidDecl() ||
+ (LoopVar->getInit() && LoopVar->getInit()->containsErrors()))
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
}
}
@@ -2741,22 +2746,24 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
E = E->IgnoreImpCasts();
}
- bool ReturnsReference = false;
+ QualType ReferenceReturnType;
if (isa<UnaryOperator>(E)) {
- ReturnsReference = true;
+ ReferenceReturnType = SemaRef.Context.getLValueReferenceType(E->getType());
} else {
const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E);
const FunctionDecl *FD = Call->getDirectCallee();
QualType ReturnType = FD->getReturnType();
- ReturnsReference = ReturnType->isReferenceType();
+ if (ReturnType->isReferenceType())
+ ReferenceReturnType = ReturnType;
}
- if (ReturnsReference) {
+ if (!ReferenceReturnType.isNull()) {
// Loop variable creates a temporary. Suggest either to go with
// non-reference loop variable to indicate a copy is made, or
- // the correct time to bind a const reference.
- SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
- << VD << VariableType << E->getType();
+ // the correct type to bind a const reference.
+ SemaRef.Diag(VD->getLocation(),
+ diag::warn_for_range_const_ref_binds_temp_built_from_ref)
+ << VD << VariableType << ReferenceReturnType;
QualType NonReferenceType = VariableType.getNonReferenceType();
NonReferenceType.removeLocalConst();
QualType NewReferenceType =
@@ -2769,7 +2776,7 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
// Suggest removing the reference from the loop variable.
// If the type is a rvalue reference do not warn since that changes the
// semantic of the code.
- SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy)
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_ref_binds_ret_temp)
<< VD << RangeInitType;
QualType NonReferenceType = VariableType.getNonReferenceType();
NonReferenceType.removeLocalConst();
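The reworded diagnostic targets range-for loops of roughly this shape, where the const reference binds to a temporary converted from what the iterator actually returns (illustrative):

#include <map>
#include <string>

void visit(const std::map<std::string, int> &m) {
  // value_type is std::pair<const std::string, int>, so each element is
  // converted to a temporary std::pair<std::string, int> and the const
  // reference binds to that temporary.
  for (const std::pair<std::string, int> &entry : m)
    (void)entry;
}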
@@ -2821,7 +2828,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
// Suggest changing from a const variable to a const reference variable
// if doing so will prevent a copy.
SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
- << VD << VariableType << InitExpr->getType();
+ << VD << VariableType;
SemaRef.Diag(VD->getBeginLoc(), diag::note_use_reference_type)
<< SemaRef.Context.getLValueReferenceType(VariableType)
<< VD->getSourceRange()
@@ -2841,9 +2848,10 @@ static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
if (SemaRef.inTemplateInstantiation())
return;
- if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy,
- ForStmt->getBeginLoc()) &&
- SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy,
+ if (SemaRef.Diags.isIgnored(
+ diag::warn_for_range_const_ref_binds_temp_built_from_ref,
+ ForStmt->getBeginLoc()) &&
+ SemaRef.Diags.isIgnored(diag::warn_for_range_ref_binds_ret_temp,
ForStmt->getBeginLoc()) &&
SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
ForStmt->getBeginLoc())) {
@@ -3292,6 +3300,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
assert(AT && "lost auto type from lambda return type");
if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
FD->setInvalidDecl();
+ // FIXME: preserve the ill-formed return expression.
return StmtError();
}
CurCap->ReturnType = FnRetType = FD->getReturnType();
@@ -3622,6 +3631,12 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (isa<CXXBoolLiteralExpr>(RetValExp))
Diag(ReturnLoc, diag::warn_main_returns_bool_literal)
<< RetValExp->getSourceRange();
+ if (FD->hasAttr<CmseNSEntryAttr>() && RetValExp) {
+ if (const auto *RT = dyn_cast<RecordType>(FnRetType.getCanonicalType())) {
+ if (RT->getDecl()->isOrContainsUnion())
+ Diag(RetValExp->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 1;
+ }
+ }
} else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
FnRetType = MD->getReturnType();
isObjCMethod = true;
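A hedged sketch of what the new CMSE check flags (Arm targets built with -mcmse; the types are invented):

union Word {
  int value;
  unsigned char bytes[4];
};

__attribute__((cmse_nonsecure_entry)) union Word get_word(void) {
  union Word w;
  w.value = 0;
  return w;   // warned: returning a union from a nonsecure entry function may
              // leak padding bits or bits of an inactive member
}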
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 93faf2d151f9..10fa24682f9c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -296,6 +296,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
return StmtError();
+ // Disallow _ExtInt, since the backends tend to have difficulties with
+ // non-normal sizes.
+ if (OutputExpr->getType()->isExtIntType())
+ return StmtError(
+ Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_type)
+ << OutputExpr->getType() << 0 /*Input*/
+ << OutputExpr->getSourceRange());
+
OutputConstraintInfos.push_back(Info);
// If this is dependent, just continue.
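A minimal sketch of what the new check rejects, assuming a target where both _ExtInt and GCC-style inline asm are available:

void f(void) {
  _ExtInt(37) odd = 0;
  int ok = 0;
  // asm("" : "=r"(odd));  // now an error: _ExtInt types are not accepted as
  //                       // asm outputs (or, below, as inputs)
  asm("" : "=r"(ok));      // ordinary integer types remain fine
  (void)odd;
}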
@@ -420,6 +428,12 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
}
+ if (InputExpr->getType()->isExtIntType())
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_type)
+ << InputExpr->getType() << 1 /*Output*/
+ << InputExpr->getSourceRange());
+
InputConstraintInfos.push_back(Info);
const Type *Ty = Exprs[i]->getType().getTypePtr();
@@ -478,10 +492,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Look for the correct constraint index.
unsigned ConstraintIdx = Piece.getOperandNo();
+ unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
// Labels are the last in the Exprs list.
- if (NS->isAsmGoto() && ConstraintIdx >= NS->getNumInputs())
+ if (NS->isAsmGoto() && ConstraintIdx >= NumOperands)
continue;
- unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
// Look for the (ConstraintIdx - NumOperands + 1)th constraint with
// modifier '+'.
if (ConstraintIdx >= NumOperands) {
@@ -892,6 +906,15 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
SourceLocation EndLoc) {
bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
setFunctionHasBranchProtectedScope();
+
+ for (uint64_t I = 0; I < NumOutputs + NumInputs; ++I) {
+ if (Exprs[I]->getType()->isExtIntType())
+ return StmtError(
+ Diag(Exprs[I]->getBeginLoc(), diag::err_asm_invalid_type)
+ << Exprs[I]->getType() << (I < NumOutputs)
+ << Exprs[I]->getSourceRange());
+ }
+
MSAsmStmt *NS =
new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
/*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
index 3d91893b4065..e9d3c755eb23 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
@@ -170,6 +171,44 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return LoopHintAttr::CreateImplicit(S.Context, Option, State, ValueExpr, A);
}
+namespace {
+class CallExprFinder : public ConstEvaluatedExprVisitor<CallExprFinder> {
+ bool FoundCallExpr = false;
+
+public:
+ typedef ConstEvaluatedExprVisitor<CallExprFinder> Inherited;
+
+ CallExprFinder(Sema &S, const Stmt *St) : Inherited(S.Context) { Visit(St); }
+
+ bool foundCallExpr() { return FoundCallExpr; }
+
+ void VisitCallExpr(const CallExpr *E) { FoundCallExpr = true; }
+
+ void Visit(const Stmt *St) {
+ if (!St)
+ return;
+ ConstEvaluatedExprVisitor<CallExprFinder>::Visit(St);
+ }
+};
+} // namespace
+
+static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ NoMergeAttr NMA(S.Context, A);
+ if (S.CheckAttrNoArgs(A))
+ return nullptr;
+
+ CallExprFinder CEF(S, St);
+
+ if (!CEF.foundCallExpr()) {
+ S.Diag(St->getBeginLoc(), diag::warn_nomerge_attribute_ignored_in_stmt)
+ << NMA.getSpelling();
+ return nullptr;
+ }
+
+ return ::new (S.Context) NoMergeAttr(S.Context, A);
+}
+
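Roughly what this accepts and warns about at the statement level, assuming the [[clang::nomerge]] spelling of the attribute:

void trap(int);

void f(bool c) {
  if (c)
    [[clang::nomerge]] trap(1);   // fine: the statement contains a call
  [[clang::nomerge]] c = !c;      // warned and dropped: no call expression in
                                  // the statement
}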
static void
CheckForIncompatibleAttributes(Sema &S,
const SmallVectorImpl<const Attr *> &Attrs) {
@@ -335,6 +374,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleOpenCLUnrollHint(S, St, A, Range);
case ParsedAttr::AT_Suppress:
return handleSuppressAttr(S, St, A, Range);
+ case ParsedAttr::AT_NoMerge:
+ return handleNoMergeAttr(S, St, A, Range);
default:
// if we're here, then we parsed a known attribute, but didn't recognize
// it as a statement attribute => it is a declaration attribute
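
A short illustration of the handleNoMergeAttr path added above, assuming the [[clang::nomerge]] spelling; 'sink' is a made-up function. The attribute is kept only when CallExprFinder finds a call inside the statement:

void sink(int);

void demo(int x) {
  [[clang::nomerge]] sink(x);   // kept: the statement contains a CallExpr
  [[clang::nomerge]] x += 1;    // dropped with warn_nomerge_attribute_ignored_in_stmt
}
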
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 264c903209af..c05ed0b14e3e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -51,8 +51,7 @@ unsigned Sema::getTemplateDepth(Scope *S) const {
// Each template parameter scope represents one level of template parameter
// depth.
- for (Scope *TempParamScope = S->getTemplateParamParent();
- TempParamScope && !Depth;
+ for (Scope *TempParamScope = S->getTemplateParamParent(); TempParamScope;
TempParamScope = TempParamScope->getParent()->getTemplateParamParent()) {
++Depth;
}
@@ -205,7 +204,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
LookupResult R(*this, TName, Name.getBeginLoc(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization, SourceLocation(),
- &AssumedTemplate, Disambiguation))
+ &AssumedTemplate,
+ /*AllowTypoCorrection=*/!Disambiguation))
return TNK_Non_template;
if (AssumedTemplate != AssumedTemplateKind::None) {
@@ -371,12 +371,15 @@ bool Sema::LookupTemplateName(LookupResult &Found,
QualType ObjectType,
bool EnteringContext,
bool &MemberOfUnknownSpecialization,
- SourceLocation TemplateKWLoc,
+ RequiredTemplateKind RequiredTemplate,
AssumedTemplateKind *ATK,
- bool Disambiguation) {
+ bool AllowTypoCorrection) {
if (ATK)
*ATK = AssumedTemplateKind::None;
+ if (SS.isInvalid())
+ return true;
+
Found.setTemplateNameLookup(true);
// Determine where to perform name lookup
@@ -386,7 +389,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
if (!ObjectType.isNull()) {
// This nested-name-specifier occurs in a member access expression, e.g.,
// x->B::f, and we are looking into the type of the object.
- assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ assert(SS.isEmpty() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
@@ -412,11 +415,11 @@ bool Sema::LookupTemplateName(LookupResult &Found,
Found.clear();
return false;
}
- } else if (SS.isSet()) {
+ } else if (SS.isNotEmpty()) {
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext);
- IsDependent = !LookupCtx;
+ IsDependent = !LookupCtx && isDependentScopeSpecifier(SS);
// The declaration context must be complete.
if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
@@ -443,7 +446,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
- if (!SS.isSet() && (ObjectType.isNull() || Found.empty())) {
+ if (SS.isEmpty() && (ObjectType.isNull() || Found.empty())) {
// C++ [basic.lookup.classref]p1:
// In a class member access expression (5.2.5), if the . or -> token is
// immediately followed by an identifier followed by a <, the
@@ -470,7 +473,8 @@ bool Sema::LookupTemplateName(LookupResult &Found,
if (Found.isAmbiguous())
return false;
- if (ATK && !SS.isSet() && ObjectType.isNull() && TemplateKWLoc.isInvalid()) {
+ if (ATK && SS.isEmpty() && ObjectType.isNull() &&
+ !RequiredTemplate.hasTemplateKeyword()) {
// C++2a [temp.names]p2:
// A name is also considered to refer to a template if it is an
// unqualified-id followed by a < and name lookup finds either one or more
@@ -480,7 +484,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// all language modes, and diagnose the empty lookup in ActOnCallExpr if we
// successfully form a call to an undeclared template-id.
bool AllFunctions =
- getLangOpts().CPlusPlus2a &&
+ getLangOpts().CPlusPlus20 &&
std::all_of(Found.begin(), Found.end(), [](NamedDecl *ND) {
return isa<FunctionDecl>(ND->getUnderlyingDecl());
});
@@ -496,7 +500,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
}
}
- if (Found.empty() && !IsDependent && !Disambiguation) {
+ if (Found.empty() && !IsDependent && AllowTypoCorrection) {
// If we did not find any names and typo correction is allowed, attempt
// to correct any typos.
DeclarationName Name = Found.getLookupName();
@@ -542,9 +546,11 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// If a 'template' keyword was used, a lookup that finds only non-template
// names is an error.
- if (ExampleLookupResult && TemplateKWLoc.isValid()) {
+ if (ExampleLookupResult && RequiredTemplate) {
Diag(Found.getNameLoc(), diag::err_template_kw_refers_to_non_template)
- << Found.getLookupName() << SS.getRange();
+ << Found.getLookupName() << SS.getRange()
+ << RequiredTemplate.hasTemplateKeyword()
+ << RequiredTemplate.getTemplateKeywordLoc();
Diag(ExampleLookupResult->getUnderlyingDecl()->getLocation(),
diag::note_template_kw_refers_to_non_template)
<< Found.getLookupName();
@@ -1333,11 +1339,11 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
// Check that we have valid decl-specifiers specified.
auto CheckValidDeclSpecifiers = [this, &D] {
// C++ [temp.param]
- // p1
+ // p1
// template-parameter:
// ...
// parameter-declaration
- // p2
+ // p2
// ... A storage class shall not be specified in a template-parameter
// declaration.
// [dcl.typedef]p1:
@@ -1940,16 +1946,46 @@ namespace {
/// constructor to a deduction guide.
class ExtractTypeForDeductionGuide
: public TreeTransform<ExtractTypeForDeductionGuide> {
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs;
+
public:
typedef TreeTransform<ExtractTypeForDeductionGuide> Base;
- ExtractTypeForDeductionGuide(Sema &SemaRef) : Base(SemaRef) {}
+ ExtractTypeForDeductionGuide(
+ Sema &SemaRef,
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs)
+ : Base(SemaRef), MaterializedTypedefs(MaterializedTypedefs) {}
TypeSourceInfo *transform(TypeSourceInfo *TSI) { return TransformType(TSI); }
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
- return TransformType(
- TLB,
- TL.getTypedefNameDecl()->getTypeSourceInfo()->getTypeLoc());
+ ASTContext &Context = SemaRef.getASTContext();
+ TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
+ TypeLocBuilder InnerTLB;
+ QualType Transformed =
+ TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
+ TypeSourceInfo *TSI =
+ TransformType(InnerTLB.getTypeSourceInfo(Context, Transformed));
+
+ TypedefNameDecl *Decl = nullptr;
+
+ if (isa<TypeAliasDecl>(OrigDecl))
+ Decl = TypeAliasDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ else {
+ assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
+ Decl = TypedefDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ }
+
+ MaterializedTypedefs.push_back(Decl);
+
+ QualType TDTy = Context.getTypedefType(Decl);
+ TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
+ TypedefTL.setNameLoc(TL.getNameLoc());
+
+ return TDTy;
}
};
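
A hedged sketch of the user-level case the MaterializedTypedefs change above is aimed at: a constructor parameter spelled through a member typedef gets a materialized copy of that typedef in the implicit deduction guide, so deduction does not have to instantiate the class. 'Wrap' is an illustrative name:

template <typename T> struct Wrap {
  using value_type = T;
  Wrap(value_type v);   // the guide refers to a materialized 'value_type'
};

Wrap w(42);             // CTAD still deduces Wrap<int>
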
@@ -2001,6 +2037,7 @@ struct ConvertConstructorToDeductionGuideTransform {
// a list of substituted template arguments as we go.
for (NamedDecl *Param : *InnerParams) {
MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
NamedDecl *NewParam = transformTemplateParameter(Param, Args);
@@ -2020,6 +2057,7 @@ struct ConvertConstructorToDeductionGuideTransform {
// substitute references to the old parameters into references to the
// new ones.
MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
if (FTD) {
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
@@ -2034,14 +2072,16 @@ struct ConvertConstructorToDeductionGuideTransform {
// new ones.
TypeLocBuilder TLB;
SmallVector<ParmVarDecl*, 8> Params;
- QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args);
+ SmallVector<TypedefNameDecl *, 4> MaterializedTypedefs;
+ QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args,
+ MaterializedTypedefs);
if (NewType.isNull())
return nullptr;
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
return buildDeductionGuide(TemplateParams, CD->getExplicitSpecifier(),
NewTInfo, CD->getBeginLoc(), CD->getLocation(),
- CD->getEndLoc());
+ CD->getEndLoc(), MaterializedTypedefs);
}
/// Build a deduction guide with the specified parameter types.
@@ -2136,16 +2176,18 @@ private:
return NewParam;
}
- QualType transformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- SmallVectorImpl<ParmVarDecl*> &Params,
- MultiLevelTemplateArgumentList &Args) {
+ QualType transformFunctionProtoType(
+ TypeLocBuilder &TLB, FunctionProtoTypeLoc TL,
+ SmallVectorImpl<ParmVarDecl *> &Params,
+ MultiLevelTemplateArgumentList &Args,
+ SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
SmallVector<QualType, 4> ParamTypes;
const FunctionProtoType *T = TL.getTypePtr();
// -- The types of the function parameters are those of the constructor.
for (auto *OldParam : TL.getParams()) {
- ParmVarDecl *NewParam = transformFunctionTypeParam(OldParam, Args);
+ ParmVarDecl *NewParam =
+ transformFunctionTypeParam(OldParam, Args, MaterializedTypedefs);
if (!NewParam)
return QualType();
ParamTypes.push_back(NewParam->getType());
@@ -2187,9 +2229,9 @@ private:
return Result;
}
- ParmVarDecl *
- transformFunctionTypeParam(ParmVarDecl *OldParam,
- MultiLevelTemplateArgumentList &Args) {
+ ParmVarDecl *transformFunctionTypeParam(
+ ParmVarDecl *OldParam, MultiLevelTemplateArgumentList &Args,
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
TypeSourceInfo *NewDI;
if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
@@ -2212,7 +2254,8 @@ private:
// members of the current instantiations with the definitions of those
// typedefs, avoiding triggering instantiation of the deduced type during
// deduction.
- NewDI = ExtractTypeForDeductionGuide(SemaRef).transform(NewDI);
+ NewDI = ExtractTypeForDeductionGuide(SemaRef, MaterializedTypedefs)
+ .transform(NewDI);
// Resolving a wording defect, we also inherit default arguments from the
// constructor.
@@ -2243,10 +2286,11 @@ private:
return NewParam;
}
- NamedDecl *buildDeductionGuide(TemplateParameterList *TemplateParams,
- ExplicitSpecifier ES, TypeSourceInfo *TInfo,
- SourceLocation LocStart, SourceLocation Loc,
- SourceLocation LocEnd) {
+ FunctionTemplateDecl *buildDeductionGuide(
+ TemplateParameterList *TemplateParams, ExplicitSpecifier ES,
+ TypeSourceInfo *TInfo, SourceLocation LocStart, SourceLocation Loc,
+ SourceLocation LocEnd,
+ llvm::ArrayRef<TypedefNameDecl *> MaterializedTypedefs = {}) {
DeclarationNameInfo Name(DeductionGuideName, Loc);
ArrayRef<ParmVarDecl *> Params =
TInfo->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams();
@@ -2260,6 +2304,8 @@ private:
for (auto *Param : Params)
Param->setDeclContext(Guide);
+ for (auto *TD : MaterializedTypedefs)
+ TD->setDeclContext(Guide);
auto *GuideTemplate = FunctionTemplateDecl::Create(
SemaRef.Context, DC, Loc, DeductionGuideName, TemplateParams, Guide);
@@ -3470,6 +3516,10 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
DTN->getIdentifier(),
TemplateArgs);
+ if (Name.getAsAssumedTemplateName() &&
+ resolveAssumedTemplateNameAsType(/*Scope*/nullptr, Name, TemplateLoc))
+ return QualType();
+
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template || isa<FunctionTemplateDecl>(Template) ||
isa<VarTemplateDecl>(Template) || isa<ConceptDecl>(Template)) {
@@ -3509,9 +3559,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&StackTemplateArgs);
- unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth();
- for (unsigned I = 0; I < Depth; ++I)
- TemplateArgLists.addOuterTemplateArguments(None);
+ TemplateArgLists.addOuterRetainedLevels(
+ AliasTemplate->getTemplateParameters()->getDepth());
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template);
@@ -4157,7 +4206,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
Converted) &&
- (!Context.getLangOpts().CPlusPlus2a ||
+ (!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
//
@@ -4623,21 +4672,28 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
-/// Form a dependent template name.
+/// Form a template name from a name that is syntactically required to name a
+/// template, either due to use of the 'template' keyword or because a name in
+/// this syntactic context is assumed to name a template (C++ [temp.names]p2-4).
///
-/// This action forms a dependent template name given the template
-/// name and its (presumably dependent) scope specifier. For
-/// example, given "MetaFun::template apply", the scope specifier \p
-/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
+/// This action forms a template name given the name of the template and its
+/// optional scope specifier. This is used when the 'template' keyword is used
+/// or when the parsing context unambiguously treats a following '<' as
+/// introducing a template argument list. Note that this may produce a
+/// non-dependent template name if we can perform the lookup now and identify
+/// the named template.
+///
+/// For example, given "x.MetaFun::template apply", the scope specifier
+/// \p SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
-TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
- CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const UnqualifiedId &Name,
- ParsedType ObjectType,
- bool EnteringContext,
- TemplateTy &Result,
- bool AllowInjectedClassName) {
+TemplateNameKind Sema::ActOnTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Result,
+ bool AllowInjectedClassName) {
if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TemplateKWLoc,
getLangOpts().CPlusPlus11 ?
@@ -4645,95 +4701,115 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
diag::ext_template_outside_of_template)
<< FixItHint::CreateRemoval(TemplateKWLoc);
+ if (SS.isInvalid())
+ return TNK_Non_template;
+
+ // Figure out where isTemplateName is going to look.
DeclContext *LookupCtx = nullptr;
- if (SS.isSet())
+ if (SS.isNotEmpty())
LookupCtx = computeDeclContext(SS, EnteringContext);
- if (!LookupCtx && ObjectType)
- LookupCtx = computeDeclContext(ObjectType.get());
- if (LookupCtx) {
- // C++0x [temp.names]p5:
- // If a name prefixed by the keyword template is not the name of
- // a template, the program is ill-formed. [Note: the keyword
- // template may not be applied to non-template members of class
- // templates. -end note ] [ Note: as is the case with the
- // typename prefix, the template prefix is allowed in cases
- // where it is not strictly necessary; i.e., when the
- // nested-name-specifier or the expression on the left of the ->
- // or . is not dependent on a template-parameter, or the use
- // does not appear in the scope of a template. -end note]
- //
- // Note: C++03 was more strict here, because it banned the use of
- // the "template" keyword prior to a template-name that was not a
- // dependent name. C++ DR468 relaxed this requirement (the
- // "template" keyword is now permitted). We follow the C++0x
- // rules, even in C++03 mode with a warning, retroactively applying the DR.
- bool MemberOfUnknownSpecialization;
- TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
- ObjectType, EnteringContext, Result,
- MemberOfUnknownSpecialization);
- if (TNK == TNK_Non_template && MemberOfUnknownSpecialization) {
- // This is a dependent template. Handle it below.
- } else if (TNK == TNK_Non_template) {
- // Do the lookup again to determine if this is a "nothing found" case or
- // a "not a template" case. FIXME: Refactor isTemplateName so we don't
- // need to do this.
- DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
- LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
- LookupOrdinaryName);
- bool MOUS;
- if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
- MOUS, TemplateKWLoc) && !R.isAmbiguous())
+ else if (ObjectType)
+ LookupCtx = computeDeclContext(GetTypeFromParser(ObjectType));
+
+ // C++0x [temp.names]p5:
+ // If a name prefixed by the keyword template is not the name of
+ // a template, the program is ill-formed. [Note: the keyword
+ // template may not be applied to non-template members of class
+ // templates. -end note ] [ Note: as is the case with the
+ // typename prefix, the template prefix is allowed in cases
+ // where it is not strictly necessary; i.e., when the
+ // nested-name-specifier or the expression on the left of the ->
+ // or . is not dependent on a template-parameter, or the use
+ // does not appear in the scope of a template. -end note]
+ //
+ // Note: C++03 was more strict here, because it banned the use of
+ // the "template" keyword prior to a template-name that was not a
+ // dependent name. C++ DR468 relaxed this requirement (the
+ // "template" keyword is now permitted). We follow the C++0x
+ // rules, even in C++03 mode with a warning, retroactively applying the DR.
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
+ ObjectType, EnteringContext, Result,
+ MemberOfUnknownSpecialization);
+ if (TNK != TNK_Non_template) {
+ // We resolved this to a (non-dependent) template name. Return it.
+ auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
+ if (!AllowInjectedClassName && SS.isNotEmpty() && LookupRD &&
+ Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
+ Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
+ // C++14 [class.qual]p2:
+ // In a lookup in which function names are not ignored and the
+ // nested-name-specifier nominates a class C, if the name specified
+ // [...] is the injected-class-name of C, [...] the name is instead
+ // considered to name the constructor
+ //
+ // We don't get here if naming the constructor would be valid, so we
+ // just reject immediately and recover by treating the
+ // injected-class-name as naming the template.
+ Diag(Name.getBeginLoc(),
+ diag::ext_out_of_line_qualified_id_type_names_constructor)
+ << Name.Identifier
+ << 0 /*injected-class-name used as template name*/
+ << TemplateKWLoc.isValid();
+ }
+ return TNK;
+ }
+
+ if (!MemberOfUnknownSpecialization) {
+ // Didn't find a template name, and the lookup wasn't dependent.
+ // Do the lookup again to determine if this is a "nothing found" case or
+ // a "not a template" case. FIXME: Refactor isTemplateName so we don't
+ // need to do this.
+ DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
+ LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
+ LookupOrdinaryName);
+ bool MOUS;
+ // Tell LookupTemplateName that we require a template so that it diagnoses
+ // cases where it finds a non-template.
+ RequiredTemplateKind RTK = TemplateKWLoc.isValid()
+ ? RequiredTemplateKind(TemplateKWLoc)
+ : TemplateNameIsRequired;
+ if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext, MOUS,
+ RTK, nullptr, /*AllowTypoCorrection=*/false) &&
+ !R.isAmbiguous()) {
+ if (LookupCtx)
Diag(Name.getBeginLoc(), diag::err_no_member)
<< DNI.getName() << LookupCtx << SS.getRange();
- return TNK_Non_template;
- } else {
- // We found something; return it.
- auto *LookupRD = dyn_cast<CXXRecordDecl>(LookupCtx);
- if (!AllowInjectedClassName && SS.isSet() && LookupRD &&
- Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
- Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
- // C++14 [class.qual]p2:
- // In a lookup in which function names are not ignored and the
- // nested-name-specifier nominates a class C, if the name specified
- // [...] is the injected-class-name of C, [...] the name is instead
- // considered to name the constructor
- //
- // We don't get here if naming the constructor would be valid, so we
- // just reject immediately and recover by treating the
- // injected-class-name as naming the template.
- Diag(Name.getBeginLoc(),
- diag::ext_out_of_line_qualified_id_type_names_constructor)
- << Name.Identifier
- << 0 /*injected-class-name used as template name*/
- << 1 /*'template' keyword was used*/;
- }
- return TNK;
+ else
+ Diag(Name.getBeginLoc(), diag::err_undeclared_use)
+ << DNI.getName() << SS.getRange();
}
+ return TNK_Non_template;
}
NestedNameSpecifier *Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
- Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
- Name.Identifier));
+ Result = TemplateTy::make(
+ Context.getDependentTemplateName(Qualifier, Name.Identifier));
return TNK_Dependent_template_name;
case UnqualifiedIdKind::IK_OperatorFunctionId:
- Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
- Name.OperatorFunctionId.Operator));
+ Result = TemplateTy::make(Context.getDependentTemplateName(
+ Qualifier, Name.OperatorFunctionId.Operator));
return TNK_Function_template;
case UnqualifiedIdKind::IK_LiteralOperatorId:
- llvm_unreachable("literal operator id cannot have a dependent scope");
+ // This is a kind of template name, but can never occur in a dependent
+ // scope (literal operators can only be declared at namespace scope).
+ break;
default:
break;
}
- Diag(Name.getBeginLoc(), diag::err_template_kw_refers_to_non_template)
+ // This name cannot possibly name a dependent template. Diagnose this now
+ // rather than building a dependent template name that can never be valid.
+ Diag(Name.getBeginLoc(),
+ diag::err_template_kw_refers_to_dependent_non_template)
<< GetNameFromUnqualifiedId(Name).getName() << Name.getSourceRange()
- << TemplateKWLoc;
+ << TemplateKWLoc.isValid() << TemplateKWLoc;
return TNK_Non_template;
}
@@ -4768,10 +4844,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
CXXScopeSpec SS;
DeclarationNameInfo NameInfo;
- if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) {
- SS.Adopt(ArgExpr->getQualifierLoc());
- NameInfo = ArgExpr->getNameInfo();
- } else if (DependentScopeDeclRefExpr *ArgExpr =
+ if (DependentScopeDeclRefExpr *ArgExpr =
dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) {
SS.Adopt(ArgExpr->getQualifierLoc());
NameInfo = ArgExpr->getNameInfo();
@@ -4790,6 +4863,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
if (Result.getAsSingle<TypeDecl>() ||
Result.getResultKind() ==
LookupResult::NotFoundInCurrentInstantiation) {
+      assert(SS.getScopeRep() && "dependent scope expr must have a scope!");
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
Diag(Loc, getLangOpts().MSVCCompat
@@ -5829,6 +5903,11 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
return Visit(T->getElementType());
}
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedMatrixType(
+ const DependentSizedMatrixType *T) {
+ return Visit(T->getElementType());
+}
+
bool UnnamedLocalNoLinkageFinder::VisitDependentAddressSpaceType(
const DependentAddressSpaceType *T) {
return Visit(T->getPointeeType());
@@ -5847,6 +5926,11 @@ bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
return Visit(T->getElementType());
}
+bool UnnamedLocalNoLinkageFinder::VisitConstantMatrixType(
+ const ConstantMatrixType *T) {
+ return Visit(T->getElementType());
+}
+
bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
const FunctionProtoType* T) {
for (const auto &A : T->param_types()) {
@@ -5960,6 +6044,15 @@ bool UnnamedLocalNoLinkageFinder::VisitPipeType(const PipeType* T) {
return false;
}
+bool UnnamedLocalNoLinkageFinder::VisitExtIntType(const ExtIntType *T) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentExtIntType(
+ const DependentExtIntType *T) {
+ return false;
+}
+
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
S.Diag(SR.getBegin(),
@@ -6293,8 +6386,11 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
Arg = subst->getReplacement()->IgnoreImpCasts();
}
- DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
- ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;
+ ValueDecl *Entity = nullptr;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg))
+ Entity = DRE->getDecl();
+ else if (CXXUuidofExpr *CUE = dyn_cast<CXXUuidofExpr>(Arg))
+ Entity = CUE->getGuidDecl();
// If our parameter has pointer type, check for a null template value.
if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
@@ -6321,16 +6417,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return false;
}
- if (isa<CXXUuidofExpr>(Arg)) {
- if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
- ArgIn, Arg, ArgType))
- return true;
-
- Converted = TemplateArgument(ArgIn);
- return false;
- }
-
- if (!DRE) {
+ if (!Entity) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
@@ -6357,13 +6444,14 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
VarDecl *Var = dyn_cast<VarDecl>(Entity);
+ MSGuidDecl *Guid = dyn_cast<MSGuidDecl>(Entity);
// A non-type template argument must refer to an object or function.
- if (!Func && !Var) {
+ if (!Func && !Var && !Guid) {
// We found something, but we don't know specifically what it is.
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_object_or_func)
<< Arg->getSourceRange();
- S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ S.Diag(Entity->getLocation(), diag::note_template_arg_refers_here);
return true;
}
@@ -6384,30 +6472,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
}
- if (Func) {
- // If the template parameter has pointer type, the function decays.
- if (ParamType->isPointerType() && !AddressTaken)
- ArgType = S.Context.getPointerType(Func->getType());
- else if (AddressTaken && ParamType->isReferenceType()) {
- // If we originally had an address-of operator, but the
- // parameter has reference type, complain and (if things look
- // like they will work) drop the address-of operator.
- if (!S.Context.hasSameUnqualifiedType(Func->getType(),
- ParamType.getNonReferenceType())) {
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
-
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType
- << FixItHint::CreateRemoval(AddrOpLoc);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
-
- ArgType = Func->getType();
- }
- } else {
+ if (Var) {
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_reference_var)
@@ -6423,50 +6488,53 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
return true;
}
+ }
- // If the template parameter has pointer type, we must have taken
- // the address of this object.
- if (ParamType->isReferenceType()) {
- if (AddressTaken) {
- // If we originally had an address-of operator, but the
- // parameter has reference type, complain and (if things look
- // like they will work) drop the address-of operator.
- if (!S.Context.hasSameUnqualifiedType(Var->getType(),
- ParamType.getNonReferenceType())) {
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
+ if (AddressTaken && ParamType->isReferenceType()) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Entity->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType
- << FixItHint::CreateRemoval(AddrOpLoc);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
- ArgType = Var->getType();
- }
- } else if (!AddressTaken && ParamType->isPointerType()) {
- if (Var->getType()->isArrayType()) {
- // Array-to-pointer decay.
- ArgType = S.Context.getArrayDecayedType(Var->getType());
- } else {
- // If the template parameter has pointer type but the address of
- // this object was not taken, complain and (possibly) recover by
- // taking the address of the entity.
- ArgType = S.Context.getPointerType(Var->getType());
- if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
- S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
+ ArgType = Entity->getType();
+ }
+ // If the template parameter has pointer type, either we must have taken the
+ // address or the argument must decay to a pointer.
+ if (!AddressTaken && ParamType->isPointerType()) {
+ if (Func) {
+ // Function-to-pointer decay.
+ ArgType = S.Context.getPointerType(Func->getType());
+ } else if (Entity->getType()->isArrayType()) {
+ // Array-to-pointer decay.
+ ArgType = S.Context.getArrayDecayedType(Entity->getType());
+ } else {
+ // If the template parameter has pointer type but the address of
+ // this object was not taken, complain and (possibly) recover by
+ // taking the address of the entity.
+ ArgType = S.Context.getPointerType(Entity->getType());
+ if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
- << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
-
+ << ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
}
+
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
+ << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
}
}
@@ -6791,12 +6859,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a predefined __func__ variable
APValue::LValueBase Base = Value.getLValueBase();
auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
- if (Base && !VD) {
- auto *E = Base.dyn_cast<const Expr *>();
- if (E && isa<CXXUuidofExpr>(E)) {
- Converted = TemplateArgument(ArgResult.get()->IgnoreImpCasts());
- break;
- }
+ if (Base && (!VD || isa<LifetimeExtendedTemporaryDecl>(VD))) {
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
@@ -6883,7 +6946,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Enum->getDecl()->getIntegerType();
- Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
+ Value = Value.extOrTrunc(IntegerType->isExtIntType()
+ ? Context.getIntWidth(IntegerType)
+ : Context.getTypeSize(IntegerType));
Converted = TemplateArgument(Context, Value,
Context.getCanonicalType(ParamType));
@@ -6977,7 +7042,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
- unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ unsigned AllowedBits = IntegerType->isExtIntType()
+ ? Context.getIntWidth(IntegerType)
+ : Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
@@ -8172,7 +8239,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (Context.hasSameType(CanonType,
ClassTemplate->getInjectedClassNameSpecialization()) &&
- (!Context.getLangOpts().CPlusPlus2a ||
+ (!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
//
@@ -8398,7 +8465,7 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name,
TemplateParameterLists.front(),
ConstraintExpr);
-
+
if (NewDecl->hasAssociatedConstraints()) {
// C++2a [temp.concept]p4:
// A concept shall not have associated constraints.
@@ -10684,7 +10751,7 @@ Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
}
Out << ']';
- return Out.str();
+ return std::string(Out.str());
}
void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
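
A hedged example of the _ExtInt handling added to CheckTemplateArgument above: for an _ExtInt parameter type the converted value is sized with getIntWidth rather than the padded storage size. This assumes _ExtInt is usable as a non-type template parameter type, as the surrounding hunks imply; 'Holder' is an illustrative name:

template <_ExtInt(7) V> struct Holder {};

Holder<(_ExtInt(7))3> h;   // the argument is truncated/extended to 7 bits, not to 8
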
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 1e321d637910..5392be57a3aa 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -355,7 +355,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
TemplateArgument Merged = checkDeducedTemplateArguments(
Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
- if (Merged.isNull())
+ if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
return DeducedTemplateArgument();
NewPack.push_back(Merged);
}
@@ -738,8 +738,9 @@ private:
// type, so we need to collect the pending deduced values for those packs.
if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(
TemplateParams->getParam(Index))) {
- if (auto *Expansion = dyn_cast<PackExpansionType>(NTTP->getType()))
- ExtraDeductions.push_back(Expansion->getPattern());
+ if (!NTTP->isExpandedParameterPack())
+ if (auto *Expansion = dyn_cast<PackExpansionType>(NTTP->getType()))
+ ExtraDeductions.push_back(Expansion->getPattern());
}
// FIXME: Also collect the unexpanded packs in any type and template
// parameter packs that are pack expansions.
@@ -1515,6 +1516,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ExtInt:
if (TDF & TDF_SkipNonDependent)
return Sema::TDK_Success;
@@ -2056,6 +2058,101 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// (clang extension)
//
+ // T __attribute__((matrix_type(<integral constant>,
+ // <integral constant>)))
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MatrixArg = dyn_cast<ConstantMatrixType>(Arg);
+ if (!MatrixArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const ConstantMatrixType *MatrixParam = cast<ConstantMatrixType>(Param);
+ // Check that the dimensions are the same
+ if (MatrixParam->getNumRows() != MatrixArg->getNumRows() ||
+ MatrixParam->getNumColumns() != MatrixArg->getNumColumns()) {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ // Perform deduction on element types.
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, MatrixParam->getElementType(),
+ MatrixArg->getElementType(), Info, Deduced, TDF);
+ }
+
+ case Type::DependentSizedMatrix: {
+ const MatrixType *MatrixArg = dyn_cast<MatrixType>(Arg);
+ if (!MatrixArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+    // Check the element type of the matrices.
+ const DependentSizedMatrixType *MatrixParam =
+ cast<DependentSizedMatrixType>(Param);
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, MatrixParam->getElementType(),
+ MatrixArg->getElementType(), Info, Deduced, TDF))
+ return Result;
+
+ // Try to deduce a matrix dimension.
+ auto DeduceMatrixArg =
+ [&S, &Info, &Deduced, &TemplateParams](
+ Expr *ParamExpr, const MatrixType *Arg,
+ unsigned (ConstantMatrixType::*GetArgDimension)() const,
+ Expr *(DependentSizedMatrixType::*GetArgDimensionExpr)() const) {
+ const auto *ArgConstMatrix = dyn_cast<ConstantMatrixType>(Arg);
+ const auto *ArgDepMatrix = dyn_cast<DependentSizedMatrixType>(Arg);
+ if (!ParamExpr->isValueDependent()) {
+ llvm::APSInt ParamConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ if (!ParamExpr->isIntegerConstantExpr(ParamConst, S.Context))
+ return Sema::TDK_NonDeducedMismatch;
+
+ if (ArgConstMatrix) {
+ if ((ArgConstMatrix->*GetArgDimension)() == ParamConst)
+ return Sema::TDK_Success;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ Expr *ArgExpr = (ArgDepMatrix->*GetArgDimensionExpr)();
+ llvm::APSInt ArgConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ if (!ArgExpr->isValueDependent() &&
+ ArgExpr->isIntegerConstantExpr(ArgConst, S.Context) &&
+ ArgConst == ParamConst)
+ return Sema::TDK_Success;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, ParamExpr);
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ if (ArgConstMatrix) {
+ llvm::APSInt ArgConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ ArgConst = (ArgConstMatrix->*GetArgDimension)();
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, ArgConst, S.Context.getSizeType(),
+ /*ArrayBound=*/true, Info, Deduced);
+ }
+
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, (ArgDepMatrix->*GetArgDimensionExpr)(),
+ Info, Deduced);
+ };
+
+ auto Result = DeduceMatrixArg(MatrixParam->getRowExpr(), MatrixArg,
+ &ConstantMatrixType::getNumRows,
+ &DependentSizedMatrixType::getRowExpr);
+ if (Result)
+ return Result;
+
+ return DeduceMatrixArg(MatrixParam->getColumnExpr(), MatrixArg,
+ &ConstantMatrixType::getNumColumns,
+ &DependentSizedMatrixType::getColumnExpr);
+ }
+
+ // (clang extension)
+ //
// T __attribute__(((address_space(N))))
case Type::DependentAddressSpace: {
const DependentAddressSpaceType *AddressSpaceParam =
@@ -2106,6 +2203,33 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_NonDeducedMismatch;
}
+ case Type::DependentExtInt: {
+ const auto *IntParam = cast<DependentExtIntType>(Param);
+
+ if (const auto *IntArg = dyn_cast<ExtIntType>(Arg)){
+ if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ return Sema::TDK_NonDeducedMismatch;
+
+ NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, IntParam->getNumBitsExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = IntArg->getNumBits();
+
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize,
+ S.Context.IntTy, true, Info,
+ Deduced);
+ }
+
+ if (const auto *IntArg = dyn_cast<DependentExtIntType>(Arg)) {
+ if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ return Sema::TDK_NonDeducedMismatch;
+ return Sema::TDK_Success;
+ }
+ return Sema::TDK_NonDeducedMismatch;
+ }
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -2747,8 +2871,8 @@ CheckDeducedArgumentConstraints(Sema& S, TemplateDeclT *Template,
/// Complete template argument deduction for a partial specialization.
template <typename T>
-static typename std::enable_if<IsPartialSpecialization<T>::value,
- Sema::TemplateDeductionResult>::type
+static std::enable_if_t<IsPartialSpecialization<T>::value,
+ Sema::TemplateDeductionResult>
FinishTemplateArgumentDeduction(
Sema &S, T *Partial, bool IsPartialOrdering,
const TemplateArgumentList &TemplateArgs,
@@ -2917,8 +3041,13 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = ::FinishTemplateArgumentDeduction(*this, Partial,
+ /*IsPartialOrdering=*/false,
+ TemplateArgs, Deduced, Info);
+ });
+ return Result;
}
/// Perform template argument deduction to determine whether
@@ -2958,8 +3087,13 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = ::FinishTemplateArgumentDeduction(*this, Partial,
+ /*IsPartialOrdering=*/false,
+ TemplateArgs, Deduced, Info);
+ });
+ return Result;
}
/// Determine whether the given type T is a simple-template-id type.
@@ -3908,13 +4042,12 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
SmallVector<QualType, 8> ParamTypes;
unsigned NumExplicitlySpecified = 0;
if (ExplicitTemplateArgs) {
- TemplateDeductionResult Result =
- SubstituteExplicitTemplateArguments(FunctionTemplate,
- *ExplicitTemplateArgs,
- Deduced,
- ParamTypes,
- nullptr,
- Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = SubstituteExplicitTemplateArguments(
+ FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, nullptr,
+ Info);
+ });
if (Result)
return Result;
@@ -4016,12 +4149,16 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// that is needed when the accessibility of template arguments is checked.
DeclContext *CallingCtx = CurContext;
- return FinishTemplateArgumentDeduction(
- FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info,
- &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() {
- ContextRAII SavedContext(*this, CallingCtx);
- return CheckNonDependent(ParamTypesForArgChecking);
- });
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(
+ FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info,
+ &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() {
+ ContextRAII SavedContext(*this, CallingCtx);
+ return CheckNonDependent(ParamTypesForArgChecking);
+ });
+ });
+ return Result;
}
QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
@@ -4107,11 +4244,13 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
unsigned NumExplicitlySpecified = 0;
SmallVector<QualType, 4> ParamTypes;
if (ExplicitTemplateArgs) {
- if (TemplateDeductionResult Result
- = SubstituteExplicitTemplateArguments(FunctionTemplate,
- *ExplicitTemplateArgs,
- Deduced, ParamTypes,
- &FunctionType, Info))
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = SubstituteExplicitTemplateArguments(
+ FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes,
+ &FunctionType, Info);
+ });
+ if (Result)
return Result;
NumExplicitlySpecified = Deduced.size();
@@ -4153,10 +4292,13 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
return Result;
}
- if (TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
- NumExplicitlySpecified,
- Specialization, Info))
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info);
+ });
+ if (Result)
return Result;
// If the function has a deduced return type, deduce it now, so we can check
@@ -4313,9 +4455,11 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
LocalInstantiationScope InstScope(*this);
// Finish template argument deduction.
FunctionDecl *ConversionSpecialized = nullptr;
- TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
- ConversionSpecialized, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
+ ConversionSpecialized, Info);
+ });
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
return Result;
}
@@ -4532,6 +4676,8 @@ Sema::DeduceAutoResult
Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
Optional<unsigned> DependentDeductionDepth,
bool IgnoreConstraints) {
+ if (Init->containsErrors())
+ return DAR_FailedAlreadyDiagnosed;
if (Init->getType()->isNonOverloadPlaceholderType()) {
ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
if (NonPlaceholder.isInvalid())
@@ -4850,7 +4996,10 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1) {
+ unsigned NumCallArguments1,
+ bool Reversed) {
+ assert(!Reversed || TPOC == TPOC_Call);
+
FunctionDecl *FD1 = FT1->getTemplatedDecl();
FunctionDecl *FD2 = FT2->getTemplatedDecl();
const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>();
@@ -4899,6 +5048,12 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
} else if (!Method1 && Method2 && !Method2->isStatic()) {
// Compare 'this' from Method2 against first parameter from Method1.
AddImplicitObjectParameterType(S.Context, Method2, Args2);
+ } else if (Method1 && Method2 && Reversed) {
+ // Compare 'this' from Method1 against second parameter from Method2
+ // and 'this' from Method2 against second parameter from Method1.
+ AddImplicitObjectParameterType(S.Context, Method1, Args1);
+ AddImplicitObjectParameterType(S.Context, Method2, Args2);
+ ++NumComparedArguments;
}
Args1.insert(Args1.end(), Proto1->param_type_begin(),
@@ -4913,6 +5068,8 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
Args1.resize(NumComparedArguments);
if (Args2.size() > NumComparedArguments)
Args2.resize(NumComparedArguments);
+ if (Reversed)
+ std::reverse(Args2.begin(), Args2.end());
if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
Args1.data(), Args1.size(), Info, Deduced,
TDF_None, /*PartialOrdering=*/true))
@@ -5031,6 +5188,10 @@ static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
/// \param NumCallArguments2 The number of arguments in the call to FT2, used
/// only when \c TPOC is \c TPOC_Call.
///
+/// \param Reversed If \c true, exactly one of FT1 and FT2 is an overload
+/// candidate with a reversed parameter order. In this case, the corresponding
+/// P/A pairs between FT1 and FT2 are reversed.
+///
/// \returns the more specialized function template. If neither
/// template is more specialized, returns NULL.
FunctionTemplateDecl *
@@ -5039,7 +5200,8 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
- unsigned NumCallArguments2) {
+ unsigned NumCallArguments2,
+ bool Reversed) {
auto JudgeByConstraints = [&] () -> FunctionTemplateDecl * {
llvm::SmallVector<const Expr *, 3> AC1, AC2;
@@ -5056,9 +5218,9 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
};
bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
- NumCallArguments1);
+ NumCallArguments1, Reversed);
bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
- NumCallArguments2);
+ NumCallArguments2, Reversed);
if (Better1 != Better2) // We have a clear winner
return Better1 ? FT1 : FT2;
@@ -5237,14 +5399,15 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
Sema::InstantiatingTemplate Inst(S, Info.getLocation(), P2, DeducedArgs,
Info);
auto *TST1 = T1->castAs<TemplateSpecializationType>();
- if (FinishTemplateArgumentDeduction(
- S, P2, /*IsPartialOrdering=*/true,
- TemplateArgumentList(TemplateArgumentList::OnStack,
- TST1->template_arguments()),
- Deduced, Info))
- return false;
-
- return true;
+ bool AtLeastAsSpecialized;
+ S.runWithSufficientStackSpace(Info.getLocation(), [&] {
+ AtLeastAsSpecialized = !FinishTemplateArgumentDeduction(
+ S, P2, /*IsPartialOrdering=*/true,
+ TemplateArgumentList(TemplateArgumentList::OnStack,
+ TST1->template_arguments()),
+ Deduced, Info);
+ });
+ return AtLeastAsSpecialized;
}
/// Returns the more specialized class template partial specialization
@@ -5679,6 +5842,24 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
break;
}
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MatType = cast<ConstantMatrixType>(T);
+ MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::DependentSizedMatrix: {
+ const DependentSizedMatrixType *MatType = cast<DependentSizedMatrixType>(T);
+ MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, MatType->getRowExpr(), OnlyDeduced, Depth,
+ Used);
+ MarkUsedTemplateParameters(Ctx, MatType->getColumnExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
case Type::FunctionProto: {
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth,
@@ -5834,6 +6015,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
cast<DeducedType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
break;
+ case Type::DependentExtInt:
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentExtIntType>(T)->getNumBitsExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
// None of these types have any template parameters in them.
case Type::Builtin:
@@ -5846,6 +6032,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::ObjCObjectPointer:
case Type::UnresolvedUsing:
case Type::Pipe:
+ case Type::ExtInt:
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base)
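
A hedged sketch of the deduction the new ConstantMatrix and DependentSizedMatrix cases above enable, using the matrix_type attribute spelled out in the hunk (the matrix extension must be enabled); the names are illustrative:

template <typename T, int R, int C>
using matrix_t = T __attribute__((matrix_type(R, C)));

template <typename T, int R, int C>
void consume(matrix_t<T, R, C>);

void demo(matrix_t<float, 4, 2> m) {
  consume(m);   // deduces T = float, R = 4, C = 2
}
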
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index c53c37ee109f..11e03c517d01 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -9,7 +9,6 @@
//
//===----------------------------------------------------------------------===/
-#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -21,13 +20,15 @@
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Stack.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaConcept.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
-#include "clang/Sema/SemaConcept.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -214,6 +215,8 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case ParameterMappingSubstitution:
case ConstraintNormalization:
case RewritingOperatorAsSpaceship:
+ case InitializingStructuredBinding:
+ case MarkingClassDllexported:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -759,9 +762,21 @@ void Sema::PrintInstantiationStack() {
diag::note_rewriting_operator_as_spaceship);
break;
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_in_binding_decl_init)
+ << cast<BindingDecl>(Active->Entity);
+ break;
+
+ case CodeSynthesisContext::MarkingClassDllexported:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_due_to_dllexported_class)
+ << cast<CXXRecordDecl>(Active->Entity) << !getLangOpts().CPlusPlus11;
+ break;
+
case CodeSynthesisContext::Memoization:
break;
-
+
case CodeSynthesisContext::ConstraintsCheck: {
unsigned DiagID = 0;
if (!Active->Entity) {
@@ -860,6 +875,8 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DeclaringImplicitEqualityComparison:
case CodeSynthesisContext::DefiningSynthesizedFunction:
case CodeSynthesisContext::RewritingOperatorAsSpaceship:
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ case CodeSynthesisContext::MarkingClassDllexported:
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
return None;
@@ -1345,6 +1362,19 @@ TemplateName TemplateInstantiator::TransformTemplateName(
TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
+ "unexpected pack arguments in template rewrite");
+ Arg = Arg.pack_begin()->getPackExpansionPattern();
+ }
+ assert(Arg.getKind() == TemplateArgument::Template &&
+ "unexpected nontype template argument kind in template rewrite");
+ return Arg.getAsTemplate();
+ }
+
if (TTP->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1384,11 +1414,47 @@ TemplateName TemplateInstantiator::TransformTemplateName(
AllowInjectedClassName);
}
+static ExprResult TransformUniqueStableName(TemplateInstantiator &TI,
+ PredefinedExpr *E) {
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType) {
+ TypeSourceInfo *Info =
+ TI.getDerived().TransformType(E->getTypeSourceInfo());
+
+ if (!Info)
+ return ExprError();
+
+ if (!TI.getDerived().AlwaysRebuild() && Info == E->getTypeSourceInfo())
+ return E;
+
+ return TI.getSema().BuildUniqueStableName(E->getLocation(), Info);
+ }
+
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr) {
+ EnterExpressionEvaluationContext Unevaluated(
+ TI.getSema(), Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult SubExpr = TI.getDerived().TransformExpr(E->getExpr());
+
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!TI.getDerived().AlwaysRebuild() && SubExpr.get() == E->getExpr())
+ return E;
+
+ return TI.getSema().BuildUniqueStableName(E->getLocation(), SubExpr.get());
+ }
+
+ llvm_unreachable("Only valid for UniqueStableNameType/Expr");
+}
+
ExprResult
TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType ||
+ E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr)
+ return TransformUniqueStableName(*this, E);
+
return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentKind());
}
@@ -1405,19 +1471,18 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
- if (TemplateArgs.getNumLevels() != TemplateArgs.getNumSubstitutedLevels()) {
- // We're performing a partial substitution, so the substituted argument
- // could be dependent. As a result we can't create a SubstNonType*Expr
- // node now, since that represents a fully-substituted argument.
- // FIXME: We should have some AST representation for this.
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
if (Arg.getKind() == TemplateArgument::Pack) {
- // FIXME: This won't work for alias templates.
assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
- "unexpected pack arguments in partial substitution");
+ "unexpected pack arguments in template rewrite");
Arg = Arg.pack_begin()->getPackExpansionPattern();
}
assert(Arg.getKind() == TemplateArgument::Expression &&
- "unexpected nontype template argument kind in partial substitution");
+ "unexpected nontype template argument kind in template rewrite");
+ // FIXME: This can lead to the same subexpression appearing multiple times
+ // in a complete expression.
return Arg.getAsExpr();
}
@@ -1729,6 +1794,24 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
+ "unexpected pack arguments in template rewrite");
+ Arg = Arg.pack_begin()->getPackExpansionPattern();
+ }
+ assert(Arg.getKind() == TemplateArgument::Type &&
+ "unexpected nontype template argument kind in template rewrite");
+ QualType NewT = Arg.getAsType();
+ assert(isa<TemplateTypeParmType>(NewT) &&
+ "type parm not rewritten to type parm");
+ auto NewTL = TLB.push<TemplateTypeParmTypeLoc>(NewT);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return NewT;
+ }
+
if (T->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -2343,7 +2426,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg()) {
FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
- if (OwningFunc->isInLocalScope()) {
+ if (OwningFunc->isInLocalScopeForInstantiation()) {
// Instantiate default arguments for methods of local classes (DR1484)
// and non-defining declarations.
Sema::ContextRAII SavedContext(*this, OwningFunc);
@@ -2352,7 +2435,12 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
if (NewArg.isUsable()) {
// It would be nice if we still had this.
SourceLocation EqualLoc = NewArg.get()->getBeginLoc();
- SetParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ ExprResult Result =
+ ConvertParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ if (Result.isInvalid())
+ return nullptr;
+
+ SetParamDefaultArgument(NewParm, Result.getAs<Expr>(), EqualLoc);
}
} else {
// FIXME: if we non-lazily instantiated non-dependent default args for
@@ -3521,6 +3609,12 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) {
if (isa<EnumDecl>(D))
return nullptr;
+  // Materialized typedefs/type aliases for implicit deduction guides may require
+ // instantiation.
+ if (isa<TypedefNameDecl>(D) &&
+ isa<CXXDeductionGuideDecl>(D->getDeclContext()))
+ return nullptr;
+
// If we didn't find the decl, then we either have a sema bug, or we have a
// forward reference to a label declaration. Return null to indicate that
// we have an uninstantiated label.
@@ -3572,6 +3666,13 @@ void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
ArgumentPacks.push_back(Pack);
}
+bool LocalInstantiationScope::isLocalPackExpansion(const Decl *D) {
+ for (DeclArgumentPack *Pack : ArgumentPacks)
+ if (std::find(Pack->begin(), Pack->end(), D) != Pack->end())
+ return true;
+ return false;
+}
+
void LocalInstantiationScope::SetPartiallySubstitutedPack(NamedDecl *Pack,
const TemplateArgument *ExplicitArgs,
unsigned NumExplicitArgs) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index f801e79c8902..2efb7acb9724 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -8,7 +8,7 @@
// This file implements C++ template instantiation for declarations.
//
//===----------------------------------------------------------------------===/
-#include "clang/Sema/SemaInternal.h"
+
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -19,8 +19,11 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/TimeProfiler.h"
@@ -393,50 +396,39 @@ static void instantiateOMPDeclareVariantAttr(
VariantFuncRef = Subst(E);
}
+  // Copy the template version of the OMPTraitInfo and substitute template
+  // arguments into all score and condition expressions.
+ OMPTraitInfo &TI = S.getASTContext().getNewOMPTraitInfo();
+ TI = *Attr.getTraitInfos();
+
+ // Try to substitute template parameters in score and condition expressions.
+ auto SubstScoreOrConditionExpr = [&S, Subst](Expr *&E, bool) {
+ if (E) {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult ER = Subst(E);
+ if (ER.isUsable())
+ E = ER.get();
+ else
+ return true;
+ }
+ return false;
+ };
+ if (TI.anyScoreOrCondition(SubstScoreOrConditionExpr))
+ return;
+
// Check function/variant ref.
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
- S.checkOpenMPDeclareVariantFunction(
- S.ConvertDeclToDeclGroup(New), VariantFuncRef.get(), Attr.getRange());
+ S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New),
+ VariantFuncRef.get(), TI,
+ Attr.getRange());
+
if (!DeclVarData)
return;
- SmallVector<Sema::OMPCtxSelectorData, 4> Data;
- for (unsigned I = 0, E = Attr.scores_size(); I < E; ++I) {
- ExprResult Score;
- if (Expr *E = *std::next(Attr.scores_begin(), I))
- Score = Subst(E);
- // Instantiate the attribute.
- auto CtxSet = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(Attr.ctxSelectorSets_begin(), I));
- auto Ctx = static_cast<OpenMPContextSelectorKind>(
- *std::next(Attr.ctxSelectors_begin(), I));
- switch (CtxSet) {
- case OMP_CTX_SET_implementation:
- switch (Ctx) {
- case OMP_CTX_vendor:
- Data.emplace_back(CtxSet, Ctx, Score, Attr.implVendors());
- break;
- case OMP_CTX_kind:
- case OMP_CTX_unknown:
- llvm_unreachable("Unexpected context selector kind.");
- }
- break;
- case OMP_CTX_SET_device:
- switch (Ctx) {
- case OMP_CTX_kind:
- Data.emplace_back(CtxSet, Ctx, Score, Attr.deviceKinds());
- break;
- case OMP_CTX_vendor:
- case OMP_CTX_unknown:
- llvm_unreachable("Unexpected context selector kind.");
- }
- break;
- case OMP_CTX_SET_unknown:
- llvm_unreachable("Unexpected context selector set kind.");
- }
- }
+
S.ActOnOpenMPDeclareVariantDirective(DeclVarData.getValue().first,
- DeclVarData.getValue().second,
- Attr.getRange(), Data);
+ DeclVarData.getValue().second, TI,
+ Attr.getRange());
}
static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
@@ -706,6 +698,10 @@ TemplateDeclInstantiator::VisitExternCContextDecl(ExternCContextDecl *D) {
llvm_unreachable("extern \"C\" context cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitMSGuidDecl(MSGuidDecl *D) {
+ llvm_unreachable("GUID declaration cannot be instantiated");
+}
+
Decl *
TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
@@ -1915,6 +1911,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
D->hasWrittenPrototype(), D->getConstexprKind(),
TrailingRequiresClause);
Function->setRangeEnd(D->getSourceRange().getEnd());
+ Function->setUsesFPIntrin(D->usesFPIntrin());
}
if (D->isInlined())
@@ -2048,7 +2045,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// Look only into the namespace where the friend would be declared to
// find a previous declaration. This is the innermost enclosing namespace,
// as described in ActOnFriendFunctionDecl.
- SemaRef.LookupQualifiedName(Previous, DC);
+ SemaRef.LookupQualifiedName(Previous, DC->getRedeclContext());
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
@@ -3628,6 +3625,9 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
if (InsertPos)
VarTemplate->AddSpecialization(Var, InsertPos);
+ if (SemaRef.getLangOpts().OpenCL)
+ SemaRef.deduceOpenCLAddressSpace(Var);
+
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Var))
return nullptr;
@@ -3722,6 +3722,8 @@ FunctionDecl *Sema::SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
// access and function-definition and in the same class scope as the
// three-way comparison operator function
MultiLevelTemplateArgumentList NoTemplateArgs;
+ NoTemplateArgs.setKind(TemplateSubstitutionKind::Rewrite);
+ NoTemplateArgs.addOuterRetainedLevels(RD->getTemplateDepth());
TemplateDeclInstantiator Instantiator(*this, RD, NoTemplateArgs);
Decl *R;
if (auto *MD = dyn_cast<CXXMethodDecl>(Spaceship)) {
@@ -4228,6 +4230,94 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
return false;
}
+bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
+ ParmVarDecl *Param) {
+ assert(Param->hasUninstantiatedDefaultArg());
+ Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
+
+ // Instantiate the expression.
+ //
+ // FIXME: Pass in a correct Pattern argument, otherwise
+ // getTemplateInstantiationArgs uses the lexical context of FD, e.g.
+ //
+ // template<typename T>
+ // struct A {
+ // static int FooImpl();
+ //
+ // template<typename Tp>
+ // // bug: default argument A<T>::FooImpl() is evaluated with 2-level
+ // // template argument list [[T], [Tp]], should be [[Tp]].
+ // friend A<Tp> Foo(int a);
+ // };
+ //
+ // template<typename T>
+ // A<T> Foo(int a = A<T>::FooImpl());
+ MultiLevelTemplateArgumentList TemplateArgs
+ = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
+
+ InstantiatingTemplate Inst(*this, CallLoc, Param,
+ TemplateArgs.getInnermost());
+ if (Inst.isInvalid())
+ return true;
+ if (Inst.isAlreadyInstantiating()) {
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
+ Param->setInvalidDecl();
+ return true;
+ }
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ LocalInstantiationScope Local(*this);
+
+ FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(
+ /*ForDefinition*/ false);
+ if (addInstantiatedParametersToScope(*this, FD, Pattern, Local,
+ TemplateArgs))
+ return true;
+
+ runWithSufficientStackSpace(CallLoc, [&] {
+ Result = SubstInitializer(UninstExpr, TemplateArgs,
+ /*DirectInit*/false);
+ });
+ }
+ if (Result.isInvalid())
+ return true;
+
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Param->getLocation(),
+ /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
+ Expr *ResultE = Result.getAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
+ Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
+ if (Result.isInvalid())
+ return true;
+
+ Result =
+ ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
+ /*DiscardedValue*/ false);
+ if (Result.isInvalid())
+ return true;
+
+ // Remember the instantiated default argument.
+ Param->setDefaultArg(Result.getAs<Expr>());
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->DefaultArgumentInstantiated(Param);
+
+ return false;
+}
+
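As an illustration of when Sema::InstantiateDefaultArgument runs (a sketch, not part of the patch): dependent default arguments are instantiated lazily, at the call that actually needs them, which is also where the recursive-default-argument diagnostic above can trigger.

// Minimal sketch, standard C++ only; names are made up for illustration.
template <typename T>
struct Maker {
  static T make() { return T(); }
};

template <typename T>
void consume(T value = Maker<T>::make()) {}   // dependent default argument

int main() {
  consume<int>();    // default argument instantiated here, via InstantiateDefaultArgument
  consume<int>(42);  // explicit argument supplied, the default is not needed
}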
void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Decl) {
const FunctionProtoType *Proto = Decl->getType()->castAs<FunctionProtoType>();
@@ -4258,6 +4348,10 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
MultiLevelTemplateArgumentList TemplateArgs =
getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
+ // FIXME: We can't use getTemplateInstantiationPattern(false) in general
+ // here, because for a non-defining friend declaration in a class template,
+ // we don't store enough information to map back to the friend declaration in
+ // the template.
FunctionDecl *Template = Proto->getExceptionSpecTemplate();
if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
TemplateArgs)) {
@@ -4367,7 +4461,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
EPI.ExceptionSpec.Type != EST_None &&
EPI.ExceptionSpec.Type != EST_DynamicNone &&
EPI.ExceptionSpec.Type != EST_BasicNoexcept &&
- !Tmpl->isInLocalScope()) {
+ !Tmpl->isInLocalScopeForInstantiation()) {
FunctionDecl *ExceptionSpecTemplate = Tmpl;
if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;
@@ -4804,6 +4898,9 @@ VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
// Instantiate the initializer.
InstantiateVariableInitializer(VarSpec, PatternDecl, TemplateArgs);
+ if (getLangOpts().OpenCL)
+ deduceOpenCLAddressSpace(VarSpec);
+
return VarSpec;
}
@@ -4844,6 +4941,7 @@ void Sema::BuildVariableInstantiation(
NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl());
NewVar->setObjCForDecl(OldVar->isObjCForDecl());
NewVar->setConstexpr(OldVar->isConstexpr());
+ MaybeAddCUDAConstantAttr(NewVar);
NewVar->setInitCapture(OldVar->isInitCapture());
NewVar->setPreviousDeclInSameBlockScope(
OldVar->isPreviousDeclInSameBlockScope());
@@ -5607,6 +5705,20 @@ DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
} else return DC;
}
+/// Determine whether the given context is dependent on template parameters at
+/// level \p Level or below.
+///
+/// Sometimes we only substitute an inner set of template arguments and leave
+/// the outer templates alone. In such cases, contexts dependent only on the
+/// outer levels are not effectively dependent.
+static bool isDependentContextAtLevel(DeclContext *DC, unsigned Level) {
+ if (!DC->isDependentContext())
+ return false;
+ if (!Level)
+ return true;
+ return cast<Decl>(DC)->getTemplateDepth() > Level;
+}
+
/// Find the instantiation of the given declaration within the
/// current instantiation.
///
@@ -5637,6 +5749,10 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext) {
DeclContext *ParentDC = D->getDeclContext();
+  // Determine whether our parent context depends on any of the template
+ // arguments we're currently substituting.
+ bool ParentDependsOnArgs = isDependentContextAtLevel(
+ ParentDC, TemplateArgs.getNumRetainedOuterLevels());
// FIXME: Parameters of pointers to functions (y below) that are themselves
// parameters (p below) can have their ParentDC set to the translation-unit
// - thus we can not consistently check if the ParentDC of such a parameter
@@ -5653,15 +5769,14 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// - as long as we have a ParmVarDecl whose parent is non-dependent and
// whose type is not instantiation dependent, do nothing to the decl
// - otherwise find its instantiated decl.
- if (isa<ParmVarDecl>(D) && !ParentDC->isDependentContext() &&
+ if (isa<ParmVarDecl>(D) && !ParentDependsOnArgs &&
!cast<ParmVarDecl>(D)->getType()->isInstantiationDependentType())
return D;
if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
- ((ParentDC->isFunctionOrMethod() ||
- isa<OMPDeclareReductionDecl>(ParentDC) ||
- isa<OMPDeclareMapperDecl>(ParentDC)) &&
- ParentDC->isDependentContext()) ||
+ (ParentDependsOnArgs && (ParentDC->isFunctionOrMethod() ||
+ isa<OMPDeclareReductionDecl>(ParentDC) ||
+ isa<OMPDeclareMapperDecl>(ParentDC))) ||
(isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
@@ -5704,6 +5819,9 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
bool NeedInstantiate = false;
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
NeedInstantiate = RD->isLocalClass();
+ else if (isa<TypedefNameDecl>(D) &&
+ isa<CXXDeductionGuideDecl>(D->getDeclContext()))
+ NeedInstantiate = true;
else
NeedInstantiate = isa<EnumDecl>(D);
if (NeedInstantiate) {
@@ -5812,7 +5930,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// anonymous unions in class templates).
}
- if (!ParentDC->isDependentContext())
+ if (!ParentDependsOnArgs)
return D;
ParentDC = FindInstantiatedContext(Loc, ParentDC, TemplateArgs);
@@ -5881,10 +5999,11 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (!Result) {
if (isa<UsingShadowDecl>(D)) {
// UsingShadowDecls can instantiate to nothing because of using hiding.
- } else if (Diags.hasErrorOccurred()) {
- // We've already complained about something, so most likely this
- // declaration failed to instantiate. There's no point in complaining
- // further, since this is normal in invalid code.
+ } else if (Diags.hasUncompilableErrorOccurred()) {
+ // We've already complained about some ill-formed code, so most likely
+ // this declaration failed to instantiate. There's no point in
+ // complaining further, since this is normal in invalid code.
+ // FIXME: Use more fine-grained 'invalid' tracking for this.
} else if (IsBeingInstantiated) {
// The class in which this member exists is currently being
// instantiated, and we haven't gotten around to instantiating this
@@ -5924,6 +6043,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
/// Performs template instantiation for all implicit template
/// instantiations we have seen until this point.
void Sema::PerformPendingInstantiations(bool LocalOnly) {
+ std::deque<PendingImplicitInstantiation> delayedPCHInstantiations;
while (!PendingLocalImplicitInstantiations.empty() ||
(!LocalOnly && !PendingInstantiations.empty())) {
PendingImplicitInstantiation Inst;
@@ -5954,6 +6074,10 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
if (Function->isDefined())
Function->setInstantiationIsPending(false);
}
+ // Definition of a PCH-ed template declaration may be available only in the TU.
+ if (!LocalOnly && LangOpts.PCHInstantiateTemplates &&
+ TUKind == TU_Prefix && Function->instantiationIsPending())
+ delayedPCHInstantiations.push_back(Inst);
continue;
}
@@ -5999,6 +6123,9 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
InstantiateVariableDefinition(/*FIXME:*/ Inst.second, Var, true,
DefinitionRequired, true);
}
+
+ if (!LocalOnly && LangOpts.PCHInstantiateTemplates)
+ PendingInstantiations.swap(delayedPCHInstantiations);
}
void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index d947d6d282be..7b77d1cb482a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -847,6 +847,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_typeofExpr:
case TST_decltype:
+ case TST_extint:
if (DS.getRepAsExpr() &&
DS.getRepAsExpr()->containsUnexpandedParameterPack())
return true;
@@ -880,6 +881,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_auto:
case TST_auto_type:
case TST_decltype_auto:
+ case TST_BFloat16:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
case TST_unknown_anytype:
@@ -940,7 +942,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
if (Expr *TRC = D.getTrailingRequiresClause())
if (TRC->containsUnexpandedParameterPack())
return true;
-
+
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index 93ddd047e09b..b8f7f1a58159 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "TypeLocBuilder.h"
-#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -36,6 +35,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -130,6 +130,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_NSReturnsRetained: \
case ParsedAttr::AT_NoReturn: \
case ParsedAttr::AT_Regparm: \
+ case ParsedAttr::AT_CmseNSCall: \
case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \
case ParsedAttr::AT_AnyX86NoCfCheck: \
CALLING_CONV_ATTRS_CASELIST
@@ -1441,6 +1442,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
+ case DeclSpec::TST_extint: {
+ if (!S.Context.getTargetInfo().hasExtIntType())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
+ << "_ExtInt";
+ Result = S.BuildExtIntType(DS.getTypeSpecSign() == TSS_unsigned,
+ DS.getRepAsExpr(), DS.getBeginLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
case DeclSpec::TST_accum: {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_short:
@@ -1508,6 +1521,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.Float16Ty;
break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
+ case DeclSpec::TST_BFloat16:
+ if (!S.Context.getTargetInfo().hasBFloat16Type())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
+ << "__bf16";
+ Result = Context.BFloat16Ty;
+ break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
@@ -1517,6 +1536,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
+ !S.getLangOpts().SYCLIsDevice &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
@@ -1678,6 +1698,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
}
+ // FIXME: we want resulting declarations to be marked invalid, but claiming
+ // the type is invalid is too strong - e.g. it causes ActOnTypeName to return
+ // a null type.
+ if (Result->containsErrors())
+ declarator.setInvalidType();
+
if (S.getLangOpts().OpenCL &&
S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
declarator.setInvalidType(true);
@@ -1733,7 +1759,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// The effect of a cv-qualifier-seq in a function declarator is not the
// same as adding cv-qualification on top of the function type. In the
// latter case, the cv-qualifiers are ignored.
- if (TypeQuals && Result->isFunctionType()) {
+ if (Result->isFunctionType()) {
diagnoseAndRemoveTypeQualifiers(
S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile,
S.getLangOpts().CPlusPlus
@@ -2154,6 +2180,45 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
+/// Build an extended int type.
+///
+/// \param IsUnsigned Boolean representing the signedness of the type.
+///
+/// \param BitWidth Size of this int type in bits, or an expression representing
+/// that.
+///
+/// \param Loc Location of the keyword.
+QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
+ SourceLocation Loc) {
+ if (BitWidth->isInstantiationDependent())
+ return Context.getDependentExtIntType(IsUnsigned, BitWidth);
+
+ llvm::APSInt Bits(32);
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Bits);
+
+ if (ICE.isInvalid())
+ return QualType();
+
+ int64_t NumBits = Bits.getSExtValue();
+ if (!IsUnsigned && NumBits < 2) {
+ Diag(Loc, diag::err_ext_int_bad_size) << 0;
+ return QualType();
+ }
+
+ if (IsUnsigned && NumBits < 1) {
+ Diag(Loc, diag::err_ext_int_bad_size) << 1;
+ return QualType();
+ }
+
+ if (NumBits > llvm::IntegerType::MAX_INT_BITS) {
+ Diag(Loc, diag::err_ext_int_max_size) << IsUnsigned
+ << llvm::IntegerType::MAX_INT_BITS;
+ return QualType();
+ }
+
+ return Context.getExtIntType(IsUnsigned, NumBits);
+}
+
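A short usage sketch of the checks in BuildExtIntType above (assuming a target for which hasExtIntType() is true): a signed _ExtInt needs at least 2 bits, an unsigned one at least 1, and both are capped at llvm::IntegerType::MAX_INT_BITS.

// Illustrative declarations only; the commented-out ones hit the
// err_ext_int_bad_size paths above.
_ExtInt(7) a;               // OK: signed, width >= 2
unsigned _ExtInt(1) b;      // OK: unsigned, width >= 1
// _ExtInt(1) c;            // rejected: signed _ExtInt narrower than 2 bits
// unsigned _ExtInt(0) d;   // rejected: unsigned _ExtInt narrower than 1 bit

template <unsigned N>
struct Fixed {
  _ExtInt(N) value;         // dependent width: getDependentExtIntType is used
};                          // until the template is instantiated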
/// Check whether the specified array size makes the array type a VLA. If so,
/// return true, if not, return the size of the array in SizeVal.
static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
@@ -2215,7 +2280,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
if (T->isVoidType() || T->isIncompleteArrayType()) {
- Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
+ Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 0 << T;
return QualType();
}
@@ -2233,11 +2298,16 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
- if (RequireCompleteType(Loc, T,
- diag::err_illegal_decl_array_incomplete_type))
+ if (RequireCompleteSizedType(Loc, T,
+ diag::err_array_incomplete_or_sizeless_type))
return QualType();
}
+ if (T->isSizelessType()) {
+ Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 1 << T;
+ return QualType();
+ }
+
if (T->isFunctionType()) {
Diag(Loc, diag::err_illegal_decl_array_of_functions)
<< getPrintableNameForEntity(Entity) << T;
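For context, the kind of declaration the new isSizelessType() check rejects (a sketch assuming an AArch64 target with SVE enabled, where <arm_sve.h> provides sizeless builtin vector types):

// Assumed flags: -target aarch64-linux-gnu -march=armv8-a+sve
#include <arm_sve.h>

svint8_t pass_through(svint8_t v) { return v; }  // sizeless values are fine
// svint8_t table[4];   // rejected: err_array_incomplete_or_sizeless_type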
@@ -2323,13 +2393,6 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
-
- if (ASM == ArrayType::Static) {
- Diag(ArraySize->getBeginLoc(),
- diag::warn_typecheck_zero_static_array_size)
- << ArraySize->getSourceRange();
- ASM = ArrayType::Normal;
- }
} else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
!T->isIncompleteType() && !T->isUndeducedType()) {
// Is the array too large?
@@ -2425,28 +2488,35 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
VectorType::GenericVector);
- unsigned VectorSize = static_cast<unsigned>(VecSize.getZExtValue() * 8);
+ // vecSize is specified in bytes - convert to bits.
+ if (!VecSize.isIntN(61)) {
+ // Bit size will overflow uint64.
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << SizeExpr->getSourceRange() << "vector";
+ return QualType();
+ }
+ uint64_t VectorSizeBits = VecSize.getZExtValue() * 8;
unsigned TypeSize = static_cast<unsigned>(Context.getTypeSize(CurType));
- if (VectorSize == 0) {
- Diag(AttrLoc, diag::err_attribute_zero_size) << SizeExpr->getSourceRange();
+ if (VectorSizeBits == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << SizeExpr->getSourceRange() << "vector";
return QualType();
}
- // vecSize is specified in bytes - convert to bits.
- if (VectorSize % TypeSize) {
+ if (VectorSizeBits % TypeSize) {
Diag(AttrLoc, diag::err_attribute_invalid_size)
<< SizeExpr->getSourceRange();
return QualType();
}
- if (VectorType::isVectorSizeTooLarge(VectorSize / TypeSize)) {
+ if (VectorSizeBits / TypeSize > std::numeric_limits<uint32_t>::max()) {
Diag(AttrLoc, diag::err_attribute_size_too_large)
- << SizeExpr->getSourceRange();
+ << SizeExpr->getSourceRange() << "vector";
return QualType();
}
- return Context.getVectorType(CurType, VectorSize / TypeSize,
+ return Context.getVectorType(CurType, VectorSizeBits / TypeSize,
VectorType::GenericVector);
}
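To make the units concrete (illustrative, not part of the patch): gcc-style vector_size is given in bytes, which the code above now widens to a 64-bit bit count before dividing by the element size, while ext_vector_type, handled just below, counts elements.

// 16 bytes of int -> 4 lanes.
typedef int v4si __attribute__((vector_size(16)));

// ext_vector_type counts elements directly: 4 floats.
typedef float float4 __attribute__((ext_vector_type(4)));

// A byte count whose bit total does not fit in 61 bits now reaches the
// err_attribute_size_too_large path instead of silently overflowing:
// typedef char huge __attribute__((vector_size(1ULL << 62)));  // rejected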
@@ -2478,19 +2548,18 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
return QualType();
}
+ if (!vecSize.isIntN(32)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << ArraySize->getSourceRange() << "vector";
+ return QualType();
+ }
// Unlike gcc's vector_size attribute, the size is specified as the
// number of elements, not the number of bytes.
unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
if (vectorSize == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size)
- << ArraySize->getSourceRange();
- return QualType();
- }
-
- if (VectorType::isVectorSizeTooLarge(vectorSize)) {
- Diag(AttrLoc, diag::err_attribute_size_too_large)
- << ArraySize->getSourceRange();
+ << ArraySize->getSourceRange() << "vector";
return QualType();
}
@@ -2500,6 +2569,84 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
}
+QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols,
+ SourceLocation AttrLoc) {
+ assert(Context.getLangOpts().MatrixTypes &&
+ "Should never build a matrix type when it is disabled");
+
+ // Check element type, if it is not dependent.
+ if (!ElementTy->isDependentType() &&
+ !MatrixType::isValidElementType(ElementTy)) {
+ Diag(AttrLoc, diag::err_attribute_invalid_matrix_type) << ElementTy;
+ return QualType();
+ }
+
+ if (NumRows->isTypeDependent() || NumCols->isTypeDependent() ||
+ NumRows->isValueDependent() || NumCols->isValueDependent())
+ return Context.getDependentSizedMatrixType(ElementTy, NumRows, NumCols,
+ AttrLoc);
+
+ // Both row and column values can only be 20 bit wide currently.
+ llvm::APSInt ValueRows(32), ValueColumns(32);
+
+ bool const RowsIsInteger = NumRows->isIntegerConstantExpr(ValueRows, Context);
+ bool const ColumnsIsInteger =
+ NumCols->isIntegerConstantExpr(ValueColumns, Context);
+
+ auto const RowRange = NumRows->getSourceRange();
+ auto const ColRange = NumCols->getSourceRange();
+
+  // Both the row and column expressions are invalid.
+ if (!RowsIsInteger && !ColumnsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange
+ << ColRange;
+ return QualType();
+ }
+
+ // Only the row expression is invalid.
+ if (!RowsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange;
+ return QualType();
+ }
+
+ // Only the column expression is invalid.
+ if (!ColumnsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << ColRange;
+ return QualType();
+ }
+
+ // Check the matrix dimensions.
+ unsigned MatrixRows = static_cast<unsigned>(ValueRows.getZExtValue());
+ unsigned MatrixColumns = static_cast<unsigned>(ValueColumns.getZExtValue());
+ if (MatrixRows == 0 && MatrixColumns == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << "matrix" << RowRange << ColRange;
+ return QualType();
+ }
+ if (MatrixRows == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << RowRange;
+ return QualType();
+ }
+ if (MatrixColumns == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << ColRange;
+ return QualType();
+ }
+ if (!ConstantMatrixType::isDimensionValid(MatrixRows)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << RowRange << "matrix row";
+ return QualType();
+ }
+ if (!ConstantMatrixType::isDimensionValid(MatrixColumns)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << ColRange << "matrix column";
+ return QualType();
+ }
+ return Context.getConstantMatrixType(ElementTy, MatrixRows, MatrixColumns);
+}
+
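An illustrative use of BuildMatrixType (assuming the matrix extension is enabled, e.g. via -fenable-matrix, which is what sets LangOpts.MatrixTypes):

// Constant dimensions: a 4x4 float matrix value type.
using m4x4f = float __attribute__((matrix_type(4, 4)));

// Dependent dimensions: getDependentSizedMatrixType is used until R and C
// are known.
template <typename T, unsigned R, unsigned C>
using matrix_t = T __attribute__((matrix_type(R, C)));

// using bad = float __attribute__((matrix_type(0, 4)));  // rejected: zero rows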
bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
if (T->isArrayType() || T->isFunctionType()) {
Diag(Loc, diag::err_func_returning_array_function)
@@ -2529,7 +2676,7 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
// C++2a [dcl.fct]p12:
// A volatile-qualified return type is deprecated
- if (T.isVolatileQualified() && getLangOpts().CPlusPlus2a)
+ if (T.isVolatileQualified() && getLangOpts().CPlusPlus20)
Diag(Loc, diag::warn_deprecated_volatile_return) << T;
return false;
@@ -2614,7 +2761,7 @@ QualType Sema::BuildFunctionType(QualType T,
// C++2a [dcl.fct]p4:
// A parameter with volatile-qualified type is deprecated
- if (ParamType.isVolatileQualified() && getLangOpts().CPlusPlus2a)
+ if (ParamType.isVolatileQualified() && getLangOpts().CPlusPlus20)
Diag(Loc, diag::warn_deprecated_volatile_param) << ParamType;
ParamTypes[Idx] = ParamType;
@@ -3115,7 +3262,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
InventedTemplateParameterInfo *Info = nullptr;
if (D.getContext() == DeclaratorContext::PrototypeContext) {
// With concepts we allow 'auto' in function parameters.
- if (!SemaRef.getLangOpts().CPlusPlus2a || !Auto ||
+ if (!SemaRef.getLangOpts().CPlusPlus20 || !Auto ||
Auto->getKeyword() != AutoTypeKeyword::Auto) {
Error = 0;
break;
@@ -3147,12 +3294,16 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
D.isFunctionDeclarator())
break;
bool Cxx = SemaRef.getLangOpts().CPlusPlus;
- switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
- case TTK_Enum: llvm_unreachable("unhandled tag kind");
- case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
- case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
- case TTK_Class: Error = 5; /* Class member */ break;
- case TTK_Interface: Error = 6; /* Interface member */ break;
+ if (isa<ObjCContainerDecl>(SemaRef.CurContext)) {
+ Error = 6; // Interface member.
+ } else {
+ switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
+ case TTK_Enum: llvm_unreachable("unhandled tag kind");
+ case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
+ case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
+ case TTK_Class: Error = 5; /* Class member */ break;
+ case TTK_Interface: Error = 6; /* Interface member */ break;
+ }
}
if (D.getDeclSpec().isFriendSpecified())
Error = 20; // Friend type
@@ -4730,7 +4881,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// An error occurred parsing the trailing return type.
T = Context.IntTy;
D.setInvalidType(true);
- } else if (S.getLangOpts().CPlusPlus2a)
+ } else if (S.getLangOpts().CPlusPlus20)
// Handle cases like: `auto f() -> auto` or `auto f() -> C auto`.
if (AutoType *Auto = T->getContainedAutoType())
if (S.getCurScope()->isFunctionDeclarationScope())
@@ -4839,7 +4990,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++2a [dcl.fct]p12:
// A volatile-qualified return type is deprecated
- if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus2a)
+ if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20)
S.Diag(DeclType.Loc, diag::warn_deprecated_volatile_return) << T;
}
@@ -4981,8 +5132,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// FIXME: This really should be in BuildFunctionType.
if (S.getLangOpts().OpenCL) {
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
- S.Diag(Param->getLocation(),
- diag::err_opencl_half_param) << ParamTy;
+ S.Diag(Param->getLocation(), diag::err_opencl_invalid_param)
+ << ParamTy << 0;
D.setInvalidType();
Param->setInvalidDecl();
}
@@ -5001,6 +5152,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Param->setKNRPromoted(true);
}
}
+ } else if (S.getLangOpts().OpenCL && ParamTy->isBlockPointerType()) {
+ // OpenCL 2.0 s6.12.5: A block cannot be a parameter of a function.
+ S.Diag(Param->getLocation(), diag::err_opencl_invalid_param)
+ << ParamTy << 1 /*hint off*/;
+ D.setInvalidType();
}
if (LangOpts.ObjCAutoRefCount && Param->hasAttr<NSConsumedAttr>()) {
@@ -5330,7 +5486,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++2a [dcl.fct]p4:
// A parameter with volatile-qualified type is deprecated
- if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus2a &&
+ if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20 &&
(D.getContext() == DeclaratorContext::PrototypeContext ||
D.getContext() == DeclaratorContext::LambdaExprParameterContext))
S.Diag(D.getIdentifierLoc(), diag::warn_deprecated_volatile_param) << T;
@@ -5356,7 +5512,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// We represent function parameter packs as function parameters whose
// type is a pack expansion.
if (!T->containsUnexpandedParameterPack() &&
- (!LangOpts.CPlusPlus2a || !T->getContainedAutoType())) {
+ (!LangOpts.CPlusPlus20 || !T->getContainedAutoType())) {
S.Diag(D.getEllipsisLoc(),
diag::err_function_parameter_pack_without_parameter_packs)
<< T << D.getSourceRange();
@@ -5758,6 +5914,14 @@ namespace {
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
}
+ void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+
+ void VisitDependentExtIntTypeLoc(DependentExtIntTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+
void VisitTypeLoc(TypeLoc TL) {
// FIXME: add other typespec types and change this to an assert.
TL.initialize(Context, DS.getTypeSpecTypeLoc());
@@ -5841,7 +6005,7 @@ namespace {
}
// Finally fill in MemberPointerLocInfo fields.
- TL.setStarLoc(Chunk.Loc);
+ TL.setStarLoc(SourceLocation::getFromRawEncoding(Chunk.Mem.StarLoc));
TL.setClassTInfo(ClsTInfo);
}
void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
@@ -5884,6 +6048,9 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
+ void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ TL.setNameLoc(Chunk.Loc);
+ }
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
TL.setExpansionLoc(Chunk.Loc);
}
@@ -5934,6 +6101,21 @@ fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
"no address_space attribute found at the expected location!");
}
+static void fillMatrixTypeLoc(MatrixTypeLoc MTL,
+ const ParsedAttributesView &Attrs) {
+ for (const ParsedAttr &AL : Attrs) {
+ if (AL.getKind() == ParsedAttr::AT_MatrixType) {
+ MTL.setAttrNameLoc(AL.getLoc());
+ MTL.setAttrRowOperand(AL.getArgAsExpr(0));
+ MTL.setAttrColumnOperand(AL.getArgAsExpr(1));
+ MTL.setAttrOperandParensRange(SourceRange());
+ return;
+ }
+ }
+
+ llvm_unreachable("no matrix_type attribute found at the expected location!");
+}
+
/// Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
@@ -5982,6 +6164,9 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
}
+ if (MatrixTypeLoc TL = CurrTL.getAs<MatrixTypeLoc>())
+ fillMatrixTypeLoc(TL, D.getTypeObject(i).getAttrs());
+
// FIXME: Ordering here?
while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
@@ -6498,6 +6683,7 @@ namespace {
Desugar,
Attributed,
Parens,
+ Array,
Pointer,
BlockPointer,
Reference,
@@ -6518,6 +6704,10 @@ namespace {
} else if (isa<ParenType>(Ty)) {
T = cast<ParenType>(Ty)->getInnerType();
Stack.push_back(Parens);
+ } else if (isa<ConstantArrayType>(Ty) || isa<VariableArrayType>(Ty) ||
+ isa<IncompleteArrayType>(Ty)) {
+ T = cast<ArrayType>(Ty)->getElementType();
+ Stack.push_back(Array);
} else if (isa<PointerType>(Ty)) {
T = cast<PointerType>(Ty)->getPointeeType();
Stack.push_back(Pointer);
@@ -6595,6 +6785,27 @@ namespace {
case MacroQualified:
return wrap(C, cast<MacroQualifiedType>(Old)->getUnderlyingType(), I);
+ case Array: {
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(Old)) {
+ QualType New = wrap(C, CAT->getElementType(), I);
+ return C.getConstantArrayType(New, CAT->getSize(), CAT->getSizeExpr(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeCVRQualifiers());
+ }
+
+ if (const auto *VAT = dyn_cast<VariableArrayType>(Old)) {
+ QualType New = wrap(C, VAT->getElementType(), I);
+ return C.getVariableArrayType(
+ New, VAT->getSizeExpr(), VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange());
+ }
+
+ const auto *IAT = cast<IncompleteArrayType>(Old);
+ QualType New = wrap(C, IAT->getElementType(), I);
+ return C.getIncompleteArrayType(New, IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers());
+ }
+
case Pointer: {
QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
return C.getPointerType(New);
@@ -6822,15 +7033,15 @@ static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
// attributes, require that the type be a single-level pointer.
if (isContextSensitive) {
// Make sure that the pointee isn't itself a pointer type.
- const Type *pointeeType;
+ const Type *pointeeType = nullptr;
if (desugared->isArrayType())
pointeeType = desugared->getArrayElementTypeNoTypeQual();
- else
+ else if (desugared->isAnyPointerType())
pointeeType = desugared->getPointeeType().getTypePtr();
- if (pointeeType->isAnyPointerType() ||
- pointeeType->isObjCObjectPointerType() ||
- pointeeType->isMemberPointerType()) {
+ if (pointeeType && (pointeeType->isAnyPointerType() ||
+ pointeeType->isObjCObjectPointerType() ||
+ pointeeType->isMemberPointerType())) {
S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
<< DiagNullabilityKind(nullability, true)
<< type;
@@ -7063,6 +7274,25 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_CmseNSCall) {
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Ignore if we don't have CMSE enabled.
+ if (!S.getLangOpts().Cmse) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_ignored) << attr;
+ attr.setInvalid();
+ return true;
+ }
+
+ // Otherwise we can process right away.
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withCmseNSCall(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
if (attr.getKind() == ParsedAttr::AT_NSReturnsRetained) {
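For reference, the function-type attribute the new AT_CmseNSCall branch accepts (a sketch assuming an Armv8-M target built with -mcmse, which sets LangOpts.Cmse; without it the attribute is ignored with a warning, as above):

// Assumed flags: --target=arm-none-eabi -mcpu=cortex-m33 -mcmse
typedef void nonsecure_fn(int) __attribute__((cmse_nonsecure_call));

void call_into_nonsecure(nonsecure_fn *fp) {
  fp(42);  // emitted as a CMSE non-secure call because of the type attribute
}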
@@ -7422,15 +7652,16 @@ static bool isPermittedNeonBaseType(QualType &Ty,
Triple.getArch() == llvm::Triple::aarch64_be;
if (VecKind == VectorType::NeonPolyVector) {
if (IsPolyUnsigned) {
- // AArch64 polynomial vectors are unsigned and support poly64.
+ // AArch64 polynomial vectors are unsigned.
return BTy->getKind() == BuiltinType::UChar ||
BTy->getKind() == BuiltinType::UShort ||
BTy->getKind() == BuiltinType::ULong ||
BTy->getKind() == BuiltinType::ULongLong;
} else {
- // AArch32 polynomial vector are signed.
+ // AArch32 polynomial vectors are signed.
return BTy->getKind() == BuiltinType::SChar ||
- BTy->getKind() == BuiltinType::Short;
+ BTy->getKind() == BuiltinType::Short ||
+ BTy->getKind() == BuiltinType::LongLong;
}
}
@@ -7451,7 +7682,8 @@ static bool isPermittedNeonBaseType(QualType &Ty,
BTy->getKind() == BuiltinType::LongLong ||
BTy->getKind() == BuiltinType::ULongLong ||
BTy->getKind() == BuiltinType::Float ||
- BTy->getKind() == BuiltinType::Half;
+ BTy->getKind() == BuiltinType::Half ||
+ BTy->getKind() == BuiltinType::BFloat16;
}
/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
@@ -7509,6 +7741,23 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = S.Context.getVectorType(CurType, numElts, VecKind);
}
+static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
+ QualType &CurType,
+ ParsedAttr &Attr) {
+ const VectorType *VT = dyn_cast<VectorType>(CurType);
+ if (!VT || VT->getVectorKind() != VectorType::NeonVector) {
+ State.getSema().Diag(Attr.getLoc(),
+ diag::err_attribute_arm_mve_polymorphism);
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType =
+ State.getAttributedType(createSimpleAttr<ArmMveStrictPolymorphismAttr>(
+ State.getSema().Context, Attr),
+ CurType, CurType);
+}
+
/// Handle OpenCL Access Qualifier Attribute.
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
@@ -7565,6 +7814,68 @@ static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
}
}
+/// HandleMatrixTypeAttr - "matrix_type" attribute, like ext_vector_type
+static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr,
+ Sema &S) {
+ if (!S.getLangOpts().MatrixTypes) {
+ S.Diag(Attr.getLoc(), diag::err_builtin_matrix_disabled);
+ return;
+ }
+
+ if (Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 2;
+ return;
+ }
+
+ Expr *RowsExpr = nullptr;
+ Expr *ColsExpr = nullptr;
+
+ // TODO: Refactor parameter extraction into separate function
+ // Get the number of rows
+ if (Attr.isArgIdent(0)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKeywordLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
+ ExprResult Rows = S.ActOnIdExpression(S.getCurScope(), SS,
+ TemplateKeywordLoc, id, false, false);
+
+ if (Rows.isInvalid())
+ // TODO: maybe a good error message would be nice here
+ return;
+ RowsExpr = Rows.get();
+ } else {
+    assert(Attr.isArgExpr(0) &&
+           "Argument should either be an identifier or an expression");
+ RowsExpr = Attr.getArgAsExpr(0);
+ }
+
+ // Get the number of columns
+ if (Attr.isArgIdent(1)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKeywordLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getArgAsIdent(1)->Ident, Attr.getLoc());
+ ExprResult Columns = S.ActOnIdExpression(
+ S.getCurScope(), SS, TemplateKeywordLoc, id, false, false);
+
+ if (Columns.isInvalid())
+ // TODO: a good error message would be nice here
+ return;
+    ColsExpr = Columns.get();
+ } else {
+    assert(Attr.isArgExpr(1) &&
+           "Argument should either be an identifier or an expression");
+ ColsExpr = Attr.getArgAsExpr(1);
+ }
+
+ // Create the matrix type.
+ QualType T = S.BuildMatrixType(CurType, RowsExpr, ColsExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+}
+
static void HandleLifetimeBoundAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
@@ -7693,6 +8004,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
VectorType::NeonPolyVector);
attr.setUsedAsTypeAttr();
break;
+ case ParsedAttr::AT_ArmMveStrictPolymorphism: {
+ HandleArmMveStrictPolymorphismAttr(state, type, attr);
+ attr.setUsedAsTypeAttr();
+ break;
+ }
case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
@@ -7711,6 +8027,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
}
+ case ParsedAttr::AT_MatrixType:
+ HandleMatrixTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
@@ -7787,6 +8108,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case ParsedAttr::AT_AcquireHandle: {
if (!type->isFunctionType())
return;
+
+ if (attr.getNumArgs() != 1) {
+ state.getSema().Diag(attr.getLoc(),
+ diag::err_attribute_wrong_number_arguments)
+ << attr << 1;
+ attr.setInvalid();
+ return;
+ }
+
StringRef HandleType;
if (!state.getSema().checkStringLiteralArgumentAttr(attr, 0, HandleType))
return;
@@ -7871,12 +8201,14 @@ void Sema::completeExprArrayBound(Expr *E) {
/// case of a reference type, the referred-to type).
///
/// \param E The expression whose type is required to be complete.
+/// \param Kind Selects which completeness rules should be applied.
/// \param Diagnoser The object that will emit a diagnostic if the type is
/// incomplete.
///
/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
/// otherwise.
-bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
+bool Sema::RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
+ TypeDiagnoser &Diagnoser) {
QualType T = E->getType();
// Incomplete array types may be completed by the initializer attached to
@@ -7891,12 +8223,12 @@ bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
// FIXME: Are there other cases which require instantiating something other
// than the type to complete the type of an expression?
- return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
+ return RequireCompleteType(E->getExprLoc(), T, Kind, Diagnoser);
}
bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
- return RequireCompleteExprType(E, Diagnoser);
+ return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
/// Ensure that the type T is a complete type.
@@ -7914,11 +8246,14 @@ bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
///
/// @param T The type that this routine is examining for completeness.
///
+/// @param Kind Selects which completeness rules should be applied.
+///
/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser) {
- if (RequireCompleteTypeImpl(Loc, T, &Diagnoser))
+ if (RequireCompleteTypeImpl(Loc, T, Kind, &Diagnoser))
return true;
if (const TagType *Tag = T->getAs<TagType>()) {
if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
@@ -7972,10 +8307,12 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
} else if (auto *ED = dyn_cast<EnumDecl>(D)) {
if (auto *Pattern = ED->getTemplateInstantiationPattern())
ED = Pattern;
- if (OnlyNeedComplete && ED->isFixed()) {
- // If the enum has a fixed underlying type, and we're only looking for a
- // complete type (not a definition), any visible declaration of it will
- // do.
+ if (OnlyNeedComplete && (ED->isFixed() || getLangOpts().MSVCCompat)) {
+ // If the enum has a fixed underlying type, it may have been forward
+ // declared. In -fms-compatibility, `enum Foo;` will also forward declare
+ // the enum and assign it the underlying type of `int`. Since we're only
+ // looking for a complete type (not a definition), any visible declaration
+ // of it will do.
*Suggested = nullptr;
for (auto *Redecl : ED->redecls()) {
if (isVisible(Redecl))
@@ -8067,6 +8404,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
/// The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind,
TypeDiagnoser *Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
// assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
@@ -8080,7 +8418,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
if (!MPTy->getClass()->isDependentType()) {
if (getLangOpts().CompleteMemberPointers &&
!MPTy->getClass()->getAsCXXRecordDecl()->isBeingDefined() &&
- RequireCompleteType(Loc, QualType(MPTy->getClass(), 0),
+ RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), Kind,
diag::err_memptr_incomplete))
return true;
@@ -8094,7 +8432,9 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
}
NamedDecl *Def = nullptr;
- bool Incomplete = T->isIncompleteType(&Def);
+ bool AcceptSizeless = (Kind == CompleteTypeKind::AcceptSizeless);
+ bool Incomplete = (T->isIncompleteType(&Def) ||
+ (!AcceptSizeless && T->isSizelessBuiltinType()));
// Check that any necessary explicit specializations are visible. For an
// enum, we just need the declaration, so don't check this.
@@ -8148,7 +8488,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the external source completed the type, go through the motions
// again to ensure we're allowed to use the completed type.
if (!T->isIncompleteType())
- return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser);
}
}
@@ -8200,7 +8540,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// instantiation produced an error, so that repeated calls to this
// function give consistent answers.
if (!T->isIncompleteType())
- return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser);
}
}
@@ -8214,14 +8554,14 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
- if (Tag && !Tag->isInvalidDecl())
+ if (Tag && !Tag->isInvalidDecl() && !Tag->getLocation().isInvalid())
Diag(Tag->getLocation(),
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
<< Context.getTagDeclType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
- if (IFace && !IFace->isInvalidDecl())
+ if (IFace && !IFace->isInvalidDecl() && !IFace->getLocation().isInvalid())
Diag(IFace->getLocation(), diag::note_forward_class);
// If we have external information that we can use to suggest a fix,
@@ -8233,9 +8573,9 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
}
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
- unsigned DiagID) {
+ CompleteTypeKind Kind, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
- return RequireCompleteType(Loc, T, Diagnoser);
+ return RequireCompleteType(Loc, T, Kind, Diagnoser);
}
/// Get diagnostic %select index for tag kind for
@@ -8333,7 +8673,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
return true;
}
}
- } else if (getLangOpts().CPlusPlus2a ? !RD->hasConstexprDestructor()
+ } else if (getLangOpts().CPlusPlus20 ? !RD->hasConstexprDestructor()
: !RD->hasTrivialDestructor()) {
// All fields and bases are of literal types, so have trivial or constexpr
// destructors. If this class's destructor is non-trivial / non-constexpr,
@@ -8343,7 +8683,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (!Dtor)
return true;
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
Diag(Dtor->getLocation(), diag::note_non_literal_non_constexpr_dtor)
<< RD;
} else {
@@ -8535,9 +8875,17 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
DisallowedKind = 4;
else if (T.hasQualifiers())
DisallowedKind = 5;
+ else if (T->isSizelessType())
+ DisallowedKind = 6;
else if (!T.isTriviallyCopyableType(Context))
// Some other non-trivially-copyable type (probably a C++ class)
- DisallowedKind = 6;
+ DisallowedKind = 7;
+ else if (auto *ExtTy = T->getAs<ExtIntType>()) {
+ if (ExtTy->getNumBits() < 8)
+ DisallowedKind = 8;
+ else if (!llvm::isPowerOf2_32(ExtTy->getNumBits()))
+ DisallowedKind = 9;
+ }
if (DisallowedKind != -1) {
Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
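A quick sketch of what the widened _Atomic checks accept (illustrative; the numbers refer to the DisallowedKind values assigned above):

_Atomic(_ExtInt(8))  a8;       // OK: at least 8 bits and a power of two
_Atomic(_ExtInt(64)) a64;      // OK
// _Atomic(_ExtInt(4))  bad1;  // rejected: fewer than 8 bits   (kind 8)
// _Atomic(_ExtInt(12)) bad2;  // rejected: not a power of two  (kind 9)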
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index bbc6fc6deeef..ae0e9f1119b4 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -28,6 +28,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
@@ -157,6 +158,13 @@ public:
/// existing lambdas.
bool ReplacingOriginal() { return false; }
+  /// Whether CXXConstructExprs can be skipped when they are implicit.
+  /// They will be reconstructed when used if needed.
+  /// This is useful when the code that causes the rebuilding of the
+  /// CXXConstructExpr is outside of the expression at which the TreeTransform
+  /// started.
+ bool AllowSkippingCXXConstructExpr() { return true; }
+
/// Returns the location of the entity being transformed, if that
/// information was not available elsewhere in the AST.
///
@@ -722,10 +730,10 @@ public:
#define ABSTRACT_STMT(Stmt)
#include "clang/AST/StmtNodes.inc"
-#define OPENMP_CLAUSE(Name, Class) \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
LLVM_ATTRIBUTE_NOINLINE \
OMPClause *Transform ## Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
/// Build a new qualified type given its unqualified type and type location.
///
@@ -886,6 +894,16 @@ public:
Expr *SizeExpr,
SourceLocation AttributeLoc);
+ /// Build a new matrix type given the element type and dimensions.
+ QualType RebuildConstantMatrixType(QualType ElementType, unsigned NumRows,
+ unsigned NumColumns);
+
+ /// Build a new matrix type given the type and dependently-defined
+ /// dimensions.
+ QualType RebuildDependentSizedMatrixType(QualType ElementType, Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttributeLoc);
+
/// Build a new DependentAddressSpaceType or return the pointee
/// type variable with the correct address space (retrieved from
/// AddrSpaceExpr) applied to it. The former will be returned in cases
@@ -1175,6 +1193,14 @@ public:
QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
bool isReadPipe);
+ /// Build an extended int given its value type.
+ QualType RebuildExtIntType(bool IsUnsigned, unsigned NumBits,
+ SourceLocation Loc);
+
+ /// Build a dependent extended int given its value type.
+ QualType RebuildDependentExtIntType(bool IsUnsigned, Expr *NumBitsExpr,
+ SourceLocation Loc);
+
/// Build a new template name given a nested name specifier, a flag
/// indicating whether the "template" keyword was provided, and the template
/// that the template name refers to.
@@ -1321,9 +1347,10 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildWhileStmt(SourceLocation WhileLoc,
- Sema::ConditionResult Cond, Stmt *Body) {
- return getSema().ActOnWhileStmt(WhileLoc, Cond, Body);
+ StmtResult RebuildWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
+ Sema::ConditionResult Cond,
+ SourceLocation RParenLoc, Stmt *Body) {
+ return getSema().ActOnWhileStmt(WhileLoc, LParenLoc, Cond, RParenLoc, Body);
}
/// Build a new do-while statement.
@@ -1610,8 +1637,7 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPDefaultClause(OpenMPDefaultClauseKind Kind,
- SourceLocation KindKwLoc,
+ OMPClause *RebuildOMPDefaultClause(DefaultKind Kind, SourceLocation KindKwLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
@@ -1711,17 +1737,16 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPReductionClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation ColonLoc,
- SourceLocation EndLoc,
- CXXScopeSpec &ReductionIdScopeSpec,
- const DeclarationNameInfo &ReductionId,
- ArrayRef<Expr *> UnresolvedReductions) {
+ OMPClause *RebuildOMPReductionClause(
+ ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId,
+ ArrayRef<Expr *> UnresolvedReductions) {
return getSema().ActOnOpenMPReductionClause(
- VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
- ReductionId, UnresolvedReductions);
+ VarList, Modifier, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc,
+ ReductionIdScopeSpec, ReductionId, UnresolvedReductions);
}
/// Build a new OpenMP 'task_reduction' clause.
@@ -1820,28 +1845,42 @@ public:
EndLoc);
}
+ /// Build a new OpenMP 'depobj' pseudo clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDepobjClause(Depobj, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
/// Build a new OpenMP 'depend' pseudo clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *
- RebuildOMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList,
- StartLoc, LParenLoc, EndLoc);
+ RebuildOMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDependClause(DepModifier, DepKind, DepLoc,
+ ColonLoc, VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'device' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ OMPClause *RebuildOMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDeviceClause(Device, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().ActOnOpenMPDeviceClause(Modifier, Device, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'map' clause.
@@ -1940,6 +1979,16 @@ public:
return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
}
+ /// Build a new OpenMP 'detach' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+ }
+
/// Build a new OpenMP 'dist_schedule' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -1988,6 +2037,15 @@ public:
return getSema().ActOnOpenMPUseDevicePtrClause(VarList, Locs);
}
+ /// Build a new OpenMP 'use_device_addr' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
+ }
+
/// Build a new OpenMP 'is_device_ptr' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -2024,6 +2082,67 @@ public:
EndLoc);
}
+ /// Build a new OpenMP 'inclusive' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPInclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPInclusiveClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'exclusive' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPExclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPExclusiveClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'uses_allocators' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPUsesAllocatorsClause(
+ ArrayRef<Sema::UsesAllocatorsData> Data, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPUsesAllocatorClause(StartLoc, LParenLoc, EndLoc,
+ Data);
+ }
+
+ /// Build a new OpenMP 'affinity' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAffinityClause(SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators) {
+ return getSema().ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc,
+ EndLoc, Modifier, Locators);
+ }
+
+ /// Build a new OpenMP 'order' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPOrderClause(OpenMPOrderClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPOrderClause(Kind, KindKwLoc, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
/// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -2301,16 +2420,53 @@ public:
RBracketLoc);
}
+ /// Build a new matrix subscript expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
+ Expr *ColumnIdx,
+ SourceLocation RBracketLoc) {
+ return getSema().CreateBuiltinMatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ RBracketLoc);
+ }
+
/// Build a new array section expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildOMPArraySectionExpr(Expr *Base, SourceLocation LBracketLoc,
Expr *LowerBound,
- SourceLocation ColonLoc, Expr *Length,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
SourceLocation RBracketLoc) {
return getSema().ActOnOMPArraySectionExpr(Base, LBracketLoc, LowerBound,
- ColonLoc, Length, RBracketLoc);
+ ColonLocFirst, ColonLocSecond,
+ Length, Stride, RBracketLoc);
+ }
+
+ /// Build a new array shaping expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> BracketsRanges) {
+ return getSema().ActOnOMPArrayShapingExpr(Base, LParenLoc, RParenLoc, Dims,
+ BracketsRanges);
+ }
+
+ /// Build a new iterator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPIteratorExpr(
+ SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc,
+ ArrayRef<Sema::OMPIteratorData> Data) {
+ return getSema().ActOnOMPIteratorExpr(/*Scope=*/nullptr, IteratorKwLoc,
+ LLoc, RLoc, Data);
}
/// Build a new call expression.
@@ -2321,8 +2477,8 @@ public:
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr) {
- return getSema().BuildCallExpr(/*Scope=*/nullptr, Callee, LParenLoc, Args,
- RParenLoc, ExecConfig);
+ return getSema().ActOnCallExpr(
+ /*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc, ExecConfig);
}
/// Build a new member access expression.
@@ -2616,6 +2772,10 @@ public:
RAngleLoc, LParenLoc,
SubExpr, RParenLoc);
+ case Stmt::CXXAddrspaceCastExprClass:
+ return getDerived().RebuildCXXAddrspaceCastExpr(
+ OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc);
+
default:
llvm_unreachable("Invalid C++ named cast");
}
@@ -2689,6 +2849,16 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
+ ExprResult
+ RebuildCXXAddrspaceCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo, SourceLocation RAngleLoc,
+ SourceLocation LParenLoc, Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(
+ OpLoc, tok::kw_addrspace_cast, TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc));
+ }
+
/// Build a new C++ functional-style cast expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2742,24 +2912,19 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- TypeSourceInfo *Operand,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
- RParenLoc);
+ ExprResult RebuildCXXUuidofExpr(QualType Type, SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(Type, TypeidLoc, Operand, RParenLoc);
}
/// Build a new C++ __uuidof(expr) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- Expr *Operand,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
- RParenLoc);
+ ExprResult RebuildCXXUuidofExpr(QualType Type, SourceLocation TypeidLoc,
+ Expr *Operand, SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(Type, TypeidLoc, Operand, RParenLoc);
}
/// Build a new C++ "this" expression.
@@ -2938,9 +3103,14 @@ public:
bool RequiresZeroInit,
CXXConstructExpr::ConstructionKind ConstructKind,
SourceRange ParenRange) {
+ // Reconstruct the constructor we originally found, which might be
+ // different if this is a call to an inherited constructor.
+ CXXConstructorDecl *FoundCtor = Constructor;
+ if (Constructor->isInheritingConstructor())
+ FoundCtor = Constructor->getInheritedConstructor().getConstructor();
+
SmallVector<Expr*, 8> ConvertedArgs;
- if (getSema().CompleteConstructorCall(Constructor, Args, Loc,
- ConvertedArgs))
+ if (getSema().CompleteConstructorCall(FoundCtor, Args, Loc, ConvertedArgs))
return ExprError();
return getSema().BuildCXXConstructExpr(Loc, T, Constructor,
@@ -3438,6 +3608,11 @@ public:
Sema::AtomicArgumentOrder::AST);
}
+ ExprResult RebuildRecoveryExpr(SourceLocation BeginLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> SubExprs) {
+ return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs);
+ }
+
private:
TypeLoc TransformTypeInObjectScope(TypeLoc TL,
QualType ObjectType,
@@ -3502,10 +3677,10 @@ OMPClause *TreeTransform<Derived>::TransformOMPClause(OMPClause *S) {
switch (S->getClauseKind()) {
default: break;
// Transform individual clause nodes
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_ ## Name : \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return getDerived().Transform ## Class(cast<Class>(S));
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
return S;
@@ -5066,6 +5241,86 @@ QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
}
template <typename Derived>
+QualType
+TreeTransform<Derived>::TransformConstantMatrixType(TypeLocBuilder &TLB,
+ ConstantMatrixTypeLoc TL) {
+ const ConstantMatrixType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) {
+ Result = getDerived().RebuildConstantMatrixType(
+ ElementType, T->getNumRows(), T->getNumColumns());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ConstantMatrixTypeLoc NewTL = TLB.push<ConstantMatrixTypeLoc>(Result);
+ NewTL.setAttrNameLoc(TL.getAttrNameLoc());
+ NewTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ NewTL.setAttrRowOperand(TL.getAttrRowOperand());
+ NewTL.setAttrColumnOperand(TL.getAttrColumnOperand());
+
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentSizedMatrixType(
+ TypeLocBuilder &TLB, DependentSizedMatrixTypeLoc TL) {
+ const DependentSizedMatrixType *T = TL.getTypePtr();
+
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull()) {
+ return QualType();
+ }
+
+ // Matrix dimensions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ Expr *origRows = TL.getAttrRowOperand();
+ if (!origRows)
+ origRows = T->getRowExpr();
+ Expr *origColumns = TL.getAttrColumnOperand();
+ if (!origColumns)
+ origColumns = T->getColumnExpr();
+
+ ExprResult rowResult = getDerived().TransformExpr(origRows);
+ rowResult = SemaRef.ActOnConstantExpression(rowResult);
+ if (rowResult.isInvalid())
+ return QualType();
+
+ ExprResult columnResult = getDerived().TransformExpr(origColumns);
+ columnResult = SemaRef.ActOnConstantExpression(columnResult);
+ if (columnResult.isInvalid())
+ return QualType();
+
+ Expr *rows = rowResult.get();
+ Expr *columns = columnResult.get();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() ||
+ rows != origRows || columns != origColumns) {
+ Result = getDerived().RebuildDependentSizedMatrixType(
+ ElementType, rows, columns, T->getAttributeLoc());
+
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have any sort of matrix type now, but fortunately they
+ // all have the same location layout.
+ MatrixTypeLoc NewTL = TLB.push<MatrixTypeLoc>(Result);
+ NewTL.setAttrNameLoc(TL.getAttrNameLoc());
+ NewTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ NewTL.setAttrRowOperand(rows);
+ NewTL.setAttrColumnOperand(columns);
+ return Result;
+}
+
+template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentAddressSpaceType(
TypeLocBuilder &TLB, DependentAddressSpaceTypeLoc TL) {
const DependentAddressSpaceType *T = TL.getTypePtr();
@@ -6025,6 +6280,57 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformExtIntType(TypeLocBuilder &TLB,
+ ExtIntTypeLoc TL) {
+ const ExtIntType *EIT = TL.getTypePtr();
+ QualType Result = TL.getType();
+
+ if (getDerived().AlwaysRebuild()) {
+ Result = getDerived().RebuildExtIntType(EIT->isUnsigned(),
+ EIT->getNumBits(), TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentExtIntType(
+ TypeLocBuilder &TLB, DependentExtIntTypeLoc TL) {
+ const DependentExtIntType *EIT = TL.getTypePtr();
+
+ EnterExpressionEvaluationContext Unevaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult BitsExpr = getDerived().TransformExpr(EIT->getNumBitsExpr());
+ BitsExpr = SemaRef.ActOnConstantExpression(BitsExpr);
+
+ if (BitsExpr.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+
+ if (getDerived().AlwaysRebuild() || BitsExpr.get() != EIT->getNumBitsExpr()) {
+ Result = getDerived().RebuildDependentExtIntType(
+ EIT->isUnsigned(), BitsExpr.get(), TL.getNameLoc());
+
+ if (Result.isNull())
+ return QualType();
+ }
+
+ if (isa<DependentExtIntType>(Result)) {
+ DependentExtIntTypeLoc NewTL = TLB.push<DependentExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else {
+ ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+ return Result;
+}
+
/// Simple iterator that traverses the template arguments in a
/// container that provides a \c getArgLoc() member function.
///
@@ -7030,7 +7336,8 @@ TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
Body.get() == S->getBody())
return Owned(S);
- return getDerived().RebuildWhileStmt(S->getWhileLoc(), Cond, Body.get());
+ return getDerived().RebuildWhileStmt(S->getWhileLoc(), S->getLParenLoc(),
+ Cond, S->getRParenLoc(), Body.get());
}
template<typename Derived>
@@ -7328,7 +7635,8 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
return StmtError();
StmtResult FinalSuspend =
getDerived().TransformStmt(S->getFinalSuspendStmt());
- if (FinalSuspend.isInvalid())
+ if (FinalSuspend.isInvalid() ||
+ !SemaRef.checkFinalSuspendNoThrow(FinalSuspend.get()))
return StmtError();
ScopeInfo->setCoroutineSuspends(InitSuspend.get(), FinalSuspend.get());
assert(isa<Expr>(InitSuspend.get()) && isa<Expr>(FinalSuspend.get()));
@@ -7784,8 +8092,12 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
Cond.get(),
Inc.get(), LoopVar.get(),
S->getRParenLoc());
- if (NewStmt.isInvalid())
+ if (NewStmt.isInvalid() && LoopVar.get() != S->getLoopVarStmt()) {
+ // Might not have attached any initializer to the loop variable.
+ getSema().ActOnInitializerError(
+ cast<DeclStmt>(LoopVar.get())->getSingleDecl());
return StmtError();
+ }
}
StmtResult Body = getDerived().TransformStmt(S->getBody());
@@ -8252,6 +8564,28 @@ TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
template <typename Derived>
StmtResult
+TreeTransform<Derived>::TransformOMPDepobjDirective(OMPDepobjDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_depobj, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPScanDirective(OMPScanDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_scan, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
@@ -8738,6 +9072,19 @@ TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPDetachClause(OMPDetachClause *C) {
+ ExprResult E;
+ if (Expr *Evt = C->getEventHandler()) {
+ E = getDerived().TransformExpr(Evt);
+ if (E.isInvalid())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPDetachClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPNowaitClause(OMPNowaitClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -8792,6 +9139,34 @@ TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPAcqRelClause(OMPAcqRelClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAcquireClause(OMPAcquireClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPReleaseClause(OMPReleaseClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPRelaxedClause(OMPRelaxedClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPThreadsClause(OMPThreadsClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -8811,6 +9186,13 @@ TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
}
template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDestroyClause(OMPDestroyClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPUnifiedAddressClause(
OMPUnifiedAddressClause *C) {
llvm_unreachable("unified_address clause cannot appear in dependent context");
@@ -8947,8 +9329,9 @@ TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
UnresolvedReductions.push_back(nullptr);
}
return getDerived().RebuildOMPReductionClause(
- Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
- C->getEndLoc(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
+ Vars, C->getModifier(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getColonLoc(), C->getEndLoc(),
+ ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
}
template <typename Derived>
@@ -9125,8 +9508,25 @@ OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPDepobjClause(OMPDepobjClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getDepobj());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPDepobjClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
+ Expr *DepModifier = C->getModifier();
+ if (DepModifier) {
+ ExprResult DepModRes = getDerived().TransformExpr(DepModifier);
+ if (DepModRes.isInvalid())
+ return nullptr;
+ DepModifier = DepModRes.get();
+ }
Vars.reserve(C->varlist_size());
for (auto *VE : C->varlists()) {
ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
@@ -9135,8 +9535,9 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPDependClause(
- C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(), Vars,
- C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ DepModifier, C->getDependencyKind(), C->getDependencyLoc(),
+ C->getColonLoc(), Vars, C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
}
template <typename Derived>
@@ -9145,8 +9546,9 @@ TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
ExprResult E = getDerived().TransformExpr(C->getDevice());
if (E.isInvalid())
return nullptr;
- return getDerived().RebuildOMPDeviceClause(E.get(), C->getBeginLoc(),
- C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPDeviceClause(
+ C->getModifier(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getEndLoc());
}
template <typename Derived, class T>
@@ -9369,6 +9771,21 @@ OMPClause *TreeTransform<Derived>::TransformOMPUseDevicePtrClause(
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUseDeviceAddrClause(
+ OMPUseDeviceAddrClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPUseDeviceAddrClause(Vars, Locs);
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -9398,6 +9815,91 @@ TreeTransform<Derived>::TransformOMPNontemporalClause(OMPNontemporalClause *C) {
Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPInclusiveClause(OMPInclusiveClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPInclusiveClause(
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPExclusiveClause(OMPExclusiveClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPExclusiveClause(
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUsesAllocatorsClause(
+ OMPUsesAllocatorsClause *C) {
+ SmallVector<Sema::UsesAllocatorsData, 16> Data;
+ Data.reserve(C->getNumberOfAllocators());
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ ExprResult Allocator = getDerived().TransformExpr(D.Allocator);
+ if (Allocator.isInvalid())
+ continue;
+ ExprResult AllocatorTraits;
+ if (Expr *AT = D.AllocatorTraits) {
+ AllocatorTraits = getDerived().TransformExpr(AT);
+ if (AllocatorTraits.isInvalid())
+ continue;
+ }
+ Sema::UsesAllocatorsData &NewD = Data.emplace_back();
+ NewD.Allocator = Allocator.get();
+ NewD.AllocatorTraits = AllocatorTraits.get();
+ NewD.LParenLoc = D.LParenLoc;
+ NewD.RParenLoc = D.RParenLoc;
+ }
+ return getDerived().RebuildOMPUsesAllocatorsClause(
+ Data, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAffinityClause(OMPAffinityClause *C) {
+ SmallVector<Expr *, 4> Locators;
+ Locators.reserve(C->varlist_size());
+ ExprResult ModifierRes;
+ if (Expr *Modifier = C->getModifier()) {
+ ModifierRes = getDerived().TransformExpr(Modifier);
+ if (ModifierRes.isInvalid())
+ return nullptr;
+ }
+ for (Expr *E : C->varlists()) {
+ ExprResult Locator = getDerived().TransformExpr(E);
+ if (Locator.isInvalid())
+ continue;
+ Locators.push_back(Locator.get());
+ }
+ return getDerived().RebuildOMPAffinityClause(
+ C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(), C->getEndLoc(),
+ ModifierRes.get(), Locators);
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPOrderClause(OMPOrderClause *C) {
+ return getDerived().RebuildOMPOrderClause(C->getKind(), C->getKindKwLoc(),
+ C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
+}
+
//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
@@ -9681,6 +10183,24 @@ TreeTransform<Derived>::TransformTypoExpr(TypoExpr *E) {
return E;
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformRecoveryExpr(RecoveryExpr *E) {
+ llvm::SmallVector<Expr *, 8> Children;
+ bool Changed = false;
+ for (Expr *C : E->subExpressions()) {
+ ExprResult NewC = getDerived().TransformExpr(C);
+ if (NewC.isInvalid())
+ return ExprError();
+ Children.push_back(NewC.get());
+
+ Changed |= NewC.get() != C;
+ }
+ if (!getDerived().AlwaysRebuild() && !Changed)
+ return E;
+ return getDerived().RebuildRecoveryExpr(E->getBeginLoc(), E->getEndLoc(),
+ Children);
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
@@ -9780,6 +10300,29 @@ TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
template <typename Derived>
ExprResult
+TreeTransform<Derived>::TransformMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ExprResult RowIdx = getDerived().TransformExpr(E->getRowIdx());
+ if (RowIdx.isInvalid())
+ return ExprError();
+
+ ExprResult ColumnIdx = getDerived().TransformExpr(E->getColumnIdx());
+ if (ColumnIdx.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
+ RowIdx.get() == E->getRowIdx() && ColumnIdx.get() == E->getColumnIdx())
+ return E;
+
+ return getDerived().RebuildMatrixSubscriptExpr(
+ Base.get(), RowIdx.get(), ColumnIdx.get(), E->getRBracketLoc());
+}
+
+template <typename Derived>
+ExprResult
TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
@@ -9799,13 +10342,105 @@ TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
return ExprError();
}
+ ExprResult Stride;
+ if (Expr *Str = E->getStride()) {
+ Stride = getDerived().TransformExpr(Str);
+ if (Stride.isInvalid())
+ return ExprError();
+ }
+
if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
LowerBound.get() == E->getLowerBound() && Length.get() == E->getLength())
return E;
return getDerived().RebuildOMPArraySectionExpr(
- Base.get(), E->getBase()->getEndLoc(), LowerBound.get(), E->getColonLoc(),
- Length.get(), E->getRBracketLoc());
+ Base.get(), E->getBase()->getEndLoc(), LowerBound.get(),
+ E->getColonLocFirst(), E->getColonLocSecond(), Length.get(), Stride.get(),
+ E->getRBracketLoc());
+}
+
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ SmallVector<Expr *, 4> Dims;
+ bool ErrorFound = false;
+ for (Expr *Dim : E->getDimensions()) {
+ ExprResult DimRes = getDerived().TransformExpr(Dim);
+ if (DimRes.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Dims.push_back(DimRes.get());
+ }
+
+ if (ErrorFound)
+ return ExprError();
+ return getDerived().RebuildOMPArrayShapingExpr(Base.get(), E->getLParenLoc(),
+ E->getRParenLoc(), Dims,
+ E->getBracketsRanges());
+}
+
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPIteratorExpr(OMPIteratorExpr *E) {
+ unsigned NumIterators = E->numOfIterators();
+ SmallVector<Sema::OMPIteratorData, 4> Data(NumIterators);
+
+ bool ErrorFound = false;
+ bool NeedToRebuild = getDerived().AlwaysRebuild();
+ for (unsigned I = 0; I < NumIterators; ++I) {
+ auto *D = cast<VarDecl>(E->getIteratorDecl(I));
+ Data[I].DeclIdent = D->getIdentifier();
+ Data[I].DeclIdentLoc = D->getLocation();
+ if (D->getLocation() == D->getBeginLoc()) {
+ assert(SemaRef.Context.hasSameType(D->getType(), SemaRef.Context.IntTy) &&
+ "Implicit type must be int.");
+ } else {
+ TypeSourceInfo *TSI = getDerived().TransformType(D->getTypeSourceInfo());
+ QualType DeclTy = getDerived().TransformType(D->getType());
+ Data[I].Type = SemaRef.CreateParsedType(DeclTy, TSI);
+ }
+ OMPIteratorExpr::IteratorRange Range = E->getIteratorRange(I);
+ ExprResult Begin = getDerived().TransformExpr(Range.Begin);
+ ExprResult End = getDerived().TransformExpr(Range.End);
+ ExprResult Step = getDerived().TransformExpr(Range.Step);
+ ErrorFound = ErrorFound ||
+ !(!D->getTypeSourceInfo() || (Data[I].Type.getAsOpaquePtr() &&
+ !Data[I].Type.get().isNull())) ||
+ Begin.isInvalid() || End.isInvalid() || Step.isInvalid();
+ if (ErrorFound)
+ continue;
+ Data[I].Range.Begin = Begin.get();
+ Data[I].Range.End = End.get();
+ Data[I].Range.Step = Step.get();
+ Data[I].AssignLoc = E->getAssignLoc(I);
+ Data[I].ColonLoc = E->getColonLoc(I);
+ Data[I].SecColonLoc = E->getSecondColonLoc(I);
+ NeedToRebuild =
+ NeedToRebuild ||
+ (D->getTypeSourceInfo() && Data[I].Type.get().getTypePtrOrNull() !=
+ D->getType().getTypePtrOrNull()) ||
+ Range.Begin != Data[I].Range.Begin || Range.End != Data[I].Range.End ||
+ Range.Step != Data[I].Range.Step;
+ }
+ if (ErrorFound)
+ return ExprError();
+ if (!NeedToRebuild)
+ return E;
+
+ ExprResult Res = getDerived().RebuildOMPIteratorExpr(
+ E->getIteratorKwLoc(), E->getLParenLoc(), E->getRParenLoc(), Data);
+ if (!Res.isUsable())
+ return Res;
+ auto *IE = cast<OMPIteratorExpr>(Res.get());
+ for (unsigned I = 0; I < NumIterators; ++I)
+ getDerived().transformedLocalDecl(E->getIteratorDecl(I),
+ IE->getIteratorDecl(I));
+ return Res;
}
template<typename Derived>
@@ -9937,9 +10572,15 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
RHS.get() == E->getRHS())
return E;
- Sema::FPContractStateRAII FPContractState(getSema());
- getSema().FPFeatures = E->getFPFeatures();
-
+ if (E->isCompoundAssignmentOp())
+ // FPFeatures has already been established from trailing storage
+ return getDerived().RebuildBinaryOperator(
+ E->getOperatorLoc(), E->getOpcode(), LHS.get(), RHS.get());
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
LHS.get(), RHS.get());
}
@@ -9992,6 +10633,11 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCompoundAssignOperator(
CompoundAssignOperator *E) {
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().TransformBinaryOperator(E);
}
@@ -10465,8 +11111,11 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
(E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
return SemaRef.MaybeBindToTemporary(E);
- Sema::FPContractStateRAII FPContractState(getSema());
- getSema().FPFeatures = E->getFPFeatures();
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures());
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
@@ -10593,6 +11242,12 @@ TreeTransform<Derived>::TransformCXXConstCastExpr(CXXConstCastExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
CXXFunctionalCastExpr *E) {
TypeSourceInfo *Type =
@@ -11545,10 +12200,11 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
// CXXConstructExprs other than for list-initialization and
// CXXTemporaryObjectExpr are always implicit, so when we have
// a 1-argument construction we just transform that argument.
- if ((E->getNumArgs() == 1 ||
- (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
- (!getDerived().DropCallArgument(E->getArg(0))) &&
- !E->isListInitialization())
+ if (getDerived().AllowSkippingCXXConstructExpr() &&
+ ((E->getNumArgs() == 1 ||
+ (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
+ (!getDerived().DropCallArgument(E->getArg(0))) &&
+ !E->isListInitialization()))
return getDerived().TransformExpr(E->getArg(0));
TemporaryBase Rebase(*this, /*FIXME*/ E->getBeginLoc(), DeclarationName());
@@ -13323,6 +13979,21 @@ TreeTransform<Derived>::RebuildDependentSizedExtVectorType(QualType ElementType,
return SemaRef.BuildExtVectorType(ElementType, SizeExpr, AttributeLoc);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildConstantMatrixType(
+ QualType ElementType, unsigned NumRows, unsigned NumColumns) {
+ return SemaRef.Context.getConstantMatrixType(ElementType, NumRows,
+ NumColumns);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentSizedMatrixType(
+ QualType ElementType, Expr *RowExpr, Expr *ColumnExpr,
+ SourceLocation AttributeLoc) {
+ return SemaRef.BuildMatrixType(ElementType, RowExpr, ColumnExpr,
+ AttributeLoc);
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::RebuildFunctionProtoType(
QualType T,
@@ -13437,6 +14108,23 @@ QualType TreeTransform<Derived>::RebuildPipeType(QualType ValueType,
: SemaRef.BuildWritePipeType(ValueType, KWLoc);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildExtIntType(bool IsUnsigned,
+ unsigned NumBits,
+ SourceLocation Loc) {
+ llvm::APInt NumBitsAP(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
+ NumBits, true);
+ IntegerLiteral *Bits = IntegerLiteral::Create(SemaRef.Context, NumBitsAP,
+ SemaRef.Context.IntTy, Loc);
+ return SemaRef.BuildExtIntType(IsUnsigned, Bits, Loc);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentExtIntType(
+ bool IsUnsigned, Expr *NumBitsExpr, SourceLocation Loc) {
+ return SemaRef.BuildExtIntType(IsUnsigned, NumBitsExpr, Loc);
+}
+
template<typename Derived>
TemplateName
TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
@@ -13458,11 +14146,10 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
- getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
- SS, TemplateKWLoc, TemplateName,
- ParsedType::make(ObjectType),
- /*EnteringContext=*/false,
- Template, AllowInjectedClassName);
+ getSema().ActOnTemplateName(/*Scope=*/nullptr, SS, TemplateKWLoc,
+ TemplateName, ParsedType::make(ObjectType),
+ /*EnteringContext=*/false, Template,
+ AllowInjectedClassName);
return Template.get();
}
@@ -13479,11 +14166,9 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
Sema::TemplateTy Template;
- getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
- SS, TemplateKWLoc, Name,
- ParsedType::make(ObjectType),
- /*EnteringContext=*/false,
- Template, AllowInjectedClassName);
+ getSema().ActOnTemplateName(
+ /*Scope=*/nullptr, SS, TemplateKWLoc, Name, ParsedType::make(ObjectType),
+ /*EnteringContext=*/false, Template, AllowInjectedClassName);
return Template.get();
}
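The TreeTransform.h changes above add new rebuild hooks plus the AllowSkippingCXXConstructExpr() predicate, all of which are CRTP customization points that a derived transform may override. As a hedged sketch only (the class name and its policy are illustrative, not something in the patch), a subclass following the usual lib/Sema pattern could opt out of the new shortcut like this:

  #include "TreeTransform.h"

  namespace clang {
  // Illustrative transform that always rebuilds nodes and never collapses an
  // implicit CXXConstructExpr to its single argument (it disables the
  // shortcut guarded by the new AllowSkippingCXXConstructExpr() hook).
  class RebuildEverything : public TreeTransform<RebuildEverything> {
    using Base = TreeTransform<RebuildEverything>;

  public:
    explicit RebuildEverything(Sema &SemaRef) : Base(SemaRef) {}

    bool AlwaysRebuild() { return true; }
    bool AllowSkippingCXXConstructExpr() { return false; }
  };
  } // namespace clang

A caller inside Sema could then run RebuildEverything(SemaRef).TransformExpr(E) to obtain a freshly built copy of E with its implicit construct expressions preserved.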
diff --git a/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
new file mode 100644
index 000000000000..d207e07f451a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
@@ -0,0 +1,102 @@
+//===- UsedDeclVisitor.h - ODR-used declarations visitor --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+//
+// This file defines UsedDeclVisitor, a CRTP class which visits all the
+// declarations that are ODR-used by an expression or statement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
+#define LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
+
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/Sema/SemaInternal.h"
+
+namespace clang {
+template <class Derived>
+class UsedDeclVisitor : public EvaluatedExprVisitor<Derived> {
+protected:
+ Sema &S;
+
+public:
+ typedef EvaluatedExprVisitor<Derived> Inherited;
+
+ UsedDeclVisitor(Sema &S) : Inherited(S.Context), S(S) {}
+
+ Derived &asImpl() { return *static_cast<Derived *>(this); }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ auto *D = E->getDecl();
+ if (isa<FunctionDecl>(D) || isa<VarDecl>(D)) {
+ asImpl().visitUsedDecl(E->getLocation(), D);
+ }
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ auto *D = E->getMemberDecl();
+ if (isa<FunctionDecl>(D) || isa<VarDecl>(D)) {
+ asImpl().visitUsedDecl(E->getMemberLoc(), D);
+ }
+ asImpl().Visit(E->getBase());
+ }
+
+ void VisitCapturedStmt(CapturedStmt *Node) {
+ asImpl().visitUsedDecl(Node->getBeginLoc(), Node->getCapturedDecl());
+ Inherited::VisitCapturedStmt(Node);
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ asImpl().visitUsedDecl(
+ E->getBeginLoc(),
+ const_cast<CXXDestructorDecl *>(E->getTemporary()->getDestructor()));
+ asImpl().Visit(E->getSubExpr());
+ }
+
+ void VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->getOperatorNew())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorNew());
+ if (E->getOperatorDelete())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
+ Inherited::VisitCXXNewExpr(E);
+ }
+
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->getOperatorDelete())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
+ QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ }
+
+ Inherited::VisitCXXDeleteExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getConstructor());
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ asImpl().Visit(E->getExpr());
+ }
+
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ if (auto *CD = dyn_cast<CapturedDecl>(D)) {
+ if (auto *S = CD->getBody()) {
+ asImpl().Visit(S);
+ }
+ } else if (auto *CD = dyn_cast<BlockDecl>(D)) {
+ if (auto *S = CD->getBody()) {
+ asImpl().Visit(S);
+ }
+ }
+ }
+};
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
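The new UsedDeclVisitor above is a CRTP base: a client supplies visitUsedDecl(Loc, D) and the base walks the declarations ODR-used by an expression or statement. As a hedged sketch only (the collector class is an assumption for illustration, not part of the patch), a minimal client could look like this:

  #include "UsedDeclVisitor.h"

  namespace clang {
  class UsedDeclCollector : public UsedDeclVisitor<UsedDeclCollector> {
  public:
    llvm::SmallVector<Decl *, 8> Used;

    explicit UsedDeclCollector(Sema &S) : UsedDeclVisitor<UsedDeclCollector>(S) {}

    // Called by the base class for every ODR-used declaration it finds.
    void visitUsedDecl(SourceLocation Loc, Decl *D) {
      Used.push_back(D);
      // Keep the base behaviour so captured and block bodies are still walked.
      UsedDeclVisitor<UsedDeclCollector>::visitUsedDecl(Loc, D);
    }
  };
  } // namespace clang

Running UsedDeclCollector(S).Visit(E) on an expression E then leaves the ODR-used functions and variables in Used.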
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
index f93f1f77405d..bf583b02f96b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
@@ -240,9 +240,21 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
+ case BuiltinType::IncompleteMatrixIdx:
+ ID = PREDEF_TYPE_INCOMPLETE_MATRIX_IDX;
+ break;
case BuiltinType::OMPArraySection:
ID = PREDEF_TYPE_OMP_ARRAY_SECTION;
break;
+ case BuiltinType::OMPArrayShaping:
+ ID = PREDEF_TYPE_OMP_ARRAY_SHAPING;
+ break;
+ case BuiltinType::OMPIterator:
+ ID = PREDEF_TYPE_OMP_ITERATOR;
+ break;
+ case BuiltinType::BFloat16:
+ ID = PREDEF_TYPE_BFLOAT16_ID;
+ break;
}
return TypeIdx(ID);
@@ -365,6 +377,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::IndirectField:
case Decl::Field:
case Decl::MSProperty:
+ case Decl::MSGuid:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::NonTypeTemplateParm:
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index 652b772f37cc..4a1a995204e5 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -89,6 +89,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -139,6 +140,7 @@ using namespace clang;
using namespace clang::serialization;
using namespace clang::serialization::reader;
using llvm::BitstreamCursor;
+using llvm::RoundingMode;
//===----------------------------------------------------------------------===//
// ChainedASTReaderListener implementation
@@ -1334,6 +1336,7 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
Error(std::move(Err));
return true;
}
+ F.SourceManagerBlockStartOffset = SLocEntryCursor.GetCurrentBitNo();
RecordData Record;
while (true) {
@@ -1411,7 +1414,7 @@ resolveFileRelativeToOriginalDir(const std::string &Filename,
path::append(currPCHPath, "..");
path::append(currPCHPath, fileDirI, fileDirE);
path::append(currPCHPath, path::filename(Filename));
- return currPCHPath.str();
+ return std::string(currPCHPath.str());
}
bool ASTReader::ReadSLocEntry(int ID) {
@@ -1468,6 +1471,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
if (llvm::Error Err = F->SLocEntryCursor.JumpToBit(
+ F->SLocEntryOffsetsBase +
F->SLocEntryOffsets[ID - F->SLocEntryBaseID])) {
Error(std::move(Err));
return true;
@@ -1625,13 +1629,17 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
/// Enter a subblock of the specified BlockID with the specified cursor. Read
/// the abbreviations that are at the top of the block and then leave the cursor
/// pointing into the block.
-bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID) {
+bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID,
+ uint64_t *StartOfBlockOffset) {
if (llvm::Error Err = Cursor.EnterSubBlock(BlockID)) {
// FIXME this drops errors on the floor.
consumeError(std::move(Err));
return true;
}
+ if (StartOfBlockOffset)
+ *StartOfBlockOffset = Cursor.GetCurrentBitNo();
+
while (true) {
uint64_t Offset = Cursor.GetCurrentBitNo();
Expected<unsigned> MaybeCode = Cursor.ReadCode();
@@ -1838,7 +1846,7 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
return nullptr;
}
- std::string Resolved = Key.Filename;
+ std::string Resolved = std::string(Key.Filename);
Reader.ResolveImportedPath(M, Resolved);
if (auto File = FileMgr.getFile(Resolved))
return *File;
@@ -1913,13 +1921,13 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
ModuleMap &ModMap =
Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
- std::string Filename = key.Filename;
+ std::string Filename = std::string(key.Filename);
if (key.Imported)
Reader.ResolveImportedPath(M, Filename);
// FIXME: This is not always the right filename-as-written, but we're not
// going to use this information to rebuild the module, so it doesn't make
// a lot of difference.
- Module::Header H = { key.Filename, *FileMgr.getFile(Filename) };
+ Module::Header H = {std::string(key.Filename), *FileMgr.getFile(Filename)};
ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
}
@@ -1930,9 +1938,8 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
return HFI;
}
-void ASTReader::addPendingMacro(IdentifierInfo *II,
- ModuleFile *M,
- uint64_t MacroDirectivesOffset) {
+void ASTReader::addPendingMacro(IdentifierInfo *II, ModuleFile *M,
+ uint32_t MacroDirectivesOffset) {
assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard");
PendingMacroIDs[II].push_back(PendingMacroInfo(M, MacroDirectivesOffset));
}
@@ -2097,7 +2104,8 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
BitstreamCursor &Cursor = M.MacroCursor;
SavedStreamPosition SavedPosition(Cursor);
- if (llvm::Error Err = Cursor.JumpToBit(PMInfo.MacroDirectivesOffset)) {
+ if (llvm::Error Err =
+ Cursor.JumpToBit(M.MacroOffsetsBase + PMInfo.MacroDirectivesOffset)) {
Error(std::move(Err));
return;
}
@@ -2248,7 +2256,7 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
R.Overridden = static_cast<bool>(Record[3]);
R.Transient = static_cast<bool>(Record[4]);
R.TopLevelModuleMap = static_cast<bool>(Record[5]);
- R.Filename = Blob;
+ R.Filename = std::string(Blob);
ResolveImportedPath(F, R.Filename);
Expected<llvm::BitstreamEntry> MaybeEntry = Cursor.advance();
@@ -2309,7 +2317,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
if (File == nullptr && !F.OriginalDir.empty() && !F.BaseDirectory.empty() &&
F.OriginalDir != F.BaseDirectory) {
std::string Resolved = resolveFileRelativeToOriginalDir(
- Filename, F.OriginalDir, F.BaseDirectory);
+ std::string(Filename), F.OriginalDir, F.BaseDirectory);
if (!Resolved.empty())
if (auto FE = FileMgr.getFile(Resolved))
File = *FE;
@@ -2788,10 +2796,10 @@ ASTReader::ReadControlBlock(ModuleFile &F,
ReadUntranslatedSourceLocation(Record[Idx++]);
off_t StoredSize = (off_t)Record[Idx++];
time_t StoredModTime = (time_t)Record[Idx++];
- ASTFileSignature StoredSignature = {
- {{(uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++]}}};
+ auto FirstSignatureByte = Record.begin() + Idx;
+ ASTFileSignature StoredSignature = ASTFileSignature::create(
+ FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
+ Idx += ASTFileSignature::size;
std::string ImportedName = ReadString(Record, Idx);
std::string ImportedFile;
@@ -2844,7 +2852,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case ORIGINAL_FILE:
F.OriginalSourceFileID = FileID::get(Record[0]);
- F.ActualOriginalSourceFileName = Blob;
+ F.ActualOriginalSourceFileName = std::string(Blob);
F.OriginalSourceFileName = F.ActualOriginalSourceFileName;
ResolveImportedPath(F, F.OriginalSourceFileName);
break;
@@ -2854,11 +2862,11 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
case ORIGINAL_PCH_DIR:
- F.OriginalDir = Blob;
+ F.OriginalDir = std::string(Blob);
break;
case MODULE_NAME:
- F.ModuleName = Blob;
+ F.ModuleName = std::string(Blob);
Diag(diag::remark_module_import)
<< F.ModuleName << F.FileName << (ImportedBy ? true : false)
<< (ImportedBy ? StringRef(ImportedBy->ModuleName) : StringRef());
@@ -2897,9 +2905,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return OutOfDate;
}
}
- F.BaseDirectory = M->Directory->getName();
+ F.BaseDirectory = std::string(M->Directory->getName());
} else {
- F.BaseDirectory = Blob;
+ F.BaseDirectory = std::string(Blob);
}
break;
}
@@ -2930,6 +2938,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error(std::move(Err));
return Failure;
}
+ F.ASTBlockStartOffset = Stream.GetCurrentBitNo();
// Read all of the records and blocks for the AST file.
RecordData Record;
@@ -2970,7 +2979,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error(std::move(Err));
return Failure;
}
- if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
+ if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID,
+ &F.DeclsBlockStartOffset)) {
Error("malformed block record in AST file");
return Failure;
}
@@ -3096,7 +3106,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("duplicate TYPE_OFFSET record in AST file");
return Failure;
}
- F.TypeOffsets = (const uint32_t *)Blob.data();
+ F.TypeOffsets = reinterpret_cast<const UnderalignedInt64 *>(Blob.data());
F.LocalNumTypes = Record[0];
unsigned LocalBaseTypeIndex = Record[1];
F.BaseTypeIndex = getTotalNumTypes();
@@ -3224,7 +3234,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case MODULAR_CODEGEN_DECLS:
// FIXME: Skip reading this record if our ASTConsumer doesn't care about
// them (ie: if we're not codegenerating this module).
- if (F.Kind == MK_MainFile)
+ if (F.Kind == MK_MainFile ||
+ getContext().getLangOpts().BuildingPCHWithObjectFile)
for (unsigned I = 0, N = Record.size(); I != N; ++I)
EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I]));
break;
@@ -3374,6 +3385,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.SLocEntryOffsets = (const uint32_t *)Blob.data();
F.LocalNumSLocEntries = Record[0];
unsigned SLocSpaceSize = Record[1];
+ F.SLocEntryOffsetsBase = Record[2] + F.SourceManagerBlockStartOffset;
std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
SLocSpaceSize);
@@ -3692,6 +3704,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.MacroOffsets = (const uint32_t *)Blob.data();
F.LocalNumMacros = Record[0];
unsigned LocalBaseMacroID = Record[1];
+ F.MacroOffsetsBase = Record[2] + F.ASTBlockStartOffset;
F.BaseMacroID = getTotalNumMacros();
if (F.LocalNumMacros > 0) {
@@ -3773,6 +3786,34 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
+
+ case FLOAT_CONTROL_PRAGMA_OPTIONS: {
+ if (Record.size() < 3) {
+ Error("invalid pragma pack record");
+ return Failure;
+ }
+ FpPragmaCurrentValue = Record[0];
+ FpPragmaCurrentLocation = ReadSourceLocation(F, Record[1]);
+ unsigned NumStackEntries = Record[2];
+ unsigned Idx = 3;
+ // Reset the stack when importing a new module.
+ FpPragmaStack.clear();
+ for (unsigned I = 0; I < NumStackEntries; ++I) {
+ FpPragmaStackEntry Entry;
+ Entry.Value = Record[Idx++];
+ Entry.Location = ReadSourceLocation(F, Record[Idx++]);
+ Entry.PushLocation = ReadSourceLocation(F, Record[Idx++]);
+ FpPragmaStrings.push_back(ReadString(Record, Idx));
+ Entry.SlotLabel = FpPragmaStrings.back();
+ FpPragmaStack.push_back(Entry);
+ }
+ break;
+ }
+
+ case DECLS_TO_CHECK_FOR_DEFERRED_DIAGS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ DeclsToCheckForDeferredDiags.push_back(getGlobalDeclID(F, Record[I]));
+ break;
}
}
}
@@ -3804,21 +3845,22 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
while (Data < DataEnd) {
// FIXME: Looking up dependency modules by filename is horrible. Let's
- // start fixing this with prebuilt and explicit modules and see how it
- // goes...
+ // start fixing this with prebuilt, explicit and implicit modules and see
+ // how it goes...
using namespace llvm::support;
ModuleKind Kind = static_cast<ModuleKind>(
endian::readNext<uint8_t, little, unaligned>(Data));
uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
StringRef Name = StringRef((const char*)Data, Len);
Data += Len;
- ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule
- ? ModuleMgr.lookupByModuleName(Name)
- : ModuleMgr.lookupByFileName(Name));
+ ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule ||
+ Kind == MK_ImplicitModule
+ ? ModuleMgr.lookupByModuleName(Name)
+ : ModuleMgr.lookupByFileName(Name));
if (!OM) {
std::string Msg =
"SourceLocation remap refers to unknown module, cannot find ";
- Msg.append(Name);
+ Msg.append(std::string(Name));
Error(Msg);
return;
}
@@ -3997,7 +4039,7 @@ static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner) {
assert(Owner->NameVisibility != Module::Hidden && "nothing to make visible?");
for (Decl *D : Names) {
- bool wasHidden = D->isHidden();
+ bool wasHidden = !D->isUnconditionallyVisible();
D->setVisibleDespiteOwningModule();
if (wasHidden && SemaObj) {
@@ -4023,8 +4065,8 @@ void ASTReader::makeModuleVisible(Module *Mod,
continue;
}
- if (!Mod->isAvailable()) {
- // Modules that aren't available cannot be made visible.
+ if (Mod->isUnimportable()) {
+ // Modules that aren't importable cannot be made visible.
continue;
}
@@ -4059,9 +4101,9 @@ void ASTReader::makeModuleVisible(Module *Mod,
/// visible.
void ASTReader::mergeDefinitionVisibility(NamedDecl *Def,
NamedDecl *MergedDef) {
- if (Def->isHidden()) {
+ if (!Def->isUnconditionallyVisible()) {
// If MergedDef is visible or becomes visible, make the definition visible.
- if (!MergedDef->isHidden())
+ if (MergedDef->isUnconditionallyVisible())
Def->setVisibleDespiteOwningModule();
else {
getContext().mergeDefinitionIntoModule(
@@ -4701,7 +4743,12 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
switch ((UnhashedControlBlockRecordTypes)MaybeRecordType.get()) {
case SIGNATURE:
if (F)
- std::copy(Record.begin(), Record.end(), F->Signature.data());
+ F->Signature = ASTFileSignature::create(Record.begin(), Record.end());
+ break;
+ case AST_BLOCK_HASH:
+ if (F)
+ F->ASTBlockHash =
+ ASTFileSignature::create(Record.begin(), Record.end());
break;
case DIAGNOSTIC_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
@@ -4990,8 +5037,8 @@ static ASTFileSignature readASTFileSignature(StringRef PCH) {
return ASTFileSignature();
}
if (SIGNATURE == MaybeRecord.get())
- return {{{(uint32_t)Record[0], (uint32_t)Record[1], (uint32_t)Record[2],
- (uint32_t)Record[3], (uint32_t)Record[4]}}};
+ return ASTFileSignature::create(Record.begin(),
+ Record.begin() + ASTFileSignature::size);
}
}
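// The hand-built five-word signature literal is replaced by
// ASTFileSignature::create(First, Last), which, judging from the uses in this
// diff, consumes exactly ASTFileSignature::size record words; the same
// constant is used later when skipping signatures inside IMPORTS records.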
@@ -5070,13 +5117,11 @@ namespace {
SimplePCHValidator(const LangOptions &ExistingLangOpts,
const TargetOptions &ExistingTargetOpts,
const PreprocessorOptions &ExistingPPOpts,
- StringRef ExistingModuleCachePath,
- FileManager &FileMgr)
- : ExistingLangOpts(ExistingLangOpts),
- ExistingTargetOpts(ExistingTargetOpts),
- ExistingPPOpts(ExistingPPOpts),
- ExistingModuleCachePath(ExistingModuleCachePath),
- FileMgr(FileMgr) {}
+ StringRef ExistingModuleCachePath, FileManager &FileMgr)
+ : ExistingLangOpts(ExistingLangOpts),
+ ExistingTargetOpts(ExistingTargetOpts),
+ ExistingPPOpts(ExistingPPOpts),
+ ExistingModuleCachePath(ExistingModuleCachePath), FileMgr(FileMgr) {}
bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
bool AllowCompatibleDifferences) override {
@@ -5220,7 +5265,7 @@ bool ASTReader::readASTFileControlBlock(
Listener.ReadModuleName(Blob);
break;
case MODULE_DIRECTORY:
- ModuleDir = Blob;
+ ModuleDir = std::string(Blob);
break;
case MODULE_MAP_FILE: {
unsigned Idx = 0;
@@ -5272,7 +5317,7 @@ bool ASTReader::readASTFileControlBlock(
break;
case INPUT_FILE:
bool Overridden = static_cast<bool>(Record[3]);
- std::string Filename = Blob;
+ std::string Filename = std::string(Blob);
ResolveImportedPath(Filename, ModuleDir);
shouldContinue = Listener.visitInputFile(
Filename, isSystemFile, Overridden, /*IsExplicitModule*/false);
@@ -5291,7 +5336,9 @@ bool ASTReader::readASTFileControlBlock(
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
- Idx += 1+1+1+1+5; // Kind, ImportLoc, Size, ModTime, Signature
+ Idx +=
+ 1 + 1 + 1 + 1 +
+ ASTFileSignature::size; // Kind, ImportLoc, Size, ModTime, Signature
std::string ModuleName = ReadString(Record, Idx);
std::string Filename = ReadString(Record, Idx);
ResolveImportedPath(Filename, ModuleDir);
@@ -5531,14 +5578,14 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// imported module file.
CurrentModule->Requirements.clear();
CurrentModule->MissingHeaders.clear();
- CurrentModule->IsMissingRequirement =
- ParentModule && ParentModule->IsMissingRequirement;
- CurrentModule->IsAvailable = !CurrentModule->IsMissingRequirement;
+ CurrentModule->IsUnimportable =
+ ParentModule && ParentModule->IsUnimportable;
+ CurrentModule->IsAvailable = !CurrentModule->IsUnimportable;
break;
}
case SUBMODULE_UMBRELLA_HEADER: {
- std::string Filename = Blob;
+ std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
if (auto Umbrella = PP.getFileManager().getFile(Filename)) {
if (!CurrentModule->getUmbrellaHeader())
@@ -5571,7 +5618,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SUBMODULE_UMBRELLA_DIR: {
- std::string Dirname = Blob;
+ std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
if (!CurrentModule->getUmbrellaDir())
@@ -5641,7 +5688,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_LINK_LIBRARY:
ModMap.resolveLinkAsDependencies(CurrentModule);
CurrentModule->LinkLibraries.push_back(
- Module::LinkLibrary(Blob, Record[0]));
+ Module::LinkLibrary(std::string(Blob), Record[0]));
break;
case SUBMODULE_CONFIG_MACRO:
@@ -5902,8 +5949,8 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
}
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
- if (llvm::Error Err =
- M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset)) {
+ if (llvm::Error Err = M.PreprocessorDetailCursor.JumpToBit(
+ M.MacroOffsetsBase + PPOffs.BitOffset)) {
Error(std::move(Err));
return nullptr;
}
@@ -6316,7 +6363,9 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
assert(I != GlobalTypeMap.end() && "Corrupted global type map");
ModuleFile *M = I->second;
- return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
+ return RecordLocation(
+ M, M->TypeOffsets[Index - M->BaseTypeIndex].getBitOffset() +
+ M->DeclsBlockStartOffset);
}
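// Put differently, a type's absolute position is now assembled from two
// block-relative pieces introduced in this diff:
//   M->DeclsBlockStartOffset                                   // DECLTYPES block start
//   + M->TypeOffsets[Index - M->BaseTypeIndex].getBitOffset()  // offset within it
// presumably so TYPE_OFFSET can store smaller, block-relative values.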
static llvm::Optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
@@ -6522,6 +6571,21 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
+void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
+ TL.setAttrNameLoc(readSourceLocation());
+ TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrRowOperand(Reader.readExpr());
+ TL.setAttrColumnOperand(Reader.readExpr());
+}
+
+void TypeLocReader::VisitDependentSizedMatrixTypeLoc(
+ DependentSizedMatrixTypeLoc TL) {
+ TL.setAttrNameLoc(readSourceLocation());
+ TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrRowOperand(Reader.readExpr());
+ TL.setAttrColumnOperand(Reader.readExpr());
+}
+
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
@@ -6710,6 +6774,15 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(readSourceLocation());
}
+void TypeLocReader::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+void TypeLocReader::VisitDependentExtIntTypeLoc(
+ clang::DependentExtIntTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+
void ASTRecordReader::readTypeLoc(TypeLoc TL) {
TypeLocReader TLR(*this);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
@@ -6788,6 +6861,9 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_INT128_ID:
T = Context.Int128Ty;
break;
+ case PREDEF_TYPE_BFLOAT16_ID:
+ T = Context.BFloat16Ty;
+ break;
case PREDEF_TYPE_HALF_ID:
T = Context.HalfTy;
break;
@@ -6951,9 +7027,18 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_BUILTIN_FN:
T = Context.BuiltinFnTy;
break;
+ case PREDEF_TYPE_INCOMPLETE_MATRIX_IDX:
+ T = Context.IncompleteMatrixIdxTy;
+ break;
case PREDEF_TYPE_OMP_ARRAY_SECTION:
T = Context.OMPArraySectionTy;
break;
+ case PREDEF_TYPE_OMP_ARRAY_SHAPING:
+    T = Context.OMPArrayShapingTy;
+ break;
+ case PREDEF_TYPE_OMP_ITERATOR:
+ T = Context.OMPIteratorTy;
+ break;
#define SVE_TYPE(Name, Id, SingletonId) \
case PREDEF_TYPE_##Id##_ID: \
T = Context.SingletonId; \
@@ -7276,6 +7361,9 @@ static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
case PREDEF_DECL_BUILTIN_MS_VA_LIST_ID:
return Context.getBuiltinMSVaListDecl();
+ case PREDEF_DECL_BUILTIN_MS_GUID_ID:
+ return Context.getMSGuidTagDecl();
+
case PREDEF_DECL_EXTERN_C_CONTEXT_ID:
return Context.getExternCContextDecl();
@@ -7756,7 +7844,9 @@ void ASTReader::InitializeSema(Sema &S) {
// FIXME: What happens if these are changed by a module import?
if (!FPPragmaOptions.empty()) {
assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
- SemaObj->FPFeatures = FPOptions(FPPragmaOptions[0]);
+ FPOptionsOverride NewOverrides(FPPragmaOptions[0]);
+ SemaObj->CurFPFeatures =
+ NewOverrides.applyOverrides(SemaObj->getLangOpts());
}
SemaObj->OpenCLFeatures.copy(OpenCLExtensions);
@@ -7826,6 +7916,34 @@ void ASTReader::UpdateSema() {
SemaObj->PackStack.CurrentPragmaLocation = PragmaPackCurrentLocation;
}
}
+ if (FpPragmaCurrentValue) {
+ // The bottom of the stack might have a default value. It must be adjusted
+ // to the current value to ensure that fp-pragma state is preserved after
+ // popping entries that were included/imported from a PCH/module.
+ bool DropFirst = false;
+ if (!FpPragmaStack.empty() && FpPragmaStack.front().Location.isInvalid()) {
+ assert(FpPragmaStack.front().Value ==
+ SemaObj->FpPragmaStack.DefaultValue &&
+ "Expected a default pragma float_control value");
+ SemaObj->FpPragmaStack.Stack.emplace_back(
+ FpPragmaStack.front().SlotLabel, SemaObj->FpPragmaStack.CurrentValue,
+ SemaObj->FpPragmaStack.CurrentPragmaLocation,
+ FpPragmaStack.front().PushLocation);
+ DropFirst = true;
+ }
+ for (const auto &Entry :
+ llvm::makeArrayRef(FpPragmaStack).drop_front(DropFirst ? 1 : 0))
+ SemaObj->FpPragmaStack.Stack.emplace_back(
+ Entry.SlotLabel, Entry.Value, Entry.Location, Entry.PushLocation);
+ if (FpPragmaCurrentLocation.isInvalid()) {
+ assert(*FpPragmaCurrentValue == SemaObj->FpPragmaStack.DefaultValue &&
+ "Expected a default pragma float_control value");
+ // Keep the current values.
+ } else {
+ SemaObj->FpPragmaStack.CurrentValue = *FpPragmaCurrentValue;
+ SemaObj->FpPragmaStack.CurrentPragmaLocation = FpPragmaCurrentLocation;
+ }
+ }
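// Condensed restatement of the replay above (names as in this function; a
// reading aid only):
//  1. If the bottom imported entry has an invalid location, it is the default
//     slot: re-push Sema's current value/location under the imported label
//     and drop that entry from the replay (DropFirst).
//  2. Push every remaining imported entry onto SemaObj->FpPragmaStack.Stack
//     unchanged.
//  3. If FpPragmaCurrentLocation is valid, overwrite CurrentValue and
//     CurrentPragmaLocation; otherwise the imported value is asserted to be
//     the default and Sema's current state is kept.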
}
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -8182,6 +8300,19 @@ void ASTReader::ReadUnusedLocalTypedefNameCandidates(
UnusedLocalTypedefNameCandidates.clear();
}
+void ASTReader::ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) {
+ for (unsigned I = 0, N = DeclsToCheckForDeferredDiags.size(); I != N;
+ ++I) {
+ auto *D = dyn_cast_or_null<Decl>(
+ GetDecl(DeclsToCheckForDeferredDiags[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ DeclsToCheckForDeferredDiags.clear();
+}
+
void ASTReader::ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) {
if (ReferencedSelectorsData.empty())
@@ -8400,7 +8531,8 @@ MacroInfo *ASTReader::getMacro(MacroID ID) {
assert(I != GlobalMacroMap.end() && "Corrupted global macro map");
ModuleFile *M = I->second;
unsigned Index = ID - M->BaseMacroID;
- MacrosLoaded[ID] = ReadMacroRecord(*M, M->MacroOffsets[Index]);
+ MacrosLoaded[ID] =
+ ReadMacroRecord(*M, M->MacroOffsetsBase + M->MacroOffsets[Index]);
if (DeserializationListener)
DeserializationListener->MacroRead(ID + NUM_PREDEF_MACRO_IDS,
@@ -8493,10 +8625,10 @@ unsigned ASTReader::getModuleFileID(ModuleFile *F) {
return (I - PCHModules.end()) << 1;
}
-llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+llvm::Optional<ASTSourceDescriptor>
ASTReader::getSourceDescriptor(unsigned ID) {
- if (const Module *M = getSubmodule(ID))
- return ExternalASTSource::ASTSourceDescriptor(*M);
+ if (Module *M = getSubmodule(ID))
+ return ASTSourceDescriptor(*M);
// If there is only a single PCH, return it instead.
// Chained PCH are not supported.
@@ -8505,8 +8637,8 @@ ASTReader::getSourceDescriptor(unsigned ID) {
ModuleFile &MF = ModuleMgr.getPrimaryModule();
StringRef ModuleName = llvm::sys::path::filename(MF.OriginalSourceFileName);
StringRef FileName = llvm::sys::path::filename(MF.FileName);
- return ASTReader::ASTSourceDescriptor(ModuleName, MF.OriginalDir, FileName,
- MF.Signature);
+ return ASTSourceDescriptor(ModuleName, MF.OriginalDir, FileName,
+ MF.Signature);
}
return None;
}
@@ -9450,6 +9582,446 @@ void ASTReader::diagnoseOdrViolations() {
return Hash.CalculateHash();
};
+ // Used with err_module_odr_violation_mismatch_decl and
+ // note_module_odr_violation_mismatch_decl
+  // This list should be the same Decls as in ODRHash::isDeclToBeProcessed
+ enum ODRMismatchDecl {
+ EndOfClass,
+ PublicSpecifer,
+ PrivateSpecifer,
+ ProtectedSpecifer,
+ StaticAssert,
+ Field,
+ CXXMethod,
+ TypeAlias,
+ TypeDef,
+ Var,
+ Friend,
+ FunctionTemplate,
+ Other
+ };
+
+ // Used with err_module_odr_violation_mismatch_decl_diff and
+ // note_module_odr_violation_mismatch_decl_diff
+ enum ODRMismatchDeclDifference {
+ StaticAssertCondition,
+ StaticAssertMessage,
+ StaticAssertOnlyMessage,
+ FieldName,
+ FieldTypeName,
+ FieldSingleBitField,
+ FieldDifferentWidthBitField,
+ FieldSingleMutable,
+ FieldSingleInitializer,
+ FieldDifferentInitializers,
+ MethodName,
+ MethodDeleted,
+ MethodDefaulted,
+ MethodVirtual,
+ MethodStatic,
+ MethodVolatile,
+ MethodConst,
+ MethodInline,
+ MethodNumberParameters,
+ MethodParameterType,
+ MethodParameterName,
+ MethodParameterSingleDefaultArgument,
+ MethodParameterDifferentDefaultArgument,
+ MethodNoTemplateArguments,
+ MethodDifferentNumberTemplateArguments,
+ MethodDifferentTemplateArgument,
+ MethodSingleBody,
+ MethodDifferentBody,
+ TypedefName,
+ TypedefType,
+ VarName,
+ VarType,
+ VarSingleInitializer,
+ VarDifferentInitializer,
+ VarConstexpr,
+ FriendTypeFunction,
+ FriendType,
+ FriendFunction,
+ FunctionTemplateDifferentNumberParameters,
+ FunctionTemplateParameterDifferentKind,
+ FunctionTemplateParameterName,
+ FunctionTemplateParameterSingleDefaultArgument,
+ FunctionTemplateParameterDifferentDefaultArgument,
+ FunctionTemplateParameterDifferentType,
+ FunctionTemplatePackParameter,
+ };
+
+ // These lambdas have the common portions of the ODR diagnostics. This
+  // has the same return as Diag(), so additional parameters can be passed
+  // in with operator<<.
+ auto ODRDiagDeclError = [this](NamedDecl *FirstRecord, StringRef FirstModule,
+ SourceLocation Loc, SourceRange Range,
+ ODRMismatchDeclDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
+ << FirstRecord << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto ODRDiagDeclNote = [this](StringRef SecondModule, SourceLocation Loc,
+ SourceRange Range, ODRMismatchDeclDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
+ << SecondModule << Range << DiffType;
+ };
+
+ auto ODRDiagField = [this, &ODRDiagDeclError, &ODRDiagDeclNote,
+ &ComputeQualTypeODRHash, &ComputeODRHash](
+ NamedDecl *FirstRecord, StringRef FirstModule,
+ StringRef SecondModule, FieldDecl *FirstField,
+ FieldDecl *SecondField) {
+ IdentifierInfo *FirstII = FirstField->getIdentifier();
+ IdentifierInfo *SecondII = SecondField->getIdentifier();
+ if (FirstII->getName() != SecondII->getName()) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldName)
+ << FirstII;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldName)
+ << SecondII;
+
+ return true;
+ }
+
+ assert(getContext().hasSameType(FirstField->getType(),
+ SecondField->getType()));
+
+ QualType FirstType = FirstField->getType();
+ QualType SecondType = SecondField->getType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldTypeName)
+ << FirstII << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldTypeName)
+ << SecondII << SecondType;
+
+ return true;
+ }
+
+ const bool IsFirstBitField = FirstField->isBitField();
+ const bool IsSecondBitField = SecondField->isBitField();
+ if (IsFirstBitField != IsSecondBitField) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleBitField)
+ << FirstII << IsFirstBitField;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleBitField)
+ << SecondII << IsSecondBitField;
+ return true;
+ }
+
+ if (IsFirstBitField && IsSecondBitField) {
+ unsigned FirstBitWidthHash =
+ ComputeODRHash(FirstField->getBitWidth());
+ unsigned SecondBitWidthHash =
+ ComputeODRHash(SecondField->getBitWidth());
+ if (FirstBitWidthHash != SecondBitWidthHash) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(),
+ FieldDifferentWidthBitField)
+ << FirstII << FirstField->getBitWidth()->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(),
+ FieldDifferentWidthBitField)
+ << SecondII << SecondField->getBitWidth()->getSourceRange();
+ return true;
+ }
+ }
+
+ if (!PP.getLangOpts().CPlusPlus)
+ return false;
+
+ const bool IsFirstMutable = FirstField->isMutable();
+ const bool IsSecondMutable = SecondField->isMutable();
+ if (IsFirstMutable != IsSecondMutable) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleMutable)
+ << FirstII << IsFirstMutable;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleMutable)
+ << SecondII << IsSecondMutable;
+ return true;
+ }
+
+ const Expr *FirstInitializer = FirstField->getInClassInitializer();
+ const Expr *SecondInitializer = SecondField->getInClassInitializer();
+ if ((!FirstInitializer && SecondInitializer) ||
+ (FirstInitializer && !SecondInitializer)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleInitializer)
+ << FirstII << (FirstInitializer != nullptr);
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleInitializer)
+ << SecondII << (SecondInitializer != nullptr);
+ return true;
+ }
+
+ if (FirstInitializer && SecondInitializer) {
+ unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
+ unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
+ if (FirstInitHash != SecondInitHash) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(),
+ FieldDifferentInitializers)
+ << FirstII << FirstInitializer->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(),
+ FieldDifferentInitializers)
+ << SecondII << SecondInitializer->getSourceRange();
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ auto ODRDiagTypeDefOrAlias =
+ [&ODRDiagDeclError, &ODRDiagDeclNote, &ComputeQualTypeODRHash](
+ NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
+ TypedefNameDecl *FirstTD, TypedefNameDecl *SecondTD,
+ bool IsTypeAlias) {
+ auto FirstName = FirstTD->getDeclName();
+ auto SecondName = SecondTD->getDeclName();
+ if (FirstName != SecondName) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
+ FirstTD->getSourceRange(), TypedefName)
+ << IsTypeAlias << FirstName;
+ ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
+ SecondTD->getSourceRange(), TypedefName)
+ << IsTypeAlias << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstTD->getUnderlyingType();
+ QualType SecondType = SecondTD->getUnderlyingType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
+ FirstTD->getSourceRange(), TypedefType)
+ << IsTypeAlias << FirstName << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
+ SecondTD->getSourceRange(), TypedefType)
+ << IsTypeAlias << SecondName << SecondType;
+ return true;
+ }
+
+ return false;
+ };
+
+ auto ODRDiagVar = [&ODRDiagDeclError, &ODRDiagDeclNote,
+ &ComputeQualTypeODRHash, &ComputeODRHash,
+ this](NamedDecl *FirstRecord, StringRef FirstModule,
+ StringRef SecondModule, VarDecl *FirstVD,
+ VarDecl *SecondVD) {
+ auto FirstName = FirstVD->getDeclName();
+ auto SecondName = SecondVD->getDeclName();
+ if (FirstName != SecondName) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarName)
+ << FirstName;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarName)
+ << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstVD->getType();
+ QualType SecondType = SecondVD->getType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarType)
+ << FirstName << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarType)
+ << SecondName << SecondType;
+ return true;
+ }
+
+ if (!PP.getLangOpts().CPlusPlus)
+ return false;
+
+ const Expr *FirstInit = FirstVD->getInit();
+ const Expr *SecondInit = SecondVD->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarSingleInitializer)
+ << FirstName << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarSingleInitializer)
+ << SecondName << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ return true;
+ }
+
+ if (FirstInit && SecondInit &&
+ ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarDifferentInitializer)
+ << FirstName << FirstInit->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarDifferentInitializer)
+ << SecondName << SecondInit->getSourceRange();
+ return true;
+ }
+
+ const bool FirstIsConstexpr = FirstVD->isConstexpr();
+ const bool SecondIsConstexpr = SecondVD->isConstexpr();
+ if (FirstIsConstexpr != SecondIsConstexpr) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarConstexpr)
+ << FirstName << FirstIsConstexpr;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarConstexpr)
+ << SecondName << SecondIsConstexpr;
+ return true;
+ }
+ return false;
+ };
+
+ auto DifferenceSelector = [](Decl *D) {
+ assert(D && "valid Decl required");
+ switch (D->getKind()) {
+ default:
+ return Other;
+ case Decl::AccessSpec:
+ switch (D->getAccess()) {
+ case AS_public:
+ return PublicSpecifer;
+ case AS_private:
+ return PrivateSpecifer;
+ case AS_protected:
+ return ProtectedSpecifer;
+ case AS_none:
+ break;
+ }
+ llvm_unreachable("Invalid access specifier");
+ case Decl::StaticAssert:
+ return StaticAssert;
+ case Decl::Field:
+ return Field;
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ return CXXMethod;
+ case Decl::TypeAlias:
+ return TypeAlias;
+ case Decl::Typedef:
+ return TypeDef;
+ case Decl::Var:
+ return Var;
+ case Decl::Friend:
+ return Friend;
+ case Decl::FunctionTemplate:
+ return FunctionTemplate;
+ }
+ };
+
+ using DeclHashes = llvm::SmallVector<std::pair<Decl *, unsigned>, 4>;
+ auto PopulateHashes = [&ComputeSubDeclODRHash](DeclHashes &Hashes,
+ RecordDecl *Record,
+ const DeclContext *DC) {
+ for (auto *D : Record->decls()) {
+ if (!ODRHash::isDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
+ }
+ };
+
+ struct DiffResult {
+ Decl *FirstDecl = nullptr, *SecondDecl = nullptr;
+ ODRMismatchDecl FirstDiffType = Other, SecondDiffType = Other;
+ };
+
+ // If there is a diagnoseable difference, FirstDiffType and
+ // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
+ // filled in if not EndOfClass.
+ auto FindTypeDiffs = [&DifferenceSelector](DeclHashes &FirstHashes,
+ DeclHashes &SecondHashes) {
+ DiffResult DR;
+ auto FirstIt = FirstHashes.begin();
+ auto SecondIt = SecondHashes.begin();
+ while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
+ if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
+ FirstIt->second == SecondIt->second) {
+ ++FirstIt;
+ ++SecondIt;
+ continue;
+ }
+
+ DR.FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
+ DR.SecondDecl =
+ SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
+
+ DR.FirstDiffType =
+ DR.FirstDecl ? DifferenceSelector(DR.FirstDecl) : EndOfClass;
+ DR.SecondDiffType =
+ DR.SecondDecl ? DifferenceSelector(DR.SecondDecl) : EndOfClass;
+ return DR;
+ }
+ return DR;
+ };
+
+ // Use this to diagnose that an unexpected Decl was encountered
+ // or no difference was detected. This causes a generic error
+ // message to be emitted.
+ auto DiagnoseODRUnexpected = [this](DiffResult &DR, NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ NamedDecl *SecondRecord,
+ StringRef SecondModule) {
+ Diag(FirstRecord->getLocation(),
+ diag::err_module_odr_violation_different_definitions)
+ << FirstRecord << FirstModule.empty() << FirstModule;
+
+ if (DR.FirstDecl) {
+ Diag(DR.FirstDecl->getLocation(), diag::note_first_module_difference)
+ << FirstRecord << DR.FirstDecl->getSourceRange();
+ }
+
+ Diag(SecondRecord->getLocation(),
+ diag::note_module_odr_violation_different_definitions)
+ << SecondModule;
+
+ if (DR.SecondDecl) {
+ Diag(DR.SecondDecl->getLocation(), diag::note_second_module_difference)
+ << DR.SecondDecl->getSourceRange();
+ }
+ };
+
+ auto DiagnoseODRMismatch =
+ [this](DiffResult &DR, NamedDecl *FirstRecord, StringRef FirstModule,
+ NamedDecl *SecondRecord, StringRef SecondModule) {
+ SourceLocation FirstLoc;
+ SourceRange FirstRange;
+ auto *FirstTag = dyn_cast<TagDecl>(FirstRecord);
+ if (DR.FirstDiffType == EndOfClass && FirstTag) {
+ FirstLoc = FirstTag->getBraceRange().getEnd();
+ } else {
+ FirstLoc = DR.FirstDecl->getLocation();
+ FirstRange = DR.FirstDecl->getSourceRange();
+ }
+ Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
+ << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
+ << DR.FirstDiffType;
+
+ SourceLocation SecondLoc;
+ SourceRange SecondRange;
+ auto *SecondTag = dyn_cast<TagDecl>(SecondRecord);
+ if (DR.SecondDiffType == EndOfClass && SecondTag) {
+ SecondLoc = SecondTag->getBraceRange().getEnd();
+ } else {
+ SecondLoc = DR.SecondDecl->getLocation();
+ SecondRange = DR.SecondDecl->getSourceRange();
+ }
+ Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
+ << SecondModule << SecondRange << DR.SecondDiffType;
+ };
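// How the helpers above combine further down in this function (sketch
// assembled from the later hunks; names are the ones defined above):
//   PopulateHashes(FirstHashes, FirstRecord, DC);
//   PopulateHashes(SecondHashes, SecondRecord, DC);
//   auto DR = FindTypeDiffs(FirstHashes, SecondHashes);
//   if (DR.FirstDiffType == Other || DR.SecondDiffType == Other)
//     DiagnoseODRUnexpected(DR, FirstRecord, FirstModule,
//                           SecondRecord, SecondModule);
//   else if (DR.FirstDiffType != DR.SecondDiffType)
//     DiagnoseODRMismatch(DR, FirstRecord, FirstModule,
//                         SecondRecord, SecondModule);
//   else the per-kind switch later in the function dispatches to
//        ODRDiagField, ODRDiagTypeDefOrAlias, ODRDiagVar and the rest.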
+
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
// If we've already pointed out a specific problem with this class, don't
@@ -9483,16 +10055,16 @@ void ASTReader::diagnoseOdrViolations() {
BaseVirtual,
BaseAccess,
};
- auto ODRDiagError = [FirstRecord, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
+ auto ODRDiagBaseError = [FirstRecord, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
return Diag(Loc, diag::err_module_odr_violation_definition_data)
<< FirstRecord << FirstModule.empty() << FirstModule << Range
<< DiffType;
};
- auto ODRDiagNote = [&SecondModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
+ auto ODRDiagBaseNote = [&SecondModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
return Diag(Loc, diag::note_module_odr_violation_definition_data)
<< SecondModule << Range << DiffType;
};
@@ -9511,22 +10083,22 @@ void ASTReader::diagnoseOdrViolations() {
};
if (FirstNumBases != SecondNumBases) {
- ODRDiagError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumBases)
+ ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumBases)
<< FirstNumBases;
- ODRDiagNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumBases)
+ ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumBases)
<< SecondNumBases;
Diagnosed = true;
break;
}
if (FirstNumVBases != SecondNumVBases) {
- ODRDiagError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumVBases)
+ ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumVBases)
<< FirstNumVBases;
- ODRDiagNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumVBases)
+ ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumVBases)
<< SecondNumVBases;
Diagnosed = true;
break;
@@ -9540,33 +10112,33 @@ void ASTReader::diagnoseOdrViolations() {
auto SecondBase = SecondBases[i];
if (ComputeQualTypeODRHash(FirstBase.getType()) !=
ComputeQualTypeODRHash(SecondBase.getType())) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseType)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseType)
<< (i + 1) << FirstBase.getType();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseType)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseType)
<< (i + 1) << SecondBase.getType();
break;
}
if (FirstBase.isVirtual() != SecondBase.isVirtual()) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseVirtual)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseVirtual)
<< (i + 1) << FirstBase.isVirtual() << FirstBase.getType();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseVirtual)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseVirtual)
<< (i + 1) << SecondBase.isVirtual() << SecondBase.getType();
break;
}
if (FirstBase.getAccessSpecifierAsWritten() !=
SecondBase.getAccessSpecifierAsWritten()) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseAccess)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseAccess)
<< (i + 1) << FirstBase.getType()
<< (int)FirstBase.getAccessSpecifierAsWritten();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseAccess)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseAccess)
<< (i + 1) << SecondBase.getType()
<< (int)SecondBase.getAccessSpecifierAsWritten();
break;
@@ -9579,8 +10151,6 @@ void ASTReader::diagnoseOdrViolations() {
}
}
- using DeclHashes = llvm::SmallVector<std::pair<Decl *, unsigned>, 4>;
-
const ClassTemplateDecl *FirstTemplate =
FirstRecord->getDescribedClassTemplate();
const ClassTemplateDecl *SecondTemplate =
@@ -9621,16 +10191,16 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstIt->second == SecondIt->second)
continue;
- auto ODRDiagError = [FirstRecord, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
+ auto ODRDiagTemplateError = [FirstRecord, &FirstModule, this](
+ SourceLocation Loc, SourceRange Range,
+ ODRTemplateDifference DiffType) {
return Diag(Loc, diag::err_module_odr_violation_template_parameter)
<< FirstRecord << FirstModule.empty() << FirstModule << Range
<< DiffType;
};
- auto ODRDiagNote = [&SecondModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
+ auto ODRDiagTemplateNote = [&SecondModule, this](
+ SourceLocation Loc, SourceRange Range,
+ ODRTemplateDifference DiffType) {
return Diag(Loc, diag::note_module_odr_violation_template_parameter)
<< SecondModule << Range << DiffType;
};
@@ -9651,11 +10221,13 @@ void ASTReader::diagnoseOdrViolations() {
SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo();
assert((!FirstNameEmpty || !SecondNameEmpty) &&
"Both template parameters cannot be unnamed.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- FirstNameEmpty ? ParamEmptyName : ParamName)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ FirstNameEmpty ? ParamEmptyName : ParamName)
<< FirstName;
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- SecondNameEmpty ? ParamEmptyName : ParamName)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ SecondNameEmpty ? ParamEmptyName : ParamName)
<< SecondName;
break;
}
@@ -9674,13 +10246,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9688,10 +10260,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9706,13 +10280,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9720,10 +10294,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9739,13 +10315,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9753,10 +10329,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9773,224 +10351,32 @@ void ASTReader::diagnoseOdrViolations() {
DeclHashes FirstHashes;
DeclHashes SecondHashes;
+ const DeclContext *DC = FirstRecord;
+ PopulateHashes(FirstHashes, FirstRecord, DC);
+ PopulateHashes(SecondHashes, SecondRecord, DC);
- auto PopulateHashes = [&ComputeSubDeclODRHash, FirstRecord](
- DeclHashes &Hashes, CXXRecordDecl *Record) {
- for (auto *D : Record->decls()) {
- // Due to decl merging, the first CXXRecordDecl is the parent of
- // Decls in both records.
- if (!ODRHash::isWhitelistedDecl(D, FirstRecord))
- continue;
- Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
- }
- };
- PopulateHashes(FirstHashes, FirstRecord);
- PopulateHashes(SecondHashes, SecondRecord);
-
- // Used with err_module_odr_violation_mismatch_decl and
- // note_module_odr_violation_mismatch_decl
- // This list should be the same Decl's as in ODRHash::isWhiteListedDecl
- enum {
- EndOfClass,
- PublicSpecifer,
- PrivateSpecifer,
- ProtectedSpecifer,
- StaticAssert,
- Field,
- CXXMethod,
- TypeAlias,
- TypeDef,
- Var,
- Friend,
- FunctionTemplate,
- Other
- } FirstDiffType = Other,
- SecondDiffType = Other;
-
- auto DifferenceSelector = [](Decl *D) {
- assert(D && "valid Decl required");
- switch (D->getKind()) {
- default:
- return Other;
- case Decl::AccessSpec:
- switch (D->getAccess()) {
- case AS_public:
- return PublicSpecifer;
- case AS_private:
- return PrivateSpecifer;
- case AS_protected:
- return ProtectedSpecifer;
- case AS_none:
- break;
- }
- llvm_unreachable("Invalid access specifier");
- case Decl::StaticAssert:
- return StaticAssert;
- case Decl::Field:
- return Field;
- case Decl::CXXMethod:
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- return CXXMethod;
- case Decl::TypeAlias:
- return TypeAlias;
- case Decl::Typedef:
- return TypeDef;
- case Decl::Var:
- return Var;
- case Decl::Friend:
- return Friend;
- case Decl::FunctionTemplate:
- return FunctionTemplate;
- }
- };
-
- Decl *FirstDecl = nullptr;
- Decl *SecondDecl = nullptr;
- auto FirstIt = FirstHashes.begin();
- auto SecondIt = SecondHashes.begin();
-
- // If there is a diagnoseable difference, FirstDiffType and
- // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
- // filled in if not EndOfClass.
- while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
- if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
- FirstIt->second == SecondIt->second) {
- ++FirstIt;
- ++SecondIt;
- continue;
- }
-
- FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
- SecondDecl = SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
-
- FirstDiffType = FirstDecl ? DifferenceSelector(FirstDecl) : EndOfClass;
- SecondDiffType =
- SecondDecl ? DifferenceSelector(SecondDecl) : EndOfClass;
-
- break;
- }
+ auto DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ Decl *FirstDecl = DR.FirstDecl;
+ Decl *SecondDecl = DR.SecondDecl;
if (FirstDiffType == Other || SecondDiffType == Other) {
- // Reaching this point means an unexpected Decl was encountered
- // or no difference was detected. This causes a generic error
- // message to be emitted.
- Diag(FirstRecord->getLocation(),
- diag::err_module_odr_violation_different_definitions)
- << FirstRecord << FirstModule.empty() << FirstModule;
-
- if (FirstDecl) {
- Diag(FirstDecl->getLocation(), diag::note_first_module_difference)
- << FirstRecord << FirstDecl->getSourceRange();
- }
-
- Diag(SecondRecord->getLocation(),
- diag::note_module_odr_violation_different_definitions)
- << SecondModule;
-
- if (SecondDecl) {
- Diag(SecondDecl->getLocation(), diag::note_second_module_difference)
- << SecondDecl->getSourceRange();
- }
-
+ DiagnoseODRUnexpected(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
Diagnosed = true;
break;
}
if (FirstDiffType != SecondDiffType) {
- SourceLocation FirstLoc;
- SourceRange FirstRange;
- if (FirstDiffType == EndOfClass) {
- FirstLoc = FirstRecord->getBraceRange().getEnd();
- } else {
- FirstLoc = FirstIt->first->getLocation();
- FirstRange = FirstIt->first->getSourceRange();
- }
- Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
- << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
- << FirstDiffType;
-
- SourceLocation SecondLoc;
- SourceRange SecondRange;
- if (SecondDiffType == EndOfClass) {
- SecondLoc = SecondRecord->getBraceRange().getEnd();
- } else {
- SecondLoc = SecondDecl->getLocation();
- SecondRange = SecondDecl->getSourceRange();
- }
- Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
- << SecondModule << SecondRange << SecondDiffType;
+ DiagnoseODRMismatch(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
Diagnosed = true;
break;
}
assert(FirstDiffType == SecondDiffType);
- // Used with err_module_odr_violation_mismatch_decl_diff and
- // note_module_odr_violation_mismatch_decl_diff
- enum ODRDeclDifference {
- StaticAssertCondition,
- StaticAssertMessage,
- StaticAssertOnlyMessage,
- FieldName,
- FieldTypeName,
- FieldSingleBitField,
- FieldDifferentWidthBitField,
- FieldSingleMutable,
- FieldSingleInitializer,
- FieldDifferentInitializers,
- MethodName,
- MethodDeleted,
- MethodDefaulted,
- MethodVirtual,
- MethodStatic,
- MethodVolatile,
- MethodConst,
- MethodInline,
- MethodNumberParameters,
- MethodParameterType,
- MethodParameterName,
- MethodParameterSingleDefaultArgument,
- MethodParameterDifferentDefaultArgument,
- MethodNoTemplateArguments,
- MethodDifferentNumberTemplateArguments,
- MethodDifferentTemplateArgument,
- MethodSingleBody,
- MethodDifferentBody,
- TypedefName,
- TypedefType,
- VarName,
- VarType,
- VarSingleInitializer,
- VarDifferentInitializer,
- VarConstexpr,
- FriendTypeFunction,
- FriendType,
- FriendFunction,
- FunctionTemplateDifferentNumberParameters,
- FunctionTemplateParameterDifferentKind,
- FunctionTemplateParameterName,
- FunctionTemplateParameterSingleDefaultArgument,
- FunctionTemplateParameterDifferentDefaultArgument,
- FunctionTemplateParameterDifferentType,
- FunctionTemplatePackParameter,
- };
-
- // These lambdas have the common portions of the ODR diagnostics. This
- // has the same return as Diag(), so addition parameters can be passed
- // in with operator<<
- auto ODRDiagError = [FirstRecord, &FirstModule, this](
- SourceLocation Loc, SourceRange Range, ODRDeclDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagNote = [&SecondModule, this](
- SourceLocation Loc, SourceRange Range, ODRDeclDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
- << SecondModule << Range << DiffType;
- };
-
switch (FirstDiffType) {
case Other:
case EndOfClass:
@@ -10008,10 +10394,10 @@ void ASTReader::diagnoseOdrViolations() {
unsigned FirstODRHash = ComputeODRHash(FirstExpr);
unsigned SecondODRHash = ComputeODRHash(SecondExpr);
if (FirstODRHash != SecondODRHash) {
- ODRDiagError(FirstExpr->getBeginLoc(), FirstExpr->getSourceRange(),
- StaticAssertCondition);
- ODRDiagNote(SecondExpr->getBeginLoc(), SecondExpr->getSourceRange(),
- StaticAssertCondition);
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstExpr->getBeginLoc(),
+ FirstExpr->getSourceRange(), StaticAssertCondition);
+ ODRDiagDeclNote(SecondModule, SecondExpr->getBeginLoc(),
+ SecondExpr->getSourceRange(), StaticAssertCondition);
Diagnosed = true;
break;
}
@@ -10036,9 +10422,11 @@ void ASTReader::diagnoseOdrViolations() {
SecondLoc = SecondSA->getBeginLoc();
SecondRange = SecondSA->getSourceRange();
}
- ODRDiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstLoc, FirstRange,
+ StaticAssertOnlyMessage)
<< (FirstStr == nullptr);
- ODRDiagNote(SecondLoc, SecondRange, StaticAssertOnlyMessage)
+ ODRDiagDeclNote(SecondModule, SecondLoc, SecondRange,
+ StaticAssertOnlyMessage)
<< (SecondStr == nullptr);
Diagnosed = true;
break;
@@ -10046,126 +10434,19 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstStr && SecondStr &&
FirstStr->getString() != SecondStr->getString()) {
- ODRDiagError(FirstStr->getBeginLoc(), FirstStr->getSourceRange(),
- StaticAssertMessage);
- ODRDiagNote(SecondStr->getBeginLoc(), SecondStr->getSourceRange(),
- StaticAssertMessage);
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstStr->getBeginLoc(),
+ FirstStr->getSourceRange(), StaticAssertMessage);
+ ODRDiagDeclNote(SecondModule, SecondStr->getBeginLoc(),
+ SecondStr->getSourceRange(), StaticAssertMessage);
Diagnosed = true;
break;
}
break;
}
case Field: {
- FieldDecl *FirstField = cast<FieldDecl>(FirstDecl);
- FieldDecl *SecondField = cast<FieldDecl>(SecondDecl);
- IdentifierInfo *FirstII = FirstField->getIdentifier();
- IdentifierInfo *SecondII = SecondField->getIdentifier();
- if (FirstII->getName() != SecondII->getName()) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldName)
- << FirstII;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldName)
- << SecondII;
-
- Diagnosed = true;
- break;
- }
-
- assert(getContext().hasSameType(FirstField->getType(),
- SecondField->getType()));
-
- QualType FirstType = FirstField->getType();
- QualType SecondType = SecondField->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldTypeName)
- << FirstII << FirstType;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldTypeName)
- << SecondII << SecondType;
-
- Diagnosed = true;
- break;
- }
-
- const bool IsFirstBitField = FirstField->isBitField();
- const bool IsSecondBitField = SecondField->isBitField();
- if (IsFirstBitField != IsSecondBitField) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleBitField)
- << FirstII << IsFirstBitField;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleBitField)
- << SecondII << IsSecondBitField;
- Diagnosed = true;
- break;
- }
-
- if (IsFirstBitField && IsSecondBitField) {
- unsigned FirstBitWidthHash =
- ComputeODRHash(FirstField->getBitWidth());
- unsigned SecondBitWidthHash =
- ComputeODRHash(SecondField->getBitWidth());
- if (FirstBitWidthHash != SecondBitWidthHash) {
- ODRDiagError(FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentWidthBitField)
- << FirstII << FirstField->getBitWidth()->getSourceRange();
- ODRDiagNote(SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentWidthBitField)
- << SecondII << SecondField->getBitWidth()->getSourceRange();
- Diagnosed = true;
- break;
- }
- }
-
- const bool IsFirstMutable = FirstField->isMutable();
- const bool IsSecondMutable = SecondField->isMutable();
- if (IsFirstMutable != IsSecondMutable) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleMutable)
- << FirstII << IsFirstMutable;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleMutable)
- << SecondII << IsSecondMutable;
- Diagnosed = true;
- break;
- }
-
- const Expr *FirstInitializer = FirstField->getInClassInitializer();
- const Expr *SecondInitializer = SecondField->getInClassInitializer();
- if ((!FirstInitializer && SecondInitializer) ||
- (FirstInitializer && !SecondInitializer)) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleInitializer)
- << FirstII << (FirstInitializer != nullptr);
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleInitializer)
- << SecondII << (SecondInitializer != nullptr);
- Diagnosed = true;
- break;
- }
-
- if (FirstInitializer && SecondInitializer) {
- unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
- unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
- if (FirstInitHash != SecondInitHash) {
- ODRDiagError(FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentInitializers)
- << FirstII << FirstInitializer->getSourceRange();
- ODRDiagNote(SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentInitializers)
- << SecondII << SecondInitializer->getSourceRange();
- Diagnosed = true;
- break;
- }
- }
-
+ Diagnosed = ODRDiagField(FirstRecord, FirstModule, SecondModule,
+ cast<FieldDecl>(FirstDecl),
+ cast<FieldDecl>(SecondDecl));
break;
}
case CXXMethod: {
@@ -10187,11 +10468,11 @@ void ASTReader::diagnoseOdrViolations() {
auto FirstName = FirstMethod->getDeclName();
auto SecondName = SecondMethod->getDeclName();
if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodName)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodName)
<< FirstMethodType << FirstName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodName)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodName)
<< SecondMethodType << SecondName;
Diagnosed = true;
@@ -10201,12 +10482,12 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
if (FirstDeleted != SecondDeleted) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDeleted)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDeleted)
<< FirstMethodType << FirstName << FirstDeleted;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDeleted)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDeleted)
<< SecondMethodType << SecondName << SecondDeleted;
Diagnosed = true;
break;
@@ -10215,12 +10496,12 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
if (FirstDefaulted != SecondDefaulted) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDefaulted)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDefaulted)
<< FirstMethodType << FirstName << FirstDefaulted;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDefaulted)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDefaulted)
<< SecondMethodType << SecondName << SecondDefaulted;
Diagnosed = true;
break;
@@ -10232,11 +10513,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool SecondPure = SecondMethod->isPure();
if ((FirstVirtual || SecondVirtual) &&
(FirstVirtual != SecondVirtual || FirstPure != SecondPure)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVirtual)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodVirtual)
<< FirstMethodType << FirstName << FirstPure << FirstVirtual;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVirtual)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodVirtual)
<< SecondMethodType << SecondName << SecondPure << SecondVirtual;
Diagnosed = true;
break;
@@ -10250,11 +10531,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstStatic = FirstStorage == SC_Static;
const bool SecondStatic = SecondStorage == SC_Static;
if (FirstStatic != SecondStatic) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodStatic)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodStatic)
<< FirstMethodType << FirstName << FirstStatic;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodStatic)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodStatic)
<< SecondMethodType << SecondName << SecondStatic;
Diagnosed = true;
break;
@@ -10263,11 +10544,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstVolatile = FirstMethod->isVolatile();
const bool SecondVolatile = SecondMethod->isVolatile();
if (FirstVolatile != SecondVolatile) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVolatile)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodVolatile)
<< FirstMethodType << FirstName << FirstVolatile;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVolatile)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodVolatile)
<< SecondMethodType << SecondName << SecondVolatile;
Diagnosed = true;
break;
@@ -10276,11 +10557,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstConst = FirstMethod->isConst();
const bool SecondConst = SecondMethod->isConst();
if (FirstConst != SecondConst) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodConst)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodConst)
<< FirstMethodType << FirstName << FirstConst;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodConst)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodConst)
<< SecondMethodType << SecondName << SecondConst;
Diagnosed = true;
break;
@@ -10289,11 +10570,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstInline = FirstMethod->isInlineSpecified();
const bool SecondInline = SecondMethod->isInlineSpecified();
if (FirstInline != SecondInline) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodInline)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodInline)
<< FirstMethodType << FirstName << FirstInline;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodInline)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodInline)
<< SecondMethodType << SecondName << SecondInline;
Diagnosed = true;
break;
@@ -10302,11 +10583,13 @@ void ASTReader::diagnoseOdrViolations() {
const unsigned FirstNumParameters = FirstMethod->param_size();
const unsigned SecondNumParameters = SecondMethod->param_size();
if (FirstNumParameters != SecondNumParameters) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodNumberParameters)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodNumberParameters)
<< FirstMethodType << FirstName << FirstNumParameters;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodNumberParameters)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodNumberParameters)
<< SecondMethodType << SecondName << SecondNumParameters;
Diagnosed = true;
break;
@@ -10325,27 +10608,31 @@ void ASTReader::diagnoseOdrViolations() {
ComputeQualTypeODRHash(SecondParamType)) {
if (const DecayedType *ParamDecayedType =
FirstParamType->getAs<DecayedType>()) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterType)
<< FirstMethodType << FirstName << (I + 1) << FirstParamType
<< true << ParamDecayedType->getOriginalType();
} else {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterType)
<< FirstMethodType << FirstName << (I + 1) << FirstParamType
<< false;
}
if (const DecayedType *ParamDecayedType =
SecondParamType->getAs<DecayedType>()) {
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterType)
<< SecondMethodType << SecondName << (I + 1)
<< SecondParamType << true
<< ParamDecayedType->getOriginalType();
} else {
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterType)
<< SecondMethodType << SecondName << (I + 1)
<< SecondParamType << false;
}
@@ -10356,11 +10643,12 @@ void ASTReader::diagnoseOdrViolations() {
DeclarationName FirstParamName = FirstParam->getDeclName();
DeclarationName SecondParamName = SecondParam->getDeclName();
if (FirstParamName != SecondParamName) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterName)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterName)
<< FirstMethodType << FirstName << (I + 1) << FirstParamName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterName)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodParameterName)
<< SecondMethodType << SecondName << (I + 1) << SecondParamName;
ParameterMismatch = true;
break;
@@ -10369,15 +10657,16 @@ void ASTReader::diagnoseOdrViolations() {
const Expr *FirstInit = FirstParam->getInit();
const Expr *SecondInit = SecondParam->getInit();
if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodParameterSingleDefaultArgument)
<< FirstMethodType << FirstName << (I + 1)
<< (FirstInit == nullptr)
<< (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterSingleDefaultArgument)
<< SecondMethodType << SecondName << (I + 1)
<< (SecondInit == nullptr)
<< (SecondInit ? SecondInit->getSourceRange() : SourceRange());
@@ -10387,14 +10676,15 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstInit && SecondInit &&
ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodParameterDifferentDefaultArgument)
<< FirstMethodType << FirstName << (I + 1)
<< FirstInit->getSourceRange();
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterDifferentDefaultArgument)
<< SecondMethodType << SecondName << (I + 1)
<< SecondInit->getSourceRange();
ParameterMismatch = true;
@@ -10415,11 +10705,13 @@ void ASTReader::diagnoseOdrViolations() {
if ((FirstTemplateArgs && !SecondTemplateArgs) ||
(!FirstTemplateArgs && SecondTemplateArgs)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodNoTemplateArguments)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodNoTemplateArguments)
<< FirstMethodType << FirstName << (FirstTemplateArgs != nullptr);
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodNoTemplateArguments)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodNoTemplateArguments)
<< SecondMethodType << SecondName
<< (SecondTemplateArgs != nullptr);
@@ -10449,14 +10741,15 @@ void ASTReader::diagnoseOdrViolations() {
ExpandTemplateArgumentList(SecondTemplateArgs);
if (FirstExpandedList.size() != SecondExpandedList.size()) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
<< FirstMethodType << FirstName
<< (unsigned)FirstExpandedList.size();
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
<< SecondMethodType << SecondName
<< (unsigned)SecondExpandedList.size();
@@ -10473,13 +10766,13 @@ void ASTReader::diagnoseOdrViolations() {
continue;
}
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDifferentTemplateArgument)
<< FirstMethodType << FirstName << FirstTA << i + 1;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentTemplateArgument)
<< SecondMethodType << SecondName << SecondTA << i + 1;
TemplateArgumentMismatch = true;
@@ -10508,22 +10801,22 @@ void ASTReader::diagnoseOdrViolations() {
ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
if (HasFirstBody != HasSecondBody) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodSingleBody)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodSingleBody)
<< FirstMethodType << FirstName << HasFirstBody;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodSingleBody)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodSingleBody)
<< SecondMethodType << SecondName << HasSecondBody;
Diagnosed = true;
break;
}
if (HasFirstBody && HasSecondBody) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentBody)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDifferentBody)
<< FirstMethodType << FirstName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDifferentBody)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDifferentBody)
<< SecondMethodType << SecondName;
Diagnosed = true;
break;
@@ -10533,105 +10826,16 @@ void ASTReader::diagnoseOdrViolations() {
}
case TypeAlias:
case TypeDef: {
- TypedefNameDecl *FirstTD = cast<TypedefNameDecl>(FirstDecl);
- TypedefNameDecl *SecondTD = cast<TypedefNameDecl>(SecondDecl);
- auto FirstName = FirstTD->getDeclName();
- auto SecondName = SecondTD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagError(FirstTD->getLocation(), FirstTD->getSourceRange(),
- TypedefName)
- << (FirstDiffType == TypeAlias) << FirstName;
- ODRDiagNote(SecondTD->getLocation(), SecondTD->getSourceRange(),
- TypedefName)
- << (FirstDiffType == TypeAlias) << SecondName;
- Diagnosed = true;
- break;
- }
-
- QualType FirstType = FirstTD->getUnderlyingType();
- QualType SecondType = SecondTD->getUnderlyingType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTD->getLocation(), FirstTD->getSourceRange(),
- TypedefType)
- << (FirstDiffType == TypeAlias) << FirstName << FirstType;
- ODRDiagNote(SecondTD->getLocation(), SecondTD->getSourceRange(),
- TypedefType)
- << (FirstDiffType == TypeAlias) << SecondName << SecondType;
- Diagnosed = true;
- break;
- }
+ Diagnosed = ODRDiagTypeDefOrAlias(
+ FirstRecord, FirstModule, SecondModule,
+ cast<TypedefNameDecl>(FirstDecl), cast<TypedefNameDecl>(SecondDecl),
+ FirstDiffType == TypeAlias);
break;
}
case Var: {
- VarDecl *FirstVD = cast<VarDecl>(FirstDecl);
- VarDecl *SecondVD = cast<VarDecl>(SecondDecl);
- auto FirstName = FirstVD->getDeclName();
- auto SecondName = SecondVD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarName)
- << FirstName;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarName)
- << SecondName;
- Diagnosed = true;
- break;
- }
-
- QualType FirstType = FirstVD->getType();
- QualType SecondType = SecondVD->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarType)
- << FirstName << FirstType;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarType)
- << SecondName << SecondType;
- Diagnosed = true;
- break;
- }
-
- const Expr *FirstInit = FirstVD->getInit();
- const Expr *SecondInit = SecondVD->getInit();
- if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarSingleInitializer)
- << FirstName << (FirstInit == nullptr)
- << (FirstInit ? FirstInit->getSourceRange(): SourceRange());
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarSingleInitializer)
- << SecondName << (SecondInit == nullptr)
- << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
- Diagnosed = true;
- break;
- }
-
- if (FirstInit && SecondInit &&
- ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarDifferentInitializer)
- << FirstName << FirstInit->getSourceRange();
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarDifferentInitializer)
- << SecondName << SecondInit->getSourceRange();
- Diagnosed = true;
- break;
- }
-
- const bool FirstIsConstexpr = FirstVD->isConstexpr();
- const bool SecondIsConstexpr = SecondVD->isConstexpr();
- if (FirstIsConstexpr != SecondIsConstexpr) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarConstexpr)
- << FirstName << FirstIsConstexpr;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarConstexpr)
- << SecondName << SecondIsConstexpr;
- Diagnosed = true;
- break;
- }
+ Diagnosed =
+ ODRDiagVar(FirstRecord, FirstModule, SecondModule,
+ cast<VarDecl>(FirstDecl), cast<VarDecl>(SecondDecl));
break;
}
case Friend: {
@@ -10645,11 +10849,12 @@ void ASTReader::diagnoseOdrViolations() {
TypeSourceInfo *SecondTSI = SecondFriend->getFriendType();
if (FirstND && SecondND) {
- ODRDiagError(FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendFunction)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendFunction)
<< FirstND;
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendFunction)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendFunction)
<< SecondND;
Diagnosed = true;
@@ -10661,21 +10866,22 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondFriendType = SecondTSI->getType();
assert(ComputeQualTypeODRHash(FirstFriendType) !=
ComputeQualTypeODRHash(SecondFriendType));
- ODRDiagError(FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendType)
<< FirstFriendType;
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendType)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendType)
<< SecondFriendType;
Diagnosed = true;
break;
}
- ODRDiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(),
- FriendTypeFunction)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendTypeFunction)
<< (FirstTSI == nullptr);
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendTypeFunction)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendTypeFunction)
<< (SecondTSI == nullptr);
Diagnosed = true;
@@ -10693,14 +10899,15 @@ void ASTReader::diagnoseOdrViolations() {
SecondTemplate->getTemplateParameters();
if (FirstTPL->size() != SecondTPL->size()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
<< FirstTemplate << FirstTPL->size();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << SecondTemplate << SecondTPL->size();
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
+ << SecondTemplate << SecondTPL->size();
Diagnosed = true;
break;
@@ -10730,13 +10937,14 @@ void ASTReader::diagnoseOdrViolations() {
}
};
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
<< FirstTemplate << (i + 1) << GetParamType(FirstParam);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
<< SecondTemplate << (i + 1) << GetParamType(SecondParam);
ParameterMismatch = true;
@@ -10744,14 +10952,14 @@ void ASTReader::diagnoseOdrViolations() {
}
if (FirstParam->getName() != SecondParam->getName()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterName)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(), FunctionTemplateParameterName)
<< FirstTemplate << (i + 1) << (bool)FirstParam->getIdentifier()
<< FirstParam;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterName)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterName)
<< SecondTemplate << (i + 1)
<< (bool)SecondParam->getIdentifier() << SecondParam;
ParameterMismatch = true;
@@ -10771,13 +10979,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10788,13 +10997,15 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondTTPD->getDefaultArgument();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstType;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondType;
ParameterMismatch = true;
break;
@@ -10803,13 +11014,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
@@ -10830,13 +11042,14 @@ void ASTReader::diagnoseOdrViolations() {
if (ComputeTemplateParameterListODRHash(FirstTPL) !=
ComputeTemplateParameterListODRHash(SecondTPL)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< FirstTemplate << (i + 1);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< SecondTemplate << (i + 1);
ParameterMismatch = true;
break;
@@ -10849,13 +11062,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10868,13 +11082,15 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->getDefaultArgument().getArgument();
if (ComputeTemplateArgumentODRHash(FirstTA) !=
ComputeTemplateArgumentODRHash(SecondTA)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstTA;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondTA;
ParameterMismatch = true;
break;
@@ -10883,13 +11099,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
@@ -10907,13 +11124,14 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondNTTPD->getType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< FirstTemplate << (i + 1);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< SecondTemplate << (i + 1);
ParameterMismatch = true;
break;
@@ -10926,13 +11144,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondNTTPD->hasDefaultArgument() &&
!SecondNTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10943,13 +11162,15 @@ void ASTReader::diagnoseOdrViolations() {
Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
if (ComputeODRHash(FirstDefaultArgument) !=
ComputeODRHash(SecondDefaultArgument)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10958,13 +11179,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstNTTPD->isParameterPack() !=
SecondNTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstNTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1)
<< SecondNTTPD->isParameterPack();
ParameterMismatch = true;
@@ -11191,7 +11413,7 @@ void ASTReader::diagnoseOdrViolations() {
for (auto *D : Enum->decls()) {
// Due to decl merging, the first EnumDecl is the parent of
// Decls in both records.
- if (!ODRHash::isWhitelistedDecl(D, FirstEnum))
+ if (!ODRHash::isDeclToBeProcessed(D, FirstEnum))
continue;
assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind");
Hashes.emplace_back(cast<EnumConstantDecl>(D),
@@ -11504,8 +11726,8 @@ public:
OMPClauseReader(ASTRecordReader &Record)
: Record(Record), Context(Record.getContext()) {}
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *C);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *C);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
OMPClause *readClause();
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -11519,134 +11741,152 @@ OMPClause *ASTRecordReader::readOMPClause() {
OMPClause *OMPClauseReader::readClause() {
OMPClause *C = nullptr;
- switch (Record.readInt()) {
- case OMPC_if:
+ switch (llvm::omp::Clause(Record.readInt())) {
+ case llvm::omp::OMPC_if:
C = new (Context) OMPIfClause();
break;
- case OMPC_final:
+ case llvm::omp::OMPC_final:
C = new (Context) OMPFinalClause();
break;
- case OMPC_num_threads:
+ case llvm::omp::OMPC_num_threads:
C = new (Context) OMPNumThreadsClause();
break;
- case OMPC_safelen:
+ case llvm::omp::OMPC_safelen:
C = new (Context) OMPSafelenClause();
break;
- case OMPC_simdlen:
+ case llvm::omp::OMPC_simdlen:
C = new (Context) OMPSimdlenClause();
break;
- case OMPC_allocator:
+ case llvm::omp::OMPC_allocator:
C = new (Context) OMPAllocatorClause();
break;
- case OMPC_collapse:
+ case llvm::omp::OMPC_collapse:
C = new (Context) OMPCollapseClause();
break;
- case OMPC_default:
+ case llvm::omp::OMPC_default:
C = new (Context) OMPDefaultClause();
break;
- case OMPC_proc_bind:
+ case llvm::omp::OMPC_proc_bind:
C = new (Context) OMPProcBindClause();
break;
- case OMPC_schedule:
+ case llvm::omp::OMPC_schedule:
C = new (Context) OMPScheduleClause();
break;
- case OMPC_ordered:
+ case llvm::omp::OMPC_ordered:
C = OMPOrderedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_nowait:
+ case llvm::omp::OMPC_nowait:
C = new (Context) OMPNowaitClause();
break;
- case OMPC_untied:
+ case llvm::omp::OMPC_untied:
C = new (Context) OMPUntiedClause();
break;
- case OMPC_mergeable:
+ case llvm::omp::OMPC_mergeable:
C = new (Context) OMPMergeableClause();
break;
- case OMPC_read:
+ case llvm::omp::OMPC_read:
C = new (Context) OMPReadClause();
break;
- case OMPC_write:
+ case llvm::omp::OMPC_write:
C = new (Context) OMPWriteClause();
break;
- case OMPC_update:
- C = new (Context) OMPUpdateClause();
+ case llvm::omp::OMPC_update:
+ C = OMPUpdateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_capture:
+ case llvm::omp::OMPC_capture:
C = new (Context) OMPCaptureClause();
break;
- case OMPC_seq_cst:
+ case llvm::omp::OMPC_seq_cst:
C = new (Context) OMPSeqCstClause();
break;
- case OMPC_threads:
+ case llvm::omp::OMPC_acq_rel:
+ C = new (Context) OMPAcqRelClause();
+ break;
+ case llvm::omp::OMPC_acquire:
+ C = new (Context) OMPAcquireClause();
+ break;
+ case llvm::omp::OMPC_release:
+ C = new (Context) OMPReleaseClause();
+ break;
+ case llvm::omp::OMPC_relaxed:
+ C = new (Context) OMPRelaxedClause();
+ break;
+ case llvm::omp::OMPC_threads:
C = new (Context) OMPThreadsClause();
break;
- case OMPC_simd:
+ case llvm::omp::OMPC_simd:
C = new (Context) OMPSIMDClause();
break;
- case OMPC_nogroup:
+ case llvm::omp::OMPC_nogroup:
C = new (Context) OMPNogroupClause();
break;
- case OMPC_unified_address:
+ case llvm::omp::OMPC_unified_address:
C = new (Context) OMPUnifiedAddressClause();
break;
- case OMPC_unified_shared_memory:
+ case llvm::omp::OMPC_unified_shared_memory:
C = new (Context) OMPUnifiedSharedMemoryClause();
break;
- case OMPC_reverse_offload:
+ case llvm::omp::OMPC_reverse_offload:
C = new (Context) OMPReverseOffloadClause();
break;
- case OMPC_dynamic_allocators:
+ case llvm::omp::OMPC_dynamic_allocators:
C = new (Context) OMPDynamicAllocatorsClause();
break;
- case OMPC_atomic_default_mem_order:
+ case llvm::omp::OMPC_atomic_default_mem_order:
C = new (Context) OMPAtomicDefaultMemOrderClause();
break;
- case OMPC_private:
+ case llvm::omp::OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_firstprivate:
+ case llvm::omp::OMPC_firstprivate:
C = OMPFirstprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_lastprivate:
+ case llvm::omp::OMPC_lastprivate:
C = OMPLastprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_shared:
+ case llvm::omp::OMPC_shared:
C = OMPSharedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_reduction:
- C = OMPReductionClause::CreateEmpty(Context, Record.readInt());
+ case llvm::omp::OMPC_reduction: {
+ unsigned N = Record.readInt();
+ auto Modifier = Record.readEnum<OpenMPReductionClauseModifier>();
+ C = OMPReductionClause::CreateEmpty(Context, N, Modifier);
break;
- case OMPC_task_reduction:
+ }
+ case llvm::omp::OMPC_task_reduction:
C = OMPTaskReductionClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_in_reduction:
+ case llvm::omp::OMPC_in_reduction:
C = OMPInReductionClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_linear:
+ case llvm::omp::OMPC_linear:
C = OMPLinearClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_aligned:
+ case llvm::omp::OMPC_aligned:
C = OMPAlignedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_copyin:
+ case llvm::omp::OMPC_copyin:
C = OMPCopyinClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_copyprivate:
+ case llvm::omp::OMPC_copyprivate:
C = OMPCopyprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_flush:
+ case llvm::omp::OMPC_flush:
C = OMPFlushClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_depend: {
+ case llvm::omp::OMPC_depobj:
+ C = OMPDepobjClause::CreateEmpty(Context);
+ break;
+ case llvm::omp::OMPC_depend: {
unsigned NumVars = Record.readInt();
unsigned NumLoops = Record.readInt();
C = OMPDependClause::CreateEmpty(Context, NumVars, NumLoops);
break;
}
- case OMPC_device:
+ case llvm::omp::OMPC_device:
C = new (Context) OMPDeviceClause();
break;
- case OMPC_map: {
+ case llvm::omp::OMPC_map: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11655,31 +11895,31 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPMapClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_num_teams:
+ case llvm::omp::OMPC_num_teams:
C = new (Context) OMPNumTeamsClause();
break;
- case OMPC_thread_limit:
+ case llvm::omp::OMPC_thread_limit:
C = new (Context) OMPThreadLimitClause();
break;
- case OMPC_priority:
+ case llvm::omp::OMPC_priority:
C = new (Context) OMPPriorityClause();
break;
- case OMPC_grainsize:
+ case llvm::omp::OMPC_grainsize:
C = new (Context) OMPGrainsizeClause();
break;
- case OMPC_num_tasks:
+ case llvm::omp::OMPC_num_tasks:
C = new (Context) OMPNumTasksClause();
break;
- case OMPC_hint:
+ case llvm::omp::OMPC_hint:
C = new (Context) OMPHintClause();
break;
- case OMPC_dist_schedule:
+ case llvm::omp::OMPC_dist_schedule:
C = new (Context) OMPDistScheduleClause();
break;
- case OMPC_defaultmap:
+ case llvm::omp::OMPC_defaultmap:
C = new (Context) OMPDefaultmapClause();
break;
- case OMPC_to: {
+ case llvm::omp::OMPC_to: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11688,7 +11928,7 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPToClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_from: {
+ case llvm::omp::OMPC_from: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11697,7 +11937,7 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPFromClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_use_device_ptr: {
+ case llvm::omp::OMPC_use_device_ptr: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11706,7 +11946,16 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPUseDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_is_device_ptr: {
+ case llvm::omp::OMPC_use_device_addr: {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPUseDeviceAddrClause::CreateEmpty(Context, Sizes);
+ break;
+ }
+ case llvm::omp::OMPC_is_device_ptr: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11715,12 +11964,39 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPIsDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_allocate:
+ case llvm::omp::OMPC_allocate:
C = OMPAllocateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_nontemporal:
+ case llvm::omp::OMPC_nontemporal:
C = OMPNontemporalClause::CreateEmpty(Context, Record.readInt());
break;
+ case llvm::omp::OMPC_inclusive:
+ C = OMPInclusiveClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_exclusive:
+ C = OMPExclusiveClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_order:
+ C = new (Context) OMPOrderClause();
+ break;
+ case llvm::omp::OMPC_destroy:
+ C = new (Context) OMPDestroyClause();
+ break;
+ case llvm::omp::OMPC_detach:
+ C = new (Context) OMPDetachClause();
+ break;
+ case llvm::omp::OMPC_uses_allocators:
+ C = OMPUsesAllocatorsClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_affinity:
+ C = OMPAffinityClause::CreateEmpty(Context, Record.readInt());
+ break;
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Enum: \
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ default:
+ break;
}
assert(C && "Unknown OMPClause type");
@@ -11783,8 +12059,7 @@ void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
}
void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
- C->setDefaultKind(
- static_cast<OpenMPDefaultClauseKind>(Record.readInt()));
+ C->setDefaultKind(static_cast<llvm::omp::DefaultKind>(Record.readInt()));
C->setLParenLoc(Record.readSourceLocation());
C->setDefaultKindKwLoc(Record.readSourceLocation());
}
@@ -11820,6 +12095,11 @@ void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPDetachClause(OMPDetachClause *C) {
+ C->setEventHandler(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
@@ -11830,18 +12110,34 @@ void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
-void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
+void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *C) {
+ if (C->isExtended()) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setArgumentLoc(Record.readSourceLocation());
+ C->setDependencyKind(Record.readEnum<OpenMPDependClauseKind>());
+ }
+}
void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseReader::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
+
+void OMPClauseReader::VisitOMPAcquireClause(OMPAcquireClause *) {}
+
+void OMPClauseReader::VisitOMPReleaseClause(OMPReleaseClause *) {}
+
+void OMPClauseReader::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+
void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
+void OMPClauseReader::VisitOMPDestroyClause(OMPDestroyClause *) {}
+
void OMPClauseReader::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
void OMPClauseReader::VisitOMPUnifiedSharedMemoryClause(
@@ -11937,6 +12233,7 @@ void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
VisitOMPClauseWithPostUpdate(C);
C->setLParenLoc(Record.readSourceLocation());
+ C->setModifierLoc(Record.readSourceLocation());
C->setColonLoc(Record.readSourceLocation());
NestedNameSpecifierLoc NNSL = Record.readNestedNameSpecifierLoc();
DeclarationNameInfo DNI = Record.readDeclarationNameInfo();
@@ -11965,6 +12262,20 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Record.readSubExpr());
C->setReductionOps(Vars);
+ if (C->getModifier() == OMPC_REDUCTION_inscan) {
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyOps(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyArrayTemps(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyArrayElems(Vars);
+ }
}
void OMPClauseReader::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
@@ -12139,8 +12450,14 @@ void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
C->setVarRefs(Vars);
}
+void OMPClauseReader::VisitOMPDepobjClause(OMPDepobjClause *C) {
+ C->setDepobj(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ C->setModifier(Record.readSubExpr());
C->setDependencyKind(
static_cast<OpenMPDependClauseKind>(Record.readInt()));
C->setDependencyLoc(Record.readSourceLocation());
@@ -12157,13 +12474,15 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
VisitOMPClauseWithPreInit(C);
+ C->setModifier(Record.readEnum<OpenMPDeviceClauseModifier>());
C->setDevice(Record.readSubExpr());
+ C->setModifierLoc(Record.readSourceLocation());
C->setLParenLoc(Record.readSourceLocation());
}
void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
C->setLParenLoc(Record.readSourceLocation());
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
C->setMapTypeModifier(
I, static_cast<OpenMPMapModifierKind>(Record.readInt()));
C->setMapTypeModifierLoc(I, Record.readSourceLocation());
@@ -12437,6 +12756,48 @@ void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
C->setComponents(Components, ListSizes);
}
+void OMPClauseReader::VisitOMPUseDeviceAddrClause(OMPUseDeviceAddrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
C->setLParenLoc(Record.readSourceLocation());
auto NumVars = C->varlist_size();
@@ -12494,3 +12855,75 @@ void OMPClauseReader::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
Vars.push_back(Record.readSubExpr());
C->setPrivateRefs(Vars);
}
+
+void OMPClauseReader::VisitOMPInclusiveClause(OMPInclusiveClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPExclusiveClause(OMPExclusiveClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPUsesAllocatorsClause(OMPUsesAllocatorsClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumOfAllocators = C->getNumberOfAllocators();
+ SmallVector<OMPUsesAllocatorsClause::Data, 4> Data;
+ Data.reserve(NumOfAllocators);
+ for (unsigned I = 0; I != NumOfAllocators; ++I) {
+ OMPUsesAllocatorsClause::Data &D = Data.emplace_back();
+ D.Allocator = Record.readSubExpr();
+ D.AllocatorTraits = Record.readSubExpr();
+ D.LParenLoc = Record.readSourceLocation();
+ D.RParenLoc = Record.readSourceLocation();
+ }
+ C->setAllocatorsData(Data);
+}
+
+void OMPClauseReader::VisitOMPAffinityClause(OMPAffinityClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setModifier(Record.readSubExpr());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumOfLocators = C->varlist_size();
+ SmallVector<Expr *, 4> Locators;
+ Locators.reserve(NumOfLocators);
+ for (unsigned I = 0; I != NumOfLocators; ++I)
+ Locators.push_back(Record.readSubExpr());
+ C->setVarRefs(Locators);
+}
+
+void OMPClauseReader::VisitOMPOrderClause(OMPOrderClause *C) {
+ C->setKind(Record.readEnum<OpenMPOrderClauseKind>());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setKindKwLoc(Record.readSourceLocation());
+}
+
+OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
+ OMPTraitInfo &TI = getContext().getNewOMPTraitInfo();
+ TI.Sets.resize(readUInt32());
+ for (auto &Set : TI.Sets) {
+ Set.Kind = readEnum<llvm::omp::TraitSet>();
+ Set.Selectors.resize(readUInt32());
+ for (auto &Selector : Set.Selectors) {
+ Selector.Kind = readEnum<llvm::omp::TraitSelector>();
+ Selector.ScoreOrCondition = nullptr;
+ if (readBool())
+ Selector.ScoreOrCondition = readExprRef();
+ Selector.Properties.resize(readUInt32());
+ for (auto &Property : Selector.Properties)
+ Property.Kind = readEnum<llvm::omp::TraitProperty>();
+ }
+ }
+ return &TI;
+}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index e3eea3c6f860..eef4ab16ec15 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -365,6 +365,7 @@ namespace clang {
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *FD);
void VisitMSPropertyDecl(MSPropertyDecl *FD);
+ void VisitMSGuidDecl(MSGuidDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
@@ -502,8 +503,12 @@ uint64_t ASTDeclReader::GetCurrentCursorOffset() {
}
void ASTDeclReader::ReadFunctionDefinition(FunctionDecl *FD) {
- if (Record.readInt())
+ if (Record.readInt()) {
Reader.DefinitionSource[FD] = Loc.F->Kind == ModuleKind::MK_MainFile;
+ if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
+ Reader.DeclIsFromPCHWithObjectFile(FD))
+ Reader.DefinitionSource[FD] = true;
+ }
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
CD->setNumCtorInitializers(Record.readInt());
if (CD->getNumCtorInitializers())
@@ -1279,10 +1284,9 @@ void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
QualType T = Record.readType();
TypeSourceInfo *TSI = readTypeSourceInfo();
D->setType(T, TSI);
- D->setPropertyAttributes(
- (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
+ D->setPropertyAttributes((ObjCPropertyAttribute::Kind)Record.readInt());
D->setPropertyAttributesAsWritten(
- (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
+ (ObjCPropertyAttribute::Kind)Record.readInt());
D->setPropertyImplementation(
(ObjCPropertyDecl::PropertyControl)Record.readInt());
DeclarationName GetterName = Record.readDeclarationName();
@@ -1358,6 +1362,19 @@ void ASTDeclReader::VisitMSPropertyDecl(MSPropertyDecl *PD) {
PD->SetterId = Record.readIdentifier();
}
+void ASTDeclReader::VisitMSGuidDecl(MSGuidDecl *D) {
+ VisitValueDecl(D);
+ D->PartVal.Part1 = Record.readInt();
+ D->PartVal.Part2 = Record.readInt();
+ D->PartVal.Part3 = Record.readInt();
+ for (auto &C : D->PartVal.Part4And5)
+ C = Record.readInt();
+
+ // Add this GUID to the AST context's lookup structure, and merge if needed.
+ if (MSGuidDecl *Existing = Reader.getContext().MSGuidDecls.GetOrInsertNode(D))
+ Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
+}
+
void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
VisitValueDecl(FD);
@@ -1418,8 +1435,12 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
Reader.getContext().setBlockVarCopyInit(VD, CopyExpr, Record.readInt());
}
- if (VD->getStorageDuration() == SD_Static && Record.readInt())
+ if (VD->getStorageDuration() == SD_Static && Record.readInt()) {
Reader.DefinitionSource[VD] = Loc.F->Kind == ModuleKind::MK_MainFile;
+ if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
+ Reader.DeclIsFromPCHWithObjectFile(VD))
+ Reader.DefinitionSource[VD] = true;
+ }
enum VarKind {
VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
@@ -1678,8 +1699,12 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.ODRHash = Record.readInt();
Data.HasODRHash = true;
- if (Record.readInt())
+ if (Record.readInt()) {
Reader.DefinitionSource[D] = Loc.F->Kind == ModuleKind::MK_MainFile;
+ if (Reader.getContext().getLangOpts().BuildingPCHWithObjectFile &&
+ Reader.DeclIsFromPCHWithObjectFile(D))
+ Reader.DefinitionSource[D] = true;
+ }
Data.NumBases = Record.readInt();
if (Data.NumBases)
@@ -1968,8 +1993,8 @@ void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
- D->ImportedAndComplete.setPointer(readModule());
- D->ImportedAndComplete.setInt(Record.readInt());
+ D->ImportedModule = readModule();
+ D->setImportComplete(Record.readInt());
auto *StoredLocs = D->getTrailingObjects<SourceLocation>();
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = readSourceLocation();
@@ -2744,6 +2769,8 @@ public:
return Reader.readVersionTuple();
}
+ OMPTraitInfo *readOMPTraitInfo() { return Reader.readOMPTraitInfo(); }
+
template <typename T> T *GetLocalDeclAs(uint32_t LocalID) {
return Reader.GetLocalDeclAs<T>(LocalID);
}
@@ -2828,7 +2855,8 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
isa<PragmaDetectMismatchDecl>(D))
return true;
if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D))
+ isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D) ||
+ isa<OMPRequiresDecl>(D))
return !D->getDeclContext()->isFunctionOrMethod();
if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
@@ -2853,7 +2881,7 @@ ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
const DeclOffset &DOffs =
M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
Loc = TranslateSourceLocation(*M, DOffs.getLocation());
- return RecordLocation(M, DOffs.BitOffset);
+ return RecordLocation(M, DOffs.getBitOffset(M->DeclsBlockStartOffset));
}
ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
@@ -2863,7 +2891,7 @@ ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
}
-uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
+uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
@@ -3963,6 +3991,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_MS_PROPERTY:
D = MSPropertyDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_MS_GUID:
+ D = MSGuidDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_CAPTURED:
D = CapturedDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
index c38b3ad18467..a40c5499a6d7 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ASTRecordReader.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/AttrIterator.h"
@@ -22,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -49,6 +49,8 @@
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Token.h"
#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/ASTRecordReader.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -101,11 +103,12 @@ namespace clang {
/// The number of record fields required for the Stmt class
/// itself.
- static const unsigned NumStmtFields = 1;
+ static const unsigned NumStmtFields = 0;
/// The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields = NumStmtFields + 7;
+ static const unsigned NumExprFields =
+ NumStmtFields + llvm::BitWidth<ExprDependence> + 3;
/// Read and initialize a ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
@@ -137,7 +140,6 @@ void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
}
void ASTStmtReader::VisitStmt(Stmt *S) {
- S->setIsOMPStructuredBlock(Record.readInt());
assert(Record.getIdx() == NumStmtFields && "Incorrect statement field count");
}
@@ -269,6 +271,8 @@ void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
S->setWhileLoc(readSourceLocation());
+ S->setLParenLoc(readSourceLocation());
+ S->setRParenLoc(readSourceLocation());
}
void ASTStmtReader::VisitDoStmt(DoStmt *S) {
@@ -511,10 +515,26 @@ void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
E->setType(Record.readType());
- E->setTypeDependent(Record.readInt());
- E->setValueDependent(Record.readInt());
- E->setInstantiationDependent(Record.readInt());
- E->ExprBits.ContainsUnexpandedParameterPack = Record.readInt();
+
+ // FIXME: write and read all DependentFlags with a single call.
+ bool TypeDependent = Record.readInt();
+ bool ValueDependent = Record.readInt();
+ bool InstantiationDependent = Record.readInt();
+ bool ContainsUnexpandedTemplateParameters = Record.readInt();
+ bool ContainsErrors = Record.readInt();
+ auto Deps = ExprDependence::None;
+ if (TypeDependent)
+ Deps |= ExprDependence::Type;
+ if (ValueDependent)
+ Deps |= ExprDependence::Value;
+ if (InstantiationDependent)
+ Deps |= ExprDependence::Instantiation;
+ if (ContainsUnexpandedTemplateParameters)
+ Deps |= ExprDependence::UnexpandedPack;
+ if (ContainsErrors)
+ Deps |= ExprDependence::Error;
+ E->setDependence(Deps);
+
E->setValueKind(static_cast<ExprValueKind>(Record.readInt()));
E->setObjectKind(static_cast<ExprObjectKind>(Record.readInt()));
assert(Record.getIdx() == NumExprFields &&
@@ -523,18 +543,35 @@ void ASTStmtReader::VisitExpr(Expr *E) {
void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
- E->ConstantExprBits.ResultKind = Record.readInt();
- switch (E->ConstantExprBits.ResultKind) {
- case ConstantExpr::RSK_Int64: {
+
+ auto StorageKind = Record.readInt();
+ assert(E->ConstantExprBits.ResultKind == StorageKind && "Wrong ResultKind!");
+
+ E->ConstantExprBits.APValueKind = Record.readInt();
+ E->ConstantExprBits.IsUnsigned = Record.readInt();
+ E->ConstantExprBits.BitWidth = Record.readInt();
+ E->ConstantExprBits.HasCleanup = false; // Not serialized, see below.
+ E->ConstantExprBits.IsImmediateInvocation = Record.readInt();
+
+ switch (StorageKind) {
+ case ConstantExpr::RSK_None:
+ break;
+
+ case ConstantExpr::RSK_Int64:
E->Int64Result() = Record.readInt();
- uint64_t tmp = Record.readInt();
- E->ConstantExprBits.IsUnsigned = tmp & 0x1;
- E->ConstantExprBits.BitWidth = tmp >> 1;
break;
- }
+
case ConstantExpr::RSK_APValue:
E->APValueResult() = Record.readAPValue();
+ if (E->APValueResult().needsCleanup()) {
+ E->ConstantExprBits.HasCleanup = true;
+ Record.getContext().addDestruction(&E->APValueResult());
+ }
+ break;
+ default:
+ llvm_unreachable("unexpected ResultKind!");
}
+
E->setSubExpr(Record.readSubExpr());
}
@@ -587,6 +624,7 @@ void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
void ASTStmtReader::VisitFixedPointLiteral(FixedPointLiteral *E) {
VisitExpr(E);
E->setLocation(readSourceLocation());
+ E->setScale(Record.readInt());
E->setValue(Record.getContext(), Record.readAPInt());
}
@@ -663,10 +701,14 @@ void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
+ bool hasFP_Features = Record.readInt();
+ assert(hasFP_Features == E->hasStoredFPFeatures());
E->setSubExpr(Record.readSubExpr());
E->setOpcode((UnaryOperator::Opcode)Record.readInt());
E->setOperatorLoc(readSourceLocation());
E->setCanOverflow(Record.readInt());
+ if (hasFP_Features)
+ E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
@@ -732,7 +774,7 @@ readConstraintSatisfaction(ASTRecordReader &Record) {
unsigned NumDetailRecords = Record.readInt();
for (unsigned i = 0; i != NumDetailRecords; ++i) {
Expr *ConstraintExpr = Record.readExpr();
- if (bool IsDiagnostic = Record.readInt()) {
+ if (/* IsDiagnostic */Record.readInt()) {
SourceLocation DiagLocation = Record.readSourceLocation();
std::string DiagMessage = Record.readString();
Satisfaction.Details.emplace_back(
@@ -823,7 +865,7 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
Req.emplace();
} else {
NoexceptLoc = Record.readSourceLocation();
- switch (auto returnTypeRequirementKind = Record.readInt()) {
+ switch (/* returnTypeRequirementKind */Record.readInt()) {
case 0:
// No return type requirement.
Req.emplace();
@@ -854,7 +896,7 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
std::move(*Req));
} break;
case concepts::Requirement::RK_Nested: {
- if (bool IsSubstitutionDiagnostic = Record.readInt()) {
+ if (/* IsSubstitutionDiagnostic */Record.readInt()) {
R = new (Record.getContext()) concepts::NestedRequirement(
readSubstitutionDiagnostic(Record));
break;
@@ -884,15 +926,68 @@ void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
E->setRBracketLoc(readSourceLocation());
}
+void ASTStmtReader::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ VisitExpr(E);
+ E->setBase(Record.readSubExpr());
+ E->setRowIdx(Record.readSubExpr());
+ E->setColumnIdx(Record.readSubExpr());
+ E->setRBracketLoc(readSourceLocation());
+}
+
void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
VisitExpr(E);
E->setBase(Record.readSubExpr());
E->setLowerBound(Record.readSubExpr());
E->setLength(Record.readSubExpr());
- E->setColonLoc(readSourceLocation());
+ E->setStride(Record.readSubExpr());
+ E->setColonLocFirst(readSourceLocation());
+ E->setColonLocSecond(readSourceLocation());
E->setRBracketLoc(readSourceLocation());
}
+void ASTStmtReader::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ VisitExpr(E);
+ unsigned NumDims = Record.readInt();
+ E->setBase(Record.readSubExpr());
+ SmallVector<Expr *, 4> Dims(NumDims);
+ for (unsigned I = 0; I < NumDims; ++I)
+ Dims[I] = Record.readSubExpr();
+ E->setDimensions(Dims);
+ SmallVector<SourceRange, 4> SRs(NumDims);
+ for (unsigned I = 0; I < NumDims; ++I)
+ SRs[I] = readSourceRange();
+ E->setBracketsRanges(SRs);
+ E->setLParenLoc(readSourceLocation());
+ E->setRParenLoc(readSourceLocation());
+}
+
+void ASTStmtReader::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
+ VisitExpr(E);
+ unsigned NumIters = Record.readInt();
+ E->setIteratorKwLoc(readSourceLocation());
+ E->setLParenLoc(readSourceLocation());
+ E->setRParenLoc(readSourceLocation());
+ for (unsigned I = 0; I < NumIters; ++I) {
+ E->setIteratorDeclaration(I, Record.readDeclRef());
+ E->setAssignmentLoc(I, readSourceLocation());
+ Expr *Begin = Record.readSubExpr();
+ Expr *End = Record.readSubExpr();
+ Expr *Step = Record.readSubExpr();
+ SourceLocation ColonLoc = readSourceLocation();
+ SourceLocation SecColonLoc;
+ if (Step)
+ SecColonLoc = readSourceLocation();
+ E->setIteratorRange(I, Begin, ColonLoc, End, SecColonLoc, Step);
+ // Deserialize helpers
+ OMPIteratorHelperData HD;
+ HD.CounterVD = cast_or_null<VarDecl>(Record.readDeclRef());
+ HD.Upper = Record.readSubExpr();
+ HD.Update = Record.readSubExpr();
+ HD.CounterUpdate = Record.readSubExpr();
+ E->setHelper(I, HD);
+ }
+}
+
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
unsigned NumArgs = Record.readInt();
@@ -989,12 +1084,16 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
}
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+ bool hasFP_Features;
+ BinaryOperator::Opcode opc;
VisitExpr(E);
+ E->setHasStoredFPFeatures(hasFP_Features = Record.readInt());
+ E->setOpcode(opc = (BinaryOperator::Opcode)Record.readInt());
E->setLHS(Record.readSubExpr());
E->setRHS(Record.readSubExpr());
- E->setOpcode((BinaryOperator::Opcode)Record.readInt());
E->setOperatorLoc(readSourceLocation());
- E->setFPFeatures(FPOptions(Record.readInt()));
+ if (hasFP_Features)
+ E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -1562,8 +1661,8 @@ void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
E->CXXOperatorCallExprBits.OperatorKind = Record.readInt();
- E->CXXOperatorCallExprBits.FPFeatures = Record.readInt();
E->Range = Record.readSourceRange();
+ E->setFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitCXXRewrittenBinaryOperator(
@@ -1609,19 +1708,23 @@ void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
unsigned NumCaptures = Record.readInt();
- assert(NumCaptures == E->NumCaptures);(void)NumCaptures;
+ (void)NumCaptures;
+ assert(NumCaptures == E->LambdaExprBits.NumCaptures);
E->IntroducerRange = readSourceRange();
- E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record.readInt());
+ E->LambdaExprBits.CaptureDefault = Record.readInt();
E->CaptureDefaultLoc = readSourceLocation();
- E->ExplicitParams = Record.readInt();
- E->ExplicitResultType = Record.readInt();
+ E->LambdaExprBits.ExplicitParams = Record.readInt();
+ E->LambdaExprBits.ExplicitResultType = Record.readInt();
E->ClosingBrace = readSourceLocation();
// Read capture initializers.
for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
- CEnd = E->capture_init_end();
+ CEnd = E->capture_init_end();
C != CEnd; ++C)
*C = Record.readSubExpr();
+
+ // The body will be lazily deserialized when needed from the call operator
+ // declaration.
}
void
@@ -1651,6 +1754,10 @@ void ASTStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
+void ASTStmtReader::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
@@ -1686,14 +1793,10 @@ void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
VisitExpr(E);
E->setSourceRange(readSourceRange());
- if (E->isTypeOperand()) { // typeid(int)
- E->setTypeOperandSourceInfo(
- readTypeSourceInfo());
- return;
- }
-
- // typeid(42+2)
- E->setExprOperand(Record.readSubExpr());
+ if (E->isTypeOperand())
+ E->Operand = readTypeSourceInfo();
+ else
+ E->Operand = Record.readSubExpr();
}
void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
@@ -1806,9 +1909,17 @@ void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
unsigned NumObjects = Record.readInt();
assert(NumObjects == E->getNumObjects());
- for (unsigned i = 0; i != NumObjects; ++i)
- E->getTrailingObjects<BlockDecl *>()[i] =
- readDeclAs<BlockDecl>();
+ for (unsigned i = 0; i != NumObjects; ++i) {
+ unsigned CleanupKind = Record.readInt();
+ ExprWithCleanups::CleanupObject Obj;
+ if (CleanupKind == COK_Block)
+ Obj = readDeclAs<BlockDecl>();
+ else if (CleanupKind == COK_CompoundLiteral)
+ Obj = cast<CompoundLiteralExpr>(Record.readSubExpr());
+ else
+ llvm_unreachable("unexpected cleanup object type");
+ E->getTrailingObjects<ExprWithCleanups::CleanupObject>()[i] = Obj;
+ }
E->ExprWithCleanupsBits.CleanupsHaveSideEffects = Record.readInt();
E->SubExpr = Record.readSubExpr();
@@ -2055,6 +2166,19 @@ void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
llvm_unreachable("Cannot read TypoExpr nodes");
}
+void ASTStmtReader::VisitRecoveryExpr(RecoveryExpr *E) {
+ VisitExpr(E);
+ unsigned NumArgs = Record.readInt();
+ E->BeginLoc = readSourceLocation();
+ E->EndLoc = readSourceLocation();
+ assert(
+ (NumArgs == std::distance(E->children().begin(), E->children().end())) &&
+ "Wrong NumArgs!");
+ (void)NumArgs;
+ for (Stmt *&Child : E->children())
+ Child = Record.readSubStmt();
+}
+
//===----------------------------------------------------------------------===//
// Microsoft Expressions and Statements
//===----------------------------------------------------------------------===//
@@ -2077,16 +2201,11 @@ void ASTStmtReader::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
E->setSourceRange(readSourceRange());
- std::string UuidStr = readString();
- E->setUuidStr(StringRef(UuidStr).copy(Record.getContext()));
- if (E->isTypeOperand()) { // __uuidof(ComType)
- E->setTypeOperandSourceInfo(
- readTypeSourceInfo());
- return;
- }
-
- // __uuidof(expr)
- E->setExprOperand(Record.readSubExpr());
+ E->Guid = readDeclAs<MSGuidDecl>();
+ if (E->isTypeOperand())
+ E->Operand = readTypeSourceInfo();
+ else
+ E->Operand = Record.readSubExpr();
}
void ASTStmtReader::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
@@ -2230,6 +2349,7 @@ void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2239,6 +2359,7 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2251,6 +2372,7 @@ void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2282,6 +2404,7 @@ void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2296,6 +2419,7 @@ void ASTStmtReader::VisitOMPParallelMasterDirective(
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitOMPParallelSectionsDirective(
@@ -2304,6 +2428,7 @@ void ASTStmtReader::VisitOMPParallelSectionsDirective(
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2345,6 +2470,20 @@ void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPScanDirective(OMPScanDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
@@ -2397,11 +2536,14 @@ void ASTStmtReader::VisitOMPTargetParallelDirective(
VisitStmt(D);
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2429,6 +2571,7 @@ void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
@@ -2438,6 +2581,7 @@ void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtReader::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
@@ -2448,6 +2592,7 @@ void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtReader::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPParallelMasterTaskLoopSimdDirective(
@@ -2468,6 +2613,7 @@ void ASTStmtReader::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
void ASTStmtReader::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2508,6 +2654,7 @@ void ASTStmtReader::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtReader::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2526,6 +2673,7 @@ void ASTStmtReader::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2732,10 +2880,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CONSTANT:
S = ConstantExpr::CreateEmpty(
- Context,
- static_cast<ConstantExpr::ResultStorageKind>(
- Record[ASTStmtReader::NumExprFields]),
- Empty);
+ Context, static_cast<ConstantExpr::ResultStorageKind>(
+ /*StorageKind=*/Record[ASTStmtReader::NumExprFields]));
break;
case EXPR_PREDEFINED:
@@ -2758,6 +2904,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = IntegerLiteral::Create(Context, Empty);
break;
+ case EXPR_FIXEDPOINT_LITERAL:
+ S = FixedPointLiteral::Create(Context, Empty);
+ break;
+
case EXPR_FLOATING_LITERAL:
S = FloatingLiteral::Create(Context, Empty);
break;
@@ -2789,7 +2939,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_UNARY_OPERATOR:
- S = new (Context) UnaryOperator(Empty);
+ S = UnaryOperator::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_OFFSETOF:
@@ -2806,15 +2957,34 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ArraySubscriptExpr(Empty);
break;
+ case EXPR_MATRIX_SUBSCRIPT:
+ S = new (Context) MatrixSubscriptExpr(Empty);
+ break;
+
case EXPR_OMP_ARRAY_SECTION:
S = new (Context) OMPArraySectionExpr(Empty);
break;
+ case EXPR_OMP_ARRAY_SHAPING:
+ S = OMPArrayShapingExpr::CreateEmpty(
+ Context, Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_OMP_ITERATOR:
+ S = OMPIteratorExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_CALL:
S = CallExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
+ case EXPR_RECOVERY:
+ S = RecoveryExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_MEMBER:
S = MemberExpr::CreateEmpty(Context, Record[ASTStmtReader::NumExprFields],
Record[ASTStmtReader::NumExprFields + 1],
@@ -2823,11 +2993,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_BINARY_OPERATOR:
- S = new (Context) BinaryOperator(Empty);
+ S = BinaryOperator::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_COMPOUND_ASSIGN_OPERATOR:
- S = new (Context) CompoundAssignOperator(Empty);
+ S = CompoundAssignOperator::CreateEmpty(
+ Context, Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CONDITIONAL_OPERATOR:
@@ -3173,6 +3345,16 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_DEPOBJ_DIRECTIVE:
+ S = OMPDepobjDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_SCAN_DIRECTIVE:
+ S = OMPScanDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_ORDERED_DIRECTIVE:
S = OMPOrderedDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -3450,11 +3632,20 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = CXXConstCastExpr::CreateEmpty(Context);
break;
+ case EXPR_CXX_ADDRSPACE_CAST:
+ S = CXXAddrspaceCastExpr::CreateEmpty(Context);
+ break;
+
case EXPR_CXX_FUNCTIONAL_CAST:
S = CXXFunctionalCastExpr::CreateEmpty(Context,
/*PathSize*/ Record[ASTStmtReader::NumExprFields]);
break;
+ case EXPR_BUILTIN_BIT_CAST:
+ assert(Record[ASTStmtReader::NumExprFields] == 0 && "Wrong PathSize!");
+ S = new (Context) BuiltinBitCastExpr(Empty);
+ break;
+
case EXPR_USER_DEFINED_LITERAL:
S = UserDefinedLiteral::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
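
(Editorial sketch, not part of the diff.) The UnaryOperator and BinaryOperator changes above follow a recurring pattern in this reader: a flag that determines the node's allocated size (here, whether FPOptionsOverride trailing storage exists) is serialized before everything else, so ReadStmtFromStream can peek it via Record[ASTStmtReader::NumExprFields] when calling CreateEmpty, and the visitor later gates the extra read on the same flag. A minimal standalone sketch of that pattern follows; the struct and helper names are illustrative stand-ins, not clang's UnaryOperator or ASTRecordReader.

#include <cstdint>
#include <memory>
#include <vector>

// Serialized form of one node: a flat vector of integers.
using Rec = std::vector<uint64_t>;

struct Node {
  bool HasFPFeatures = false; // would select trailing storage in clang
  uint32_t FPOverride = 0;    // opaque encoding of the FP options
  uint32_t Opcode = 0;
};

// Writer: the size-affecting flag goes first, the optional payload last.
Rec write(const Node &N) {
  Rec R;
  R.push_back(N.HasFPFeatures);
  R.push_back(N.Opcode);
  if (N.HasFPFeatures)
    R.push_back(N.FPOverride);
  return R;
}

// "CreateEmpty": allocation only needs the leading flag, peeked before
// visiting, like Record[ASTStmtReader::NumExprFields] above.
std::unique_ptr<Node> createEmpty(const Rec &R) {
  auto N = std::make_unique<Node>();
  N->HasFPFeatures = R[0] != 0;
  return N;
}

// Visitor: reads the remaining fields in the exact order they were written.
void read(const Rec &R, Node &N) {
  size_t Idx = 0;
  N.HasFPFeatures = R[Idx++] != 0;
  N.Opcode = static_cast<uint32_t>(R[Idx++]);
  if (N.HasFPFeatures)
    N.FPOverride = static_cast<uint32_t>(R[Idx++]);
}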
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 7626827b441a..2345a12caeb2 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -10,14 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/OpenMPClause.h"
-#include "clang/Serialization/ASTRecordWriter.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
#include "MultiOnDiskHashTable.h"
-#include "clang/AST/AbstractTypeWriter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTUnresolvedSet.h"
+#include "clang/AST/AbstractTypeWriter.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
@@ -31,6 +29,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -65,7 +64,9 @@
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Weak.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTRecordWriter.h"
#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/ModuleFile.h"
#include "clang/Serialization/ModuleFileExtension.h"
@@ -288,6 +289,25 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
+ Record.AddSourceLocation(TL.getAttrNameLoc());
+ SourceRange range = TL.getAttrOperandParensRange();
+ Record.AddSourceLocation(range.getBegin());
+ Record.AddSourceLocation(range.getEnd());
+ Record.AddStmt(TL.getAttrRowOperand());
+ Record.AddStmt(TL.getAttrColumnOperand());
+}
+
+void TypeLocWriter::VisitDependentSizedMatrixTypeLoc(
+ DependentSizedMatrixTypeLoc TL) {
+ Record.AddSourceLocation(TL.getAttrNameLoc());
+ SourceRange range = TL.getAttrOperandParensRange();
+ Record.AddSourceLocation(range.getBegin());
+ Record.AddSourceLocation(range.getEnd());
+ Record.AddStmt(TL.getAttrRowOperand());
+ Record.AddStmt(TL.getAttrColumnOperand());
+}
+
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Record.AddSourceLocation(TL.getLocalRangeBegin());
Record.AddSourceLocation(TL.getLParenLoc());
@@ -476,6 +496,14 @@ void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
}
+void TypeLocWriter::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+void TypeLocWriter::VisitDependentExtIntTypeLoc(
+ clang::DependentExtIntTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+
void ASTWriter::WriteTypeAbbrevs() {
using namespace llvm;
@@ -500,6 +528,7 @@ void ASTWriter::WriteTypeAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
Abv->Add(BitCodeAbbrevOp(0)); // NoCallerSavedRegs
Abv->Add(BitCodeAbbrevOp(0)); // NoCfCheck
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CmseNSCall
// FunctionProtoType
Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
@@ -570,6 +599,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_PREDEFINED);
RECORD(EXPR_DECL_REF);
RECORD(EXPR_INTEGER_LITERAL);
+ RECORD(EXPR_FIXEDPOINT_LITERAL);
RECORD(EXPR_FLOATING_LITERAL);
RECORD(EXPR_IMAGINARY_LITERAL);
RECORD(EXPR_STRING_LITERAL);
@@ -631,6 +661,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_DYNAMIC_CAST);
RECORD(EXPR_CXX_REINTERPRET_CAST);
RECORD(EXPR_CXX_CONST_CAST);
+ RECORD(EXPR_CXX_ADDRSPACE_CAST);
RECORD(EXPR_CXX_FUNCTIONAL_CAST);
RECORD(EXPR_USER_DEFINED_LITERAL);
RECORD(EXPR_CXX_STD_INITIALIZER_LIST);
@@ -756,6 +787,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DELETE_EXPRS_TO_ANALYZE);
RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
RECORD(PP_CONDITIONAL_STACK);
+ RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -930,6 +962,7 @@ void ASTWriter::WriteBlockInfoBlock() {
BLOCK(UNHASHED_CONTROL_BLOCK);
RECORD(SIGNATURE);
+ RECORD(AST_BLOCK_HASH);
RECORD(DIAGNOSTIC_OPTIONS);
RECORD(DIAG_PRAGMA_MAPPINGS);
@@ -995,22 +1028,23 @@ adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
return Filename + Pos;
}
-ASTFileSignature ASTWriter::createSignature(StringRef Bytes) {
- // Calculate the hash till start of UNHASHED_CONTROL_BLOCK.
+std::pair<ASTFileSignature, ASTFileSignature>
+ASTWriter::createSignature(StringRef AllBytes, StringRef ASTBlockBytes) {
llvm::SHA1 Hasher;
- Hasher.update(ArrayRef<uint8_t>(Bytes.bytes_begin(), Bytes.size()));
+ Hasher.update(ASTBlockBytes);
auto Hash = Hasher.result();
+ ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hash);
- // Convert to an array [5*i32].
- ASTFileSignature Signature;
- auto LShift = [&](unsigned char Val, unsigned Shift) {
- return (uint32_t)Val << Shift;
- };
- for (int I = 0; I != 5; ++I)
- Signature[I] = LShift(Hash[I * 4 + 0], 24) | LShift(Hash[I * 4 + 1], 16) |
- LShift(Hash[I * 4 + 2], 8) | LShift(Hash[I * 4 + 3], 0);
+ // Add the remaining bytes (i.e. bytes before the unhashed control block that
+ // are not part of the AST block).
+ Hasher.update(
+ AllBytes.take_front(ASTBlockBytes.bytes_end() - AllBytes.bytes_begin()));
+ Hasher.update(
+ AllBytes.take_back(AllBytes.bytes_end() - ASTBlockBytes.bytes_end()));
+ Hash = Hasher.result();
+ ASTFileSignature Signature = ASTFileSignature::create(Hash);
- return Signature;
+ return std::make_pair(ASTBlockHash, Signature);
}
ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
@@ -1027,7 +1061,16 @@ ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
ASTFileSignature Signature;
if (WritingModule &&
PP.getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent) {
- Signature = createSignature(StringRef(Buffer.begin(), StartOfUnhashedControl));
+ ASTFileSignature ASTBlockHash;
+ auto ASTBlockStartByte = ASTBlockRange.first >> 3;
+ auto ASTBlockByteLength = (ASTBlockRange.second >> 3) - ASTBlockStartByte;
+ std::tie(ASTBlockHash, Signature) = createSignature(
+ StringRef(Buffer.begin(), StartOfUnhashedControl),
+ StringRef(Buffer.begin() + ASTBlockStartByte, ASTBlockByteLength));
+
+ Record.append(ASTBlockHash.begin(), ASTBlockHash.end());
+ Stream.EmitRecord(AST_BLOCK_HASH, Record);
+ Record.clear();
Record.append(Signature.begin(), Signature.end());
Stream.EmitRecord(SIGNATURE, Record);
Record.clear();
@@ -1132,7 +1175,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
BaseDirectory.assign(BaseDir.begin(), BaseDir.end());
} else if (!isysroot.empty()) {
// Write out paths relative to the sysroot if possible.
- BaseDirectory = isysroot;
+ BaseDirectory = std::string(isysroot);
}
// Module map file
@@ -1718,7 +1761,8 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
llvm::SmallVector<Module *, 16> Worklist(1, WritingModule);
while (!Worklist.empty()) {
Module *M = Worklist.pop_back_val();
- if (!M->isAvailable())
+ // We don't care about headers in unimportable submodules.
+ if (M->isUnimportable())
continue;
// Map to disk files where possible, to pick up any missing stat
@@ -1800,7 +1844,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
Filename, File->getSize(), getTimestampForOutput(File)
};
HeaderFileInfoTrait::data_type Data = {
- *HFI, HS.getModuleMap().findAllModulesForHeader(File), {}
+ *HFI, HS.getModuleMap().findResolvedModulesForHeader(File), {}
};
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
@@ -1878,6 +1922,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Enter the source manager block.
Stream.EnterSubblock(SOURCE_MANAGER_BLOCK_ID, 4);
+ const uint64_t SourceManagerBlockOffset = Stream.GetCurrentBitNo();
// Abbreviations for the various kinds of source-location entries.
unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
@@ -1890,6 +1935,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Write out the source location entry table. We skip the first
// entry, which is always the same dummy entry.
std::vector<uint32_t> SLocEntryOffsets;
+ uint64_t SLocEntryOffsetsBase = Stream.GetCurrentBitNo();
RecordData PreloadSLocs;
SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
@@ -1900,7 +1946,9 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
assert(&SourceMgr.getSLocEntry(FID) == SLoc);
// Record the offset of this source-location entry.
- SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
+ uint64_t Offset = Stream.GetCurrentBitNo() - SLocEntryOffsetsBase;
+ assert((Offset >> 32) == 0 && "SLocEntry offset too large");
+ SLocEntryOffsets.push_back(Offset);
// Figure out which record code to use.
unsigned Code;
@@ -2008,12 +2056,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Abbrev->Add(BitCodeAbbrevOp(SOURCE_LOCATION_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // total size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // base offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
RecordData::value_type Record[] = {
SOURCE_LOCATION_OFFSETS, SLocEntryOffsets.size(),
- SourceMgr.getNextLocalOffset() - 1 /* skip dummy */};
+ SourceMgr.getNextLocalOffset() - 1 /* skip dummy */,
+ SLocEntryOffsetsBase - SourceManagerBlockOffset};
Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
bytes(SLocEntryOffsets));
}
@@ -2090,9 +2140,11 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
/// Writes the block containing the serialized form of the
/// preprocessor.
void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
+ uint64_t MacroOffsetsBase = Stream.GetCurrentBitNo();
+
PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
if (PPRec)
- WritePreprocessorDetail(*PPRec);
+ WritePreprocessorDetail(*PPRec, MacroOffsetsBase);
RecordData Record;
RecordData ModuleMacroRecord;
@@ -2153,7 +2205,8 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// identifier they belong to.
for (const IdentifierInfo *Name : MacroIdentifiers) {
MacroDirective *MD = PP.getLocalMacroDirectiveHistory(Name);
- auto StartOffset = Stream.GetCurrentBitNo();
+ uint64_t StartOffset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((StartOffset >> 32) == 0 && "Macro identifiers offset too large");
// Emit the macro directives in reverse source order.
for (; MD; MD = MD->getPrevious()) {
@@ -2226,14 +2279,12 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// Record the local offset of this macro.
unsigned Index = ID - FirstMacroID;
- if (Index == MacroOffsets.size())
- MacroOffsets.push_back(Stream.GetCurrentBitNo());
- else {
- if (Index > MacroOffsets.size())
- MacroOffsets.resize(Index + 1);
+ if (Index >= MacroOffsets.size())
+ MacroOffsets.resize(Index + 1);
- MacroOffsets[Index] = Stream.GetCurrentBitNo();
- }
+ uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((Offset >> 32) == 0 && "Macro offset too large");
+ MacroOffsets[Index] = Offset;
AddIdentifierRef(Name, Record);
AddSourceLocation(MI->getDefinitionLoc(), Record);
@@ -2284,17 +2335,20 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // base offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
RecordData::value_type Record[] = {MACRO_OFFSET, MacroOffsets.size(),
- FirstMacroID - NUM_PREDEF_MACRO_IDS};
+ FirstMacroID - NUM_PREDEF_MACRO_IDS,
+ MacroOffsetsBase - ASTBlockStartOffset};
Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
}
}
-void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
+void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
+ uint64_t MacroOffsetsBase) {
if (PPRec.local_begin() == PPRec.local_end())
return;
@@ -2331,8 +2385,10 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
(void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) {
Record.clear();
+ uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((Offset >> 32) == 0 && "Preprocessed entity offset too large");
PreprocessedEntityOffsets.push_back(
- PPEntityOffset((*E)->getSourceRange(), Stream.GetCurrentBitNo()));
+ PPEntityOffset((*E)->getSourceRange(), Offset));
if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
// Record this macro definition's ID.
@@ -2800,15 +2856,15 @@ void ASTWriter::WriteType(QualType T) {
assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
// Emit the type's representation.
- uint64_t Offset = ASTTypeWriter(*this).write(T);
+ uint64_t Offset = ASTTypeWriter(*this).write(T) - DeclTypesBlockStartOffset;
// Record the offset for this type.
unsigned Index = Idx.getIndex() - FirstTypeID;
if (TypeOffsets.size() == Index)
- TypeOffsets.push_back(Offset);
+ TypeOffsets.emplace_back(Offset);
else if (TypeOffsets.size() < Index) {
TypeOffsets.resize(Index + 1);
- TypeOffsets[Index] = Offset;
+ TypeOffsets[Index].setBitOffset(Offset);
} else {
llvm_unreachable("Types emitted in wrong order");
}
@@ -2875,8 +2931,10 @@ void ASTWriter::WriteTypeDeclOffsets() {
void ASTWriter::WriteFileDeclIDsMap() {
using namespace llvm;
- SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
- FileDeclIDs.begin(), FileDeclIDs.end());
+ SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs;
+ SortedFileDeclIDs.reserve(FileDeclIDs.size());
+ for (const auto &P : FileDeclIDs)
+ SortedFileDeclIDs.push_back(std::make_pair(P.first, P.second.get()));
llvm::sort(SortedFileDeclIDs, llvm::less_first());
// Join the vectors of DeclIDs from all files.
@@ -3902,8 +3960,8 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
}
/// Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
-void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
- RecordData::value_type Record[] = {Opts.getInt()};
+void ASTWriter::WriteFPPragmaOptions(const FPOptionsOverride &Opts) {
+ RecordData::value_type Record[] = {Opts.getAsOpaqueInt()};
Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
}
@@ -4114,6 +4172,26 @@ void ASTWriter::WritePackPragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(PACK_PRAGMA_OPTIONS, Record);
}
+/// Write the state of 'pragma float_control' at the end of the module.
+void ASTWriter::WriteFloatControlPragmaOptions(Sema &SemaRef) {
+ // Don't serialize pragma float_control state for modules,
+ // since it should only take effect on a per-submodule basis.
+ if (WritingModule)
+ return;
+
+ RecordData Record;
+ Record.push_back(SemaRef.FpPragmaStack.CurrentValue);
+ AddSourceLocation(SemaRef.FpPragmaStack.CurrentPragmaLocation, Record);
+ Record.push_back(SemaRef.FpPragmaStack.Stack.size());
+ for (const auto &StackEntry : SemaRef.FpPragmaStack.Stack) {
+ Record.push_back(StackEntry.Value);
+ AddSourceLocation(StackEntry.PragmaLocation, Record);
+ AddSourceLocation(StackEntry.PragmaPushLocation, Record);
+ AddString(StackEntry.StackSlotLabel, Record);
+ }
+ Stream.EmitRecord(FLOAT_CONTROL_PRAGMA_OPTIONS, Record);
+}
+
void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
ModuleFileExtensionWriter &Writer) {
// Enter the extension block.
@@ -4274,9 +4352,7 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream,
}
}
-ASTWriter::~ASTWriter() {
- llvm::DeleteContainerSeconds(FileDeclIDs);
-}
+ASTWriter::~ASTWriter() = default;
const LangOptions &ASTWriter::getLangOpts() const {
assert(WritingAST && "can't determine lang opts when not writing AST");
@@ -4369,6 +4445,8 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
RegisterPredefDecl(Context.VaListTagDecl, PREDEF_DECL_VA_LIST_TAG);
RegisterPredefDecl(Context.BuiltinMSVaListDecl,
PREDEF_DECL_BUILTIN_MS_VA_LIST_ID);
+ RegisterPredefDecl(Context.MSGuidTagDecl,
+ PREDEF_DECL_BUILTIN_MS_GUID_ID);
RegisterPredefDecl(Context.ExternCContext, PREDEF_DECL_EXTERN_C_CONTEXT_ID);
RegisterPredefDecl(Context.MakeIntegerSeqDecl,
PREDEF_DECL_MAKE_INTEGER_SEQ_ID);
@@ -4488,7 +4566,10 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteControlBlock(PP, Context, isysroot, OutputFile);
// Write the remaining AST contents.
+ Stream.FlushToWord();
+ ASTBlockRange.first = Stream.GetCurrentBitNo();
Stream.EnterSubblock(AST_BLOCK_ID, 5);
+ ASTBlockStartOffset = Stream.GetCurrentBitNo();
// This is so that older clang versions, before the introduction
// of the control block, can read and reject the newer PCH format.
@@ -4619,9 +4700,9 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// c++-base-specifiers-id:i32
// type-id:i32)
//
- // module-kind is the ModuleKind enum value. If it is MK_PrebuiltModule or
- // MK_ExplicitModule, then the module-name is the module name. Otherwise,
- // it is the module file name.
+ // module-kind is the ModuleKind enum value. If it is MK_PrebuiltModule,
+ // MK_ExplicitModule or MK_ImplicitModule, then the module-name is the
+ // module name. Otherwise, it is the module file name.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
@@ -4634,10 +4715,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
endian::Writer LE(Out, little);
LE.write<uint8_t>(static_cast<uint8_t>(M.Kind));
- StringRef Name =
- M.Kind == MK_PrebuiltModule || M.Kind == MK_ExplicitModule
- ? M.ModuleName
- : M.FileName;
+ StringRef Name = M.isModule() ? M.ModuleName : M.FileName;
LE.write<uint16_t>(Name.size());
Out.write(Name.data(), Name.size());
@@ -4671,11 +4749,17 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Buffer.data(), Buffer.size());
}
+ // Build a record containing all of the DeclsToCheckForDeferredDiags.
+ RecordData DeclsToCheckForDeferredDiags;
+ for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
+ AddDeclRef(D, DeclsToCheckForDeferredDiags);
+
RecordData DeclUpdatesOffsetsRecord;
// Keep writing types, declarations, and declaration update records
// until we've emitted all of them.
Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
+ DeclTypesBlockStartOffset = Stream.GetCurrentBitNo();
WriteTypeAbbrevs();
WriteDeclAbbrevs();
do {
@@ -4706,7 +4790,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteReferencedSelectorsPool(SemaRef);
WriteLateParsedTemplates(SemaRef);
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
- WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteFPPragmaOptions(SemaRef.CurFPFeatureOverrides());
WriteOpenCLExtensions(SemaRef);
WriteOpenCLExtensionTypes(SemaRef);
WriteCUDAPragmas(SemaRef);
@@ -4762,6 +4846,11 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
if (!SemaDeclRefs.empty())
Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+ // Write the record containing decls to be checked for deferred diags.
+ if (!DeclsToCheckForDeferredDiags.empty())
+ Stream.EmitRecord(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS,
+ DeclsToCheckForDeferredDiags);
+
// Write the record containing CUDA-specific declaration references.
if (!CUDASpecialDeclRefs.empty())
Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
@@ -4832,12 +4921,15 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteMSPointersToMembersPragmaOptions(SemaRef);
}
WritePackPragmaOptions(SemaRef);
+ WriteFloatControlPragmaOptions(SemaRef);
// Some simple statistics
RecordData::value_type Record[] = {
NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts};
Stream.EmitRecord(STATISTICS, Record);
Stream.ExitBlock();
+ Stream.FlushToWord();
+ ASTBlockRange.second = Stream.GetCurrentBitNo();
// Write the module file extension blocks.
for (const auto &ExtWriter : ModuleFileExtensionWriters)
@@ -5129,7 +5221,7 @@ MacroID ASTWriter::getMacroID(MacroInfo *MI) {
return MacroIDs[MI];
}
-uint64_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) {
+uint32_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) {
return IdentMacroDirectivesOffsetMap.lookup(Name);
}
@@ -5331,9 +5423,9 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
return;
assert(SM.getSLocEntry(FID).isFile());
- DeclIDInFileInfo *&Info = FileDeclIDs[FID];
+ std::unique_ptr<DeclIDInFileInfo> &Info = FileDeclIDs[FID];
if (!Info)
- Info = new DeclIDInFileInfo();
+ Info = std::make_unique<DeclIDInFileInfo>();
std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
LocDeclIDsTy &Decls = Info->DeclIDs;
@@ -5596,8 +5688,8 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
// getODRHash will compute the ODRHash if it has not been previously computed.
Record->push_back(D->getODRHash());
- bool ModulesDebugInfo = Writer->Context->getLangOpts().ModulesDebugInfo &&
- Writer->WritingModule && !D->isDependentType();
+ bool ModulesDebugInfo =
+ Writer->Context->getLangOpts().ModulesDebugInfo && !D->isDependentType();
Record->push_back(ModulesDebugInfo);
if (ModulesDebugInfo)
Writer->ModularCodegenDecls.push_back(Writer->GetDeclRef(D));
@@ -5965,7 +6057,7 @@ void ASTWriter::DeclarationMarkedOpenMPDeclareTarget(const Decl *D,
void ASTWriter::RedefinedHiddenDefinition(const NamedDecl *D, Module *M) {
if (Chain && Chain->isProcessingUpdateRecords()) return;
assert(!WritingAST && "Already writing the AST!");
- assert(D->isHidden() && "expected a hidden declaration");
+ assert(!D->isUnconditionallyVisible() && "expected a hidden declaration");
DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_EXPORTED, M));
}
@@ -6025,8 +6117,8 @@ class OMPClauseWriter : public OMPClauseVisitor<OMPClauseWriter> {
public:
OMPClauseWriter(ASTRecordWriter &Record) : Record(Record) {}
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
void writeClause(OMPClause *C);
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -6039,7 +6131,7 @@ void ASTRecordWriter::writeOMPClause(OMPClause *C) {
}
void OMPClauseWriter::writeClause(OMPClause *C) {
- Record.push_back(C->getClauseKind());
+ Record.push_back(unsigned(C->getClauseKind()));
Visit(C);
Record.AddSourceLocation(C->getBeginLoc());
Record.AddSourceLocation(C->getEndLoc());
@@ -6096,8 +6188,13 @@ void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPDetachClause(OMPDetachClause *C) {
+ Record.AddStmt(C->getEventHandler());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
- Record.push_back(C->getDefaultKind());
+ Record.push_back(unsigned(C->getDefaultKind()));
Record.AddSourceLocation(C->getLParenLoc());
Record.AddSourceLocation(C->getDefaultKindKwLoc());
}
@@ -6141,18 +6238,35 @@ void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
-void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
+void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *C) {
+ Record.push_back(C->isExtended() ? 1 : 0);
+ if (C->isExtended()) {
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getArgumentLoc());
+ Record.writeEnum(C->getDependencyKind());
+ }
+}
void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseWriter::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
+
+void OMPClauseWriter::VisitOMPAcquireClause(OMPAcquireClause *) {}
+
+void OMPClauseWriter::VisitOMPReleaseClause(OMPReleaseClause *) {}
+
+void OMPClauseWriter::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+
void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
+void OMPClauseWriter::VisitOMPDestroyClause(OMPDestroyClause *) {}
+
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6207,8 +6321,10 @@ void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
Record.push_back(C->varlist_size());
+ Record.writeEnum(C->getModifier());
VisitOMPClauseWithPostUpdate(C);
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getColonLoc());
Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
Record.AddDeclarationNameInfo(C->getNameInfo());
@@ -6222,6 +6338,14 @@ void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
Record.AddStmt(E);
for (auto *E : C->reduction_ops())
Record.AddStmt(E);
+ if (C->getModifier() == clang::OMPC_REDUCTION_inscan) {
+ for (auto *E : C->copy_ops())
+ Record.AddStmt(E);
+ for (auto *E : C->copy_array_temps())
+ Record.AddStmt(E);
+ for (auto *E : C->copy_array_elems())
+ Record.AddStmt(E);
+ }
}
void OMPClauseWriter::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
@@ -6334,10 +6458,16 @@ void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
Record.AddStmt(VE);
}
+void OMPClauseWriter::VisitOMPDepobjClause(OMPDepobjClause *C) {
+ Record.AddStmt(C->getDepobj());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Record.push_back(C->varlist_size());
Record.push_back(C->getNumLoops());
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddStmt(C->getModifier());
Record.push_back(C->getDependencyKind());
Record.AddSourceLocation(C->getDependencyLoc());
Record.AddSourceLocation(C->getColonLoc());
@@ -6349,7 +6479,9 @@ void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
VisitOMPClauseWithPreInit(C);
+ Record.writeEnum(C->getModifier());
Record.AddStmt(C->getDevice());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getLParenLoc());
}
@@ -6359,7 +6491,7 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
Record.push_back(C->getMapTypeModifier(I));
Record.AddSourceLocation(C->getMapTypeModifierLoc(I));
}
@@ -6517,6 +6649,26 @@ void OMPClauseWriter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
}
}
+void OMPClauseWriter::VisitOMPUseDeviceAddrClause(OMPUseDeviceAddrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
Record.push_back(C->varlist_size());
Record.push_back(C->getUniqueDeclarationsNum());
@@ -6563,3 +6715,61 @@ void OMPClauseWriter::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
for (auto *E : C->private_refs())
Record.AddStmt(E);
}
+
+void OMPClauseWriter::VisitOMPInclusiveClause(OMPInclusiveClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPExclusiveClause(OMPExclusiveClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPOrderClause(OMPOrderClause *C) {
+ Record.writeEnum(C->getKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPUsesAllocatorsClause(OMPUsesAllocatorsClause *C) {
+ Record.push_back(C->getNumberOfAllocators());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data Data = C->getAllocatorData(I);
+ Record.AddStmt(Data.Allocator);
+ Record.AddStmt(Data.AllocatorTraits);
+ Record.AddSourceLocation(Data.LParenLoc);
+ Record.AddSourceLocation(Data.RParenLoc);
+ }
+}
+
+void OMPClauseWriter::VisitOMPAffinityClause(OMPAffinityClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddStmt(C->getModifier());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (Expr *E : C->varlists())
+ Record.AddStmt(E);
+}
+
+void ASTRecordWriter::writeOMPTraitInfo(const OMPTraitInfo *TI) {
+ writeUInt32(TI->Sets.size());
+ for (const auto &Set : TI->Sets) {
+ writeEnum(Set.Kind);
+ writeUInt32(Set.Selectors.size());
+ for (const auto &Selector : Set.Selectors) {
+ writeEnum(Selector.Kind);
+ writeBool(Selector.ScoreOrCondition);
+ if (Selector.ScoreOrCondition)
+ writeExprRef(Selector.ScoreOrCondition);
+ writeUInt32(Selector.Properties.size());
+ for (const auto &Property : Selector.Properties)
+ writeEnum(Property.Kind);
+ }
+ }
+}
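
(Editorial sketch, not part of the diff.) Several ASTWriter changes above (SLocEntryOffsetsBase, MacroOffsetsBase, DeclTypesBlockStartOffset, and the new base-offset fields added to the abbreviations) apply one idea: record each entry's bit offset relative to the start of its enclosing block so it fits in 32 bits, emit the base once in the table record, and let the reader add the base back. A small standalone sketch of that scheme follows, with illustrative names rather than clang's actual offset tables.

#include <cassert>
#include <cstdint>
#include <vector>

// One offset table: a 64-bit base emitted once, 32-bit entries relative to it.
struct OffsetTable {
  uint64_t Base = 0;             // bit offset of the block start
  std::vector<uint32_t> Offsets; // entries relative to Base
};

// Writer side: mirrors the "Offset = GetCurrentBitNo() - Base" pattern and
// the accompanying "offset too large" assertions in the hunks above.
void recordEntry(OffsetTable &T, uint64_t CurrentBitNo) {
  uint64_t Rel = CurrentBitNo - T.Base;
  assert((Rel >> 32) == 0 && "offset too large");
  T.Offsets.push_back(static_cast<uint32_t>(Rel));
}

// Reader side: the absolute position is simply base plus relative offset.
uint64_t absoluteBitNo(const OffsetTable &T, size_t I) {
  return T.Base + T.Offsets[I];
}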
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index 92d566fc7854..eecdf89c791a 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -95,6 +95,7 @@ namespace clang {
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *D);
void VisitMSPropertyDecl(MSPropertyDecl *D);
+ void VisitMSGuidDecl(MSGuidDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
void VisitImplicitParamDecl(ImplicitParamDecl *D);
@@ -953,6 +954,17 @@ void ASTDeclWriter::VisitMSPropertyDecl(MSPropertyDecl *D) {
Code = serialization::DECL_MS_PROPERTY;
}
+void ASTDeclWriter::VisitMSGuidDecl(MSGuidDecl *D) {
+ VisitValueDecl(D);
+ MSGuidDecl::Parts Parts = D->getParts();
+ Record.push_back(Parts.Part1);
+ Record.push_back(Parts.Part2);
+ Record.push_back(Parts.Part3);
+ for (auto C : Parts.Part4And5)
+ Record.push_back(C);
+ Code = serialization::DECL_MS_GUID;
+}
+
void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
VisitValueDecl(D);
Record.push_back(D->getChainingSize());
@@ -1088,8 +1100,6 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
Record.AddStmt(D->getUninstantiatedDefaultArg());
Code = serialization::DECL_PARM_VAR;
- assert(!D->isARCPseudoStrong()); // can be true of ImplicitParamDecl
-
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
// we dynamically check for the properties that we optimize for, but don't
// know are true of all PARM_VAR_DECLs.
@@ -2109,7 +2119,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // SClass
Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
- Abv->Add(BitCodeAbbrevOp(0)); // ARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // Linkage
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
@@ -2274,13 +2284,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//DeclRefExpr
@@ -2298,13 +2308,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Integer Literal
@@ -2317,13 +2327,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Character Literal
@@ -2336,13 +2346,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
// Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
// CastExpr
@@ -2422,12 +2432,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
SourceLocation Loc = D->getLocation();
unsigned Index = ID - FirstDeclID;
if (DeclOffsets.size() == Index)
- DeclOffsets.push_back(DeclOffset(Loc, Offset));
+ DeclOffsets.emplace_back(Loc, Offset, DeclTypesBlockStartOffset);
else if (DeclOffsets.size() < Index) {
// FIXME: Can/should this happen?
DeclOffsets.resize(Index+1);
DeclOffsets[Index].setLocation(Loc);
- DeclOffsets[Index].BitOffset = Offset;
+ DeclOffsets[Index].setBitOffset(Offset, DeclTypesBlockStartOffset);
} else {
llvm_unreachable("declarations should be emitted in ID order");
}
@@ -2448,9 +2458,10 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
assert(FD->doesThisDeclarationHaveABody());
bool ModulesCodegen = false;
- if (Writer->WritingModule && !FD->isDependentContext()) {
+ if (!FD->isDependentContext()) {
Optional<GVALinkage> Linkage;
- if (Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) {
+ if (Writer->WritingModule &&
+ Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) {
// When building a C++ Modules TS module interface unit, a strong
// definition in the module interface is provided by the compilation of
// that module interface unit, not by its users. (Inline functions are
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
index 8519a4df019d..0767b3a24bf2 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -11,6 +11,7 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/AST/ExprOpenMP.h"
#include "clang/Serialization/ASTRecordWriter.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/AST/ASTContext.h"
@@ -68,7 +69,6 @@ void ASTStmtWriter::AddTemplateKWAndArgsInfo(
}
void ASTStmtWriter::VisitStmt(Stmt *S) {
- Record.push_back(S->StmtBits.IsOMPStructuredBlock);
}
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
@@ -194,6 +194,8 @@ void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
Record.AddDeclRef(S->getConditionVariable());
Record.AddSourceLocation(S->getWhileLoc());
+ Record.AddSourceLocation(S->getLParenLoc());
+ Record.AddSourceLocation(S->getRParenLoc());
Code = serialization::STMT_WHILE;
}
@@ -541,22 +543,34 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Record.push_back(E->isValueDependent());
Record.push_back(E->isInstantiationDependent());
Record.push_back(E->containsUnexpandedParameterPack());
+ Record.push_back(E->containsErrors());
Record.push_back(E->getValueKind());
Record.push_back(E->getObjectKind());
}
void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
- Record.push_back(static_cast<uint64_t>(E->ConstantExprBits.ResultKind));
+ Record.push_back(E->ConstantExprBits.ResultKind);
+
+ Record.push_back(E->ConstantExprBits.APValueKind);
+ Record.push_back(E->ConstantExprBits.IsUnsigned);
+ Record.push_back(E->ConstantExprBits.BitWidth);
+ // HasCleanup not serialized since we can just query the APValue.
+ Record.push_back(E->ConstantExprBits.IsImmediateInvocation);
+
switch (E->ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_None:
+ break;
case ConstantExpr::RSK_Int64:
Record.push_back(E->Int64Result());
- Record.push_back(E->ConstantExprBits.IsUnsigned |
- E->ConstantExprBits.BitWidth << 1);
break;
case ConstantExpr::RSK_APValue:
Record.AddAPValue(E->APValueResult());
+ break;
+ default:
+ llvm_unreachable("unexpected ResultKind!");
}
+
Record.AddStmt(E->getSubExpr());
Code = serialization::EXPR_CONSTANT;
}
@@ -628,8 +642,9 @@ void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
void ASTStmtWriter::VisitFixedPointLiteral(FixedPointLiteral *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getLocation());
+ Record.push_back(E->getScale());
Record.AddAPInt(E->getValue());
- Code = serialization::EXPR_INTEGER_LITERAL;
+ Code = serialization::EXPR_FIXEDPOINT_LITERAL;
}
void ASTStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
@@ -700,10 +715,16 @@ void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
+ bool HasFPFeatures = E->hasStoredFPFeatures();
+ // Write this first for easy access when deserializing, as they affect the
+ // size of the UnaryOperator.
+ Record.push_back(HasFPFeatures);
Record.AddStmt(E->getSubExpr());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
Record.push_back(E->canOverflow());
+ if (HasFPFeatures)
+ Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_UNARY_OPERATOR;
}
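
Editor's note: writing HasFPFeatures before anything else matters because the reader must know how large an object to allocate (the FP-feature word is optional trailing storage) before it deserializes the remaining fields. A rough standalone illustration of that "size-determining flag first" convention; Node, writeNode and readNode are invented names, not clang's.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    using Record = std::vector<uint64_t>;

    // A node that may or may not carry an extra FP-features word.
    struct Node {
      uint64_t Opcode = 0;
      bool HasFPFeatures = false;
      uint64_t FPFeatures = 0; // only meaningful if HasFPFeatures
    };

    void writeNode(Record &R, const Node &N) {
      // The presence flag goes first so the reader can size the object
      // before touching any other field.
      R.push_back(N.HasFPFeatures);
      R.push_back(N.Opcode);
      if (N.HasFPFeatures)
        R.push_back(N.FPFeatures);
    }

    Node readNode(const Record &R, size_t &Idx) {
      Node N;
      N.HasFPFeatures = R[Idx++]; // known before any layout decision
      N.Opcode = R[Idx++];
      if (N.HasFPFeatures)
        N.FPFeatures = R[Idx++];
      return N;
    }

    int main() {
      Record R;
      writeNode(R, {/*Opcode=*/7, /*HasFPFeatures=*/true, /*FPFeatures=*/0x3});
      writeNode(R, {/*Opcode=*/9, /*HasFPFeatures=*/false, /*FPFeatures=*/0});
      size_t Idx = 0;
      for (int i = 0; i < 2; ++i) {
        Node N = readNode(R, Idx);
        std::cout << "opcode=" << N.Opcode << " fp=" << N.HasFPFeatures << '\n';
      }
    }
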
@@ -764,16 +785,66 @@ void ASTStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Code = serialization::EXPR_ARRAY_SUBSCRIPT;
}
+void ASTStmtWriter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ VisitExpr(E);
+ Record.AddStmt(E->getBase());
+ Record.AddStmt(E->getRowIdx());
+ Record.AddStmt(E->getColumnIdx());
+ Record.AddSourceLocation(E->getRBracketLoc());
+ Code = serialization::EXPR_ARRAY_SUBSCRIPT;
+}
+
void ASTStmtWriter::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
VisitExpr(E);
Record.AddStmt(E->getBase());
Record.AddStmt(E->getLowerBound());
Record.AddStmt(E->getLength());
- Record.AddSourceLocation(E->getColonLoc());
+ Record.AddStmt(E->getStride());
+ Record.AddSourceLocation(E->getColonLocFirst());
+ Record.AddSourceLocation(E->getColonLocSecond());
Record.AddSourceLocation(E->getRBracketLoc());
Code = serialization::EXPR_OMP_ARRAY_SECTION;
}
+void ASTStmtWriter::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getDimensions().size());
+ Record.AddStmt(E->getBase());
+ for (Expr *Dim : E->getDimensions())
+ Record.AddStmt(Dim);
+ for (SourceRange SR : E->getBracketsRanges())
+ Record.AddSourceRange(SR);
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
+ Code = serialization::EXPR_OMP_ARRAY_SHAPING;
+}
+
+void ASTStmtWriter::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->numOfIterators());
+ Record.AddSourceLocation(E->getIteratorKwLoc());
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ Record.AddDeclRef(E->getIteratorDecl(I));
+ Record.AddSourceLocation(E->getAssignLoc(I));
+ OMPIteratorExpr::IteratorRange Range = E->getIteratorRange(I);
+ Record.AddStmt(Range.Begin);
+ Record.AddStmt(Range.End);
+ Record.AddStmt(Range.Step);
+ Record.AddSourceLocation(E->getColonLoc(I));
+ if (Range.Step)
+ Record.AddSourceLocation(E->getSecondColonLoc(I));
+ // Serialize helpers
+ OMPIteratorHelperData &HD = E->getHelper(I);
+ Record.AddDeclRef(HD.CounterVD);
+ Record.AddStmt(HD.Upper);
+ Record.AddStmt(HD.Update);
+ Record.AddStmt(HD.CounterUpdate);
+ }
+ Code = serialization::EXPR_OMP_ITERATOR;
+}
+
void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
@@ -786,6 +857,16 @@ void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
Code = serialization::EXPR_CALL;
}
+void ASTStmtWriter::VisitRecoveryExpr(RecoveryExpr *E) {
+ VisitExpr(E);
+ Record.push_back(std::distance(E->children().begin(), E->children().end()));
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+ for (Stmt *Child : E->children())
+ Record.AddStmt(Child);
+ Code = serialization::EXPR_RECOVERY;
+}
+
void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
VisitExpr(E);
@@ -868,11 +949,16 @@ void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
+ bool HasFPFeatures = E->hasStoredFPFeatures();
+ // Write this first for easy access when deserializing, as they affect the
+ // size of the BinaryOperator.
+ Record.push_back(HasFPFeatures);
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddStmt(E->getLHS());
Record.AddStmt(E->getRHS());
- Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
- Record.push_back(E->getFPFeatures().getInt());
+ if (HasFPFeatures)
+ Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_BINARY_OPERATOR;
}
@@ -1463,8 +1549,8 @@ void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
- Record.push_back(E->getFPFeatures().getInt());
Record.AddSourceRange(E->Range);
+ Record.push_back(E->getFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
@@ -1518,12 +1604,12 @@ void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
- Record.push_back(E->NumCaptures);
+ Record.push_back(E->LambdaExprBits.NumCaptures);
Record.AddSourceRange(E->IntroducerRange);
- Record.push_back(E->CaptureDefault); // FIXME: stable encoding
+ Record.push_back(E->LambdaExprBits.CaptureDefault); // FIXME: stable encoding
Record.AddSourceLocation(E->CaptureDefaultLoc);
- Record.push_back(E->ExplicitParams);
- Record.push_back(E->ExplicitResultType);
+ Record.push_back(E->LambdaExprBits.ExplicitParams);
+ Record.push_back(E->LambdaExprBits.ExplicitResultType);
Record.AddSourceLocation(E->ClosingBrace);
// Add capture initializers.
@@ -1533,6 +1619,9 @@ void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
Record.AddStmt(*C);
}
+ // Don't serialize the body. It belongs to the call operator declaration.
+ // LambdaExpr only stores a copy of the Stmt *.
+
Code = serialization::EXPR_LAMBDA;
}
@@ -1568,6 +1657,11 @@ void ASTStmtWriter::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
Code = serialization::EXPR_CXX_CONST_CAST;
}
+void ASTStmtWriter::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_ADDRSPACE_CAST;
+}
+
void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
VisitExplicitCastExpr(E);
Record.AddSourceLocation(E->getLParenLoc());
@@ -1579,6 +1673,7 @@ void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
VisitExplicitCastExpr(E);
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
+ Code = serialization::EXPR_BUILTIN_BIT_CAST;
}
void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
@@ -1722,8 +1817,15 @@ void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
Record.push_back(E->getNumObjects());
- for (unsigned i = 0, e = E->getNumObjects(); i != e; ++i)
- Record.AddDeclRef(E->getObject(i));
+ for (auto &Obj : E->getObjects()) {
+ if (auto *BD = Obj.dyn_cast<BlockDecl *>()) {
+ Record.push_back(serialization::COK_Block);
+ Record.AddDeclRef(BD);
+ } else if (auto *CLE = Obj.dyn_cast<CompoundLiteralExpr *>()) {
+ Record.push_back(serialization::COK_CompoundLiteral);
+ Record.AddStmt(CLE);
+ }
+ }
Record.push_back(E->cleanupsHaveSideEffects());
Record.AddStmt(E->getSubExpr());
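
Editor's note: the cleanup objects of ExprWithCleanups are now a pointer union (BlockDecl vs. CompoundLiteralExpr), so the writer records which alternative each entry holds before recording the entry itself. A compact sketch of that pattern with std::variant standing in for llvm::PointerUnion; the COK_* values only mirror the serialization constants used above.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    struct Block { std::string Name; };
    struct CompoundLiteral { std::string Text; };
    using CleanupObject = std::variant<Block, CompoundLiteral>;

    enum CleanupObjectKind : uint64_t { COK_Block, COK_CompoundLiteral };

    // Emit a kind tag per entry so the reader knows which payload follows.
    void writeCleanups(std::vector<std::string> &Out,
                       const std::vector<CleanupObject> &Objects) {
      Out.push_back(std::to_string(Objects.size()));
      for (const CleanupObject &Obj : Objects) {
        if (const auto *B = std::get_if<Block>(&Obj)) {
          Out.push_back(std::to_string(COK_Block));
          Out.push_back(B->Name);
        } else if (const auto *CL = std::get_if<CompoundLiteral>(&Obj)) {
          Out.push_back(std::to_string(COK_CompoundLiteral));
          Out.push_back(CL->Text);
        }
      }
    }

    int main() {
      std::vector<CleanupObject> Objs = {Block{"^{ ... }"},
                                         CompoundLiteral{"(int[]){1, 2}"}};
      std::vector<std::string> Out;
      writeCleanups(Out, Objs);
      for (const std::string &S : Out)
        std::cout << S << '\n';
    }
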
@@ -2012,7 +2114,7 @@ void ASTStmtWriter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
Record.AddSourceRange(E->getSourceRange());
- Record.AddString(E->getUuidStr());
+ Record.AddDeclRef(E->getGuidDecl());
if (E->isTypeOperand()) {
Record.AddTypeSourceInfo(E->getTypeOperandSourceInfo());
Code = serialization::EXPR_CXX_UUIDOF_TYPE;
@@ -2132,6 +2234,7 @@ void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
}
@@ -2143,6 +2246,7 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_FOR_DIRECTIVE;
}
@@ -2156,6 +2260,7 @@ void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_SECTIONS_DIRECTIVE;
}
@@ -2190,6 +2295,7 @@ void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
}
@@ -2205,6 +2311,7 @@ void ASTStmtWriter::VisitOMPParallelMasterDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Code = serialization::STMT_OMP_PARALLEL_MASTER_DIRECTIVE;
}
@@ -2213,6 +2320,7 @@ void ASTStmtWriter::VisitOMPParallelSectionsDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE;
}
@@ -2273,12 +2381,15 @@ void ASTStmtWriter::VisitOMPTargetParallelDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TARGET_PARALLEL_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE;
}
@@ -2316,6 +2427,20 @@ void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
Code = serialization::STMT_OMP_FLUSH_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_DEPOBJ_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPScanDirective(OMPScanDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_SCAN_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
@@ -2348,6 +2473,7 @@ void ASTStmtWriter::VisitOMPCancelDirective(OMPCancelDirective *D) {
void ASTStmtWriter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TASKLOOP_DIRECTIVE;
}
@@ -2359,6 +2485,7 @@ void ASTStmtWriter::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtWriter::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2371,6 +2498,7 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2395,6 +2523,7 @@ void ASTStmtWriter::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
void ASTStmtWriter::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
@@ -2443,6 +2572,7 @@ void ASTStmtWriter::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtWriter::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
@@ -2463,6 +2593,7 @@ void ASTStmtWriter::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
index 002233e49bb0..d869796b82c1 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
@@ -57,6 +57,11 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
}
}
+ // Errors that do not prevent the PCH from being written should not cause the
+ // overall compilation to fail either.
+ if (AllowASTWithErrors)
+ PP.getDiagnostics().getClient()->clear();
+
// Emit the PCH file to the Buffer.
assert(SemaPtr && "No Sema?");
Buffer->Signature =
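
Editor's note: the new AllowASTWithErrors handling resets the diagnostic client's counters so that errors tolerated while the PCH was built do not later make the driver report overall failure. A toy model of that flow; DiagClient and generatePCH are invented names, not the clang classes.

    #include <iostream>

    // Minimal stand-in for a diagnostics client that counts errors.
    struct DiagClient {
      unsigned NumErrors = 0;
      void error(const char *Msg) {
        ++NumErrors;
        std::cerr << "error: " << Msg << '\n';
      }
      void clear() { NumErrors = 0; } // forget counts, messages stay emitted
    };

    // Writes the "PCH" even if errors occurred, when allowed to.
    bool generatePCH(DiagClient &Diags, bool AllowASTWithErrors) {
      if (Diags.NumErrors != 0 && !AllowASTWithErrors)
        return false; // normal path: errors block PCH emission
      // Errors that did not prevent writing the PCH should not fail the
      // overall compilation either, so reset the client's counters.
      if (AllowASTWithErrors)
        Diags.clear();
      return true; // pretend the buffer was emitted here
    }

    int main() {
      DiagClient Diags;
      Diags.error("use of undeclared identifier 'x'");
      bool Ok = generatePCH(Diags, /*AllowASTWithErrors=*/true);
      std::cout << "emitted=" << Ok << " errors_after=" << Diags.NumErrors << '\n';
    }
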
diff --git a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
index 462d29c2a0f1..9192b3b476bb 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -321,7 +321,7 @@ bool GlobalModuleIndex::lookupIdentifier(StringRef Name, HitSet &Hits) {
= *static_cast<IdentifierIndexTable *>(IdentifierIndex);
IdentifierIndexTable::iterator Known = Table.find(Name);
if (Known == Table.end()) {
- return true;
+ return false;
}
SmallVector<unsigned, 2> ModuleIDs = *Known;
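
Editor's note: the one-line change above fixes the contract of lookupIdentifier, which is meant to report whether the identifier was found in the index, so a miss must return false. A minimal illustration of the intended contract, with an ordinary std::map standing in for the on-disk hash table.

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    using HitSet = std::set<unsigned>; // IDs of module files mentioning the name

    // Returns true only when the identifier is present in the index;
    // on a hit, Hits receives the module IDs recorded for it.
    bool lookupIdentifier(const std::map<std::string, std::vector<unsigned>> &Index,
                          const std::string &Name, HitSet &Hits) {
      auto Known = Index.find(Name);
      if (Known == Index.end())
        return false; // miss: do not pretend the index knows the name
      Hits.insert(Known->second.begin(), Known->second.end());
      return true;
    }

    int main() {
      std::map<std::string, std::vector<unsigned>> Index = {{"printf", {1, 3}}};
      HitSet Hits;
      std::cout << lookupIdentifier(Index, "printf", Hits) << ' '
                << lookupIdentifier(Index, "no_such_name", Hits) << '\n';
    }
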
@@ -643,10 +643,10 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Skip the stored signature.
// FIXME: we could read the signature out of the import and validate it.
- ASTFileSignature StoredSignature = {
- {{(uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++]}}};
+ auto FirstSignatureByte = Record.begin() + Idx;
+ ASTFileSignature StoredSignature = ASTFileSignature::create(
+ FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
+ Idx += ASTFileSignature::size;
// Skip the module name (currently this is only used for prebuilt
// modules while here we are only dealing with cached).
@@ -704,9 +704,8 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Get Signature.
if (State == DiagnosticOptionsBlock && Code == SIGNATURE)
- getModuleFileInfo(File).Signature = {
- {{(uint32_t)Record[0], (uint32_t)Record[1], (uint32_t)Record[2],
- (uint32_t)Record[3], (uint32_t)Record[4]}}};
+ getModuleFileInfo(File).Signature = ASTFileSignature::create(
+ Record.begin(), Record.begin() + ASTFileSignature::size);
// We don't care about this record.
}
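
Editor's note: both hunks replace hand-rolled element-by-element casts with a range-based factory plus an explicit cursor bump. Below is a sketch of that helper shape under the assumption of a fixed-width signature; the 5-word layout is purely illustrative, since the real ASTFileSignature's width is an implementation detail that has changed between releases.

    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Illustrative fixed-width signature type (not clang's).
    struct FileSignature {
      static constexpr size_t size = 5;
      std::array<uint32_t, size> Words{};

      // Build a signature from any iterator range of integer-like values.
      template <typename InputIt>
      static FileSignature create(InputIt First, InputIt Last) {
        FileSignature Sig;
        size_t I = 0;
        for (InputIt It = First; It != Last && I < size; ++It, ++I)
          Sig.Words[I] = static_cast<uint32_t>(*It);
        return Sig;
      }
    };

    int main() {
      std::vector<uint64_t> Record = {0xAA, 0xBB, 0xCC, 0xDD, 0xEE, /*next*/ 7};
      size_t Idx = 0;
      auto First = Record.begin() + Idx;
      FileSignature Sig = FileSignature::create(First, First + FileSignature::size);
      Idx += FileSignature::size; // advance past the signature explicitly
      for (uint32_t W : Sig.Words)
        std::cout << std::hex << W << ' ';
      std::cout << std::dec << "\nnext index: " << Idx << '\n';
    }
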
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
index daef502cdcb5..a42ed2f3c179 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
@@ -185,7 +185,14 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
Buf = llvm::MemoryBuffer::getSTDIN();
} else {
// Get a buffer of the file and close the file descriptor when done.
- Buf = FileMgr.getBufferForFile(NewModule->File, /*isVolatile=*/false);
+ // The file is volatile because in a parallel build we expect multiple
+ // compiler processes to use the same module file, rebuilding it if needed.
+ //
+ // RequiresNullTerminator is false because module files don't need it, and
+ // this allows the file to still be mmapped.
+ Buf = FileMgr.getBufferForFile(NewModule->File,
+ /*IsVolatile=*/true,
+ /*RequiresNullTerminator=*/false);
}
if (!Buf) {
@@ -439,7 +446,7 @@ bool ModuleManager::lookupModuleFile(StringRef FileName,
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
- auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true,
+ auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true,
/*CacheFailure=*/false);
if (!FileOrErr) {
File = nullptr;
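
Editor's note: marking the module file as volatile makes the FileManager take a private copy of its contents instead of relying on a long-lived memory mapping that another compiler process in a parallel build might rewrite underneath us. A rough illustration of the "copy when the file may change" choice, using plain iostreams rather than any mapping API.

    #include <fstream>
    #include <iostream>
    #include <optional>
    #include <sstream>
    #include <string>

    // Read the whole file into an owned buffer. Unlike a memory mapping,
    // the returned copy stays valid even if another process rewrites the
    // file while we are still using its contents.
    std::optional<std::string> readVolatileFile(const std::string &Path) {
      std::ifstream In(Path, std::ios::binary);
      if (!In)
        return std::nullopt;
      std::ostringstream Buf;
      Buf << In.rdbuf();
      return Buf.str();
    }

    int main() {
      if (auto Contents = readVolatileFile("module.pcm"))
        std::cout << "read " << Contents->size() << " bytes\n";
      else
        std::cout << "could not open module.pcm\n";
    }
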
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index 2ef50a727ece..0e8cbc60689a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -13,13 +13,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace ento;
@@ -27,24 +28,20 @@ using namespace ento;
namespace {
class AnalysisOrderChecker
- : public Checker<check::PreStmt<CastExpr>,
- check::PostStmt<CastExpr>,
- check::PreStmt<ArraySubscriptExpr>,
- check::PostStmt<ArraySubscriptExpr>,
- check::PreStmt<CXXNewExpr>,
- check::PostStmt<CXXNewExpr>,
- check::PreStmt<OffsetOfExpr>,
- check::PostStmt<OffsetOfExpr>,
- check::PreCall,
- check::PostCall,
- check::EndFunction,
- check::NewAllocator,
- check::Bind,
- check::PointerEscape,
- check::RegionChanges,
- check::LiveSymbols> {
-
- bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
+ : public Checker<
+ check::PreStmt<CastExpr>, check::PostStmt<CastExpr>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PostStmt<ArraySubscriptExpr>, check::PreStmt<CXXNewExpr>,
+ check::PostStmt<CXXNewExpr>, check::PreStmt<CXXDeleteExpr>,
+ check::PostStmt<CXXDeleteExpr>, check::PreStmt<CXXConstructExpr>,
+ check::PostStmt<CXXConstructExpr>, check::PreStmt<OffsetOfExpr>,
+ check::PostStmt<OffsetOfExpr>, check::PreCall, check::PostCall,
+ check::EndFunction, check::EndAnalysis, check::NewAllocator,
+ check::Bind, check::PointerEscape, check::RegionChanges,
+ check::LiveSymbols, eval::Call> {
+
+ bool isCallbackEnabled(const AnalyzerOptions &Opts,
+ StringRef CallbackName) const {
return Opts.getCheckerBooleanOption(this, "*") ||
Opts.getCheckerBooleanOption(this, CallbackName);
}
@@ -95,6 +92,26 @@ public:
llvm::errs() << "PostStmt<CXXNewExpr>\n";
}
+ void checkPreStmt(const CXXDeleteExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCXXDeleteExpr"))
+ llvm::errs() << "PreStmt<CXXDeleteExpr>\n";
+ }
+
+ void checkPostStmt(const CXXDeleteExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCXXDeleteExpr"))
+ llvm::errs() << "PostStmt<CXXDeleteExpr>\n";
+ }
+
+ void checkPreStmt(const CXXConstructExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCXXConstructExpr"))
+ llvm::errs() << "PreStmt<CXXConstructExpr>\n";
+ }
+
+ void checkPostStmt(const CXXConstructExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCXXConstructExpr"))
+ llvm::errs() << "PostStmt<CXXConstructExpr>\n";
+ }
+
void checkPreStmt(const OffsetOfExpr *OOE, CheckerContext &C) const {
if (isCallbackEnabled(C, "PreStmtOffsetOfExpr"))
llvm::errs() << "PreStmt<OffsetOfExpr>\n";
@@ -105,11 +122,25 @@ public:
llvm::errs() << "PostStmt<OffsetOfExpr>\n";
}
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "EvalCall")) {
+ llvm::errs() << "EvalCall";
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
+ llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " {argno: " << Call.getNumArgs() << '}';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
+ llvm::errs() << '\n';
+ return true;
+ }
+ return false;
+ }
+
void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
if (isCallbackEnabled(C, "PreCall")) {
llvm::errs() << "PreCall";
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
llvm::errs() << '\n';
}
}
@@ -119,6 +150,7 @@ public:
llvm::errs() << "PostCall";
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
llvm::errs() << '\n';
}
}
@@ -140,7 +172,13 @@ public:
}
}
- void checkNewAllocator(const CXXNewExpr *CNE, SVal Target,
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ if (isCallbackEnabled(BR.getAnalyzerOptions(), "EndAnalysis"))
+ llvm::errs() << "EndAnalysis\n";
+ }
+
+ void checkNewAllocator(const CXXAllocatorCall &Call,
CheckerContext &C) const {
if (isCallbackEnabled(C, "NewAllocator"))
llvm::errs() << "NewAllocator\n";
@@ -186,6 +224,6 @@ void ento::registerAnalysisOrderChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalysisOrderChecker>();
}
-bool ento::shouldRegisterAnalysisOrderChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAnalysisOrderChecker(const CheckerManager &mgr) {
return true;
}
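
Editor's note: the checker now also exercises eval::Call, whose boolean return value decides whether the analyzer's default call modeling still runs. A stripped-down model of that claim-or-decline protocol; Event, Checker and processCall are invented names, not analyzer API.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Event { std::string Callee; unsigned NumArgs; };

    struct Checker {
      std::string Name;
      // Returning true claims the event; later modeling is skipped.
      std::function<bool(const Event &)> EvalCall;
    };

    void processCall(const Event &E, const std::vector<Checker> &Checkers) {
      for (const Checker &C : Checkers) {
        if (C.EvalCall && C.EvalCall(E)) {
          std::cout << C.Name << " evaluated " << E.Callee << '\n';
          return; // at most one checker may model the call
        }
      }
      std::cout << "default modeling for " << E.Callee << '\n';
    }

    int main() {
      std::vector<Checker> Checkers = {
          {"AnalysisOrder", [](const Event &E) {
             std::cout << "EvalCall (" << E.Callee << ") {argno: " << E.NumArgs
                       << "}\n";
             return true; // claim it, like the callback above when enabled
           }}};
      processCall({"foo", 2}, Checkers);
      processCall({"bar", 0}, {}); // nobody claims it -> default modeling
    }
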
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 20f3008b4a4b..c06604b6cffe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -103,7 +103,7 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
NumBlocksUnreachable += unreachable;
NumBlocks += total;
- std::string NameOfRootFunction = output.str();
+ std::string NameOfRootFunction = std::string(output.str());
output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
<< unreachable << " | Exhausted Block: "
@@ -140,6 +140,6 @@ void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalyzerStatsChecker>();
}
-bool ento::shouldRegisterAnalyzerStatsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAnalyzerStatsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 8d4793e0802f..59163c1f31fa 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
@@ -54,12 +55,11 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
ProgramStateRef state = C.getState();
// Get the size of the array.
- DefinedOrUnknownSVal NumElements
- = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
- ER->getValueType());
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
@@ -92,6 +92,6 @@ void ento::registerArrayBoundChecker(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundChecker>();
}
-bool ento::shouldRegisterArrayBoundChecker(const LangOptions &LO) {
+bool ento::shouldRegisterArrayBoundChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 8f3bf138cae4..7c264bba4b6a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -12,13 +12,14 @@
//===----------------------------------------------------------------------===//
#include "Taint.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -175,24 +176,23 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
}
do {
- // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
+ // CHECK UPPER BOUND: Is byteOffset >= size(baseRegion)? If so,
// we are doing a load/store after the last valid offset.
- DefinedOrUnknownSVal extentVal =
- rawOffset.getRegion()->getExtent(svalBuilder);
- if (!extentVal.getAs<NonLoc>())
+ const MemRegion *MR = rawOffset.getRegion();
+ DefinedOrUnknownSVal Size = getDynamicSize(state, MR, svalBuilder);
+ if (!Size.getAs<NonLoc>())
break;
- if (extentVal.getAs<nonloc::ConcreteInt>()) {
+ if (Size.getAs<nonloc::ConcreteInt>()) {
std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
getSimplifiedOffsets(rawOffset.getByteOffset(),
- extentVal.castAs<nonloc::ConcreteInt>(),
- svalBuilder);
+ Size.castAs<nonloc::ConcreteInt>(), svalBuilder);
rawOffsetVal = simplifiedOffsets.first;
- extentVal = simplifiedOffsets.second;
+ Size = simplifiedOffsets.second;
}
SVal upperbound = svalBuilder.evalBinOpNN(state, BO_GE, rawOffsetVal,
- extentVal.castAs<NonLoc>(),
+ Size.castAs<NonLoc>(),
svalBuilder.getConditionType());
Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
@@ -356,6 +356,6 @@ void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundCheckerV2>();
}
-bool ento::shouldRegisterArrayBoundCheckerV2(const LangOptions &LO) {
+bool ento::shouldRegisterArrayBoundCheckerV2(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 325952fe4ed4..918c6e361381 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -1243,7 +1243,7 @@ void ento::registerNilArgChecker(CheckerManager &mgr) {
mgr.registerChecker<NilArgChecker>();
}
-bool ento::shouldRegisterNilArgChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNilArgChecker(const CheckerManager &mgr) {
return true;
}
@@ -1251,7 +1251,7 @@ void ento::registerCFNumberChecker(CheckerManager &mgr) {
mgr.registerChecker<CFNumberChecker>();
}
-bool ento::shouldRegisterCFNumberChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFNumberChecker(const CheckerManager &mgr) {
return true;
}
@@ -1259,7 +1259,7 @@ void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<CFRetainReleaseChecker>();
}
-bool ento::shouldRegisterCFRetainReleaseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFRetainReleaseChecker(const CheckerManager &mgr) {
return true;
}
@@ -1267,7 +1267,7 @@ void ento::registerClassReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<ClassReleaseChecker>();
}
-bool ento::shouldRegisterClassReleaseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterClassReleaseChecker(const CheckerManager &mgr) {
return true;
}
@@ -1275,7 +1275,7 @@ void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<VariadicMethodTypeChecker>();
}
-bool ento::shouldRegisterVariadicMethodTypeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVariadicMethodTypeChecker(const CheckerManager &mgr) {
return true;
}
@@ -1283,7 +1283,7 @@ void ento::registerObjCLoopChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCLoopChecker>();
}
-bool ento::shouldRegisterObjCLoopChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCLoopChecker(const CheckerManager &mgr) {
return true;
}
@@ -1291,6 +1291,6 @@ void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCNonNilReturnValueChecker>();
}
-bool ento::shouldRegisterObjCNonNilReturnValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCNonNilReturnValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index 0eb3c3d1d0e6..2752b37f9b3f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -184,6 +184,6 @@ void ento::registerBlockInCriticalSectionChecker(CheckerManager &mgr) {
mgr.registerChecker<BlockInCriticalSectionChecker>();
}
-bool ento::shouldRegisterBlockInCriticalSectionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBlockInCriticalSectionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 1423b9c39b26..6c0caf3c4e78 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -70,8 +70,8 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
- Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
- if (!DV)
+ Optional<NonLoc> NV = val.getAs<NonLoc>();
+ if (!NV)
return;
// Check if the assigned value meets our criteria for correctness. It must
@@ -79,84 +79,23 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// the value is possibly < 0 (for a negative value) or greater than 1.
ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
ConstraintManager &CM = C.getConstraintManager();
- // First, ensure that the value is >= 0.
- DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
- SVal greaterThanOrEqualToZeroVal =
- svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
- svalBuilder.getConditionType());
+ llvm::APSInt Zero = BVF.getValue(0, valTy);
+ llvm::APSInt One = BVF.getValue(1, valTy);
- Optional<DefinedSVal> greaterThanEqualToZero =
- greaterThanOrEqualToZeroVal.getAs<DefinedSVal>();
+ ProgramStateRef StIn, StOut;
+ std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(state, *NV, Zero, One);
- if (!greaterThanEqualToZero) {
- // The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
- return;
- }
-
- ProgramStateRef stateLT, stateGE;
- std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
-
- // Is it possible for the value to be less than zero?
- if (stateLT) {
- // It is possible for the value to be less than zero. We only
- // want to emit a warning, however, if that value is fully constrained.
- // If it it possible for the value to be >= 0, then essentially the
- // value is underconstrained and there is nothing left to be done.
- if (!stateGE)
- emitReport(stateLT, C);
-
- // In either case, we are done.
- return;
- }
-
- // If we reach here, it must be the case that the value is constrained
- // to only be >= 0.
- assert(stateGE == state);
-
- // At this point we know that the value is >= 0.
- // Now check to ensure that the value is <= 1.
- DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
- SVal lessThanEqToOneVal =
- svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
- svalBuilder.getConditionType());
-
- Optional<DefinedSVal> lessThanEqToOne =
- lessThanEqToOneVal.getAs<DefinedSVal>();
-
- if (!lessThanEqToOne) {
- // The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
- return;
- }
-
- ProgramStateRef stateGT, stateLE;
- std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
-
- // Is it possible for the value to be greater than one?
- if (stateGT) {
- // It is possible for the value to be greater than one. We only
- // want to emit a warning, however, if that value is fully constrained.
- // If it is possible for the value to be <= 1, then essentially the
- // value is underconstrained and there is nothing left to be done.
- if (!stateLE)
- emitReport(stateGT, C);
-
- // In either case, we are done.
- return;
- }
-
- // If we reach here, it must be the case that the value is constrained
- // to only be <= 1.
- assert(stateLE == state);
+ if (!StIn)
+ emitReport(StOut, C);
}
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<BoolAssignmentChecker>();
}
-bool ento::shouldRegisterBoolAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &mgr) {
return true;
}
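
Editor's note: the rewritten checkBind asks the constraint manager one question, "can this value lie in [0, 1]?", and warns only when the answer is definitely no. A self-contained sketch of that dual-assumption shape over plain integer intervals, which stand in for the analyzer's symbolic constraints; assumeBoolRange mimics the role assumeInclusiveRangeDual plays above.

    #include <algorithm>
    #include <iostream>
    #include <optional>
    #include <utility>

    // A closed integer interval [Lo, Hi] playing the role of a constrained value.
    struct Range {
      long Lo, Hi;
    };

    // Split a range into the part inside [0, 1] and whether anything lies
    // outside it; either piece may be infeasible.
    std::pair<std::optional<Range>, bool> assumeBoolRange(Range R) {
      long Lo = std::max(R.Lo, 0L), Hi = std::min(R.Hi, 1L);
      std::optional<Range> In;
      if (Lo <= Hi)
        In = Range{Lo, Hi};
      bool OutFeasible = R.Lo < 0 || R.Hi > 1;
      return {In, OutFeasible};
    }

    void checkBoolAssignment(Range ValueRange) {
      auto [StIn, StOutFeasible] = assumeBoolRange(ValueRange);
      // Warn only when the value cannot possibly be 0 or 1.
      if (!StIn && StOutFeasible)
        std::cout << "warning: assignment of a non-Boolean value\n";
      else
        std::cout << "ok: value may be Boolean\n";
    }

    int main() {
      checkBoolAssignment({0, 1});  // definitely Boolean
      checkBoolAssignment({-3, 5}); // might be Boolean: no warning
      checkBoolAssignment({2, 9});  // cannot be Boolean: warning
    }
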
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 10594e331cbe..233ce57c3ac9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -10,12 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace ento;
@@ -63,10 +64,12 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
case Builtin::BI__builtin_unpredictable:
case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability:
case Builtin::BI__builtin_assume_aligned:
case Builtin::BI__builtin_addressof: {
- // For __builtin_unpredictable, __builtin_expect, and
- // __builtin_assume_aligned, just return the value of the subexpression.
+ // For __builtin_unpredictable, __builtin_expect,
+ // __builtin_expect_with_probability and __builtin_assume_aligned,
+ // just return the value of the subexpression.
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
assert (Call.getNumArgs() > 0);
@@ -90,10 +93,10 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
return true; // Return true to model purity.
SValBuilder& svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
- DefinedOrUnknownSVal extentMatchesSizeArg =
- svalBuilder.evalEQ(state, Extent, Size.castAs<DefinedOrUnknownSVal>());
- state = state->assume(extentMatchesSizeArg, true);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(state, R, svalBuilder);
+ DefinedOrUnknownSVal DynSizeMatchesSizeArg =
+ svalBuilder.evalEQ(state, DynSize, Size.castAs<DefinedOrUnknownSVal>());
+ state = state->assume(DynSizeMatchesSizeArg, true);
assert(state && "The region should not have any previous constraints");
C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
@@ -134,6 +137,6 @@ void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<BuiltinFunctionChecker>();
}
-bool ento::shouldRegisterBuiltinFunctionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBuiltinFunctionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 21c4bbc60264..30fd62f887c4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -11,23 +11,66 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "InterCheckerAPI.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
+struct AnyArgExpr {
+ // FIXME: Remove constructor in C++17 to turn it into an aggregate.
+ AnyArgExpr(const Expr *Expression, unsigned ArgumentIndex)
+ : Expression{Expression}, ArgumentIndex{ArgumentIndex} {}
+ const Expr *Expression;
+ unsigned ArgumentIndex;
+};
+
+struct SourceArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Remove using in C++17.
+};
+
+struct DestinationArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Same.
+};
+
+struct SizeArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Same.
+};
+
+using ErrorMessage = SmallString<128>;
+enum class AccessKind { write, read };
+
+static ErrorMessage createOutOfBoundErrorMsg(StringRef FunctionDescription,
+ AccessKind Access) {
+ ErrorMessage Message;
+ llvm::raw_svector_ostream Os(Message);
+
+ // Function classification like: Memory copy function
+ Os << toUppercase(FunctionDescription.front())
+ << &FunctionDescription.data()[1];
+
+ if (Access == AccessKind::write) {
+ Os << " overflows the destination buffer";
+ } else { // read access
+ Os << " accesses out-of-bound array element";
+ }
+
+ return Message;
+}
+
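
Editor's note: the AnyArgExpr family gives each call argument a distinct static type, so the compiler rather than the reader enforces which expression is the size, the source, and the destination. The same trick in miniature, detached from clang's Expr; all names below are illustrative.

    #include <iostream>
    #include <string>

    // A call argument: the expression text plus its position in the call.
    struct AnyArg {
      std::string Expression;
      unsigned ArgumentIndex;
    };

    // Distinct wrapper types so arguments cannot be swapped silently.
    struct SourceArg : AnyArg {};
    struct DestinationArg : AnyArg {};
    struct SizeArg : AnyArg {};

    // The signature documents (and enforces) which argument plays which role.
    void evalCopy(SizeArg Size, DestinationArg Dest, SourceArg Src) {
      std::cout << "copy " << Size.Expression << " bytes from " << Src.Expression
                << " (arg #" << Src.ArgumentIndex + 1 << ") to " << Dest.Expression
                << " (arg #" << Dest.ArgumentIndex + 1 << ")\n";
    }

    int main() {
      // memcpy(dst, src, n): argument positions 0, 1, 2.
      DestinationArg Dest{{"dst", 0}};
      SourceArg Src{{"src", 1}};
      SizeArg Size{{"n", 2}};
      evalCopy(Size, Dest, Src);
      // evalCopy(Size, Src, Dest);  // would not compile: distinct types
    }
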
enum class ConcatFnKind { none = 0, strcat = 1, strlcat = 2 };
class CStringChecker : public Checker< eval::Call,
check::PreStmt<DeclStmt>,
@@ -111,12 +154,9 @@ public:
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *Source,
- const Expr *Dest,
- bool Restricted = false,
- bool IsMempcpy = false) const;
+ ProgramStateRef state, SizeArgExpr Size,
+ DestinationArgExpr Dest, SourceArgExpr Source,
+ bool Restricted, bool IsMempcpy) const;
void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
@@ -193,40 +233,17 @@ public:
ProgramStateRef &State);
// Re-usable checks
- ProgramStateRef checkNonNull(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S,
- SVal l,
- unsigned IdxOfArg) const;
- ProgramStateRef CheckLocation(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S,
- SVal l,
- const char *message = nullptr) const;
- ProgramStateRef CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *FirstBuf,
- const Expr *SecondBuf,
- const char *firstMessage = nullptr,
- const char *secondMessage = nullptr,
- bool WarnAboutSize = false) const;
-
- ProgramStateRef CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *Buf,
- const char *message = nullptr,
- bool WarnAboutSize = false) const {
- // This is a convenience overload.
- return CheckBufferAccess(C, state, Size, Buf, nullptr, message, nullptr,
- WarnAboutSize);
- }
- ProgramStateRef CheckOverlap(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *First,
- const Expr *Second) const;
+ ProgramStateRef checkNonNull(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Arg, SVal l) const;
+ ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element,
+ AccessKind Access) const;
+ ProgramStateRef CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Buffer, SizeArgExpr Size,
+ AccessKind Access) const;
+ ProgramStateRef CheckOverlap(CheckerContext &C, ProgramStateRef state,
+ SizeArgExpr Size, AnyArgExpr First,
+ AnyArgExpr Second) const;
void emitOverlapBug(CheckerContext &C,
ProgramStateRef state,
const Stmt *First,
@@ -275,26 +292,26 @@ CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
}
ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S, SVal l,
- unsigned IdxOfArg) const {
+ ProgramStateRef State,
+ AnyArgExpr Arg, SVal l) const {
// If a previous check has failed, propagate the failure.
- if (!state)
+ if (!State)
return nullptr;
ProgramStateRef stateNull, stateNonNull;
- std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+ std::tie(stateNull, stateNonNull) =
+ assumeZero(C, State, l, Arg.Expression->getType());
if (stateNull && !stateNonNull) {
if (Filter.CheckCStringNullArg) {
SmallString<80> buf;
llvm::raw_svector_ostream OS(buf);
assert(CurrentFunctionDescription);
- OS << "Null pointer passed as " << IdxOfArg
- << llvm::getOrdinalSuffix(IdxOfArg) << " argument to "
+ OS << "Null pointer passed as " << (Arg.ArgumentIndex + 1)
+ << llvm::getOrdinalSuffix(Arg.ArgumentIndex + 1) << " argument to "
<< CurrentFunctionDescription;
- emitNullArgBug(C, stateNull, S, OS.str());
+ emitNullArgBug(C, stateNull, Arg.Expression, OS.str());
}
return nullptr;
}
@@ -306,19 +323,20 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S, SVal l,
- const char *warningMsg) const {
+ ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element,
+ AccessKind Access) const {
+
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
// Check for out of bound array element access.
- const MemRegion *R = l.getAsRegion();
+ const MemRegion *R = Element.getAsRegion();
if (!R)
return state;
- const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ const auto *ER = dyn_cast<ElementRegion>(R);
if (!ER)
return state;
@@ -326,11 +344,9 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return state;
// Get the size of the array.
- const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- SValBuilder &svalBuilder = C.getSValBuilder();
- SVal Extent =
- svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
- DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
+ const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
+ DefinedOrUnknownSVal Size =
+ getDynamicSize(state, superReg, C.getSValBuilder());
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
@@ -343,20 +359,11 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// In the latter case we only do modeling but do not emit warning.
if (!Filter.CheckCStringOutOfBounds)
return nullptr;
- // Emit a bug report.
- if (warningMsg) {
- emitOutOfBoundsBug(C, StOutBound, S, warningMsg);
- } else {
- assert(CurrentFunctionDescription);
- assert(CurrentFunctionDescription[0] != '\0');
- SmallString<80> buf;
- llvm::raw_svector_ostream os(buf);
- os << toUppercase(CurrentFunctionDescription[0])
- << &CurrentFunctionDescription[1]
- << " accesses out-of-bound array element";
- emitOutOfBoundsBug(C, StOutBound, S, os.str());
- }
+ // Emit a bug report.
+ ErrorMessage Message =
+ createOutOfBoundErrorMsg(CurrentFunctionDescription, Access);
+ emitOutOfBoundsBug(C, StOutBound, Buffer.Expression, Message);
return nullptr;
}
@@ -366,89 +373,68 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
}
ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *FirstBuf,
- const Expr *SecondBuf,
- const char *firstMessage,
- const char *secondMessage,
- bool WarnAboutSize) const {
+ ProgramStateRef State,
+ AnyArgExpr Buffer,
+ SizeArgExpr Size,
+ AccessKind Access) const {
// If a previous check has failed, propagate the failure.
- if (!state)
+ if (!State)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
ASTContext &Ctx = svalBuilder.getContext();
- const LocationContext *LCtx = C.getLocationContext();
- QualType sizeTy = Size->getType();
+ QualType SizeTy = Size.Expression->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
- SVal BufVal = C.getSVal(FirstBuf);
- state = checkNonNull(C, state, FirstBuf, BufVal, 1);
- if (!state)
+ SVal BufVal = C.getSVal(Buffer.Expression);
+ State = checkNonNull(C, State, Buffer, BufVal);
+ if (!State)
return nullptr;
// If out-of-bounds checking is turned off, skip the rest.
if (!Filter.CheckCStringOutOfBounds)
- return state;
+ return State;
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
- SVal LengthVal = C.getSVal(Size);
+ SVal LengthVal = C.getSVal(Size.Expression);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
- return state;
+ return State;
// Compute the offset of the last element to be accessed: size-1.
- NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ NonLoc One = svalBuilder.makeIntVal(1, SizeTy).castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(State, BO_Sub, *Length, One, SizeTy);
if (Offset.isUnknown())
return nullptr;
NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
- SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ SVal BufStart =
+ svalBuilder.evalCast(BufVal, PtrTy, Buffer.Expression->getType());
if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
- const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
- SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
- state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
+ SVal BufEnd =
+ svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
- // If the buffer isn't large enough, abort.
- if (!state)
- return nullptr;
- }
+ State = CheckLocation(C, State, Buffer, BufEnd, Access);
- // If there's a second buffer, check it as well.
- if (SecondBuf) {
- BufVal = state->getSVal(SecondBuf, LCtx);
- state = checkNonNull(C, state, SecondBuf, BufVal, 2);
- if (!state)
+ // If the buffer isn't large enough, abort.
+ if (!State)
return nullptr;
-
- BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
- if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
- const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
-
- SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
- state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
- }
}
// Large enough or not, return this state!
- return state;
+ return State;
}
ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *First,
- const Expr *Second) const {
+ ProgramStateRef state,
+ SizeArgExpr Size, AnyArgExpr First,
+ AnyArgExpr Second) const {
if (!Filter.CheckCStringBufferOverlap)
return state;
@@ -464,8 +450,8 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Get the buffer values and make sure they're known locations.
const LocationContext *LCtx = C.getLocationContext();
- SVal firstVal = state->getSVal(First, LCtx);
- SVal secondVal = state->getSVal(Second, LCtx);
+ SVal firstVal = state->getSVal(First.Expression, LCtx);
+ SVal secondVal = state->getSVal(Second.Expression, LCtx);
Optional<Loc> firstLoc = firstVal.getAs<Loc>();
if (!firstLoc)
@@ -478,11 +464,11 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Are the two values the same?
SValBuilder &svalBuilder = C.getSValBuilder();
std::tie(stateTrue, stateFalse) =
- state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
+ state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
if (stateTrue && !stateFalse) {
// If the values are known to be equal, that's automatically an overlap.
- emitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First.Expression, Second.Expression);
return nullptr;
}
@@ -492,8 +478,8 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Which value comes first?
QualType cmpTy = svalBuilder.getConditionType();
- SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
- *firstLoc, *secondLoc, cmpTy);
+ SVal reverse =
+ svalBuilder.evalBinOpLL(state, BO_GT, *firstLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> reverseTest =
reverse.getAs<DefinedOrUnknownSVal>();
if (!reverseTest)
@@ -514,7 +500,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
}
// Get the length, and make sure it too is known.
- SVal LengthVal = state->getSVal(Size, LCtx);
+ SVal LengthVal = state->getSVal(Size.Expression, LCtx);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
@@ -523,22 +509,22 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
- First->getType());
+ SVal FirstStart =
+ svalBuilder.evalCast(*firstLoc, CharPtrTy, First.Expression->getType());
Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
if (!FirstStartLoc)
return state;
// Compute the end of the first buffer. Bail out if THAT fails.
- SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
- *FirstStartLoc, *Length, CharPtrTy);
+ SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add, *FirstStartLoc,
+ *Length, CharPtrTy);
Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
if (!FirstEndLoc)
return state;
// Is the end of the first buffer past the start of the second buffer?
- SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
- *FirstEndLoc, *secondLoc, cmpTy);
+ SVal Overlap =
+ svalBuilder.evalBinOpLL(state, BO_GT, *FirstEndLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> OverlapTest =
Overlap.getAs<DefinedOrUnknownSVal>();
if (!OverlapTest)
@@ -548,7 +534,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
if (stateTrue && !stateFalse) {
// Overlap!
- emitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First.Expression, Second.Expression);
return nullptr;
}
@@ -723,7 +709,8 @@ ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
// These are the types we can currently track string lengths for.
@@ -828,7 +815,8 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
}
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
@@ -935,14 +923,12 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- SVal Extent =
- svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
- DefinedOrUnknownSVal ExtentSize = Extent.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal SizeDV = getDynamicSize(state, superReg, svalBuilder);
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StInBound = state->assumeInBound(Idx, ExtentSize, true);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, SizeDV, true);
return static_cast<bool>(StInBound);
}
@@ -1025,10 +1011,14 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
os << "a C++ temp object of type "
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
return true;
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
os << "a variable of type"
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
return true;
+ case MemRegion::ParamVarRegionKind:
+ os << "a parameter of type"
+ << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ return true;
case MemRegion::FieldRegionKind:
os << "a field of type "
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
@@ -1069,13 +1059,12 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
// For now we can only handle the case of offset is 0 and concrete char value.
if (Offset.isValid() && !Offset.hasSymbolicOffset() &&
Offset.getOffset() == 0) {
- // Get the base region's extent.
- auto *SubReg = cast<SubRegion>(BR);
- DefinedOrUnknownSVal Extent = SubReg->getExtent(svalBuilder);
+ // Get the base region's size.
+ DefinedOrUnknownSVal SizeDV = getDynamicSize(State, BR, svalBuilder);
ProgramStateRef StateWholeReg, StateNotWholeReg;
std::tie(StateWholeReg, StateNotWholeReg) =
- State->assume(svalBuilder.evalEQ(State, Extent, *SizeNL));
+ State->assume(svalBuilder.evalEQ(State, SizeDV, *SizeNL));
// With the semantic of 'memset()', we should convert the CharVal to
// unsigned char.
@@ -1134,25 +1123,24 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
-void CStringChecker::evalCopyCommon(CheckerContext &C,
- const CallExpr *CE,
- ProgramStateRef state,
- const Expr *Size, const Expr *Dest,
- const Expr *Source, bool Restricted,
+void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state, SizeArgExpr Size,
+ DestinationArgExpr Dest,
+ SourceArgExpr Source, bool Restricted,
bool IsMempcpy) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
- SVal sizeVal = state->getSVal(Size, LCtx);
- QualType sizeTy = Size->getType();
+ SVal sizeVal = state->getSVal(Size.Expression, LCtx);
+ QualType sizeTy = Size.Expression->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
- assumeZero(C, state, sizeVal, sizeTy);
+ assumeZero(C, state, sizeVal, sizeTy);
// Get the value of the Dest.
- SVal destVal = state->getSVal(Dest, LCtx);
+ SVal destVal = state->getSVal(Dest.Expression, LCtx);
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
@@ -1168,24 +1156,23 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// Ensure the destination is not null. If it is NULL there will be a
// NULL pointer dereference.
- state = checkNonNull(C, state, Dest, destVal, 1);
+ state = checkNonNull(C, state, Dest, destVal);
if (!state)
return;
// Get the value of the Src.
- SVal srcVal = state->getSVal(Source, LCtx);
+ SVal srcVal = state->getSVal(Source.Expression, LCtx);
// Ensure the source is not null. If it is NULL there will be a
// NULL pointer dereference.
- state = checkNonNull(C, state, Source, srcVal, 2);
+ state = checkNonNull(C, state, Source, srcVal);
if (!state)
return;
// Ensure the accesses are valid and that the buffers do not overlap.
- const char * const writeWarning =
- "Memory copy function overflows destination buffer";
- state = CheckBufferAccess(C, state, Size, Dest, Source,
- writeWarning, /* sourceWarning = */ nullptr);
+ state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write);
+ state = CheckBufferAccess(C, state, Source, Size, AccessKind::read);
+
if (Restricted)
state = CheckOverlap(C, state, Size, Dest, Source);
@@ -1200,9 +1187,9 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
ASTContext &Ctx = SvalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
SVal DestRegCharVal =
- SvalBuilder.evalCast(destVal, CharPtrTy, Dest->getType());
+ SvalBuilder.evalCast(destVal, CharPtrTy, Dest.Expression->getType());
SVal lastElement = C.getSValBuilder().evalBinOp(
- state, BO_Add, DestRegCharVal, sizeVal, Dest->getType());
+ state, BO_Add, DestRegCharVal, sizeVal, Dest.Expression->getType());
// If we don't know how much we copied, we can at least
// conjure a return value for later.
if (lastElement.isUnknown())
@@ -1223,120 +1210,136 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
- /*IsSourceBuffer*/false, Size);
+ state =
+ InvalidateBuffer(C, state, Dest.Expression, C.getSVal(Dest.Expression),
+ /*IsSourceBuffer*/ false, Size.Expression);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
- /*IsSourceBuffer*/true, nullptr);
+ state = InvalidateBuffer(C, state, Source.Expression,
+ C.getSVal(Source.Expression),
+ /*IsSourceBuffer*/ true, nullptr);
C.addTransition(state);
}
}
-
void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
+ ProgramStateRef State = C.getState();
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
+ constexpr bool IsRestricted = true;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, State, Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
+ constexpr bool IsRestricted = true;
+ constexpr bool IsMempcpy = true;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
+ constexpr bool IsRestricted = false;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
// void bcopy(const void *src, void *dst, size_t n);
- evalCopyCommon(C, CE, C.getState(),
- CE->getArg(2), CE->getArg(1), CE->getArg(0));
+ SourceArgExpr Src(CE->getArg(0), 0);
+ DestinationArgExpr Dest = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
+ constexpr bool IsRestricted = false;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
- const Expr *Left = CE->getArg(0);
- const Expr *Right = CE->getArg(1);
- const Expr *Size = CE->getArg(2);
+ AnyArgExpr Left = {CE->getArg(0), 0};
+ AnyArgExpr Right = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+ SValBuilder &Builder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getLocationContext();
// See if the size argument is zero.
- const LocationContext *LCtx = C.getLocationContext();
- SVal sizeVal = state->getSVal(Size, LCtx);
- QualType sizeTy = Size->getType();
+ SVal sizeVal = State->getSVal(Size.Expression, LCtx);
+ QualType sizeTy = Size.Expression->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
- assumeZero(C, state, sizeVal, sizeTy);
+ assumeZero(C, State, sizeVal, sizeTy);
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check either of the buffers.
if (stateZeroSize) {
- state = stateZeroSize;
- state = state->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
- C.addTransition(state);
+ State = stateZeroSize;
+ State = State->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ C.addTransition(State);
}
// If the size can be nonzero, we have to check the other arguments.
if (stateNonZeroSize) {
- state = stateNonZeroSize;
+ State = stateNonZeroSize;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
DefinedOrUnknownSVal LV =
- state->getSVal(Left, LCtx).castAs<DefinedOrUnknownSVal>();
+ State->getSVal(Left.Expression, LCtx).castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal RV =
- state->getSVal(Right, LCtx).castAs<DefinedOrUnknownSVal>();
+ State->getSVal(Right.Expression, LCtx).castAs<DefinedOrUnknownSVal>();
// See if they are the same.
- DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
- ProgramStateRef StSameBuf, StNotSameBuf;
- std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+ ProgramStateRef SameBuffer, NotSameBuffer;
+ std::tie(SameBuffer, NotSameBuffer) =
+ State->assume(Builder.evalEQ(State, LV, RV));
// If the two arguments are the same buffer, we know the result is 0,
// and we only need to check one size.
- if (StSameBuf && !StNotSameBuf) {
- state = StSameBuf;
- state = CheckBufferAccess(C, state, Size, Left);
- if (state) {
- state = StSameBuf->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
- C.addTransition(state);
+ if (SameBuffer && !NotSameBuffer) {
+ State = SameBuffer;
+ State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
+ if (State) {
+ State =
+ SameBuffer->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ C.addTransition(State);
}
return;
}
// If the two arguments might be different buffers, we have to check
// the size of both of them.
- assert(StNotSameBuf);
- state = CheckBufferAccess(C, state, Size, Left, Right);
- if (state) {
+ assert(NotSameBuffer);
+ State = CheckBufferAccess(C, State, Right, Size, AccessKind::read);
+ State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
+ if (State) {
// The return value is the comparison result, which we don't know.
- SVal CmpV =
- svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
- state = state->BindExpr(CE, LCtx, CmpV);
- C.addTransition(state);
+ SVal CmpV = Builder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ State = State->BindExpr(CE, LCtx, CmpV);
+ C.addTransition(State);
}
}
}
@@ -1384,15 +1387,14 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
}
// Check that the string argument is non-null.
- const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg, LCtx);
-
- state = checkNonNull(C, state, Arg, ArgVal, 1);
+ AnyArgExpr Arg = {CE->getArg(0), 0};
+ SVal ArgVal = state->getSVal(Arg.Expression, LCtx);
+ state = checkNonNull(C, state, Arg, ArgVal);
if (!state)
return;
- SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+ SVal strLength = getCStringLength(C, state, Arg.Expression, ArgVal);
// If the argument isn't a valid C string, there's no valid state to
// transition to.
@@ -1540,30 +1542,30 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
CurrentFunctionDescription = "string copy function";
else
CurrentFunctionDescription = "string concatenation function";
+
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
- const Expr *Dst = CE->getArg(0);
- SVal DstVal = state->getSVal(Dst, LCtx);
-
- state = checkNonNull(C, state, Dst, DstVal, 1);
+ DestinationArgExpr Dst = {CE->getArg(0), 0};
+ SVal DstVal = state->getSVal(Dst.Expression, LCtx);
+ state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
- const Expr *srcExpr = CE->getArg(1);
- SVal srcVal = state->getSVal(srcExpr, LCtx);
- state = checkNonNull(C, state, srcExpr, srcVal, 2);
+ SourceArgExpr srcExpr = {CE->getArg(1), 1};
+ SVal srcVal = state->getSVal(srcExpr.Expression, LCtx);
+ state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
return;
// Get the string length of the source.
- SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
+ SVal strLength = getCStringLength(C, state, srcExpr.Expression, srcVal);
Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
// Get the string length of the destination buffer.
- SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ SVal dstStrLength = getCStringLength(C, state, Dst.Expression, DstVal);
Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
// If the source isn't a valid C string, give up.
@@ -1581,8 +1583,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
SVal maxLastElementIndex = UnknownVal();
const char *boundWarning = nullptr;
- state = CheckOverlap(C, state, IsBounded ? CE->getArg(2) : CE->getArg(1), Dst,
- srcExpr);
+ // FIXME: Why do we choose the srcExpr if the access has no size?
+ // Note that the 3rd argument of the call would be the size parameter.
+ SizeArgExpr SrcExprAsSizeDummy = {srcExpr.Expression, srcExpr.ArgumentIndex};
+ state = CheckOverlap(
+ C, state,
+ (IsBounded ? SizeArgExpr{CE->getArg(2), 2} : SrcExprAsSizeDummy), Dst,
+ srcExpr);
if (!state)
return;
@@ -1590,11 +1597,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the function is strncpy, strncat, etc... it is bounded.
if (IsBounded) {
// Get the max number of characters to copy.
- const Expr *lenExpr = CE->getArg(2);
- SVal lenVal = state->getSVal(lenExpr, LCtx);
+ SizeArgExpr lenExpr = {CE->getArg(2), 2};
+ SVal lenVal = state->getSVal(lenExpr.Expression, LCtx);
// Protect against misdeclared strncpy().
- lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
+ lenVal =
+ svalBuilder.evalCast(lenVal, sizeTy, lenExpr.Expression->getType());
Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
@@ -1837,19 +1845,17 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// record the new string length.
if (Optional<loc::MemRegionVal> dstRegVal =
DstVal.getAs<loc::MemRegionVal>()) {
- QualType ptrTy = Dst->getType();
+ QualType ptrTy = Dst.Expression->getType();
// If we have an exact value on a bounded copy, use that to check for
// overflows, rather than our estimate about how much is actually copied.
- if (boundWarning) {
- if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
- SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
- *maxLastNL, ptrTy);
- state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
- boundWarning);
- if (!state)
- return;
- }
+ if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
+ SVal maxLastElement =
+ svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal, *maxLastNL, ptrTy);
+
+ state = CheckLocation(C, state, Dst, maxLastElement, AccessKind::write);
+ if (!state)
+ return;
}
// Then, if the final length is known...
@@ -1859,9 +1865,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// ...and we haven't checked the bound, we'll check the actual copy.
if (!boundWarning) {
- const char * const warningMsg =
- "String copy function overflows destination buffer";
- state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+ state = CheckLocation(C, state, Dst, lastElement, AccessKind::write);
if (!state)
return;
}
@@ -1878,13 +1882,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dst, *dstRegVal,
- /*IsSourceBuffer*/false, nullptr);
+ state = InvalidateBuffer(C, state, Dst.Expression, *dstRegVal,
+ /*IsSourceBuffer*/ false, nullptr);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true,
- nullptr);
+ state = InvalidateBuffer(C, state, srcExpr.Expression, srcVal,
+ /*IsSourceBuffer*/ true, nullptr);
// Set the C string length of the destination, if we know it.
if (IsBounded && (appendK == ConcatFnKind::none)) {
@@ -1941,34 +1945,34 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
const LocationContext *LCtx = C.getLocationContext();
// Check that the first string is non-null
- const Expr *s1 = CE->getArg(0);
- SVal s1Val = state->getSVal(s1, LCtx);
- state = checkNonNull(C, state, s1, s1Val, 1);
+ AnyArgExpr Left = {CE->getArg(0), 0};
+ SVal LeftVal = state->getSVal(Left.Expression, LCtx);
+ state = checkNonNull(C, state, Left, LeftVal);
if (!state)
return;
// Check that the second string is non-null.
- const Expr *s2 = CE->getArg(1);
- SVal s2Val = state->getSVal(s2, LCtx);
- state = checkNonNull(C, state, s2, s2Val, 2);
+ AnyArgExpr Right = {CE->getArg(1), 1};
+ SVal RightVal = state->getSVal(Right.Expression, LCtx);
+ state = checkNonNull(C, state, Right, RightVal);
if (!state)
return;
// Get the string length of the first string or give up.
- SVal s1Length = getCStringLength(C, state, s1, s1Val);
- if (s1Length.isUndef())
+ SVal LeftLength = getCStringLength(C, state, Left.Expression, LeftVal);
+ if (LeftLength.isUndef())
return;
// Get the string length of the second string or give up.
- SVal s2Length = getCStringLength(C, state, s2, s2Val);
- if (s2Length.isUndef())
+ SVal RightLength = getCStringLength(C, state, Right.Expression, RightVal);
+ if (RightLength.isUndef())
return;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
- DefinedOrUnknownSVal LV = s1Val.castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal RV = s2Val.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal LV = LeftVal.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal RV = RightVal.castAs<DefinedOrUnknownSVal>();
// See if they are the same.
SValBuilder &svalBuilder = C.getSValBuilder();
@@ -1995,15 +1999,17 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// For now, we only do this if they're both known string literals.
// Attempt to extract string literals from both expressions.
- const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
- const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
+ const StringLiteral *LeftStrLiteral =
+ getCStringLiteral(C, state, Left.Expression, LeftVal);
+ const StringLiteral *RightStrLiteral =
+ getCStringLiteral(C, state, Right.Expression, RightVal);
bool canComputeResult = false;
SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
- if (s1StrLiteral && s2StrLiteral) {
- StringRef s1StrRef = s1StrLiteral->getString();
- StringRef s2StrRef = s2StrLiteral->getString();
+ if (LeftStrLiteral && RightStrLiteral) {
+ StringRef LeftStrRef = LeftStrLiteral->getString();
+ StringRef RightStrRef = RightStrLiteral->getString();
if (IsBounded) {
// Get the max number of characters to compare.
@@ -2013,8 +2019,8 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// If the length is known, we can get the right substrings.
if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
// Create substrings of each to compare the prefix.
- s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
- s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+ LeftStrRef = LeftStrRef.substr(0, (size_t)len->getZExtValue());
+ RightStrRef = RightStrRef.substr(0, (size_t)len->getZExtValue());
canComputeResult = true;
}
} else {
@@ -2024,17 +2030,17 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (canComputeResult) {
// Real strcmp stops at null characters.
- size_t s1Term = s1StrRef.find('\0');
+ size_t s1Term = LeftStrRef.find('\0');
if (s1Term != StringRef::npos)
- s1StrRef = s1StrRef.substr(0, s1Term);
+ LeftStrRef = LeftStrRef.substr(0, s1Term);
- size_t s2Term = s2StrRef.find('\0');
+ size_t s2Term = RightStrRef.find('\0');
if (s2Term != StringRef::npos)
- s2StrRef = s2StrRef.substr(0, s2Term);
+ RightStrRef = RightStrRef.substr(0, s2Term);
// Use StringRef's comparison methods to compute the actual result.
- int compareRes = IgnoreCase ? s1StrRef.compare_lower(s2StrRef)
- : s1StrRef.compare(s2StrRef);
+ int compareRes = IgnoreCase ? LeftStrRef.compare_lower(RightStrRef)
+ : LeftStrRef.compare(RightStrRef);
// The strcmp function returns an integer greater than, equal to, or less
// than zero, [c11, p7.24.4.2].
@@ -2064,8 +2070,9 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// char *strsep(char **stringp, const char *delim);
// Sanity: does the search string parameter match the return type?
- const Expr *SearchStrPtr = CE->getArg(0);
- QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
+ SourceArgExpr SearchStrPtr = {CE->getArg(0), 0};
+
+ QualType CharPtrTy = SearchStrPtr.Expression->getType()->getPointeeType();
if (CharPtrTy.isNull() ||
CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
return;
@@ -2076,15 +2083,15 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Check that the search string pointer is non-null (though it may point to
// a null string).
- SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
- State = checkNonNull(C, State, SearchStrPtr, SearchStrVal, 1);
+ SVal SearchStrVal = State->getSVal(SearchStrPtr.Expression, LCtx);
+ State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
if (!State)
return;
// Check that the delimiter string is non-null.
- const Expr *DelimStr = CE->getArg(1);
- SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
- State = checkNonNull(C, State, DelimStr, DelimStrVal, 2);
+ AnyArgExpr DelimStr = {CE->getArg(1), 1};
+ SVal DelimStrVal = State->getSVal(DelimStr.Expression, LCtx);
+ State = checkNonNull(C, State, DelimStr, DelimStrVal);
if (!State)
return;
@@ -2096,8 +2103,8 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
- State = InvalidateBuffer(C, State, SearchStrPtr, Result,
- /*IsSourceBuffer*/false, nullptr);
+ State = InvalidateBuffer(C, State, SearchStrPtr.Expression, Result,
+ /*IsSourceBuffer*/ false, nullptr);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
@@ -2158,65 +2165,67 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
}
void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
+ // void *memset(void *s, int c, size_t n);
CurrentFunctionDescription = "memory set function";
- const Expr *Mem = CE->getArg(0);
- const Expr *CharE = CE->getArg(1);
- const Expr *Size = CE->getArg(2);
+ DestinationArgExpr Buffer = {CE->getArg(0), 0};
+ AnyArgExpr CharE = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
ProgramStateRef State = C.getState();
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
- SVal SizeVal = State->getSVal(Size, LCtx);
- QualType SizeTy = Size->getType();
+ SVal SizeVal = C.getSVal(Size.Expression);
+ QualType SizeTy = Size.Expression->getType();
- ProgramStateRef StateZeroSize, StateNonZeroSize;
- std::tie(StateZeroSize, StateNonZeroSize) =
- assumeZero(C, State, SizeVal, SizeTy);
+ ProgramStateRef ZeroSize, NonZeroSize;
+ std::tie(ZeroSize, NonZeroSize) = assumeZero(C, State, SizeVal, SizeTy);
// Get the value of the memory area.
- SVal MemVal = State->getSVal(Mem, LCtx);
+ SVal BufferPtrVal = C.getSVal(Buffer.Expression);
// If the size is zero, there won't be any actual memory access, so
- // just bind the return value to the Mem buffer and return.
- if (StateZeroSize && !StateNonZeroSize) {
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, MemVal);
- C.addTransition(StateZeroSize);
+ // just bind the return value to the buffer and return.
+ if (ZeroSize && !NonZeroSize) {
+ ZeroSize = ZeroSize->BindExpr(CE, LCtx, BufferPtrVal);
+ C.addTransition(ZeroSize);
return;
}
// Ensure the memory area is not null.
// If it is NULL there will be a NULL pointer dereference.
- State = checkNonNull(C, StateNonZeroSize, Mem, MemVal, 1);
+ State = checkNonNull(C, NonZeroSize, Buffer, BufferPtrVal);
if (!State)
return;
- State = CheckBufferAccess(C, State, Size, Mem);
+ State = CheckBufferAccess(C, State, Buffer, Size, AccessKind::write);
if (!State)
return;
// According to the values of the arguments, bind the value of the second
// argument to the destination buffer and set string length, or just
// invalidate the destination buffer.
- if (!memsetAux(Mem, C.getSVal(CharE), Size, C, State))
+ if (!memsetAux(Buffer.Expression, C.getSVal(CharE.Expression),
+ Size.Expression, C, State))
return;
- State = State->BindExpr(CE, LCtx, MemVal);
+ State = State->BindExpr(CE, LCtx, BufferPtrVal);
C.addTransition(State);
}
void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
CurrentFunctionDescription = "memory clearance function";
- const Expr *Mem = CE->getArg(0);
- const Expr *Size = CE->getArg(1);
+ DestinationArgExpr Buffer = {CE->getArg(0), 0};
+ SizeArgExpr Size = {CE->getArg(1), 1};
SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
ProgramStateRef State = C.getState();
// See if the size argument is zero.
- SVal SizeVal = C.getSVal(Size);
- QualType SizeTy = Size->getType();
+ SVal SizeVal = C.getSVal(Size.Expression);
+ QualType SizeTy = Size.Expression->getType();
ProgramStateRef StateZeroSize, StateNonZeroSize;
std::tie(StateZeroSize, StateNonZeroSize) =
@@ -2230,19 +2239,19 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
}
// Get the value of the memory area.
- SVal MemVal = C.getSVal(Mem);
+ SVal MemVal = C.getSVal(Buffer.Expression);
// Ensure the memory area is not null.
// If it is NULL there will be a NULL pointer dereference.
- State = checkNonNull(C, StateNonZeroSize, Mem, MemVal, 1);
+ State = checkNonNull(C, StateNonZeroSize, Buffer, MemVal);
if (!State)
return;
- State = CheckBufferAccess(C, State, Size, Mem);
+ State = CheckBufferAccess(C, State, Buffer, Size, AccessKind::write);
if (!State)
return;
- if (!memsetAux(Mem, Zero, Size, C, State))
+ if (!memsetAux(Buffer.Expression, Zero, Size.Expression, C, State))
return;
C.addTransition(State);
@@ -2434,7 +2443,7 @@ void ento::registerCStringModeling(CheckerManager &Mgr) {
Mgr.registerChecker<CStringChecker>();
}
-bool ento::shouldRegisterCStringModeling(const LangOptions &LO) {
+bool ento::shouldRegisterCStringModeling(const CheckerManager &mgr) {
return true;
}
@@ -2445,7 +2454,7 @@ bool ento::shouldRegisterCStringModeling(const LangOptions &LO) {
checker->Filter.CheckName##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
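
The refactored CStringChecker hunks above pass call arguments around as small wrapper objects (AnyArgExpr, SourceArgExpr, DestinationArgExpr, SizeArgExpr) plus an AccessKind enum. Their definitions live earlier in CStringChecker.cpp and are not part of this excerpt; the sketch below shows roughly the shape the call sites above assume, not the verbatim upstream declarations.

// Sketch only: approximate shape of the helpers used by the hunks above.
namespace clang { class Expr; } // forward declaration is enough for the sketch

enum class AccessKind { write, read };

struct AnyArgExpr {
  AnyArgExpr(const clang::Expr *E, unsigned Idx)
      : Expression(E), ArgumentIndex(Idx) {}
  const clang::Expr *Expression; // the argument expression itself
  unsigned ArgumentIndex;        // zero-based position of the argument in the call
};

// Thin wrappers so that checkNonNull(), CheckBufferAccess() and CheckOverlap()
// can tell source, destination and size arguments apart at the type level and
// report the offending argument index without an extra parameter.
struct SourceArgExpr : AnyArgExpr { using AnyArgExpr::AnyArgExpr; };
struct DestinationArgExpr : AnyArgExpr { using AnyArgExpr::AnyArgExpr; };
struct SizeArgExpr : AnyArgExpr { using AnyArgExpr::AnyArgExpr; };

Both initialization styles that appear in the hunks work with this shape: DestinationArgExpr Dest = {CE->getArg(0), 0}; as well as SourceArgExpr Src(CE->getArg(0), 0);.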
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index d84fcc69a492..888724f7ea3b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -291,6 +291,6 @@ void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
mgr.registerChecker<CStringSyntaxChecker>();
}
-bool ento::shouldRegisterCStringSyntaxChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCStringSyntaxChecker(const CheckerManager &mgr) {
return true;
}
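
Every shouldRegister*() definition touched by this patch switches its parameter from const LangOptions & to const CheckerManager &, and checkers that still need language options fetch them via mgr.getLangOpts() (see the CastSizeChecker and ObjCDealloc hunks below). The declarations these definitions must match are generated in BuiltinCheckerRegistration.h, which is not shown in this excerpt; presumably they now look roughly like the following sketch (the exact macro arguments of Checkers.inc are an assumption).

// Sketch (assumption): generated registry declarations after this change.
namespace clang {
namespace ento {
class CheckerManager;

#define GET_CHECKERS
#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN)                 \
  void register##CLASS(CheckerManager &mgr);                                   \
  bool shouldRegister##CLASS(const CheckerManager &mgr);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
#undef GET_CHECKERS

} // namespace ento
} // namespace clang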
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
index 01f5b9c889e3..24776338ce10 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -53,21 +53,21 @@ void CXXSelfAssignmentChecker::checkBeginFunction(CheckerContext &C) const {
ProgramStateRef SelfAssignState = State->bindLoc(Param, ThisVal, LCtx);
const NoteTag *SelfAssignTag =
- C.getNoteTag([MD](BugReport &BR) -> std::string {
+ C.getNoteTag([MD](PathSensitiveBugReport &BR) -> std::string {
SmallString<256> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << "Assuming " << MD->getParamDecl(0)->getName() << " == *this";
- return Out.str();
+ return std::string(Out.str());
});
C.addTransition(SelfAssignState, SelfAssignTag);
ProgramStateRef NonSelfAssignState = State->bindLoc(Param, ParamVal, LCtx);
const NoteTag *NonSelfAssignTag =
- C.getNoteTag([MD](BugReport &BR) -> std::string {
+ C.getNoteTag([MD](PathSensitiveBugReport &BR) -> std::string {
SmallString<256> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << "Assuming " << MD->getParamDecl(0)->getName() << " != *this";
- return Out.str();
+ return std::string(Out.str());
});
C.addTransition(NonSelfAssignState, NonSelfAssignTag);
}
@@ -76,6 +76,6 @@ void ento::registerCXXSelfAssignmentChecker(CheckerManager &Mgr) {
Mgr.registerChecker<CXXSelfAssignmentChecker>();
}
-bool ento::shouldRegisterCXXSelfAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCXXSelfAssignmentChecker(const CheckerManager &mgr) {
return true;
}
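
Two mechanical changes in this hunk repeat elsewhere (CastValueChecker below does the same): the note-tag lambdas now take PathSensitiveBugReport & instead of the BugReport & base class, and they return std::string(Out.str()) explicitly, because llvm::raw_svector_ostream::str() yields a StringRef whose implicit conversion to std::string was being phased out upstream around this time (an assumption inferred from the pattern of the change). A minimal standalone illustration of the conversion, not taken from the patch:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

std::string buildMessage(const char *ParamName) {
  llvm::SmallString<256> Msg;
  llvm::raw_svector_ostream Out(Msg);
  Out << "Assuming " << ParamName << " == *this";
  // Out.str() is an llvm::StringRef; the std::string conversion is spelled out.
  return std::string(Out.str());
}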
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 2fcb765cd4ee..3e46e2372516 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -21,6 +22,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -29,11 +31,8 @@ using namespace ento;
namespace {
class CallAndMessageChecker
- : public Checker< check::PreStmt<CallExpr>,
- check::PreStmt<CXXDeleteExpr>,
- check::PreObjCMessage,
- check::ObjCMessageNil,
- check::PreCall > {
+ : public Checker<check::PreObjCMessage, check::ObjCMessageNil,
+ check::PreCall> {
mutable std::unique_ptr<BugType> BT_call_null;
mutable std::unique_ptr<BugType> BT_call_undef;
mutable std::unique_ptr<BugType> BT_cxx_call_null;
@@ -48,11 +47,37 @@ class CallAndMessageChecker
mutable std::unique_ptr<BugType> BT_call_few_args;
public:
- DefaultBool Check_CallAndMessageUnInitRefArg;
- CheckerNameRef CheckName_CallAndMessageUnInitRefArg;
+ // These correspond with the checker options. Looking at other checkers such
+ // as MallocChecker and CStringChecker, this is similar to how they pull
+ // off having a modeling class, but emitting diagnostics under a smaller
+ // checker's name that can be safely disabled without disturbing the
+ // underlying modeling engine.
+ // The reason behind having *checker options* rather than actual *checkers*
+ // here is that CallAndMessage is among the oldest checkers out there, and can
+ // be responsible for the majority of the reports on any given project. This
+ // is obviously not ideal, but changing a checker's name has the consequence of
+ // changing the issue hashes associated with the reports, and databases
+ // relying on this (CodeChecker, for instance) would suffer greatly.
+ // If we ever end up making changes to the issue hash generation algorithm, or
+ // the warning messages here, we should totally jump on the opportunity to
+ // convert these to actual checkers.
+ enum CheckKind {
+ CK_FunctionPointer,
+ CK_ParameterCount,
+ CK_CXXThisMethodCall,
+ CK_CXXDeallocationArg,
+ CK_ArgInitializedness,
+ CK_ArgPointeeInitializedness,
+ CK_NilReceiver,
+ CK_UndefReceiver,
+ CK_NumCheckKinds
+ };
+
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ // The original core.CallAndMessage checker name. This should rather be an
+ // array, as seen in MallocChecker and CStringChecker.
+ CheckerNameRef OriginalName;
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
/// Fill in the return value that results from messaging nil based on the
@@ -62,6 +87,25 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef checkFunctionPointerCall(const CallExpr *CE,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkCXXMethodCall(const CXXInstanceCall *CC,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkParameterCount(const CallEvent &Call, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkCXXDeallocation(const CXXDeallocatorCall *DC,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkArgInitializedness(const CallEvent &Call,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
private:
bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange ArgRange,
const Expr *ArgEx, int ArgumentNumber,
@@ -79,7 +123,7 @@ private:
void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
if (!BT)
- BT.reset(new BuiltinBug(this, desc));
+ BT.reset(new BuiltinBug(OriginalName, desc));
}
bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
SourceRange ArgRange, const Expr *ArgEx,
@@ -144,7 +188,10 @@ bool CallAndMessageChecker::uninitRefOrPointer(
CheckerContext &C, const SVal &V, SourceRange ArgRange, const Expr *ArgEx,
std::unique_ptr<BugType> &BT, const ParmVarDecl *ParamDecl, const char *BD,
int ArgumentNumber) const {
- if (!Check_CallAndMessageUnInitRefArg)
+
+ // The pointee being uninitialized is a code smell, not a bug, so there is
+ // no need to sink here.
+ if (!ChecksEnabled[CK_ArgPointeeInitializedness])
return false;
// No parameter declaration available, i.e. variadic function argument.
@@ -246,6 +293,10 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return true;
if (V.isUndef()) {
+ if (!ChecksEnabled[CK_ArgInitializedness]) {
+ C.addSink();
+ return true;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
// Generate a report for this bug.
@@ -272,6 +323,10 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
D->getStore());
if (F.Find(D->getRegion())) {
+ if (!ChecksEnabled[CK_ArgInitializedness]) {
+ C.addSink();
+ return true;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
SmallString<512> Str;
@@ -311,126 +366,158 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return false;
}
-void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
- CheckerContext &C) const{
+ProgramStateRef CallAndMessageChecker::checkFunctionPointerCall(
+ const CallExpr *CE, CheckerContext &C, ProgramStateRef State) const {
const Expr *Callee = CE->getCallee()->IgnoreParens();
- ProgramStateRef State = C.getState();
const LocationContext *LCtx = C.getLocationContext();
SVal L = State->getSVal(Callee, LCtx);
if (L.isUndef()) {
+ if (!ChecksEnabled[CK_FunctionPointer]) {
+ C.addSink(State);
+ return nullptr;
+ }
if (!BT_call_undef)
BT_call_undef.reset(new BuiltinBug(
- this, "Called function pointer is an uninitialized pointer value"));
+ OriginalName,
+ "Called function pointer is an uninitialized pointer value"));
emitBadCall(BT_call_undef.get(), C, Callee);
- return;
+ return nullptr;
}
ProgramStateRef StNonNull, StNull;
std::tie(StNonNull, StNull) = State->assume(L.castAs<DefinedOrUnknownSVal>());
if (StNull && !StNonNull) {
+ if (!ChecksEnabled[CK_FunctionPointer]) {
+ C.addSink(StNull);
+ return nullptr;
+ }
if (!BT_call_null)
BT_call_null.reset(new BuiltinBug(
- this, "Called function pointer is null (null dereference)"));
+ OriginalName, "Called function pointer is null (null dereference)"));
emitBadCall(BT_call_null.get(), C, Callee);
- return;
+ return nullptr;
}
- C.addTransition(StNonNull);
+ return StNonNull;
}
-void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
- CheckerContext &C) const {
+ProgramStateRef CallAndMessageChecker::checkParameterCount(
+ const CallEvent &Call, CheckerContext &C, ProgramStateRef State) const {
- SVal Arg = C.getSVal(DE->getArgument());
- if (Arg.isUndef()) {
- StringRef Desc;
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- if (!BT_cxx_delete_undef)
- BT_cxx_delete_undef.reset(
- new BuiltinBug(this, "Uninitialized argument value"));
- if (DE->isArrayFormAsWritten())
- Desc = "Argument to 'delete[]' is uninitialized";
- else
- Desc = "Argument to 'delete' is uninitialized";
- BugType *BT = BT_cxx_delete_undef.get();
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Desc, N);
- bugreporter::trackExpressionValue(N, DE, *R);
- C.emitReport(std::move(R));
- return;
+ // If we have a function or block declaration, we can make sure we pass
+ // enough parameters.
+ unsigned Params = Call.parameters().size();
+ if (Call.getNumArgs() >= Params)
+ return State;
+
+ if (!ChecksEnabled[CK_ParameterCount]) {
+ C.addSink(State);
+ return nullptr;
+ }
+
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return nullptr;
+
+ LazyInit_BT("Function call with too few arguments", BT_call_few_args);
+
+ SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ if (isa<AnyFunctionCall>(Call)) {
+ os << "Function ";
+ } else {
+ assert(isa<BlockCall>(Call));
+ os << "Block ";
}
+ os << "taking " << Params << " argument" << (Params == 1 ? "" : "s")
+ << " is called with fewer (" << Call.getNumArgs() << ")";
+
+ C.emitReport(
+ std::make_unique<PathSensitiveBugReport>(*BT_call_few_args, os.str(), N));
+ return nullptr;
}
-void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
+ProgramStateRef CallAndMessageChecker::checkCXXMethodCall(
+ const CXXInstanceCall *CC, CheckerContext &C, ProgramStateRef State) const {
- // If this is a call to a C++ method, check if the callee is null or
- // undefined.
- if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
- SVal V = CC->getCXXThisVal();
- if (V.isUndef()) {
- if (!BT_cxx_call_undef)
- BT_cxx_call_undef.reset(
- new BuiltinBug(this, "Called C++ object pointer is uninitialized"));
- emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
- return;
+ SVal V = CC->getCXXThisVal();
+ if (V.isUndef()) {
+ if (!ChecksEnabled[CK_CXXThisMethodCall]) {
+ C.addSink(State);
+ return nullptr;
}
+ if (!BT_cxx_call_undef)
+ BT_cxx_call_undef.reset(new BuiltinBug(
+ OriginalName, "Called C++ object pointer is uninitialized"));
+ emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
+ return nullptr;
+ }
- ProgramStateRef StNonNull, StNull;
- std::tie(StNonNull, StNull) =
- State->assume(V.castAs<DefinedOrUnknownSVal>());
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) = State->assume(V.castAs<DefinedOrUnknownSVal>());
- if (StNull && !StNonNull) {
- if (!BT_cxx_call_null)
- BT_cxx_call_null.reset(
- new BuiltinBug(this, "Called C++ object pointer is null"));
- emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
- return;
+ if (StNull && !StNonNull) {
+ if (!ChecksEnabled[CK_CXXThisMethodCall]) {
+ C.addSink(StNull);
+ return nullptr;
}
-
- State = StNonNull;
+ if (!BT_cxx_call_null)
+ BT_cxx_call_null.reset(
+ new BuiltinBug(OriginalName, "Called C++ object pointer is null"));
+ emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
+ return nullptr;
}
- const Decl *D = Call.getDecl();
- if (D && (isa<FunctionDecl>(D) || isa<BlockDecl>(D))) {
- // If we have a function or block declaration, we can make sure we pass
- // enough parameters.
- unsigned Params = Call.parameters().size();
- if (Call.getNumArgs() < Params) {
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
-
- LazyInit_BT("Function call with too few arguments", BT_call_few_args);
-
- SmallString<512> Str;
- llvm::raw_svector_ostream os(Str);
- if (isa<FunctionDecl>(D)) {
- os << "Function ";
- } else {
- assert(isa<BlockDecl>(D));
- os << "Block ";
- }
- os << "taking " << Params << " argument"
- << (Params == 1 ? "" : "s") << " is called with fewer ("
- << Call.getNumArgs() << ")";
+ return StNonNull;
+}
- C.emitReport(std::make_unique<PathSensitiveBugReport>(*BT_call_few_args,
- os.str(), N));
- }
+ProgramStateRef
+CallAndMessageChecker::checkCXXDeallocation(const CXXDeallocatorCall *DC,
+ CheckerContext &C,
+ ProgramStateRef State) const {
+ const CXXDeleteExpr *DE = DC->getOriginExpr();
+ assert(DE);
+ SVal Arg = C.getSVal(DE->getArgument());
+ if (!Arg.isUndef())
+ return State;
+
+ if (!ChecksEnabled[CK_CXXDeallocationArg]) {
+ C.addSink(State);
+ return nullptr;
}
+ StringRef Desc;
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return nullptr;
+ if (!BT_cxx_delete_undef)
+ BT_cxx_delete_undef.reset(
+ new BuiltinBug(OriginalName, "Uninitialized argument value"));
+ if (DE->isArrayFormAsWritten())
+ Desc = "Argument to 'delete[]' is uninitialized";
+ else
+ Desc = "Argument to 'delete' is uninitialized";
+ BugType *BT = BT_cxx_delete_undef.get();
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT, Desc, N);
+ bugreporter::trackExpressionValue(N, DE, *R);
+ C.emitReport(std::move(R));
+ return nullptr;
+}
+
+ProgramStateRef CallAndMessageChecker::checkArgInitializedness(
+ const CallEvent &Call, CheckerContext &C, ProgramStateRef State) const {
+
+ const Decl *D = Call.getDecl();
+
// Don't check for uninitialized field values in arguments if the
// caller has a body that is available and we have the chance to inline it.
// This is a hack, but it is a reasonable compromise between sometimes warning
// and sometimes not depending on if we decide to inline a function.
const bool checkUninitFields =
- !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
+ !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
std::unique_ptr<BugType> *BT;
if (isa<ObjCMethodCall>(Call))
@@ -441,13 +528,45 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
const ParmVarDecl *ParamDecl = nullptr;
- if(FD && i < FD->getNumParams())
+ if (FD && i < FD->getNumParams())
ParamDecl = FD->getParamDecl(i);
if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
- Call.getArgExpr(i), i,
- checkUninitFields, Call, *BT, ParamDecl))
- return;
+ Call.getArgExpr(i), i, checkUninitFields, Call, *BT,
+ ParamDecl))
+ return nullptr;
}
+ return State;
+}
+
+void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()))
+ State = checkFunctionPointerCall(CE, C, State);
+
+ if (!State)
+ return;
+
+ if (Call.getDecl())
+ State = checkParameterCount(Call, C, State);
+
+ if (!State)
+ return;
+
+ if (const auto *CC = dyn_cast<CXXInstanceCall>(&Call))
+ State = checkCXXMethodCall(CC, C, State);
+
+ if (!State)
+ return;
+
+ if (const auto *DC = dyn_cast<CXXDeallocatorCall>(&Call))
+ State = checkCXXDeallocation(DC, C, State);
+
+ if (!State)
+ return;
+
+ State = checkArgInitializedness(Call, C, State);
// If we make it here, record our assumptions about the callee.
C.addTransition(State);
@@ -457,12 +576,16 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
SVal recVal = msg.getReceiverSVal();
if (recVal.isUndef()) {
+ if (!ChecksEnabled[CK_UndefReceiver]) {
+ C.addSink();
+ return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
BugType *BT = nullptr;
switch (msg.getMessageKind()) {
case OCM_Message:
if (!BT_msg_undef)
- BT_msg_undef.reset(new BuiltinBug(this,
+ BT_msg_undef.reset(new BuiltinBug(OriginalName,
"Receiver in message expression "
"is an uninitialized value"));
BT = BT_msg_undef.get();
@@ -470,13 +593,15 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
case OCM_PropertyAccess:
if (!BT_objc_prop_undef)
BT_objc_prop_undef.reset(new BuiltinBug(
- this, "Property access on an uninitialized object pointer"));
+ OriginalName,
+ "Property access on an uninitialized object pointer"));
BT = BT_objc_prop_undef.get();
break;
case OCM_Subscript:
if (!BT_objc_subscript_undef)
BT_objc_subscript_undef.reset(new BuiltinBug(
- this, "Subscript access on an uninitialized object pointer"));
+ OriginalName,
+ "Subscript access on an uninitialized object pointer"));
BT = BT_objc_subscript_undef.get();
break;
}
@@ -503,10 +628,14 @@ void CallAndMessageChecker::checkObjCMessageNil(const ObjCMethodCall &msg,
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
const ObjCMethodCall &msg,
ExplodedNode *N) const {
+ if (!ChecksEnabled[CK_NilReceiver]) {
+ C.addSink();
+ return;
+ }
if (!BT_msg_ret)
- BT_msg_ret.reset(
- new BuiltinBug(this, "Receiver in message expression is 'nil'"));
+ BT_msg_ret.reset(new BuiltinBug(OriginalName,
+ "Receiver in message expression is 'nil'"));
const ObjCMessageExpr *ME = msg.getOriginExpr();
@@ -601,20 +730,34 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
C.addTransition(state);
}
-void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+void ento::registerCallAndMessageModeling(CheckerManager &mgr) {
mgr.registerChecker<CallAndMessageChecker>();
}
-bool ento::shouldRegisterCallAndMessageChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCallAndMessageModeling(const CheckerManager &mgr) {
return true;
}
-void ento::registerCallAndMessageUnInitRefArg(CheckerManager &mgr) {
- CallAndMessageChecker *Checker = mgr.getChecker<CallAndMessageChecker>();
- Checker->Check_CallAndMessageUnInitRefArg = true;
- Checker->CheckName_CallAndMessageUnInitRefArg = mgr.getCurrentCheckerName();
+void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+ CallAndMessageChecker *checker = mgr.getChecker<CallAndMessageChecker>();
+
+ checker->OriginalName = mgr.getCurrentCheckerName();
+
+#define QUERY_CHECKER_OPTION(OPTION) \
+ checker->ChecksEnabled[CallAndMessageChecker::CK_##OPTION] = \
+ mgr.getAnalyzerOptions().getCheckerBooleanOption( \
+ mgr.getCurrentCheckerName(), #OPTION);
+
+ QUERY_CHECKER_OPTION(FunctionPointer)
+ QUERY_CHECKER_OPTION(ParameterCount)
+ QUERY_CHECKER_OPTION(CXXThisMethodCall)
+ QUERY_CHECKER_OPTION(CXXDeallocationArg)
+ QUERY_CHECKER_OPTION(ArgInitializedness)
+ QUERY_CHECKER_OPTION(ArgPointeeInitializedness)
+ QUERY_CHECKER_OPTION(NilReceiver)
+ QUERY_CHECKER_OPTION(UndefReceiver)
}
-bool ento::shouldRegisterCallAndMessageUnInitRefArg(const LangOptions &LO) {
+bool ento::shouldRegisterCallAndMessageChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 51c1d4409929..a498f252e693 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -10,12 +10,14 @@
// whether the size of the symbolic region is a multiple of the size of T.
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace ento;
@@ -109,12 +111,13 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
return;
SValBuilder &svalBuilder = C.getSValBuilder();
- SVal extent = SR->getExtent(svalBuilder);
- const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
- if (!extentInt)
+
+ DefinedOrUnknownSVal Size = getDynamicSize(state, SR, svalBuilder);
+ const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
+ if (!SizeInt)
return;
- CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+ CharUnits regionSize = CharUnits::fromQuantity(SizeInt->getZExtValue());
CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
// Ignore void, and a few other un-sizeable types.
@@ -143,10 +146,11 @@ void ento::registerCastSizeChecker(CheckerManager &mgr) {
mgr.registerChecker<CastSizeChecker>();
}
-bool ento::shouldRegisterCastSizeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastSizeChecker(const CheckerManager &mgr) {
// PR31226: C++ is more complicated than what this checker currently supports.
// There are derived-to-base casts, there are different rules for 0-size
// structures, no flexible arrays, etc.
// FIXME: Disabled on C++ for now.
+ const LangOptions &LO = mgr.getLangOpts();
return !LO.CPlusPlus;
}
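
The CStringChecker and CastSizeChecker hunks above, and the PlacementNewChecker hunk further down, all drop SubRegion::getExtent() in favour of the dynamic-size helpers from the newly included DynamicSize.h. That header is not part of this excerpt; the entry points the call sites rely on presumably look roughly like the sketch below.

// Sketch (assumption): the DynamicSize.h helpers used by these checkers.
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"

namespace clang {
namespace ento {

// The size, in bytes, currently associated with the memory region MR in State.
DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
                                    SValBuilder &SVB);

// The size of the buffer that BufV points into, minus BufV's offset inside it
// (what PlacementNewChecker::getExtentSizeOfPlace() below returns).
SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV);

} // namespace ento
} // namespace clang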
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 93665596be29..e674ec43bcd9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -120,6 +120,6 @@ void ento::registerCastToStructChecker(CheckerManager &mgr) {
mgr.registerChecker<CastToStructChecker>();
}
-bool ento::shouldRegisterCastToStructChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastToStructChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index cc1c9a66b90e..1ef70b650414 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -30,7 +30,7 @@ using namespace clang;
using namespace ento;
namespace {
-class CastValueChecker : public Checker<eval::Call> {
+class CastValueChecker : public Checker<check::DeadSymbols, eval::Call> {
enum class CallKind { Function, Method, InstanceOf };
using CastCheck =
@@ -51,6 +51,7 @@ public:
// 1) isa: The parameter is non-null, returns boolean.
// 2) isa_and_nonnull: The parameter is null or non-null, returns boolean.
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
private:
// These are known in the LLVM project. The pairs are in the following form:
@@ -129,7 +130,7 @@ static const NoteTag *getNoteTag(CheckerContext &C,
Out << ' ' << (CastSucceeds ? "is a" : "is not a") << " '" << CastToName
<< '\'';
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/true);
}
@@ -432,10 +433,15 @@ bool CastValueChecker::evalCall(const CallEvent &Call,
return true;
}
+void CastValueChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ C.addTransition(removeDeadCasts(C.getState(), SR));
+}
+
void ento::registerCastValueChecker(CheckerManager &Mgr) {
Mgr.registerChecker<CastValueChecker>();
}
-bool ento::shouldRegisterCastValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 50b872bd8682..13836f08a61e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -1088,7 +1088,8 @@ void ento::registerObjCDeallocChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCDeallocChecker>();
}
-bool ento::shouldRegisterObjCDeallocChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCDeallocChecker(const CheckerManager &mgr) {
// This checker only makes sense under MRR.
+ const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::GCOnly && !LO.ObjCAutoRefCount;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index 1694c237cda4..175dfcef0df4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -138,6 +138,6 @@ void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCMethSigsChecker>();
}
-bool ento::shouldRegisterObjCMethSigsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCMethSigsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
index 48fee4a0ffb7..dc9cd717be9e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
@@ -1,6 +1,19 @@
+//==- CheckPlacementNew.cpp - Check for placement new operation --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check for misuse of the default placement new operator.
+//
+//===----------------------------------------------------------------------===//
+
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/Support/FormatVariadic.h"
using namespace clang;
@@ -12,51 +25,59 @@ public:
void checkPreStmt(const CXXNewExpr *NE, CheckerContext &C) const;
private:
+ bool checkPlaceCapacityIsSufficient(const CXXNewExpr *NE,
+ CheckerContext &C) const;
+
+ bool checkPlaceIsAlignedProperly(const CXXNewExpr *NE,
+ CheckerContext &C) const;
+
// Returns the size of the target in a placement new expression.
// E.g. in "new (&s) long" it returns the size of `long`.
- SVal getExtentSizeOfNewTarget(const CXXNewExpr *NE, ProgramStateRef State,
- CheckerContext &C) const;
+ SVal getExtentSizeOfNewTarget(const CXXNewExpr *NE, CheckerContext &C,
+ bool &IsArray) const;
// Returns the size of the place in a placement new expression.
// E.g. in "new (&s) long" it returns the size of `s`.
- SVal getExtentSizeOfPlace(const Expr *NE, ProgramStateRef State,
- CheckerContext &C) const;
- BugType BT{this, "Insufficient storage for placement new",
- categories::MemoryError};
+ SVal getExtentSizeOfPlace(const CXXNewExpr *NE, CheckerContext &C) const;
+
+ void emitBadAlignReport(const Expr *P, CheckerContext &C,
+ unsigned AllocatedTAlign,
+ unsigned StorageTAlign) const;
+ unsigned getStorageAlign(CheckerContext &C, const ValueDecl *VD) const;
+
+ void checkElementRegionAlign(const ElementRegion *R, CheckerContext &C,
+ const Expr *P, unsigned AllocatedTAlign) const;
+
+ void checkFieldRegionAlign(const FieldRegion *R, CheckerContext &C,
+ const Expr *P, unsigned AllocatedTAlign) const;
+
+ bool isVarRegionAlignedProperly(const VarRegion *R, CheckerContext &C,
+ const Expr *P,
+ unsigned AllocatedTAlign) const;
+
+ BugType SBT{this, "Insufficient storage for placement new",
+ categories::MemoryError};
+ BugType ABT{this, "Bad align storage for placement new",
+ categories::MemoryError};
};
} // namespace
-SVal PlacementNewChecker::getExtentSizeOfPlace(const Expr *Place,
- ProgramStateRef State,
+SVal PlacementNewChecker::getExtentSizeOfPlace(const CXXNewExpr *NE,
CheckerContext &C) const {
- const MemRegion *MRegion = C.getSVal(Place).getAsRegion();
- if (!MRegion)
- return UnknownVal();
- RegionOffset Offset = MRegion->getAsOffset();
- if (Offset.hasSymbolicOffset())
- return UnknownVal();
- const MemRegion *BaseRegion = MRegion->getBaseRegion();
- if (!BaseRegion)
- return UnknownVal();
-
- SValBuilder &SvalBuilder = C.getSValBuilder();
- NonLoc OffsetInBytes = SvalBuilder.makeArrayIndex(
- Offset.getOffset() / C.getASTContext().getCharWidth());
- DefinedOrUnknownSVal ExtentInBytes =
- BaseRegion->castAs<SubRegion>()->getExtent(SvalBuilder);
-
- return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
- ExtentInBytes, OffsetInBytes,
- SvalBuilder.getArrayIndexType());
+ const Expr *Place = NE->getPlacementArg(0);
+ return getDynamicSizeWithOffset(C.getState(), C.getSVal(Place));
}
SVal PlacementNewChecker::getExtentSizeOfNewTarget(const CXXNewExpr *NE,
- ProgramStateRef State,
- CheckerContext &C) const {
+ CheckerContext &C,
+ bool &IsArray) const {
+ ProgramStateRef State = C.getState();
SValBuilder &SvalBuilder = C.getSValBuilder();
QualType ElementType = NE->getAllocatedType();
ASTContext &AstContext = C.getASTContext();
CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
+ IsArray = false;
if (NE->isArray()) {
+ IsArray = true;
const Expr *SizeExpr = *NE->getArraySize();
SVal ElementCount = C.getSVal(SizeExpr);
if (auto ElementCountNL = ElementCount.getAs<NonLoc>()) {
@@ -78,44 +99,218 @@ SVal PlacementNewChecker::getExtentSizeOfNewTarget(const CXXNewExpr *NE,
return UnknownVal();
}
-void PlacementNewChecker::checkPreStmt(const CXXNewExpr *NE,
- CheckerContext &C) const {
- // Check only the default placement new.
- if (!NE->getOperatorNew()->isReservedGlobalPlacementOperator())
- return;
- if (NE->getNumPlacementArgs() == 0)
- return;
-
- ProgramStateRef State = C.getState();
- SVal SizeOfTarget = getExtentSizeOfNewTarget(NE, State, C);
- const Expr *Place = NE->getPlacementArg(0);
- SVal SizeOfPlace = getExtentSizeOfPlace(Place, State, C);
+bool PlacementNewChecker::checkPlaceCapacityIsSufficient(
+ const CXXNewExpr *NE, CheckerContext &C) const {
+ bool IsArrayTypeAllocated;
+ SVal SizeOfTarget = getExtentSizeOfNewTarget(NE, C, IsArrayTypeAllocated);
+ SVal SizeOfPlace = getExtentSizeOfPlace(NE, C);
const auto SizeOfTargetCI = SizeOfTarget.getAs<nonloc::ConcreteInt>();
if (!SizeOfTargetCI)
- return;
+ return true;
const auto SizeOfPlaceCI = SizeOfPlace.getAs<nonloc::ConcreteInt>();
if (!SizeOfPlaceCI)
- return;
+ return true;
- if (SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) {
- if (ExplodedNode *N = C.generateErrorNode(State)) {
- std::string Msg =
- llvm::formatv("Storage provided to placement new is only {0} bytes, "
- "whereas the allocated type requires {1} bytes",
- SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue());
+ if ((SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) ||
+ (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() >= SizeOfTargetCI->getValue())) {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ std::string Msg;
+ // TODO: use clang constant
+ if (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() > SizeOfTargetCI->getValue())
+ Msg = std::string(llvm::formatv(
+ "{0} bytes is possibly not enough for array allocation which "
+ "requires {1} bytes. Current overhead requires the size of {2} "
+ "bytes",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue(),
+ SizeOfPlaceCI->getValue() - SizeOfTargetCI->getValue()));
+ else if (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() == SizeOfTargetCI->getValue())
+ Msg = std::string(llvm::formatv(
+ "Storage provided to placement new is only {0} bytes, "
+ "whereas the allocated array type requires more space for "
+ "internal needs",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue()));
+ else
+ Msg = std::string(llvm::formatv(
+ "Storage provided to placement new is only {0} bytes, "
+ "whereas the allocated type requires {1} bytes",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue()));
- auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
- bugreporter::trackExpressionValue(N, Place, *R);
+ auto R = std::make_unique<PathSensitiveBugReport>(SBT, Msg, N);
+ bugreporter::trackExpressionValue(N, NE->getPlacementArg(0), *R);
C.emitReport(std::move(R));
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void PlacementNewChecker::emitBadAlignReport(const Expr *P, CheckerContext &C,
+ unsigned AllocatedTAlign,
+ unsigned StorageTAlign) const {
+ ProgramStateRef State = C.getState();
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ std::string Msg(llvm::formatv("Storage type is aligned to {0} bytes but "
+ "allocated type is aligned to {1} bytes",
+ StorageTAlign, AllocatedTAlign));
+
+ auto R = std::make_unique<PathSensitiveBugReport>(ABT, Msg, N);
+ bugreporter::trackExpressionValue(N, P, *R);
+ C.emitReport(std::move(R));
+ }
+}
+
+unsigned PlacementNewChecker::getStorageAlign(CheckerContext &C,
+ const ValueDecl *VD) const {
+ unsigned StorageTAlign = C.getASTContext().getTypeAlign(VD->getType());
+ if (unsigned SpecifiedAlignment = VD->getMaxAlignment())
+ StorageTAlign = SpecifiedAlignment;
+
+ return StorageTAlign / C.getASTContext().getCharWidth();
+}
+
+void PlacementNewChecker::checkElementRegionAlign(
+ const ElementRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ auto IsBaseRegionAlignedProperly = [this, R, &C, P,
+ AllocatedTAlign]() -> bool {
+    // Unwind nested ElementRegions to get the type.
+ const MemRegion *SuperRegion = R;
+ while (true) {
+ if (SuperRegion->getKind() == MemRegion::ElementRegionKind) {
+ SuperRegion = cast<SubRegion>(SuperRegion)->getSuperRegion();
+ continue;
+ }
+
+ break;
+ }
+
+ const DeclRegion *TheElementDeclRegion = SuperRegion->getAs<DeclRegion>();
+ if (!TheElementDeclRegion)
+ return false;
+
+ const DeclRegion *BaseDeclRegion = R->getBaseRegion()->getAs<DeclRegion>();
+ if (!BaseDeclRegion)
+ return false;
+
+ unsigned BaseRegionAlign = 0;
+    // We must use the alignment of TheElementDeclRegion if it has its own
+    // alignment specifier.
+ if (TheElementDeclRegion->getDecl()->getMaxAlignment())
+ BaseRegionAlign = getStorageAlign(C, TheElementDeclRegion->getDecl());
+ else
+ BaseRegionAlign = getStorageAlign(C, BaseDeclRegion->getDecl());
+
+ if (AllocatedTAlign > BaseRegionAlign) {
+ emitBadAlignReport(P, C, AllocatedTAlign, BaseRegionAlign);
+ return false;
+ }
+
+ return true;
+ };
+
+ auto CheckElementRegionOffset = [this, R, &C, P, AllocatedTAlign]() -> void {
+ RegionOffset TheOffsetRegion = R->getAsOffset();
+ if (TheOffsetRegion.hasSymbolicOffset())
return;
+
+ unsigned Offset =
+ TheOffsetRegion.getOffset() / C.getASTContext().getCharWidth();
+ unsigned AddressAlign = Offset % AllocatedTAlign;
+ if (AddressAlign != 0) {
+ emitBadAlignReport(P, C, AllocatedTAlign, AddressAlign);
+ return;
+ }
+ };
+
+ if (IsBaseRegionAlignedProperly()) {
+ CheckElementRegionOffset();
+ }
+}
+
+void PlacementNewChecker::checkFieldRegionAlign(
+ const FieldRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ const MemRegion *BaseRegion = R->getBaseRegion();
+ if (!BaseRegion)
+ return;
+
+ if (const VarRegion *TheVarRegion = BaseRegion->getAs<VarRegion>()) {
+ if (isVarRegionAlignedProperly(TheVarRegion, C, P, AllocatedTAlign)) {
+      // We've checked the type's alignment but, unless the FieldRegion's
+      // offset is zero, we also need to check the field's own alignment.
+ RegionOffset Offset = R->getAsOffset();
+ if (Offset.hasSymbolicOffset())
+ return;
+
+ int64_t OffsetValue =
+ Offset.getOffset() / C.getASTContext().getCharWidth();
+ unsigned AddressAlign = OffsetValue % AllocatedTAlign;
+ if (AddressAlign != 0)
+ emitBadAlignReport(P, C, AllocatedTAlign, AddressAlign);
}
}
}
+bool PlacementNewChecker::isVarRegionAlignedProperly(
+ const VarRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ const VarDecl *TheVarDecl = R->getDecl();
+ unsigned StorageTAlign = getStorageAlign(C, TheVarDecl);
+ if (AllocatedTAlign > StorageTAlign) {
+ emitBadAlignReport(P, C, AllocatedTAlign, StorageTAlign);
+
+ return false;
+ }
+
+ return true;
+}
+
+bool PlacementNewChecker::checkPlaceIsAlignedProperly(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+ const Expr *Place = NE->getPlacementArg(0);
+
+ QualType AllocatedT = NE->getAllocatedType();
+ unsigned AllocatedTAlign = C.getASTContext().getTypeAlign(AllocatedT) /
+ C.getASTContext().getCharWidth();
+
+ SVal PlaceVal = C.getSVal(Place);
+ if (const MemRegion *MRegion = PlaceVal.getAsRegion()) {
+ if (const ElementRegion *TheElementRegion = MRegion->getAs<ElementRegion>())
+ checkElementRegionAlign(TheElementRegion, C, Place, AllocatedTAlign);
+ else if (const FieldRegion *TheFieldRegion = MRegion->getAs<FieldRegion>())
+ checkFieldRegionAlign(TheFieldRegion, C, Place, AllocatedTAlign);
+ else if (const VarRegion *TheVarRegion = MRegion->getAs<VarRegion>())
+ isVarRegionAlignedProperly(TheVarRegion, C, Place, AllocatedTAlign);
+ }
+
+ return true;
+}
+
+void PlacementNewChecker::checkPreStmt(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+ // Check only the default placement new.
+ if (!NE->getOperatorNew()->isReservedGlobalPlacementOperator())
+ return;
+
+ if (NE->getNumPlacementArgs() == 0)
+ return;
+
+ if (!checkPlaceCapacityIsSufficient(NE, C))
+ return;
+
+ checkPlaceIsAlignedProperly(NE, C);
+}
+
void ento::registerPlacementNewChecker(CheckerManager &mgr) {
mgr.registerChecker<PlacementNewChecker>();
}
-bool ento::shouldRegisterPlacementNewChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPlacementNewChecker(const CheckerManager &mgr) {
return true;
}
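
For reference (not part of the patch), a minimal sketch of the two kinds of code the
extended checker now diagnoses, assuming a typical 64-bit target where long is 8 bytes
and a plain char array has an alignment of 1:

#include <new>

struct alignas(32) Vec { float f[8]; };

void placement_new_misuse() {
  short buf;                      // 2 bytes of storage, 2-byte aligned
  long *lp = ::new (&buf) long;   // capacity report: long needs 8 bytes
  char raw[sizeof(Vec)];          // enough bytes, but only 1-byte aligned
  Vec *vp = ::new (&raw) Vec;     // alignment report: Vec requires 32-byte alignment
  (void)lp;
  (void)vp;
}
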
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index d9ffa562c0aa..d06c87631bfb 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -1076,7 +1076,7 @@ void ento::registerSecuritySyntaxChecker(CheckerManager &mgr) {
mgr.registerChecker<SecuritySyntaxChecker>();
}
-bool ento::shouldRegisterSecuritySyntaxChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSecuritySyntaxChecker(const CheckerManager &mgr) {
return true;
}
@@ -1087,7 +1087,7 @@ bool ento::shouldRegisterSecuritySyntaxChecker(const LangOptions &LO) {
checker->filter.checkName_##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(bcmp)
REGISTER_CHECKER(bcopy)
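
The same mechanical change - shouldRegister* now receives the CheckerManager rather
than the LangOptions - repeats across the checkers below. As a sketch under stated
assumptions (MyChecker and its Checkers.td entry, which would generate the
declarations in BuiltinCheckerRegistration.h, are hypothetical), a registration pair
under the new interface looks like this:

#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

namespace {
class MyChecker : public Checker<check::PreStmt<CXXNewExpr>> {
public:
  void checkPreStmt(const CXXNewExpr *NE, CheckerContext &C) const {}
};
} // namespace

void ento::registerMyChecker(CheckerManager &mgr) {
  mgr.registerChecker<MyChecker>();
}

bool ento::shouldRegisterMyChecker(const CheckerManager &mgr) {
  // The LangOptions are still reachable through the manager; analyzer options
  // and the ASTContext become reachable as well.
  return mgr.getLangOpts().CPlusPlus;
}
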
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index ec401cfa8985..0d2551f11583 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -91,6 +91,6 @@ void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
mgr.registerChecker<SizeofPointerChecker>();
}
-bool ento::shouldRegisterSizeofPointerChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSizeofPointerChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 7a41a7b6b216..fd53c04f4bbf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -136,6 +136,6 @@ void ento::registerChrootChecker(CheckerManager &mgr) {
mgr.registerChecker<ChrootChecker>();
}
-bool ento::shouldRegisterChrootChecker(const LangOptions &LO) {
+bool ento::shouldRegisterChrootChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index ce45b5be34c9..7968aed85e1b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -208,6 +208,6 @@ void ento::registerCloneChecker(CheckerManager &Mgr) {
.getCheckerStringOption(Checker, "IgnoredFilesPattern");
}
-bool ento::shouldRegisterCloneChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCloneChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
new file mode 100644
index 000000000000..73c6517fd0eb
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -0,0 +1,1083 @@
+//===-- ContainerModeling.cpp -------------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a modeling checker for STL container-like containers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+
+#include "Iterator.h"
+
+#include <utility>
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class ContainerModeling
+ : public Checker<check::PostCall, check::LiveSymbols, check::DeadSymbols> {
+
+ void handleBegin(CheckerContext &C, const Expr *CE, SVal RetVal,
+ SVal Cont) const;
+ void handleEnd(CheckerContext &C, const Expr *CE, SVal RetVal,
+ SVal Cont) const;
+ void handleAssignment(CheckerContext &C, SVal Cont, const Expr *CE = nullptr,
+ SVal OldCont = UndefinedVal()) const;
+ void handleAssign(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handleClear(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePushBack(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePopBack(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePushFront(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePopFront(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handleInsert(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleErase(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleErase(CheckerContext &C, SVal Cont, SVal Iter1, SVal Iter2) const;
+ void handleEraseAfter(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleEraseAfter(CheckerContext &C, SVal Cont, SVal Iter1,
+ SVal Iter2) const;
+ const NoteTag *getChangeTag(CheckerContext &C, StringRef Text,
+ const MemRegion *ContReg,
+ const Expr *ContE) const;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+public:
+ ContainerModeling() = default;
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+ using NoItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal,
+ const Expr *) const;
+ using OneItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal,
+ SVal) const;
+ using TwoItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal, SVal,
+ SVal) const;
+
+ CallDescriptionMap<NoItParamFn> NoIterParamFunctions = {
+ {{0, "clear", 0},
+ &ContainerModeling::handleClear},
+ {{0, "assign", 2},
+ &ContainerModeling::handleAssign},
+ {{0, "push_back", 1},
+ &ContainerModeling::handlePushBack},
+ {{0, "emplace_back", 1},
+ &ContainerModeling::handlePushBack},
+ {{0, "pop_back", 0},
+ &ContainerModeling::handlePopBack},
+ {{0, "push_front", 1},
+ &ContainerModeling::handlePushFront},
+ {{0, "emplace_front", 1},
+ &ContainerModeling::handlePushFront},
+ {{0, "pop_front", 0},
+ &ContainerModeling::handlePopFront},
+ };
+
+ CallDescriptionMap<OneItParamFn> OneIterParamFunctions = {
+ {{0, "insert", 2},
+ &ContainerModeling::handleInsert},
+ {{0, "emplace", 2},
+ &ContainerModeling::handleInsert},
+ {{0, "erase", 1},
+ &ContainerModeling::handleErase},
+ {{0, "erase_after", 1},
+ &ContainerModeling::handleEraseAfter},
+ };
+
+ CallDescriptionMap<TwoItParamFn> TwoIterParamFunctions = {
+ {{0, "erase", 2},
+ &ContainerModeling::handleErase},
+ {{0, "erase_after", 2},
+ &ContainerModeling::handleEraseAfter},
+ };
+
+};
+
+bool isBeginCall(const FunctionDecl *Func);
+bool isEndCall(const FunctionDecl *Func);
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
+SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount);
+ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount);
+ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
+ const ContainerData &CData);
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont);
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2);
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont);
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB, SymbolRef Expr,
+ SymbolRef OldSym, SymbolRef NewSym);
+bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
+
+} // namespace
+
+void ContainerModeling::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!Func)
+ return;
+
+ if (Func->isOverloadedOperator()) {
+ const auto Op = Func->getOverloadedOperator();
+ if (Op == OO_Equal) {
+ // Overloaded 'operator=' must be a non-static member function.
+ const auto *InstCall = cast<CXXInstanceCall>(&Call);
+ if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
+ handleAssignment(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
+ Call.getArgSVal(0));
+ return;
+ }
+
+ handleAssignment(C, InstCall->getCXXThisVal());
+ return;
+ }
+ } else {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ const NoItParamFn *Handler0 = NoIterParamFunctions.lookup(Call);
+ if (Handler0) {
+ (this->**Handler0)(C, InstCall->getCXXThisVal(),
+ InstCall->getCXXThisExpr());
+ return;
+ }
+
+ const OneItParamFn *Handler1 = OneIterParamFunctions.lookup(Call);
+ if (Handler1) {
+ (this->**Handler1)(C, InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ return;
+ }
+
+ const TwoItParamFn *Handler2 = TwoIterParamFunctions.lookup(Call);
+ if (Handler2) {
+ (this->**Handler2)(C, InstCall->getCXXThisVal(), Call.getArgSVal(0),
+ Call.getArgSVal(1));
+ return;
+ }
+
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (isBeginCall(Func)) {
+ handleBegin(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal());
+ return;
+ }
+
+ if (isEndCall(Func)) {
+ handleEnd(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal());
+ return;
+ }
+ }
+ }
+}
+
+void ContainerModeling::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // Keep symbolic expressions of container begins and ends alive
+ auto ContMap = State->get<ContainerMap>();
+ for (const auto &Cont : ContMap) {
+ const auto CData = Cont.second;
+ if (CData.getBegin()) {
+ SR.markLive(CData.getBegin());
+ if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getBegin()))
+ SR.markLive(SIE->getLHS());
+ }
+ if (CData.getEnd()) {
+ SR.markLive(CData.getEnd());
+ if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getEnd()))
+ SR.markLive(SIE->getLHS());
+ }
+ }
+}
+
+void ContainerModeling::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ // Cleanup
+ auto State = C.getState();
+
+ auto ContMap = State->get<ContainerMap>();
+ for (const auto &Cont : ContMap) {
+ if (!SR.isLiveRegion(Cont.first)) {
+ // We must keep the container data while it has live iterators to be able
+ // to compare them to the begin and the end of the container.
+ if (!hasLiveIterators(State, Cont.first)) {
+ State = State->remove<ContainerMap>(Cont.first);
+ }
+ }
+ }
+
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleBegin(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // If the container already has a begin symbol then use it. Otherwise first
+ // create a new one.
+ auto State = C.getState();
+ auto BeginSym = getContainerBegin(State, ContReg);
+ if (!BeginSym) {
+ State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ BeginSym = getContainerBegin(State, ContReg);
+ }
+ State = setIteratorPosition(State, RetVal,
+ IteratorPosition::getPosition(ContReg, BeginSym));
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEnd(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // If the container already has an end symbol then use it. Otherwise first
+ // create a new one.
+ auto State = C.getState();
+ auto EndSym = getContainerEnd(State, ContReg);
+ if (!EndSym) {
+ State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ EndSym = getContainerEnd(State, ContReg);
+ }
+ State = setIteratorPosition(State, RetVal,
+ IteratorPosition::getPosition(ContReg, EndSym));
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleAssignment(CheckerContext &C, SVal Cont,
+ const Expr *CE, SVal OldCont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // Assignment of a new value to a container always invalidates all its
+ // iterators
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ }
+
+ // In case of move, iterators of the old container (except the past-end
+ // iterators) remain valid but refer to the new container
+ if (!OldCont.isUndef()) {
+ const auto *OldContReg = OldCont.getAsRegion();
+ if (OldContReg) {
+ OldContReg = OldContReg->getMostDerivedObjectRegion();
+ const auto OldCData = getContainerData(State, OldContReg);
+ if (OldCData) {
+ if (const auto OldEndSym = OldCData->getEnd()) {
+ // If we already assigned an "end" symbol to the old container, then
+ // first reassign all iterator positions to the new container which
+ // are not past the container (thus not greater or equal to the
+ // current "end" symbol).
+ State = reassignAllIteratorPositionsUnless(State, OldContReg, ContReg,
+ OldEndSym, BO_GE);
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+ // Then generate and assign a new "end" symbol for the new container.
+ auto NewEndSym =
+ SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, NewEndSym, 4);
+ if (CData) {
+ State = setContainerData(State, ContReg, CData->newEnd(NewEndSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromEnd(NewEndSym));
+ }
+ // Finally, replace the old "end" symbol in the already reassigned
+ // iterator positions with the new "end" symbol.
+ State = rebaseSymbolInIteratorPositionsIf(
+ State, SVB, OldEndSym, NewEndSym, OldEndSym, BO_LT);
+ } else {
+ // There was no "end" symbol assigned yet to the old container,
+ // so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ if (const auto OldBeginSym = OldCData->getBegin()) {
+ // If we already assigned a "begin" symbol to the old container, then
+ // assign it to the new container and remove it from the old one.
+ if (CData) {
+ State =
+ setContainerData(State, ContReg, CData->newBegin(OldBeginSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromBegin(OldBeginSym));
+ }
+ State =
+ setContainerData(State, OldContReg, OldCData->newBegin(nullptr));
+ }
+ } else {
+ // There was neither "begin" nor "end" symbol assigned yet to the old
+ // container, so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ }
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleAssign(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // The assign() operation invalidates all the iterators
+ auto State = C.getState();
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleClear(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // The clear() operation invalidates all the iterators, except the past-end
+ // iterators of list-like containers
+ auto State = C.getState();
+ if (!hasSubscriptOperator(State, ContReg) ||
+ !backModifiable(State, ContReg)) {
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ if (const auto EndSym = CData->getEnd()) {
+ State =
+ invalidateAllIteratorPositionsExcept(State, ContReg, EndSym, BO_GE);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "became empty", ContReg, ContE);
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State, ChangeTag);
+}
+
+void ContainerModeling::handlePushBack(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg) && frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ return;
+ }
+
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ // For vector-like containers invalidate the past-end iterator positions
+ if (const auto EndSym = CData->getEnd()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newEndSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "extended to the back by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handlePopBack(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto EndSym = CData->getEnd()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto BackSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "shrank from the back by 1 position", ContReg, ContE);
+ // For vector-like and deque-like containers invalidate the last and the
+ // past-end iterator positions. For list-like containers only invalidate
+ // the last position
+ if (hasSubscriptOperator(State, ContReg) &&
+ backModifiable(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BackSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ } else {
+ State = invalidateIteratorPositions(State, BackSym, BO_EQ);
+ }
+ auto newEndSym = BackSym;
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handlePushFront(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ } else {
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto BeginSym = CData->getBegin()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "extended to the front by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State, ChangeTag);
+ }
+ }
+}
+
+void ContainerModeling::handlePopFront(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+  // For deque-like containers invalidate all iterator positions. For
+  // list-like containers only invalidate the first position
+ if (const auto BeginSym = CData->getBegin()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BeginSym, BO_LE);
+ } else {
+ State = invalidateIteratorPositions(State, BeginSym, BO_EQ);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "shrank from the front by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handleInsert(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions after the insertion.
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ C.addTransition(State);
+ }
+}
+
+void ContainerModeling::handleErase(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions at and after the
+ // deletion. For list-like containers only invalidate the deleted position.
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_EQ);
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleErase(CheckerContext &C, SVal Cont, SVal Iter1,
+ SVal Iter2) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions at and after the
+ // deletion range. For list-like containers only invalidate the deleted
+  // position range [first..last).
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE,
+ Pos2->getOffset(), BO_LT);
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEraseAfter(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // Invalidate the deleted iterator position, which is the position of the
+ // parameter plus one.
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto NextSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(Pos->getOffset()),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(Pos->getOffset())).getAsSymbol();
+ State = invalidateIteratorPositions(State, NextSym, BO_EQ);
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEraseAfter(CheckerContext &C, SVal Cont,
+ SVal Iter1, SVal Iter2) const {
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // Invalidate the deleted iterator position range (first..last)
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GT,
+ Pos2->getOffset(), BO_LT);
+ C.addTransition(State);
+}
+
+const NoteTag *ContainerModeling::getChangeTag(CheckerContext &C,
+ StringRef Text,
+ const MemRegion *ContReg,
+ const Expr *ContE) const {
+ StringRef Name;
+ // First try to get the name of the variable from the region
+ if (const auto *DR = dyn_cast<DeclRegion>(ContReg)) {
+ Name = DR->getDecl()->getName();
+ // If the region is not a `DeclRegion` then use the expression instead
+ } else if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(ContE->IgnoreParenCasts())) {
+ Name = DRE->getDecl()->getName();
+ }
+
+ return C.getNoteTag(
+ [Text, Name, ContReg](PathSensitiveBugReport &BR) -> std::string {
+ if (!BR.isInteresting(ContReg))
+ return "";
+
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << "Container " << (!Name.empty() ? ("'" + Name.str() + "' ") : "" )
+ << Text;
+ return std::string(Out.str());
+ });
+}
+
+void ContainerModeling::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ auto ContMap = State->get<ContainerMap>();
+
+ if (!ContMap.isEmpty()) {
+ Out << Sep << "Container Data :" << NL;
+ for (const auto &Cont : ContMap) {
+ Cont.first->dumpToStream(Out);
+ Out << " : [ ";
+ const auto CData = Cont.second;
+ if (CData.getBegin())
+ CData.getBegin()->dumpToStream(Out);
+ else
+ Out << "<Unknown>";
+ Out << " .. ";
+ if (CData.getEnd())
+ CData.getEnd()->dumpToStream(Out);
+ else
+ Out << "<Unknown>";
+ Out << " ]";
+ }
+ }
+}
+
+namespace {
+
+bool isBeginCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ return IdInfo->getName().endswith_lower("begin");
+}
+
+bool isEndCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ return IdInfo->getName().endswith_lower("end");
+}
+
+const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
+ const MemRegion *Reg) {
+ auto TI = getDynamicTypeInfo(State, Reg);
+ if (!TI.isValid())
+ return nullptr;
+
+ auto Type = TI.getType();
+ if (const auto *RefT = Type->getAs<ReferenceType>()) {
+ Type = RefT->getPointeeType();
+ }
+
+ return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
+}
+
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->isOverloadedOperator())
+ continue;
+ const auto OPK = Method->getOverloadedOperator();
+ if (OPK == OO_Subscript) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_front" || Method->getName() == "pop_front") {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_back" || Method->getName() == "pop_back") {
+ return true;
+ }
+ }
+ return false;
+}
+
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (!CDataPtr)
+ return nullptr;
+
+ return CDataPtr->getBegin();
+}
+
+SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (!CDataPtr)
+ return nullptr;
+
+ return CDataPtr->getEnd();
+}
+
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount) {
+ // Only create if it does not exist
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getBegin())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "begin");
+ State = assumeNoOverflow(State, Sym, 4);
+
+ if (CDataPtr) {
+ const auto CData = CDataPtr->newBegin(Sym);
+ return setContainerData(State, Cont, CData);
+ }
+
+ const auto CData = ContainerData::fromBegin(Sym);
+ return setContainerData(State, Cont, CData);
+}
+
+ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount) {
+ // Only create if it does not exist
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getEnd())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "end");
+ State = assumeNoOverflow(State, Sym, 4);
+
+ if (CDataPtr) {
+ const auto CData = CDataPtr->newEnd(Sym);
+ return setContainerData(State, Cont, CData);
+ }
+
+ const auto CData = ContainerData::fromEnd(Sym);
+ return setContainerData(State, Cont, CData);
+}
+
+ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
+ const ContainerData &CData) {
+ return State->set<ContainerMap>(Cont, CData);
+}
+
+template <typename Condition, typename Process>
+ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
+ Process Proc) {
+ auto &RegionMapFactory = State->get_context<IteratorRegionMap>();
+ auto RegionMap = State->get<IteratorRegionMap>();
+ bool Changed = false;
+ for (const auto &Reg : RegionMap) {
+ if (Cond(Reg.second)) {
+ RegionMap = RegionMapFactory.add(RegionMap, Reg.first, Proc(Reg.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorRegionMap>(RegionMap);
+
+ auto &SymbolMapFactory = State->get_context<IteratorSymbolMap>();
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ Changed = false;
+ for (const auto &Sym : SymbolMap) {
+ if (Cond(Sym.second)) {
+ SymbolMap = SymbolMapFactory.add(SymbolMap, Sym.first, Proc(Sym.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorSymbolMap>(SymbolMap);
+
+ return State;
+}
+
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchCont, Invalidate);
+}
+
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchContAndCompare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset1, Opc1) &&
+ compare(State, Pos.getOffset(), Offset2, Opc2);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchCont, ReAssign);
+}
+
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchContAndCompare, ReAssign);
+}
+
+// This function rebases symbolic expression `OldSym + Int` to `NewSym + Int`,
+// `OldSym - Int` to `NewSym - Int` and `OldSym` to `NewSym` in any iterator
+// position offsets where `CondSym` is true.
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc) {
+ auto LessThanEnd = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), CondSym, Opc);
+ };
+ auto RebaseSymbol = [&](const IteratorPosition &Pos) {
+ return Pos.setTo(rebaseSymbol(State, SVB, Pos.getOffset(), OldSym,
+ NewSym));
+ };
+ return processIteratorPositions(State, LessThanEnd, RebaseSymbol);
+}
+
+// This function rebases symbolic expression `OldExpr + Int` to `NewExpr + Int`,
+// `OldExpr - Int` to `NewExpr - Int` and `OldExpr` to `NewExpr` in expression
+// `OrigExpr`.
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
+ SymbolRef OrigExpr, SymbolRef OldExpr,
+ SymbolRef NewSym) {
+ auto &SymMgr = SVB.getSymbolManager();
+ auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
+ nonloc::SymbolVal(OldExpr),
+ SymMgr.getType(OrigExpr));
+
+ const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
+ if (!DiffInt)
+ return OrigExpr;
+
+ return SVB.evalBinOpNN(State, BO_Add, *DiffInt, nonloc::SymbolVal(NewSym),
+ SymMgr.getType(OrigExpr)).getAsSymbol();
+}
+
+bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont) {
+ auto RegionMap = State->get<IteratorRegionMap>();
+ for (const auto &Reg : RegionMap) {
+ if (Reg.second.getContainer() == Cont)
+ return true;
+ }
+
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ for (const auto &Sym : SymbolMap) {
+ if (Sym.second.getContainer() == Cont)
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace
+
+void ento::registerContainerModeling(CheckerManager &mgr) {
+ mgr.registerChecker<ContainerModeling>();
+}
+
+bool ento::shouldRegisterContainerModeling(const CheckerManager &mgr) {
+ if (!mgr.getLangOpts().CPlusPlus)
+ return false;
+
+ if (!mgr.getAnalyzerOptions().ShouldAggressivelySimplifyBinaryOperation) {
+ mgr.getASTContext().getDiagnostics().Report(
+ diag::err_analyzer_checker_incompatible_analyzer_option)
+ << "aggressive-binary-operation-simplification" << "false";
+ return false;
+ }
+
+ return true;
+}
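
A sketch (not from the patch) of the pattern this modeling tracks: begin() and end()
conjure per-container symbols, and mutating member calls advance or invalidate the
iterator positions tied to them. Running it typically also requires the
aggressive-binary-operation-simplification analyzer-config option checked above.

#include <vector>

void container_modeling_sketch(std::vector<int> &v) {
  auto b = v.begin();   // bound to the conjured "begin" symbol of v
  auto e = v.end();     // bound to the conjured "end" symbol of v
  v.push_back(1);       // vector-like: positions >= end are invalidated and
                        // the end symbol is advanced by one position
  v.pop_back();         // the last and the past-end positions are invalidated
  (void)b;
  (void)e;
}
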
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 8dd3132f07e2..4216a6883119 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -196,6 +196,6 @@ void ento::registerConversionChecker(CheckerManager &mgr) {
mgr.registerChecker<ConversionChecker>();
}
-bool ento::shouldRegisterConversionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterConversionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 61441889fc64..6bc186aa2755 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -540,6 +540,6 @@ void ento::registerDeadStoresChecker(CheckerManager &Mgr) {
AnOpts.getCheckerBooleanOption(Chk, "ShowFixIts");
}
-bool ento::shouldRegisterDeadStoresChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDeadStoresChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 0cb4be2c7fdc..03b7cbd1c833 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -47,7 +47,7 @@ void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<DominatorsTreeDumper>();
}
-bool ento::shouldRegisterDominatorsTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterDominatorsTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -73,7 +73,7 @@ void ento::registerPostDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<PostDominatorsTreeDumper>();
}
-bool ento::shouldRegisterPostDominatorsTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterPostDominatorsTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -98,7 +98,7 @@ void ento::registerControlDependencyTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<ControlDependencyTreeDumper>();
}
-bool ento::shouldRegisterControlDependencyTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterControlDependencyTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -122,7 +122,7 @@ void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveVariablesDumper>();
}
-bool ento::shouldRegisterLiveVariablesDumper(const LangOptions &LO) {
+bool ento::shouldRegisterLiveVariablesDumper(const CheckerManager &mgr) {
return true;
}
@@ -145,7 +145,7 @@ void ento::registerLiveStatementsDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveStatementsDumper>();
}
-bool ento::shouldRegisterLiveStatementsDumper(const LangOptions &LO) {
+bool ento::shouldRegisterLiveStatementsDumper(const CheckerManager &mgr) {
return true;
}
@@ -169,7 +169,7 @@ void ento::registerCFGViewer(CheckerManager &mgr) {
mgr.registerChecker<CFGViewer>();
}
-bool ento::shouldRegisterCFGViewer(const LangOptions &LO) {
+bool ento::shouldRegisterCFGViewer(const CheckerManager &mgr) {
return true;
}
@@ -199,7 +199,7 @@ void ento::registerCFGDumper(CheckerManager &mgr) {
mgr.registerChecker<CFGDumper>();
}
-bool ento::shouldRegisterCFGDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCFGDumper(const CheckerManager &mgr) {
return true;
}
@@ -223,7 +223,7 @@ void ento::registerCallGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<CallGraphViewer>();
}
-bool ento::shouldRegisterCallGraphViewer(const LangOptions &LO) {
+bool ento::shouldRegisterCallGraphViewer(const CheckerManager &mgr) {
return true;
}
@@ -247,7 +247,7 @@ void ento::registerCallGraphDumper(CheckerManager &mgr) {
mgr.registerChecker<CallGraphDumper>();
}
-bool ento::shouldRegisterCallGraphDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCallGraphDumper(const CheckerManager &mgr) {
return true;
}
@@ -281,8 +281,6 @@ public:
llvm::errs() << Keys[I]->getKey() << " = "
<< (Keys[I]->second.empty() ? "\"\"" : Keys[I]->second)
<< '\n';
-
- llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
}
};
}
@@ -291,7 +289,7 @@ void ento::registerConfigDumper(CheckerManager &mgr) {
mgr.registerChecker<ConfigDumper>();
}
-bool ento::shouldRegisterConfigDumper(const LangOptions &LO) {
+bool ento::shouldRegisterConfigDumper(const CheckerManager &mgr) {
return true;
}
@@ -314,7 +312,7 @@ void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<ExplodedGraphViewer>();
}
-bool ento::shouldRegisterExplodedGraphViewer(const LangOptions &LO) {
+bool ento::shouldRegisterExplodedGraphViewer(const CheckerManager &mgr) {
return true;
}
@@ -346,6 +344,6 @@ void ento::registerReportStmts(CheckerManager &mgr) {
mgr.registerChecker<ReportStmts>();
}
-bool ento::shouldRegisterReportStmts(const LangOptions &LO) {
+bool ento::shouldRegisterReportStmts(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
new file mode 100644
index 000000000000..6fed999ffc80
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
@@ -0,0 +1,150 @@
+//==-- DebugContainerModeling.cpp ---------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for debugging container modeling.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+#include "Iterator.h"
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class DebugContainerModeling
+ : public Checker<eval::Call> {
+
+ std::unique_ptr<BugType> DebugMsgBugType;
+
+ template <typename Getter>
+ void analyzerContainerDataField(const CallExpr *CE, CheckerContext &C,
+ Getter get) const;
+ void analyzerContainerBegin(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerContainerEnd(const CallExpr *CE, CheckerContext &C) const;
+ ExplodedNode *reportDebugMsg(llvm::StringRef Msg, CheckerContext &C) const;
+
+ typedef void (DebugContainerModeling::*FnCheck)(const CallExpr *,
+ CheckerContext &) const;
+
+ CallDescriptionMap<FnCheck> Callbacks = {
+ {{0, "clang_analyzer_container_begin", 1},
+ &DebugContainerModeling::analyzerContainerBegin},
+ {{0, "clang_analyzer_container_end", 1},
+ &DebugContainerModeling::analyzerContainerEnd},
+ };
+
+public:
+ DebugContainerModeling();
+
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+} // namespace
+
+DebugContainerModeling::DebugContainerModeling() {
+ DebugMsgBugType.reset(
+ new BugType(this, "Checking analyzer assumptions", "debug",
+ /*SuppressOnSink=*/true));
+}
+
+bool DebugContainerModeling::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ const FnCheck *Handler = Callbacks.lookup(Call);
+ if (!Handler)
+ return false;
+
+ (this->**Handler)(CE, C);
+ return true;
+}
+
+template <typename Getter>
+void DebugContainerModeling::analyzerContainerDataField(const CallExpr *CE,
+ CheckerContext &C,
+ Getter get) const {
+ if (CE->getNumArgs() == 0) {
+ reportDebugMsg("Missing container argument", C);
+ return;
+ }
+
+ auto State = C.getState();
+ const MemRegion *Cont = C.getSVal(CE->getArg(0)).getAsRegion();
+ if (Cont) {
+ const auto *Data = getContainerData(State, Cont);
+ if (Data) {
+ SymbolRef Field = get(Data);
+ if (Field) {
+ State = State->BindExpr(CE, C.getLocationContext(),
+ nonloc::SymbolVal(Field));
+
+        // Propagate interestingness from the container's data (marked
+        // interesting by an `ExprInspection` debug call) to the container
+        // itself.
+ const NoteTag *InterestingTag =
+ C.getNoteTag(
+ [Cont, Field](PathSensitiveBugReport &BR) -> std::string {
+ if (BR.isInteresting(Field)) {
+ BR.markInteresting(Cont);
+ }
+ return "";
+ });
+ C.addTransition(State, InterestingTag);
+ return;
+ }
+ }
+ }
+
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ State = State->BindExpr(CE, C.getLocationContext(),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))));
+}
+
+void DebugContainerModeling::analyzerContainerBegin(const CallExpr *CE,
+ CheckerContext &C) const {
+ analyzerContainerDataField(CE, C, [](const ContainerData *D) {
+ return D->getBegin();
+ });
+}
+
+void DebugContainerModeling::analyzerContainerEnd(const CallExpr *CE,
+ CheckerContext &C) const {
+ analyzerContainerDataField(CE, C, [](const ContainerData *D) {
+ return D->getEnd();
+ });
+}
+
+ExplodedNode *DebugContainerModeling::reportDebugMsg(llvm::StringRef Msg,
+ CheckerContext &C) const {
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return nullptr;
+
+ auto &BR = C.getBugReporter();
+ BR.emitReport(std::make_unique<PathSensitiveBugReport>(*DebugMsgBugType,
+ Msg, N));
+ return N;
+}
+
+void ento::registerDebugContainerModeling(CheckerManager &mgr) {
+ mgr.registerChecker<DebugContainerModeling>();
+}
+
+bool ento::shouldRegisterDebugContainerModeling(const CheckerManager &mgr) {
+ return true;
+}
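The new checker evaluates two ExprInspection-style debug calls that bind a tracked container's begin and end symbols to the call expression. Below is a hedged, test-style sketch of how they could be exercised; the declarations and the set of enabled checkers follow the analyzer's debug-test conventions and are assumptions here, not part of the patch.

    // Assumed to be analyzed with the container/iterator modeling checkers plus
    // debug.DebugContainerModeling and debug.ExprInspection enabled.
    #include <vector>

    template <typename Container>
    long clang_analyzer_container_begin(const Container &);
    template <typename Container>
    long clang_analyzer_container_end(const Container &);
    void clang_analyzer_eval(bool);

    void test(std::vector<int> &V) {
      V.begin(); // assumed to let ContainerModeling record data for V
      // Both calls below bind the same begin symbol (or the constant 0 when no
      // data is tracked), so the comparison is expected to evaluate to TRUE.
      clang_analyzer_eval(clang_analyzer_container_begin(V) ==
                          clang_analyzer_container_begin(V));
    }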
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
index 4717fef96341..5833eea56da8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
@@ -30,11 +30,6 @@ class DebugIteratorModeling
std::unique_ptr<BugType> DebugMsgBugType;
template <typename Getter>
- void analyzerContainerDataField(const CallExpr *CE, CheckerContext &C,
- Getter get) const;
- void analyzerContainerBegin(const CallExpr *CE, CheckerContext &C) const;
- void analyzerContainerEnd(const CallExpr *CE, CheckerContext &C) const;
- template <typename Getter>
void analyzerIteratorDataField(const CallExpr *CE, CheckerContext &C,
Getter get, SVal Default) const;
void analyzerIteratorPosition(const CallExpr *CE, CheckerContext &C) const;
@@ -46,10 +41,6 @@ class DebugIteratorModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{0, "clang_analyzer_container_begin", 1},
- &DebugIteratorModeling::analyzerContainerBegin},
- {{0, "clang_analyzer_container_end", 1},
- &DebugIteratorModeling::analyzerContainerEnd},
{{0, "clang_analyzer_iterator_position", 1},
&DebugIteratorModeling::analyzerIteratorPosition},
{{0, "clang_analyzer_iterator_container", 1},
@@ -87,49 +78,6 @@ bool DebugIteratorModeling::evalCall(const CallEvent &Call,
}
template <typename Getter>
-void DebugIteratorModeling::analyzerContainerDataField(const CallExpr *CE,
- CheckerContext &C,
- Getter get) const {
- if (CE->getNumArgs() == 0) {
- reportDebugMsg("Missing container argument", C);
- return;
- }
-
- auto State = C.getState();
- const MemRegion *Cont = C.getSVal(CE->getArg(0)).getAsRegion();
- if (Cont) {
- const auto *Data = getContainerData(State, Cont);
- if (Data) {
- SymbolRef Field = get(Data);
- if (Field) {
- State = State->BindExpr(CE, C.getLocationContext(),
- nonloc::SymbolVal(Field));
- C.addTransition(State);
- return;
- }
- }
- }
-
- auto &BVF = C.getSValBuilder().getBasicValueFactory();
- State = State->BindExpr(CE, C.getLocationContext(),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))));
-}
-
-void DebugIteratorModeling::analyzerContainerBegin(const CallExpr *CE,
- CheckerContext &C) const {
- analyzerContainerDataField(CE, C, [](const ContainerData *D) {
- return D->getBegin();
- });
-}
-
-void DebugIteratorModeling::analyzerContainerEnd(const CallExpr *CE,
- CheckerContext &C) const {
- analyzerContainerDataField(CE, C, [](const ContainerData *D) {
- return D->getEnd();
- });
-}
-
-template <typename Getter>
void DebugIteratorModeling::analyzerIteratorDataField(const CallExpr *CE,
CheckerContext &C,
Getter get,
@@ -191,6 +139,6 @@ void ento::registerDebugIteratorModeling(CheckerManager &mgr) {
mgr.registerChecker<DebugIteratorModeling>();
}
-bool ento::shouldRegisterDebugIteratorModeling(const LangOptions &LO) {
+bool ento::shouldRegisterDebugIteratorModeling(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
index 45c1984c5e15..7c5833762008 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
@@ -92,6 +92,8 @@ void DeleteWithNonVirtualDtorChecker::checkPreStmt(const CXXDeleteExpr *DE,
"Logic error"));
ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
auto R = std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
// Mark region of problematic base class for later use in the BugVisitor.
@@ -148,6 +150,6 @@ void ento::registerDeleteWithNonVirtualDtorChecker(CheckerManager &mgr) {
}
bool ento::shouldRegisterDeleteWithNonVirtualDtorChecker(
- const LangOptions &LO) {
+ const CheckerManager &mgr) {
return true;
}
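For context, generateNonFatalErrorNode() returns a null pointer when the transition caches out (the node already exists in the exploded graph), so a report must only be built when a node was actually obtained. A minimal sketch of the guard pattern the hunk above adds; the free function, bug type, and message are placeholders, not the checker's real code.

    #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
    #include <memory>

    using namespace clang;
    using namespace ento;

    // Sketch only: BT would be a BugType owned by the checker, and the message
    // is a placeholder diagnostic.
    static void reportIfNew(CheckerContext &C, const BugType &BT) {
      ExplodedNode *N = C.generateNonFatalErrorNode();
      if (!N) // node coalesced with an existing one; nothing to attach a report to
        return;
      C.emitReport(
          std::make_unique<PathSensitiveBugReport>(BT, "placeholder message", N));
    }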
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 46100cd1dace..2411f0e2d058 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -304,6 +304,6 @@ void ento::registerDereferenceChecker(CheckerManager &mgr) {
mgr.registerChecker<DereferenceChecker>();
}
-bool ento::shouldRegisterDereferenceChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDereferenceChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index 0c46447e1985..df88b71ff063 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -219,19 +219,12 @@ static bool AttrFilter(const ObjCMethodDecl *M) {
// Register the checker that checks for direct accesses in all functions,
// except for the initialization and copy routines.
void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
- mgr.registerChecker<DirectIvarAssignment>();
+ auto Chk = mgr.registerChecker<DirectIvarAssignment>();
+ if (mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk,
+ "AnnotatedFunctions"))
+ Chk->ShouldSkipMethod = &AttrFilter;
}
-bool ento::shouldRegisterDirectIvarAssignment(const LangOptions &LO) {
- return true;
-}
-
-void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
- CheckerManager &mgr) {
- mgr.getChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
-}
-
-bool ento::shouldRegisterDirectIvarAssignmentForAnnotatedFunctions(
- const LangOptions &LO) {
+bool ento::shouldRegisterDirectIvarAssignment(const CheckerManager &mgr) {
return true;
}
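The two registry entries are folded into a single checker configured through a boolean checker option: the AnnotatedFunctions flag is now read at registration time (checker options are normally set via -analyzer-config and carry defaults in Checkers.td). A hedged sketch of the same registration pattern for a hypothetical checker and option:

    // "MyChecker" and its "MyFlag" option are hypothetical; the accessor is the
    // CheckerBase*-taking overload of getCheckerBooleanOption used above.
    void registerMyChecker(CheckerManager &Mgr) {
      auto *Chk = Mgr.registerChecker<MyChecker>();
      // Enable extra behavior only when the user (or the Checkers.td default)
      // turned the option on.
      if (Mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk, "MyFlag"))
        Chk->StrictMode = true;
    }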
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 8798bde88dcd..2b3164ba4a2c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -101,6 +101,6 @@ void ento::registerDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<DivZeroChecker>();
}
-bool ento::shouldRegisterDivZeroChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDivZeroChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 8cc38f9735f3..dbc930d7d37b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -203,6 +203,6 @@ void ento::registerDynamicTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypeChecker>();
}
-bool ento::shouldRegisterDynamicTypeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicTypeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index cce3449b8873..14ba5d769969 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -55,6 +55,7 @@ class DynamicTypePropagation:
check::PostStmt<CXXNewExpr>,
check::PreObjCMessage,
check::PostObjCMessage > {
+
const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const;
@@ -69,8 +70,8 @@ class DynamicTypePropagation:
mutable std::unique_ptr<BugType> ObjCGenericsBugType;
void initBugType() const {
if (!ObjCGenericsBugType)
- ObjCGenericsBugType.reset(
- new BugType(this, "Generics", categories::CoreFoundationObjectiveC));
+ ObjCGenericsBugType.reset(new BugType(
+ GenericCheckName, "Generics", categories::CoreFoundationObjectiveC));
}
class GenericsBugVisitor : public BugReporterVisitor {
@@ -108,12 +109,129 @@ public:
  /// This value is set to true when the Generics checker is turned on.
DefaultBool CheckGenerics;
+ CheckerNameRef GenericCheckName;
+};
+
+bool isObjCClassType(QualType Type) {
+ if (const auto *PointerType = dyn_cast<ObjCObjectPointerType>(Type)) {
+ return PointerType->getObjectType()->isObjCClass();
+ }
+ return false;
+}
+
+struct RuntimeType {
+ const ObjCObjectType *Type = nullptr;
+ bool Precise = false;
+
+ operator bool() const { return Type != nullptr; }
};
+
+RuntimeType inferReceiverType(const ObjCMethodCall &Message,
+ CheckerContext &C) {
+ const ObjCMessageExpr *MessageExpr = Message.getOriginExpr();
+
+ // Check if we can statically infer the actual type precisely.
+ //
+ // 1. Class is written directly in the message:
+ // \code
+ // [ActualClass classMethod];
+ // \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class) {
+ return {MessageExpr->getClassReceiver()->getAs<ObjCObjectType>(),
+ /*Precise=*/true};
+ }
+
+ // 2. Receiver is 'super' from a class method (a.k.a 'super' is a
+ // class object).
+ // \code
+ // [super classMethod];
+ // \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::SuperClass) {
+ return {MessageExpr->getSuperType()->getAs<ObjCObjectType>(),
+ /*Precise=*/true};
+ }
+
+ // 3. Receiver is 'super' from an instance method (a.k.a 'super' is an
+ // instance of a super class).
+ // \code
+ // [super instanceMethod];
+  //   \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ if (const auto *ObjTy =
+ MessageExpr->getSuperType()->getAs<ObjCObjectPointerType>())
+ return {ObjTy->getObjectType(), /*Precise=*/true};
+ }
+
+ const Expr *RecE = MessageExpr->getInstanceReceiver();
+
+ if (!RecE)
+ return {};
+
+ // Otherwise, let's try to get type information from our estimations of
+ // runtime types.
+ QualType InferredType;
+ SVal ReceiverSVal = C.getSVal(RecE);
+ ProgramStateRef State = C.getState();
+
+ if (const MemRegion *ReceiverRegion = ReceiverSVal.getAsRegion()) {
+ if (DynamicTypeInfo DTI = getDynamicTypeInfo(State, ReceiverRegion)) {
+ InferredType = DTI.getType().getCanonicalType();
+ }
+ }
+
+ if (SymbolRef ReceiverSymbol = ReceiverSVal.getAsSymbol()) {
+ if (InferredType.isNull()) {
+ InferredType = ReceiverSymbol->getType();
+ }
+
+ // If receiver is a Class object, we want to figure out the type it
+ // represents.
+ if (isObjCClassType(InferredType)) {
+ // We actually might have some info on what type is contained in there.
+ if (DynamicTypeInfo DTI =
+ getClassObjectDynamicTypeInfo(State, ReceiverSymbol)) {
+
+ // Types in Class objects can be ONLY Objective-C types
+ return {cast<ObjCObjectType>(DTI.getType()), !DTI.canBeASubClass()};
+ }
+
+ SVal SelfSVal = State->getSelfSVal(C.getLocationContext());
+
+      // Another way to guess what is in a Class object is when it is the
+      // 'self' variable of the current class method.
+ if (ReceiverSVal == SelfSVal) {
+ // In this case, we should return the type of the enclosing class
+ // declaration.
+ if (const ObjCMethodDecl *MD =
+ dyn_cast<ObjCMethodDecl>(C.getStackFrame()->getDecl()))
+ if (const ObjCObjectType *ObjTy = dyn_cast<ObjCObjectType>(
+ MD->getClassInterface()->getTypeForDecl()))
+ return {ObjTy};
+ }
+ }
+ }
+
+ // Unfortunately, it seems like we have no idea what that type is.
+ if (InferredType.isNull()) {
+ return {};
+ }
+
+ // We can end up here if we got some dynamic type info and the
+ // receiver is not one of the known Class objects.
+ if (const auto *ReceiverInferredType =
+ dyn_cast<ObjCObjectPointerType>(InferredType)) {
+ return {ReceiverInferredType->getObjectType()};
+ }
+
+ // Any other type (like 'Class') is not really useful at this point.
+ return {};
+}
} // end anonymous namespace
void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = removeDeadTypes(C.getState(), SR);
+ State = removeDeadClassObjectTypes(State, SR);
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
@@ -209,12 +327,21 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
case OMF_alloc:
case OMF_new: {
// Get the type of object that will get created.
- const ObjCMessageExpr *MsgE = Msg->getOriginExpr();
- const ObjCObjectType *ObjTy = getObjectTypeForAllocAndNew(MsgE, C);
+ RuntimeType ObjTy = inferReceiverType(*Msg, C);
+
if (!ObjTy)
return;
+
QualType DynResTy =
- C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
+ C.getASTContext().getObjCObjectPointerType(QualType(ObjTy.Type, 0));
+    // We used to assume that whatever type we got from inference is
+    // actually precise (which is not exactly correct).
+    // A big portion of the existing behavior depends on that assumption
+    // (e.g. certain inlining won't take place). For this reason, we don't
+    // use the ObjTy.Precise flag here.
+ //
+ // TODO: We should mitigate this problem some time in the future
+ // and replace hardcoded 'false' with '!ObjTy.Precise'.
C.addTransition(setDynamicTypeInfo(State, RetReg, DynResTy, false));
break;
}
@@ -303,40 +430,6 @@ void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
/*CanBeSubClassed=*/false));
}
-const ObjCObjectType *
-DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
- CheckerContext &C) const {
- if (MsgE->getReceiverKind() == ObjCMessageExpr::Class) {
- if (const ObjCObjectType *ObjTy
- = MsgE->getClassReceiver()->getAs<ObjCObjectType>())
- return ObjTy;
- }
-
- if (MsgE->getReceiverKind() == ObjCMessageExpr::SuperClass) {
- if (const ObjCObjectType *ObjTy
- = MsgE->getSuperType()->getAs<ObjCObjectType>())
- return ObjTy;
- }
-
- const Expr *RecE = MsgE->getInstanceReceiver();
- if (!RecE)
- return nullptr;
-
- RecE= RecE->IgnoreParenImpCasts();
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RecE)) {
- const StackFrameContext *SFCtx = C.getStackFrame();
- // Are we calling [self alloc]? If this is self, get the type of the
- // enclosing ObjC class.
- if (DRE->getDecl() == SFCtx->getSelfDecl()) {
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SFCtx->getDecl()))
- if (const ObjCObjectType *ObjTy =
- dyn_cast<ObjCObjectType>(MD->getClassInterface()->getTypeForDecl()))
- return ObjTy;
- }
- }
- return nullptr;
-}
-
// Return a better dynamic type if one can be derived from the cast.
// Compare the current dynamic type of the region and the new type to which we
// are casting. If the new type is lower in the inheritance hierarchy, pick it.
@@ -821,25 +914,56 @@ void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
Selector Sel = MessageExpr->getSelector();
ProgramStateRef State = C.getState();
- // Inference for class variables.
- // We are only interested in cases where the class method is invoked on a
- // class. This method is provided by the runtime and available on all classes.
- if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class &&
- Sel.getAsString() == "class") {
- QualType ReceiverType = MessageExpr->getClassReceiver();
- const auto *ReceiverClassType = ReceiverType->castAs<ObjCObjectType>();
- if (!ReceiverClassType->isSpecialized())
- return;
- QualType ReceiverClassPointerType =
- C.getASTContext().getObjCObjectPointerType(
- QualType(ReceiverClassType, 0));
- const auto *InferredType =
- ReceiverClassPointerType->castAs<ObjCObjectPointerType>();
+ // Here we try to propagate information on Class objects.
+ if (Sel.getAsString() == "class") {
+ // We try to figure out the type from the receiver of the 'class' message.
+ if (RuntimeType ReceiverRuntimeType = inferReceiverType(M, C)) {
+
+ QualType ReceiverClassType(ReceiverRuntimeType.Type, 0);
+
+ // We want to consider only precise information on generics.
+ if (ReceiverRuntimeType.Type->isSpecialized() &&
+ ReceiverRuntimeType.Precise) {
+ QualType ReceiverClassPointerType =
+ C.getASTContext().getObjCObjectPointerType(ReceiverClassType);
+ const auto *InferredType =
+ ReceiverClassPointerType->castAs<ObjCObjectPointerType>();
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
+ }
- State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
- C.addTransition(State);
- return;
+ // Constrain the resulting class object to the inferred type.
+ State = setClassObjectDynamicTypeInfo(State, RetSym, ReceiverClassType,
+ !ReceiverRuntimeType.Precise);
+
+ C.addTransition(State);
+ return;
+ }
+ }
+
+ if (Sel.getAsString() == "superclass") {
+ // We try to figure out the type from the receiver of the 'superclass'
+ // message.
+ if (RuntimeType ReceiverRuntimeType = inferReceiverType(M, C)) {
+
+ // Result type would be a super class of the receiver's type.
+ QualType ReceiversSuperClass =
+ ReceiverRuntimeType.Type->getSuperClassType();
+
+      // Check if it really has a superclass.
+ //
+ // TODO: we can probably pay closer attention to cases when the class
+ // object can be 'nil' as the result of such message.
+ if (!ReceiversSuperClass.isNull()) {
+ // Constrain the resulting class object to the inferred type.
+ State = setClassObjectDynamicTypeInfo(
+ State, RetSym, ReceiversSuperClass, !ReceiverRuntimeType.Precise);
+
+ C.addTransition(State);
+ }
+ return;
+ }
}
// Tracking for return types.
@@ -979,9 +1103,10 @@ PathDiagnosticPieceRef DynamicTypePropagation::GenericsBugVisitor::VisitNode(
void ento::registerObjCGenericsChecker(CheckerManager &mgr) {
DynamicTypePropagation *checker = mgr.getChecker<DynamicTypePropagation>();
checker->CheckGenerics = true;
+ checker->GenericCheckName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterObjCGenericsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCGenericsChecker(const CheckerManager &mgr) {
return true;
}
@@ -989,6 +1114,6 @@ void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypePropagation>();
}
-bool ento::shouldRegisterDynamicTypePropagation(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicTypePropagation(const CheckerManager &mgr) {
return true;
}
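The receiver-type inference above feeds a per-Class-object type map through the setClassObjectDynamicTypeInfo / getClassObjectDynamicTypeInfo pair (with removeDeadClassObjectTypes for cleanup in checkDeadSymbols). A hedged sketch of that write/read cycle; the helper function itself is hypothetical and assumes the analyzer's dynamic-type headers are available.

    using namespace clang;
    using namespace ento;

    // Hypothetical helper; the two calls are the APIs used in the hunks above.
    ProgramStateRef rememberClassObject(ProgramStateRef State, SymbolRef ClassSym,
                                        QualType ReceiverClassType, bool Precise) {
      // Record "ClassSym denotes ReceiverClassType, possibly a subclass of it".
      State = setClassObjectDynamicTypeInfo(State, ClassSym, ReceiverClassType,
                                            /*CanBeSubClassed=*/!Precise);
      // Consumers query it back later; DynamicTypeInfo converts to bool when set.
      if (DynamicTypeInfo DTI = getClassObjectDynamicTypeInfo(State, ClassSym))
        (void)DTI.canBeASubClass();
      return State;
    }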
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 481a5685a71f..0e94b915a468 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -142,6 +142,6 @@ void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<EnumCastOutOfRangeChecker>();
}
-bool ento::shouldRegisterEnumCastOutOfRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterEnumCastOutOfRangeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 17c813962a23..4225d890c47a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -13,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -45,13 +47,17 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
void analyzerHashDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerDenote(const CallExpr *CE, CheckerContext &C) const;
void analyzerExpress(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerIsTainted(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
- ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C) const;
+ // Optional parameter `ExprVal` for expression value to be marked interesting.
+ ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C,
+ Optional<SVal> ExprVal = None) const;
ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR,
- ExplodedNode *N) const;
+ ExplodedNode *N,
+ Optional<SVal> ExprVal = None) const;
public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
@@ -72,26 +78,34 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
// These checks should have no effect on the surrounding environment
// (globals should not be invalidated, etc), hence the use of evalCall.
- FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
- .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
- .Case("clang_analyzer_checkInlined",
- &ExprInspectionChecker::analyzerCheckInlined)
- .Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
- .Case("clang_analyzer_warnIfReached",
- &ExprInspectionChecker::analyzerWarnIfReached)
- .Case("clang_analyzer_warnOnDeadSymbol",
- &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
- .StartsWith("clang_analyzer_explain", &ExprInspectionChecker::analyzerExplain)
- .StartsWith("clang_analyzer_dump", &ExprInspectionChecker::analyzerDump)
- .Case("clang_analyzer_getExtent", &ExprInspectionChecker::analyzerGetExtent)
- .Case("clang_analyzer_printState",
- &ExprInspectionChecker::analyzerPrintState)
- .Case("clang_analyzer_numTimesReached",
- &ExprInspectionChecker::analyzerNumTimesReached)
- .Case("clang_analyzer_hashDump", &ExprInspectionChecker::analyzerHashDump)
- .Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
- .Case("clang_analyzer_express", &ExprInspectionChecker::analyzerExpress)
- .Default(nullptr);
+ FnCheck Handler =
+ llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
+ .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
+ .Case("clang_analyzer_checkInlined",
+ &ExprInspectionChecker::analyzerCheckInlined)
+ .Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
+ .Case("clang_analyzer_warnIfReached",
+ &ExprInspectionChecker::analyzerWarnIfReached)
+ .Case("clang_analyzer_warnOnDeadSymbol",
+ &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
+ .StartsWith("clang_analyzer_explain",
+ &ExprInspectionChecker::analyzerExplain)
+ .StartsWith("clang_analyzer_dump",
+ &ExprInspectionChecker::analyzerDump)
+ .Case("clang_analyzer_getExtent",
+ &ExprInspectionChecker::analyzerGetExtent)
+ .Case("clang_analyzer_printState",
+ &ExprInspectionChecker::analyzerPrintState)
+ .Case("clang_analyzer_numTimesReached",
+ &ExprInspectionChecker::analyzerNumTimesReached)
+ .Case("clang_analyzer_hashDump",
+ &ExprInspectionChecker::analyzerHashDump)
+ .Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
+ .Case("clang_analyzer_express",
+ &ExprInspectionChecker::analyzerExpress)
+ .StartsWith("clang_analyzer_isTainted",
+ &ExprInspectionChecker::analyzerIsTainted)
+ .Default(nullptr);
if (!Handler)
return false;
@@ -133,22 +147,28 @@ static const char *getArgumentValueString(const CallExpr *CE,
}
ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- CheckerContext &C) const {
+ CheckerContext &C,
+ Optional<SVal> ExprVal) const {
ExplodedNode *N = C.generateNonFatalErrorNode();
- reportBug(Msg, C.getBugReporter(), N);
+ reportBug(Msg, C.getBugReporter(), N, ExprVal);
return N;
}
ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
BugReporter &BR,
- ExplodedNode *N) const {
+ ExplodedNode *N,
+ Optional<SVal> ExprVal) const {
if (!N)
return nullptr;
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
- BR.emitReport(std::make_unique<PathSensitiveBugReport>(*BT, Msg, N));
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ if (ExprVal) {
+ R->markInteresting(*ExprVal);
+ }
+ BR.emitReport(std::move(R));
return N;
}
@@ -234,8 +254,9 @@ void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
}
ProgramStateRef State = C.getState();
- State = State->BindExpr(CE, C.getLocationContext(),
- MR->getExtent(C.getSValBuilder()));
+ DefinedOrUnknownSVal Size = getDynamicSize(State, MR, C.getSValBuilder());
+
+ State = State->BindExpr(CE, C.getLocationContext(), Size);
C.addTransition(State);
}
@@ -394,7 +415,8 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
return;
}
- SymbolRef Sym = C.getSVal(CE->getArg(0)).getAsSymbol();
+ SVal ArgVal = C.getSVal(CE->getArg(0));
+ SymbolRef Sym = ArgVal.getAsSymbol();
if (!Sym) {
reportBug("Not a symbol", C);
return;
@@ -407,13 +429,24 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
return;
}
- reportBug(*Str, C);
+ reportBug(*Str, C, ArgVal);
+}
+
+void ExprInspectionChecker::analyzerIsTainted(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() != 1) {
+ reportBug("clang_analyzer_isTainted() requires exactly one argument", C);
+ return;
+ }
+ const bool IsTainted =
+ taint::isTainted(C.getState(), CE->getArg(0), C.getLocationContext());
+ reportBug(IsTainted ? "YES" : "NO", C);
}
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ExprInspectionChecker>();
}
-bool ento::shouldRegisterExprInspectionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterExprInspectionChecker(const CheckerManager &mgr) {
return true;
}
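The new clang_analyzer_isTainted handler makes taint directly queryable from tests: it requires exactly one argument and reports YES or NO depending on whether the argument's value is tainted. Below is a hedged, test-style sketch; the declaration style and the assumption that the taint checker is enabled are illustrative only, and any callee whose name starts with clang_analyzer_isTainted matches, since the lookup above uses StartsWith.

    extern "C" int scanf(const char *, ...);
    template <typename T> void clang_analyzer_isTainted(T);

    void test_taint_query() {
      int n = 0;
      scanf("%d", &n);             // the taint checker marks the data written through &n
      clang_analyzer_isTainted(n); // expected to report YES
      int local = 42;
      clang_analyzer_isTainted(local); // expected to report NO
    }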
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index b315a8452285..6275e49e51ae 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -66,6 +66,6 @@ void ento::registerFixedAddressChecker(CheckerManager &mgr) {
mgr.registerChecker<FixedAddressChecker>();
}
-bool ento::shouldRegisterFixedAddressChecker(const LangOptions &LO) {
+bool ento::shouldRegisterFixedAddressChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index 3c04983df443..fc35082705fa 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -90,6 +90,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
@@ -149,6 +150,10 @@ public:
CASE(Kind::Released)
CASE(Kind::Escaped)
}
+ if (ErrorSym) {
+ OS << " ErrorSym: ";
+ ErrorSym->dumpToStream(OS);
+ }
}
LLVM_DUMP_METHOD void dump() const { dump(llvm::errs()); }
@@ -314,6 +319,17 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
// Function returns an open handle.
if (hasFuchsiaAttr<AcquireHandleAttr>(FuncDecl)) {
SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
+ Notes.push_back([RetSym, FuncDecl](BugReport &BR) -> std::string {
+ auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
+ if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Function '" << FuncDecl->getNameAsString()
+ << "' returns an open handle";
+ return OS.str();
+ } else
+ return "";
+ });
State =
State->set<HStateMap>(RetSym, HandleState::getMaybeAllocated(nullptr));
}
@@ -322,6 +338,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
if (Arg >= FuncDecl->getNumParams())
break;
const ParmVarDecl *PVD = FuncDecl->getParamDecl(Arg);
+ unsigned ParamDiagIdx = PVD->getFunctionScopeIndex() + 1;
SymbolRef Handle =
getFuchsiaHandleSymbol(PVD->getType(), Call.getArgSVal(Arg), State);
if (!Handle)
@@ -335,20 +352,28 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
reportDoubleRelease(Handle, Call.getArgSourceRange(Arg), C);
return;
} else {
- Notes.push_back([Handle](BugReport &BR) {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
- return "Handle released here.";
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Handle released through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
} else
return "";
});
State = State->set<HStateMap>(Handle, HandleState::getReleased());
}
} else if (hasFuchsiaAttr<AcquireHandleAttr>(PVD)) {
- Notes.push_back([Handle](BugReport &BR) {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
- return "Handle allocated here.";
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Handle allocated through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
} else
return "";
});
@@ -358,8 +383,8 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
}
const NoteTag *T = nullptr;
if (!Notes.empty()) {
- T = C.getNoteTag(
- [this, Notes{std::move(Notes)}](BugReport &BR) -> std::string {
+ T = C.getNoteTag([this, Notes{std::move(Notes)}](
+ PathSensitiveBugReport &BR) -> std::string {
if (&BR.getBugType() != &UseAfterReleaseBugType &&
&BR.getBugType() != &LeakBugType &&
&BR.getBugType() != &DoubleReleaseBugType)
@@ -381,7 +406,13 @@ void FuchsiaHandleChecker::checkDeadSymbols(SymbolReaper &SymReaper,
SmallVector<SymbolRef, 2> LeakedSyms;
HStateMapTy TrackedHandles = State->get<HStateMap>();
for (auto &CurItem : TrackedHandles) {
- if (!SymReaper.isDead(CurItem.first))
+ SymbolRef ErrorSym = CurItem.second.getErrorSym();
+    // Keep zombie handle symbols: if the error symbol dies later than the
+    // handle symbol, we might otherwise produce spurious leak warnings (in
+    // case we learn later from the status code that the handle allocation
+    // failed in the first place).
+ if (!SymReaper.isDead(CurItem.first) ||
+ (ErrorSym && !SymReaper.isDead(ErrorSym)))
continue;
if (CurItem.second.isAllocated() || CurItem.second.maybeAllocated())
LeakedSyms.push_back(CurItem.first);
@@ -535,7 +566,7 @@ void ento::registerFuchsiaHandleChecker(CheckerManager &mgr) {
mgr.registerChecker<FuchsiaHandleChecker>();
}
-bool ento::shouldRegisterFuchsiaHandleChecker(const LangOptions &LO) {
+bool ento::shouldRegisterFuchsiaHandleChecker(const CheckerManager &mgr) {
return true;
}
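The note text now names the parameter by its 1-based index; llvm::getOrdinalSuffix from llvm/ADT/StringExtras.h (the include added above) supplies the English ordinal suffix. A small self-contained sketch of that formatting, with an assumed index:

    #include "llvm/ADT/StringExtras.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    int main() {
      unsigned ParamDiagIdx = 2; // hypothetical: the second parameter
      std::string SBuf;
      llvm::raw_string_ostream OS(SBuf);
      OS << "Handle released through " << ParamDiagIdx
         << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
      llvm::outs() << OS.str() << "\n"; // "Handle released through 2nd parameter"
    }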
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index d471c23b83bf..63fbe75fd498 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -52,18 +52,16 @@ public:
BugReporter &BR) const;
};
-auto callsName(const char *FunctionName)
- -> decltype(callee(functionDecl())) {
+decltype(auto) callsName(const char *FunctionName) {
return callee(functionDecl(hasName(FunctionName)));
}
-auto equalsBoundArgDecl(int ArgIdx, const char *DeclName)
- -> decltype(hasArgument(0, expr())) {
+decltype(auto) equalsBoundArgDecl(int ArgIdx, const char *DeclName) {
return hasArgument(ArgIdx, ignoringParenCasts(declRefExpr(
to(varDecl(equalsBoundNode(DeclName))))));
}
-auto bindAssignmentToDecl(const char *DeclName) -> decltype(hasLHS(expr())) {
+decltype(auto) bindAssignmentToDecl(const char *DeclName) {
return hasLHS(ignoringParenImpCasts(
declRefExpr(to(varDecl().bind(DeclName)))));
}
@@ -227,6 +225,6 @@ void ento::registerGCDAntipattern(CheckerManager &Mgr) {
Mgr.registerChecker<GCDAntipatternChecker>();
}
-bool ento::shouldRegisterGCDAntipattern(const LangOptions &LO) {
+bool ento::shouldRegisterGCDAntipattern(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index f4308f510f0b..8d9afbe88aa8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -291,8 +291,9 @@ void ento::registerGTestChecker(CheckerManager &Mgr) {
Mgr.registerChecker<GTestChecker>();
}
-bool ento::shouldRegisterGTestChecker(const LangOptions &LO) {
+bool ento::shouldRegisterGTestChecker(const CheckerManager &mgr) {
  // gtest is a C++ API, so there is no point in running the checker
  // if not compiling for C++.
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 302d5bb1bea8..c06d2fcd8e7d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -22,11 +22,14 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/YAMLTraits.h"
+
#include <algorithm>
#include <limits>
+#include <memory>
#include <unordered_map>
#include <utility>
@@ -35,17 +38,15 @@ using namespace ento;
using namespace taint;
namespace {
-class GenericTaintChecker
- : public Checker<check::PostStmt<CallExpr>, check::PreStmt<CallExpr>> {
+class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
public:
static void *getTag() {
static int Tag;
return &Tag;
}
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
-
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
@@ -81,7 +82,7 @@ public:
/// Convert SignedArgVector to ArgVector.
ArgVector convertToArgVector(CheckerManager &Mgr, const std::string &Option,
- SignedArgVector Args);
+ const SignedArgVector &Args);
/// Parse the config.
void parseConfiguration(CheckerManager &Mgr, const std::string &Option,
@@ -96,7 +97,8 @@ private:
mutable std::unique_ptr<BugType> BT;
void initBugType() const {
if (!BT)
- BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
+ BT = std::make_unique<BugType>(this, "Use of Untrusted Data",
+ "Untrusted Data");
}
struct FunctionData {
@@ -106,9 +108,12 @@ private:
FunctionData &operator=(const FunctionData &) = delete;
FunctionData &operator=(FunctionData &&) = delete;
- static Optional<FunctionData> create(const CallExpr *CE,
+ static Optional<FunctionData> create(const CallEvent &Call,
const CheckerContext &C) {
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!Call.getDecl())
+ return None;
+
+ const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
if (!FDecl || (FDecl->getKind() != Decl::Function &&
FDecl->getKind() != Decl::CXXMethod))
return None;
@@ -132,33 +137,33 @@ private:
/// Catch taint related bugs. Check if tainted data is passed to a
/// system call etc. Returns true on matching.
- bool checkPre(const CallExpr *CE, const FunctionData &FData,
+ bool checkPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Add taint sources on a pre-visit. Returns true on matching.
- bool addSourcesPre(const CallExpr *CE, const FunctionData &FData,
+ bool addSourcesPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Mark filter's arguments not tainted on a pre-visit. Returns true on
/// matching.
- bool addFiltersPre(const CallExpr *CE, const FunctionData &FData,
+ bool addFiltersPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Propagate taint generated at pre-visit. Returns true on matching.
- bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+ static bool propagateFromPre(const CallEvent &Call, CheckerContext &C);
/// Check if the region the expression evaluates to is the standard input,
/// and thus, is tainted.
static bool isStdin(const Expr *E, CheckerContext &C);
/// Given a pointer argument, return the value it points to.
- static Optional<SVal> getPointedToSVal(CheckerContext &C, const Expr *Arg);
+ static Optional<SVal> getPointeeOf(CheckerContext &C, const Expr *Arg);
/// Check for CWE-134: Uncontrolled Format String.
static constexpr llvm::StringLiteral MsgUncontrolledFormatString =
"Untrusted data is used as a format string "
"(CWE-134: Uncontrolled Format String)";
- bool checkUncontrolledFormatString(const CallExpr *CE,
+ bool checkUncontrolledFormatString(const CallEvent &Call,
CheckerContext &C) const;
/// Check for:
@@ -167,7 +172,7 @@ private:
static constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
"Untrusted data is passed to a system call "
"(CERT/STR02-C. Sanitize data passed to complex subsystems)";
- bool checkSystemCall(const CallExpr *CE, StringRef Name,
+ bool checkSystemCall(const CallEvent &Call, StringRef Name,
CheckerContext &C) const;
  /// Check if tainted data is used as a buffer size in strn.. functions,
@@ -176,13 +181,12 @@ private:
"Untrusted data is used to specify the buffer size "
"(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
"for character data and the null terminator)";
- bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
- CheckerContext &C) const;
+ bool checkTaintedBufferSize(const CallEvent &Call, CheckerContext &C) const;
/// Check if tainted data is used as a custom sink's parameter.
static constexpr llvm::StringLiteral MsgCustomSink =
"Untrusted data is passed to a user-defined sink";
- bool checkCustomSinks(const CallExpr *CE, const FunctionData &FData,
+ bool checkCustomSinks(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Generate a report if the expression is tainted or points to tainted data.
@@ -212,7 +216,7 @@ private:
/// ReturnValueIndex is added to the dst list, the return value will be
/// tainted.
struct TaintPropagationRule {
- using PropagationFuncType = bool (*)(bool IsTainted, const CallExpr *,
+ using PropagationFuncType = bool (*)(bool IsTainted, const CallEvent &Call,
CheckerContext &C);
/// List of arguments which can be taint sources and should be checked.
@@ -256,7 +260,8 @@ private:
return (llvm::find(DstArgs, ArgNum) != DstArgs.end());
}
- static bool isTaintedOrPointsToTainted(const Expr *E, ProgramStateRef State,
+ static bool isTaintedOrPointsToTainted(const Expr *E,
+ const ProgramStateRef &State,
CheckerContext &C) {
if (isTainted(State, E, C.getLocationContext()) || isStdin(E, C))
return true;
@@ -264,16 +269,16 @@ private:
if (!E->getType().getTypePtr()->isPointerType())
return false;
- Optional<SVal> V = getPointedToSVal(C, E);
+ Optional<SVal> V = getPointeeOf(C, E);
return (V && isTainted(State, *V));
}
/// Pre-process a function which propagates taint according to the
/// taint rule.
- ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef process(const CallEvent &Call, CheckerContext &C) const;
// Functions for custom taintedness propagation.
- static bool postSocket(bool IsTainted, const CallExpr *CE,
+ static bool postSocket(bool IsTainted, const CallEvent &Call,
CheckerContext &C);
};
@@ -351,8 +356,10 @@ template <> struct MappingTraits<TaintConfig::NameScopeArgs> {
/// points to data, which should be tainted on return.
REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
-GenericTaintChecker::ArgVector GenericTaintChecker::convertToArgVector(
- CheckerManager &Mgr, const std::string &Option, SignedArgVector Args) {
+GenericTaintChecker::ArgVector
+GenericTaintChecker::convertToArgVector(CheckerManager &Mgr,
+ const std::string &Option,
+ const SignedArgVector &Args) {
ArgVector Result;
for (int Arg : Args) {
if (Arg == -1)
@@ -396,7 +403,7 @@ void GenericTaintChecker::parseConfiguration(CheckerManager &Mgr,
template <typename T>
auto GenericTaintChecker::findFunctionInConfig(const ConfigDataMap<T> &Map,
const FunctionData &FData) {
- auto Range = Map.equal_range(FData.Name);
+ auto Range = Map.equal_range(std::string(FData.Name));
auto It =
std::find_if(Range.first, Range.second, [&FData](const auto &Entry) {
const auto &Value = Entry.second;
@@ -419,125 +426,125 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
llvm::StringSwitch<TaintPropagationRule>(FData.FullName)
// Source functions
// TODO: Add support for vfscanf & family.
- .Case("fdopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("fopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("freopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getch", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getchar", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getchar_unlocked",
- TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getenv", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("gets", TaintPropagationRule({}, {0, ReturnValueIndex}))
- .Case("scanf", TaintPropagationRule({}, {}, VariadicType::Dst, 1))
- .Case("socket",
- TaintPropagationRule({}, {ReturnValueIndex}, VariadicType::None,
- InvalidArgIndex,
- &TaintPropagationRule::postSocket))
- .Case("wgetch", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("fdopen", {{}, {ReturnValueIndex}})
+ .Case("fopen", {{}, {ReturnValueIndex}})
+ .Case("freopen", {{}, {ReturnValueIndex}})
+ .Case("getch", {{}, {ReturnValueIndex}})
+ .Case("getchar", {{}, {ReturnValueIndex}})
+ .Case("getchar_unlocked", {{}, {ReturnValueIndex}})
+ .Case("getenv", {{}, {ReturnValueIndex}})
+ .Case("gets", {{}, {0, ReturnValueIndex}})
+ .Case("scanf", {{}, {}, VariadicType::Dst, 1})
+ .Case("socket", {{},
+ {ReturnValueIndex},
+ VariadicType::None,
+ InvalidArgIndex,
+ &TaintPropagationRule::postSocket})
+ .Case("wgetch", {{}, {ReturnValueIndex}})
// Propagating functions
- .Case("atoi", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("atol", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("atoll", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgetc", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgetln", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgets", TaintPropagationRule({2}, {0, ReturnValueIndex}))
- .Case("fscanf", TaintPropagationRule({0}, {}, VariadicType::Dst, 2))
- .Case("getc", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("getc_unlocked", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("getdelim", TaintPropagationRule({3}, {0}))
- .Case("getline", TaintPropagationRule({2}, {0}))
- .Case("getw", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("pread",
- TaintPropagationRule({0, 1, 2, 3}, {1, ReturnValueIndex}))
- .Case("read", TaintPropagationRule({0, 2}, {1, ReturnValueIndex}))
- .Case("strchr", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("strrchr", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("tolower", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("toupper", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Default(TaintPropagationRule());
+ .Case("atoi", {{0}, {ReturnValueIndex}})
+ .Case("atol", {{0}, {ReturnValueIndex}})
+ .Case("atoll", {{0}, {ReturnValueIndex}})
+ .Case("fgetc", {{0}, {ReturnValueIndex}})
+ .Case("fgetln", {{0}, {ReturnValueIndex}})
+ .Case("fgets", {{2}, {0, ReturnValueIndex}})
+ .Case("fscanf", {{0}, {}, VariadicType::Dst, 2})
+ .Case("sscanf", {{0}, {}, VariadicType::Dst, 2})
+ .Case("getc", {{0}, {ReturnValueIndex}})
+ .Case("getc_unlocked", {{0}, {ReturnValueIndex}})
+ .Case("getdelim", {{3}, {0}})
+ .Case("getline", {{2}, {0}})
+ .Case("getw", {{0}, {ReturnValueIndex}})
+ .Case("pread", {{0, 1, 2, 3}, {1, ReturnValueIndex}})
+ .Case("read", {{0, 2}, {1, ReturnValueIndex}})
+ .Case("strchr", {{0}, {ReturnValueIndex}})
+ .Case("strrchr", {{0}, {ReturnValueIndex}})
+ .Case("tolower", {{0}, {ReturnValueIndex}})
+ .Case("toupper", {{0}, {ReturnValueIndex}})
+ .Default({});
if (!Rule.isNull())
return Rule;
+ assert(FData.FDecl);
// Check if it's one of the memory setting/copying functions.
  // This check is specialized but faster than calling isCLibraryFunction.
const FunctionDecl *FDecl = FData.FDecl;
unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind()))
+ if ((BId = FDecl->getMemoryFunctionKind())) {
switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
case Builtin::BIstrncat:
- return TaintPropagationRule({1, 2}, {0, ReturnValueIndex});
+ return {{1, 2}, {0, ReturnValueIndex}};
case Builtin::BIstrlcpy:
case Builtin::BIstrlcat:
- return TaintPropagationRule({1, 2}, {0});
+ return {{1, 2}, {0}};
case Builtin::BIstrndup:
- return TaintPropagationRule({0, 1}, {ReturnValueIndex});
+ return {{0, 1}, {ReturnValueIndex}};
default:
break;
- };
+ }
+ }
// Process all other functions which could be defined as builtins.
if (Rule.isNull()) {
- if (C.isCLibraryFunction(FDecl, "snprintf"))
- return TaintPropagationRule({1}, {0, ReturnValueIndex}, VariadicType::Src,
- 3);
- else if (C.isCLibraryFunction(FDecl, "sprintf"))
- return TaintPropagationRule({}, {0, ReturnValueIndex}, VariadicType::Src,
- 2);
- else if (C.isCLibraryFunction(FDecl, "strcpy") ||
- C.isCLibraryFunction(FDecl, "stpcpy") ||
- C.isCLibraryFunction(FDecl, "strcat"))
- return TaintPropagationRule({1}, {0, ReturnValueIndex});
- else if (C.isCLibraryFunction(FDecl, "bcopy"))
- return TaintPropagationRule({0, 2}, {1});
- else if (C.isCLibraryFunction(FDecl, "strdup") ||
- C.isCLibraryFunction(FDecl, "strdupa"))
- return TaintPropagationRule({0}, {ReturnValueIndex});
- else if (C.isCLibraryFunction(FDecl, "wcsdup"))
- return TaintPropagationRule({0}, {ReturnValueIndex});
+ const auto OneOf = [FDecl](const auto &... Name) {
+ // FIXME: use fold expression in C++17
+ using unused = int[];
+ bool ret = false;
+ static_cast<void>(unused{
+ 0, (ret |= CheckerContext::isCLibraryFunction(FDecl, Name), 0)...});
+ return ret;
+ };
+ if (OneOf("snprintf"))
+ return {{1}, {0, ReturnValueIndex}, VariadicType::Src, 3};
+ if (OneOf("sprintf"))
+ return {{}, {0, ReturnValueIndex}, VariadicType::Src, 2};
+ if (OneOf("strcpy", "stpcpy", "strcat"))
+ return {{1}, {0, ReturnValueIndex}};
+ if (OneOf("bcopy"))
+ return {{0, 2}, {1}};
+ if (OneOf("strdup", "strdupa", "wcsdup"))
+ return {{0}, {ReturnValueIndex}};
}
- // Skipping the following functions, since they might be used for cleansing
- // or smart memory copy:
+ // Skipping the following functions, since they might be used for cleansing or
+ // smart memory copy:
// - memccpy - copying until hitting a special character.
auto It = findFunctionInConfig(CustomPropagations, FData);
- if (It != CustomPropagations.end()) {
- const auto &Value = It->second;
- return Value.second;
- }
-
- return TaintPropagationRule();
+ if (It != CustomPropagations.end())
+ return It->second.second;
+ return {};
}
-void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+void GenericTaintChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<FunctionData> FData = FunctionData::create(CE, C);
+ Optional<FunctionData> FData = FunctionData::create(Call, C);
if (!FData)
return;
// Check for taintedness related errors first: system call, uncontrolled
// format string, tainted buffer size.
- if (checkPre(CE, *FData, C))
+ if (checkPre(Call, *FData, C))
return;
  // Marks the function's arguments and/or return value tainted if present in
// the list.
- if (addSourcesPre(CE, *FData, C))
+ if (addSourcesPre(Call, *FData, C))
return;
- addFiltersPre(CE, *FData, C);
+ addFiltersPre(Call, *FData, C);
}
-void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+void GenericTaintChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
  // Set the marked values as tainted. The return value is only accessible from
  // checkPostCall.
- propagateFromPre(CE, C);
+ propagateFromPre(Call, C);
}
void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
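Each brace-initialized entry in the rewritten rule table is a TaintPropagationRule of the form {SrcArgs, DstArgs[, VariadicType, VariadicIndex[, PropagationFunc]]}: if any source argument carries taint (or, for VariadicType::Src, any variadic argument), every listed destination argument and the return value become tainted after the call, and an empty source list, as in the source functions above, taints the destinations unconditionally. For example, .Case("fgets", {{2}, {0, ReturnValueIndex}}) means a tainted stream argument taints the destination buffer and the returned pointer. A small sketch of the effect on user code, assuming the taint checker is enabled (stdin is treated as a taint source via isStdin above):

    #include <stdio.h>

    void read_line() {
      char Buf[128];
      // Argument 2 is stdin, which isStdin() classifies as a taint source, so
      // after the call both the buffer (argument 0) and the returned pointer
      // are considered tainted.
      char *Line = fgets(Buf, sizeof(Buf), stdin);
      (void)Line;
    }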
@@ -545,14 +552,14 @@ void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
printTaint(State, Out, NL, Sep);
}
-bool GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+bool GenericTaintChecker::addSourcesPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
// First, try generating a propagation rule for this function.
TaintPropagationRule Rule = TaintPropagationRule::getTaintPropagationRule(
this->CustomPropagations, FData, C);
if (!Rule.isNull()) {
- ProgramStateRef State = Rule.process(CE, C);
+ ProgramStateRef State = Rule.process(Call, C);
if (State) {
C.addTransition(State);
return true;
@@ -561,7 +568,7 @@ bool GenericTaintChecker::addSourcesPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
+bool GenericTaintChecker::addFiltersPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
auto It = findFunctionInConfig(CustomFilters, FData);
@@ -572,11 +579,11 @@ bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
const auto &Value = It->second;
const ArgVector &Args = Value.second;
for (unsigned ArgNum : Args) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- const Expr *Arg = CE->getArg(ArgNum);
- Optional<SVal> V = getPointedToSVal(C, Arg);
+ const Expr *Arg = Call.getArgExpr(ArgNum);
+ Optional<SVal> V = getPointeeOf(C, Arg);
if (V)
State = removeTaint(State, *V);
}
@@ -588,8 +595,8 @@ bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
- CheckerContext &C) const {
+bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
+ CheckerContext &C) {
ProgramStateRef State = C.getState();
// Depending on what was tainted at pre-visit, we determined a set of
@@ -602,16 +609,16 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
for (unsigned ArgNum : TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
- State = addTaint(State, CE, C.getLocationContext());
+ State = addTaint(State, Call.getReturnValue());
continue;
}
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- if (CE->getNumArgs() < (ArgNum + 1))
+ if (Call.getNumArgs() < (ArgNum + 1))
return false;
- const Expr *Arg = CE->getArg(ArgNum);
- Optional<SVal> V = getPointedToSVal(C, Arg);
+ const Expr *Arg = Call.getArgExpr(ArgNum);
+ Optional<SVal> V = getPointeeOf(C, Arg);
if (V)
State = addTaint(State, *V);
}
@@ -626,27 +633,23 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::checkPre(const CallExpr *CE,
+bool GenericTaintChecker::checkPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
-
- if (checkUncontrolledFormatString(CE, C))
- return true;
-
- if (checkSystemCall(CE, FData.Name, C))
+ if (checkUncontrolledFormatString(Call, C))
return true;
- if (checkTaintedBufferSize(CE, FData.FDecl, C))
+ if (checkSystemCall(Call, FData.Name, C))
return true;
- if (checkCustomSinks(CE, FData, C))
+ if (checkTaintedBufferSize(Call, C))
return true;
- return false;
+ return checkCustomSinks(Call, FData, C);
}
-Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C,
- const Expr *Arg) {
+Optional<SVal> GenericTaintChecker::getPointeeOf(CheckerContext &C,
+ const Expr *Arg) {
ProgramStateRef State = C.getState();
SVal AddrVal = C.getSVal(Arg->IgnoreParens());
if (AddrVal.isUnknownOrUndef())
@@ -671,31 +674,33 @@ Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C,
}
ProgramStateRef
-GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+GenericTaintChecker::TaintPropagationRule::process(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Check for taint in arguments.
bool IsTainted = true;
for (unsigned ArgNum : SrcArgs) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+ if ((IsTainted =
+ isTaintedOrPointsToTainted(Call.getArgExpr(ArgNum), State, C)))
break;
}
// Check for taint in variadic arguments.
if (!IsTainted && VariadicType::Src == VarType) {
// Check if any of the arguments is tainted
- for (unsigned i = VariadicIndex; i < CE->getNumArgs(); ++i) {
- if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
+ if ((IsTainted =
+ isTaintedOrPointsToTainted(Call.getArgExpr(i), State, C)))
break;
}
}
if (PropagationFunc)
- IsTainted = PropagationFunc(IsTainted, CE, C);
+ IsTainted = PropagationFunc(IsTainted, Call, C);
if (!IsTainted)
return State;
@@ -708,7 +713,7 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
continue;
}
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
// Mark the given argument.
@@ -721,14 +726,15 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
// If they are not pointing to const data, mark data as tainted.
// TODO: So far we are just going one level down; ideally we'd need to
// recurse here.
- for (unsigned i = VariadicIndex; i < CE->getNumArgs(); ++i) {
- const Expr *Arg = CE->getArg(i);
+ for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
+ const Expr *Arg = Call.getArgExpr(i);
// Process pointer argument.
const Type *ArgTy = Arg->getType().getTypePtr();
QualType PType = ArgTy->getPointeeType();
if ((!PType.isNull() && !PType.isConstQualified()) ||
- (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ (ArgTy->isReferenceType() && !Arg->getType().isConstQualified())) {
State = State->add<TaintArgsOnPostVisit>(i);
+ }
}
}
@@ -736,16 +742,14 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
}
// If argument 0 (the protocol domain) is a network protocol, the return value should be tainted.
-bool GenericTaintChecker::TaintPropagationRule::postSocket(bool /*IsTainted*/,
- const CallExpr *CE,
- CheckerContext &C) {
- SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+bool GenericTaintChecker::TaintPropagationRule::postSocket(
+ bool /*IsTainted*/, const CallEvent &Call, CheckerContext &C) {
+ SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
// Whitelist the internal communication protocols.
if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
return false;
-
return true;
}
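
A minimal user-level sketch (not part of this patch) of what postSocket() distinguishes; it assumes the checker's default propagation rules, under which data read from a tainted descriptor also becomes tainted:

#include <sys/socket.h>
#include <unistd.h>

void readFromNetwork(char *buf, unsigned len) {
  int inet = socket(AF_INET, SOCK_STREAM, 0);  // network domain: the rule
                                               // marks the returned fd tainted
  read(inet, buf, len);                        // buf becomes tainted as well
  int local = socket(AF_UNIX, SOCK_STREAM, 0); // AF_UNIX is whitelisted above,
  read(local, buf, len);                       // so no taint is introduced here
}
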
@@ -757,16 +761,15 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
const MemRegion *MemReg = Val.getAsRegion();
// The region should be symbolic; we do not know its value.
- const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+ const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
if (!SymReg)
return false;
// Get its symbol and find the declaration region it's pointing to.
- const SymbolRegionValue *Sm =
- dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
if (!Sm)
return false;
- const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+ const auto *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
if (!DeclReg)
return false;
@@ -784,23 +787,24 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
return false;
}
-static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+static bool getPrintfFormatArgumentNum(const CallEvent &Call,
const CheckerContext &C,
unsigned &ArgNum) {
// Find if the function contains a format string argument.
// Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
// vsnprintf, syslog, custom annotated functions.
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
if (!FDecl)
return false;
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
- if ((Format->getType()->getName() == "printf") && CE->getNumArgs() > ArgNum)
+ if ((Format->getType()->getName() == "printf") &&
+ Call.getNumArgs() > ArgNum)
return true;
}
// Or if a function is named setproctitle (this is a heuristic).
- if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+ if (C.getCalleeName(FDecl).find("setproctitle") != StringRef::npos) {
ArgNum = 0;
return true;
}
@@ -814,7 +818,7 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
// Check for taint.
ProgramStateRef State = C.getState();
- Optional<SVal> PointedToSVal = getPointedToSVal(C, E);
+ Optional<SVal> PointedToSVal = getPointeeOf(C, E);
SVal TaintedSVal;
if (PointedToSVal && isTainted(State, *PointedToSVal))
TaintedSVal = *PointedToSVal;
@@ -836,19 +840,19 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
}
bool GenericTaintChecker::checkUncontrolledFormatString(
- const CallExpr *CE, CheckerContext &C) const {
+ const CallEvent &Call, CheckerContext &C) const {
// Check if the function contains a format string argument.
unsigned ArgNum = 0;
- if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+ if (!getPrintfFormatArgumentNum(Call, C, ArgNum))
return false;
// If either the format string content or the pointer itself are tainted,
// warn.
- return generateReportIfTainted(CE->getArg(ArgNum),
+ return generateReportIfTainted(Call.getArgExpr(ArgNum),
MsgUncontrolledFormatString, C);
}
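
A minimal sketch (not from this patch) of the pattern checkUncontrolledFormatString() reports, assuming the checker's default configuration in which scanf() acts as a taint source:

#include <cstdio>

void echoLine() {
  char buf[256];
  if (std::scanf("%255s", buf) != 1) // buf becomes tainted (source)
    return;
  std::printf(buf);                  // tainted format string: reported as
                                     // MsgUncontrolledFormatString
  std::printf("%s", buf);            // tainted data only as a %s argument:
                                     // no format-string warning
}
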
-bool GenericTaintChecker::checkSystemCall(const CallExpr *CE, StringRef Name,
+bool GenericTaintChecker::checkSystemCall(const CallEvent &Call, StringRef Name,
CheckerContext &C) const {
// TODO: It might make sense to run this check on demand. In some cases,
// we should check if the environment has been cleansed here. We also might
@@ -866,21 +870,22 @@ bool GenericTaintChecker::checkSystemCall(const CallExpr *CE, StringRef Name,
.Case("dlopen", 0)
.Default(InvalidArgIndex);
- if (ArgNum == InvalidArgIndex || CE->getNumArgs() < (ArgNum + 1))
+ if (ArgNum == InvalidArgIndex || Call.getNumArgs() < (ArgNum + 1))
return false;
- return generateReportIfTainted(CE->getArg(ArgNum), MsgSanitizeSystemArgs, C);
+ return generateReportIfTainted(Call.getArgExpr(ArgNum), MsgSanitizeSystemArgs,
+ C);
}
// TODO: Should this check be a part of the CString checker?
// If yes, should taint be a global setting?
-bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
- const FunctionDecl *FDecl,
+bool GenericTaintChecker::checkTaintedBufferSize(const CallEvent &Call,
CheckerContext &C) const {
+ const auto *FDecl = Call.getDecl()->getAsFunction();
// If the function has a buffer size argument, set ArgNum.
unsigned ArgNum = InvalidArgIndex;
unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind()))
+ if ((BId = FDecl->getMemoryFunctionKind())) {
switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
@@ -892,26 +897,29 @@ bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
break;
default:
break;
- };
+ }
+ }
if (ArgNum == InvalidArgIndex) {
- if (C.isCLibraryFunction(FDecl, "malloc") ||
- C.isCLibraryFunction(FDecl, "calloc") ||
- C.isCLibraryFunction(FDecl, "alloca"))
+ using CCtx = CheckerContext;
+ if (CCtx::isCLibraryFunction(FDecl, "malloc") ||
+ CCtx::isCLibraryFunction(FDecl, "calloc") ||
+ CCtx::isCLibraryFunction(FDecl, "alloca"))
ArgNum = 0;
- else if (C.isCLibraryFunction(FDecl, "memccpy"))
+ else if (CCtx::isCLibraryFunction(FDecl, "memccpy"))
ArgNum = 3;
- else if (C.isCLibraryFunction(FDecl, "realloc"))
+ else if (CCtx::isCLibraryFunction(FDecl, "realloc"))
ArgNum = 1;
- else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ else if (CCtx::isCLibraryFunction(FDecl, "bcopy"))
ArgNum = 2;
}
- return ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
- generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C);
+ return ArgNum != InvalidArgIndex && Call.getNumArgs() > ArgNum &&
+ generateReportIfTainted(Call.getArgExpr(ArgNum), MsgTaintedBufferSize,
+ C);
}
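
A minimal sketch (not from this patch) of what checkTaintedBufferSize() flags, again assuming scanf() as the taint source from the default configuration:

#include <cstdio>
#include <cstdlib>
#include <cstring>

void copyUserSized(const char *src) {
  unsigned long n = 0;
  if (std::scanf("%lu", &n) != 1)    // n becomes tainted
    return;
  char *dst = static_cast<char *>(std::malloc(n)); // malloc: arg 0 is checked
  if (!dst)
    return;
  std::memcpy(dst, src, n);          // memcpy (Builtin::BImemcpy): arg 2 is
  std::free(dst);                    // the checked size
}
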
-bool GenericTaintChecker::checkCustomSinks(const CallExpr *CE,
+bool GenericTaintChecker::checkCustomSinks(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
auto It = findFunctionInConfig(CustomSinks, FData);
@@ -921,10 +929,10 @@ bool GenericTaintChecker::checkCustomSinks(const CallExpr *CE,
const auto &Value = It->second;
const GenericTaintChecker::ArgVector &Args = Value.second;
for (unsigned ArgNum : Args) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- if (generateReportIfTainted(CE->getArg(ArgNum), MsgCustomSink, C))
+ if (generateReportIfTainted(Call.getArgExpr(ArgNum), MsgCustomSink, C))
return true;
}
@@ -942,6 +950,6 @@ void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
Checker->parseConfiguration(Mgr, Option, std::move(Config.getValue()));
}
-bool ento::shouldRegisterGenericTaintChecker(const LangOptions &LO) {
+bool ento::shouldRegisterGenericTaintChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index cc2cfb774227..1cf81b54e77d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -351,6 +351,8 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
case Stmt::CallExprClass:
case Stmt::ArraySubscriptExprClass:
case Stmt::OMPArraySectionExprClass:
+ case Stmt::OMPArrayShapingExprClass:
+ case Stmt::OMPIteratorExprClass:
case Stmt::ImplicitCastExprClass:
case Stmt::ParenExprClass:
case Stmt::BreakStmtClass:
@@ -513,6 +515,6 @@ void ento::registerIdenticalExprChecker(CheckerManager &Mgr) {
Mgr.registerChecker<FindIdenticalExprChecker>();
}
-bool ento::shouldRegisterIdenticalExprChecker(const LangOptions &LO) {
+bool ento::shouldRegisterIdenticalExprChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index dd89c53478e8..65e52e139ee4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -307,6 +307,6 @@ void ento::registerInnerPointerChecker(CheckerManager &Mgr) {
Mgr.registerChecker<InnerPointerChecker>();
}
-bool ento::shouldRegisterInnerPointerChecker(const LangOptions &LO) {
+bool ento::shouldRegisterInnerPointerChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
index 9642588d6a41..99731d6044a0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -11,13 +11,19 @@
#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
-namespace clang {
-class CheckerManager;
+// FIXME: This file goes against how a checker should be implemented: either
+// in a single file or exposed in a header file. Let's try to get rid of it!
+
+namespace clang {
namespace ento {
+class CheckerManager;
+
/// Register the part of MallocChecker connected to InnerPointerChecker.
void registerInnerPointerCheckerAux(CheckerManager &Mgr);
-}}
+} // namespace ento
+} // namespace clang
+
#endif /* INTERCHECKERAPI_H_ */
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
index d1a9a7df071d..6955ba11a28f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
@@ -26,7 +26,10 @@ using namespace iterator;
namespace {
class InvalidatedIteratorChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<UnaryOperator>,
+ check::PreStmt<BinaryOperator>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PreStmt<MemberExpr>> {
std::unique_ptr<BugType> InvalidatedBugType;
@@ -37,6 +40,10 @@ public:
InvalidatedIteratorChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
+ void checkPreStmt(const ArraySubscriptExpr *ASE, CheckerContext &C) const;
+ void checkPreStmt(const MemberExpr *ME, CheckerContext &C) const;
};
@@ -65,6 +72,48 @@ void InvalidatedIteratorChecker::checkPreCall(const CallEvent &Call,
}
}
+void InvalidatedIteratorChecker::checkPreStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ if (isa<CXXThisExpr>(UO->getSubExpr()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ UnaryOperatorKind OK = UO->getOpcode();
+ SVal SubVal = State->getSVal(UO->getSubExpr(), C.getLocationContext());
+
+ if (isAccessOperator(OK)) {
+ verifyAccess(C, SubVal);
+ }
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+
+ if (isAccessOperator(OK)) {
+ verifyAccess(C, LVal);
+ }
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const ArraySubscriptExpr *ASE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(ASE->getLHS(), C.getLocationContext());
+ verifyAccess(C, LVal);
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const MemberExpr *ME,
+ CheckerContext &C) const {
+ if (!ME->isArrow() || ME->isImplicitAccess())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal BaseVal = State->getSVal(ME->getBase(), C.getLocationContext());
+ verifyAccess(C, BaseVal);
+}
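
A minimal sketch (not from this patch) of the accesses the new checkPreStmt callbacks cover; it assumes the invalidation itself is modeled by the container handling elsewhere, and that vector<int>::iterator is a plain pointer, so the dereference is a built-in operator rather than an overloaded-operator call:

#include <vector>

int useAfterClear(std::vector<int> &v) {
  auto it = v.begin(); // position bound to 'v' by the container modeling
  v.clear();           // clear() invalidates all positions into 'v'
  return *it;          // built-in '*' on an invalidated iterator: now reached
                       // through checkPreStmt(UnaryOperator)
}
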
+
void InvalidatedIteratorChecker::verifyAccess(CheckerContext &C, const SVal &Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
@@ -90,6 +139,6 @@ void ento::registerInvalidatedIteratorChecker(CheckerManager &mgr) {
mgr.registerChecker<InvalidatedIteratorChecker>();
}
-bool ento::shouldRegisterInvalidatedIteratorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterInvalidatedIteratorChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index 6bca5515724c..ac0f24603dd9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -128,24 +128,54 @@ bool isAccessOperator(OverloadedOperatorKind OK) {
isDecrementOperator(OK) || isRandomIncrOrDecrOperator(OK);
}
+bool isAccessOperator(UnaryOperatorKind OK) {
+ return isDereferenceOperator(OK) || isIncrementOperator(OK) ||
+ isDecrementOperator(OK);
+}
+
+bool isAccessOperator(BinaryOperatorKind OK) {
+ return isDereferenceOperator(OK) || isRandomIncrOrDecrOperator(OK);
+}
+
bool isDereferenceOperator(OverloadedOperatorKind OK) {
return OK == OO_Star || OK == OO_Arrow || OK == OO_ArrowStar ||
OK == OO_Subscript;
}
+bool isDereferenceOperator(UnaryOperatorKind OK) {
+ return OK == UO_Deref;
+}
+
+bool isDereferenceOperator(BinaryOperatorKind OK) {
+ return OK == BO_PtrMemI;
+}
+
bool isIncrementOperator(OverloadedOperatorKind OK) {
return OK == OO_PlusPlus;
}
+bool isIncrementOperator(UnaryOperatorKind OK) {
+ return OK == UO_PreInc || OK == UO_PostInc;
+}
+
bool isDecrementOperator(OverloadedOperatorKind OK) {
return OK == OO_MinusMinus;
}
+bool isDecrementOperator(UnaryOperatorKind OK) {
+ return OK == UO_PreDec || OK == UO_PostDec;
+}
+
bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK) {
return OK == OO_Plus || OK == OO_PlusEqual || OK == OO_Minus ||
OK == OO_MinusEqual;
}
+bool isRandomIncrOrDecrOperator(BinaryOperatorKind OK) {
+ return OK == BO_Add || OK == BO_AddAssign ||
+ OK == BO_Sub || OK == BO_SubAssign;
+}
+
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont) {
return State->get<ContainerMap>(Cont);
@@ -177,6 +207,20 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
return nullptr;
}
+ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
+ const MemRegion *Cont, const Stmt* S,
+ const LocationContext *LCtx,
+ unsigned blockCount) {
+ auto &StateMgr = State->getStateManager();
+ auto &SymMgr = StateMgr.getSymbolManager();
+ auto &ACtx = StateMgr.getContext();
+
+ auto Sym = SymMgr.conjureSymbol(S, LCtx, ACtx.LongTy, blockCount);
+ State = assumeNoOverflow(State, Sym, 4);
+ return setIteratorPosition(State, Val,
+ IteratorPosition::getPosition(Cont, Sym));
+}
+
ProgramStateRef advancePosition(ProgramStateRef State, const SVal &Iter,
OverloadedOperatorKind Op,
const SVal &Distance) {
@@ -186,22 +230,70 @@ ProgramStateRef advancePosition(ProgramStateRef State, const SVal &Iter,
auto &SymMgr = State->getStateManager().getSymbolManager();
auto &SVB = State->getStateManager().getSValBuilder();
+ auto &BVF = State->getStateManager().getBasicVals();
assert ((Op == OO_Plus || Op == OO_PlusEqual ||
Op == OO_Minus || Op == OO_MinusEqual) &&
"Advance operator must be one of +, -, += and -=.");
auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- if (const auto IntDist = Distance.getAs<nonloc::ConcreteInt>()) {
- // For concrete integers we can calculate the new position
- const auto NewPos =
- Pos->setTo(SVB.evalBinOp(State, BinOp,
- nonloc::SymbolVal(Pos->getOffset()),
- *IntDist, SymMgr.getType(Pos->getOffset()))
- .getAsSymbol());
- return setIteratorPosition(State, Iter, NewPos);
+ const auto IntDistOp = Distance.getAs<nonloc::ConcreteInt>();
+ if (!IntDistOp)
+ return nullptr;
+
+ // For concrete integers we can calculate the new position
+ nonloc::ConcreteInt IntDist = *IntDistOp;
+
+ if (IntDist.getValue().isNegative()) {
+ IntDist = nonloc::ConcreteInt(BVF.getValue(-IntDist.getValue()));
+ BinOp = (BinOp == BO_Add) ? BO_Sub : BO_Add;
}
+ const auto NewPos =
+ Pos->setTo(SVB.evalBinOp(State, BinOp,
+ nonloc::SymbolVal(Pos->getOffset()),
+ IntDist, SymMgr.getType(Pos->getOffset()))
+ .getAsSymbol());
+ return setIteratorPosition(State, Iter, NewPos);
+}
- return nullptr;
+// This function tells the analyzer's engine that symbols produced by our
+// checker, most notably iterator positions, are relatively small.
+// A distance between items in the container should not be very large.
+// By assuming that it is within around 1/8 of the address space,
+// we can help the analyzer perform operations on these symbols
+// without being afraid of integer overflows.
+// FIXME: Should we provide it as an API, so that all checkers could use it?
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+
+ QualType T = Sym->getType();
+ assert(T->isSignedIntegerOrEnumerationType());
+ APSIntType AT = BV.getAPSIntType(T);
+
+ ProgramStateRef NewState = State;
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
+ SVal IsCappedFromAbove =
+ SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Max), SVB.getConditionType());
+ if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ llvm::APSInt Min = -Max;
+ SVal IsCappedFromBelow =
+ SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Min), SVB.getConditionType());
+ if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ return NewState;
}
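
A numeric illustration only (not analyzer code) of the cap this helper assumes for a 64-bit signed offset symbol when called with Scale == 4, as createIteratorPosition() above does:

#include <cstdint>
#include <cstdio>

int main() {
  const std::int64_t Max = INT64_MAX / 4; // upper cap, roughly 2.3e18
  const std::int64_t Min = -Max;          // symmetric lower cap
  std::printf("assumed offset range: [%lld, %lld]\n",
              static_cast<long long>(Min), static_cast<long long>(Max));
}
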
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
index c10d86691693..37157492fe3e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
@@ -115,9 +115,12 @@ class IteratorSymbolMap {};
class IteratorRegionMap {};
class ContainerMap {};
-using IteratorSymbolMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef, IteratorPosition);
-using IteratorRegionMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, IteratorPosition);
-using ContainerMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, ContainerData);
+using IteratorSymbolMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef, IteratorPosition);
+using IteratorRegionMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, IteratorPosition);
+using ContainerMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, ContainerData);
} // namespace iterator
@@ -149,20 +152,33 @@ bool isEraseCall(const FunctionDecl *Func);
bool isEraseAfterCall(const FunctionDecl *Func);
bool isEmplaceCall(const FunctionDecl *Func);
bool isAccessOperator(OverloadedOperatorKind OK);
+bool isAccessOperator(UnaryOperatorKind OK);
+bool isAccessOperator(BinaryOperatorKind OK);
bool isDereferenceOperator(OverloadedOperatorKind OK);
+bool isDereferenceOperator(UnaryOperatorKind OK);
+bool isDereferenceOperator(BinaryOperatorKind OK);
bool isIncrementOperator(OverloadedOperatorKind OK);
+bool isIncrementOperator(UnaryOperatorKind OK);
bool isDecrementOperator(OverloadedOperatorKind OK);
+bool isDecrementOperator(UnaryOperatorKind OK);
bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
+bool isRandomIncrOrDecrOperator(BinaryOperatorKind OK);
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont);
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const SVal &Val);
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos);
+ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
+ const MemRegion *Cont, const Stmt* S,
+ const LocationContext *LCtx,
+ unsigned blockCount);
ProgramStateRef advancePosition(ProgramStateRef State,
const SVal &Iter,
OverloadedOperatorKind Op,
const SVal &Distance);
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc);
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index eb962a2ffd9e..fd8cbd694b24 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Defines a checker for using iterators outside their range (past end). Usage
-// means here dereferencing, incrementing etc.
+// Defines a modeling-checker for modeling STL iterator-like iterators.
//
//===----------------------------------------------------------------------===//
//
@@ -84,9 +83,20 @@ using namespace iterator;
namespace {
class IteratorModeling
- : public Checker<check::PostCall, check::PostStmt<MaterializeTemporaryExpr>,
+ : public Checker<check::PostCall, check::PostStmt<UnaryOperator>,
+ check::PostStmt<BinaryOperator>,
+ check::PostStmt<MaterializeTemporaryExpr>,
check::Bind, check::LiveSymbols, check::DeadSymbols> {
+ using AdvanceFn = void (IteratorModeling::*)(CheckerContext &, const Expr *,
+ SVal, SVal, SVal) const;
+
+ void handleOverloadedOperator(CheckerContext &C, const CallEvent &Call,
+ OverloadedOperatorKind Op) const;
+ void handleAdvanceLikeFunction(CheckerContext &C, const CallEvent &Call,
+ const Expr *OrigExpr,
+ const AdvanceFn *Handler) const;
+
void handleComparison(CheckerContext &C, const Expr *CE, SVal RetVal,
const SVal &LVal, const SVal &RVal,
OverloadedOperatorKind Op) const;
@@ -100,35 +110,46 @@ class IteratorModeling
void handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
OverloadedOperatorKind Op, const SVal &RetVal,
const SVal &LHS, const SVal &RHS) const;
- void handleBegin(CheckerContext &C, const Expr *CE, const SVal &RetVal,
- const SVal &Cont) const;
- void handleEnd(CheckerContext &C, const Expr *CE, const SVal &RetVal,
- const SVal &Cont) const;
+ void handlePtrIncrOrDecr(CheckerContext &C, const Expr *Iterator,
+ OverloadedOperatorKind OK, SVal Offset) const;
+ void handleAdvance(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
+ void handlePrev(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
+ void handleNext(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
void assignToContainer(CheckerContext &C, const Expr *CE, const SVal &RetVal,
const MemRegion *Cont) const;
- void handleAssign(CheckerContext &C, const SVal &Cont,
- const Expr *CE = nullptr,
- const SVal &OldCont = UndefinedVal()) const;
- void handleClear(CheckerContext &C, const SVal &Cont) const;
- void handlePushBack(CheckerContext &C, const SVal &Cont) const;
- void handlePopBack(CheckerContext &C, const SVal &Cont) const;
- void handlePushFront(CheckerContext &C, const SVal &Cont) const;
- void handlePopFront(CheckerContext &C, const SVal &Cont) const;
- void handleInsert(CheckerContext &C, const SVal &Iter) const;
- void handleErase(CheckerContext &C, const SVal &Iter) const;
- void handleErase(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const;
- void handleEraseAfter(CheckerContext &C, const SVal &Iter) const;
- void handleEraseAfter(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const;
+ bool noChangeInAdvance(CheckerContext &C, SVal Iter, const Expr *CE) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
+ // std::advance, std::prev & std::next
+ CallDescriptionMap<AdvanceFn> AdvanceLikeFunctions = {
+ // template<class InputIt, class Distance>
+ // void advance(InputIt& it, Distance n);
+ {{{"std", "advance"}, 2}, &IteratorModeling::handleAdvance},
+
+ // template<class BidirIt>
+ // BidirIt prev(
+ // BidirIt it,
+ // typename std::iterator_traits<BidirIt>::difference_type n = 1);
+ {{{"std", "prev"}, 2}, &IteratorModeling::handlePrev},
+
+ // template<class ForwardIt>
+ // ForwardIt next(
+ // ForwardIt it,
+ // typename std::iterator_traits<ForwardIt>::difference_type n = 1);
+ {{{"std", "next"}, 2}, &IteratorModeling::handleNext},
+ };
+
public:
- IteratorModeling() {}
+ IteratorModeling() = default;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
void checkPostStmt(const CXXConstructExpr *CCE, CheckerContext &C) const;
void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
@@ -137,68 +158,14 @@ public:
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
};
-bool isBeginCall(const FunctionDecl *Func);
-bool isEndCall(const FunctionDecl *Func);
-bool isAssignCall(const FunctionDecl *Func);
-bool isClearCall(const FunctionDecl *Func);
-bool isPushBackCall(const FunctionDecl *Func);
-bool isEmplaceBackCall(const FunctionDecl *Func);
-bool isPopBackCall(const FunctionDecl *Func);
-bool isPushFrontCall(const FunctionDecl *Func);
-bool isEmplaceFrontCall(const FunctionDecl *Func);
-bool isPopFrontCall(const FunctionDecl *Func);
-bool isAssignmentOperator(OverloadedOperatorKind OK);
bool isSimpleComparisonOperator(OverloadedOperatorKind OK);
-bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
-bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
-bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
-SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
-SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
-ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
- unsigned BlockCount);
-ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
- const LocationContext *LCtx,
- unsigned BlockCount);
-ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
- const ContainerData &CData);
+bool isSimpleComparisonOperator(BinaryOperatorKind OK);
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val);
-ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
- long Scale);
-ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont);
-ProgramStateRef
-invalidateAllIteratorPositionsExcept(ProgramStateRef State,
- const MemRegion *Cont, SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset1,
- BinaryOperator::Opcode Opc1,
- SymbolRef Offset2,
- BinaryOperator::Opcode Opc2);
-ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont);
-ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef rebaseSymbolInIteratorPositionsIf(
- ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
- SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SymbolRef Sym2, bool Equal);
-SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB, SymbolRef Expr,
- SymbolRef OldSym, SymbolRef NewSym);
-bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
bool isBoundThroughLazyCompoundVal(const Environment &Env,
const MemRegion *Reg);
+const ExplodedNode *findCallEnter(const ExplodedNode *Node, const Expr *Call);
} // namespace
@@ -211,189 +178,57 @@ void IteratorModeling::checkPostCall(const CallEvent &Call,
if (Func->isOverloadedOperator()) {
const auto Op = Func->getOverloadedOperator();
- if (isAssignmentOperator(Op)) {
- // Overloaded 'operator=' must be a non-static member function.
- const auto *InstCall = cast<CXXInstanceCall>(&Call);
- if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
- handleAssign(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
- Call.getArgSVal(0));
- return;
- }
-
- handleAssign(C, InstCall->getCXXThisVal());
- return;
- } else if (isSimpleComparisonOperator(Op)) {
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleComparison(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
- return;
- }
-
- handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1), Op);
- return;
- } else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (Call.getNumArgs() >= 1 &&
- Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
- handleRandomIncrOrDecr(C, OrigExpr, Func->getOverloadedOperator(),
- Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0));
- return;
- }
- } else {
- if (Call.getNumArgs() >= 2 &&
- Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
- handleRandomIncrOrDecr(C, OrigExpr, Func->getOverloadedOperator(),
- Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1));
- return;
- }
- }
- } else if (isIncrementOperator(Func->getOverloadedOperator())) {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
- Call.getNumArgs());
- return;
- }
-
- handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
- return;
- } else if (isDecrementOperator(Func->getOverloadedOperator())) {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
- Call.getNumArgs());
- return;
- }
-
- handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
- return;
- }
- } else {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (isAssignCall(Func)) {
- handleAssign(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isClearCall(Func)) {
- handleClear(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPushBackCall(Func) || isEmplaceBackCall(Func)) {
- handlePushBack(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPopBackCall(Func)) {
- handlePopBack(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPushFrontCall(Func) || isEmplaceFrontCall(Func)) {
- handlePushFront(C, InstCall->getCXXThisVal());
- return;
- }
+ handleOverloadedOperator(C, Call, Op);
+ return;
+ }
- if (isPopFrontCall(Func)) {
- handlePopFront(C, InstCall->getCXXThisVal());
- return;
- }
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
- if (isInsertCall(Func) || isEmplaceCall(Func)) {
- handleInsert(C, Call.getArgSVal(0));
- return;
- }
+ const AdvanceFn *Handler = AdvanceLikeFunctions.lookup(Call);
+ if (Handler) {
+ handleAdvanceLikeFunction(C, Call, OrigExpr, Handler);
+ return;
+ }
- if (isEraseCall(Func)) {
- if (Call.getNumArgs() == 1) {
- handleErase(C, Call.getArgSVal(0));
- return;
- }
+ if (!isIteratorType(Call.getResultType()))
+ return;
- if (Call.getNumArgs() == 2) {
- handleErase(C, Call.getArgSVal(0), Call.getArgSVal(1));
- return;
- }
- }
+ auto State = C.getState();
- if (isEraseAfterCall(Func)) {
- if (Call.getNumArgs() == 1) {
- handleEraseAfter(C, Call.getArgSVal(0));
- return;
- }
+ // Already bound to container?
+ if (getIteratorPosition(State, Call.getReturnValue()))
+ return;
- if (Call.getNumArgs() == 2) {
- handleEraseAfter(C, Call.getArgSVal(0), Call.getArgSVal(1));
- return;
- }
+ // Copy-like and move constructors
+ if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
+ if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
+ State = setIteratorPosition(State, Call.getReturnValue(), *Pos);
+ if (cast<CXXConstructorDecl>(Func)->isMoveConstructor()) {
+ State = removeIteratorPosition(State, Call.getArgSVal(0));
}
- }
-
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (!isIteratorType(Call.getResultType()))
+ C.addTransition(State);
return;
-
- auto State = C.getState();
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (isBeginCall(Func)) {
- handleBegin(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal());
- return;
- }
-
- if (isEndCall(Func)) {
- handleEnd(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal());
- return;
- }
}
+ }
- // Already bound to container?
- if (getIteratorPosition(State, Call.getReturnValue()))
- return;
-
- // Copy-like and move constructors
- if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
- if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
- State = setIteratorPosition(State, Call.getReturnValue(), *Pos);
- if (cast<CXXConstructorDecl>(Func)->isMoveConstructor()) {
- State = removeIteratorPosition(State, Call.getArgSVal(0));
- }
- C.addTransition(State);
+ // Assumption: if the return value is an iterator which is not yet bound to a
+ // container, then look for the first iterator argument of the
+ // same type as the return value and bind the return value to
+ // the same container. This approach works for STL algorithms.
+ // FIXME: Add a more conservative mode
+ for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
+ if (isIteratorType(Call.getArgExpr(i)->getType()) &&
+ Call.getArgExpr(i)->getType().getNonReferenceType().getDesugaredType(
+ C.getASTContext()).getTypePtr() ==
+ Call.getResultType().getDesugaredType(C.getASTContext()).getTypePtr()) {
+ if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
+ assignToContainer(C, OrigExpr, Call.getReturnValue(),
+ Pos->getContainer());
return;
}
}
-
- // Assumption: if return value is an iterator which is not yet bound to a
- // container, then look for the first iterator argument, and
- // bind the return value to the same container. This approach
- // works for STL algorithms.
- // FIXME: Add a more conservative mode
- for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
- if (isIteratorType(Call.getArgExpr(i)->getType())) {
- if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
- assignToContainer(C, OrigExpr, Call.getReturnValue(),
- Pos->getContainer());
- return;
- }
- }
- }
}
}
@@ -413,6 +248,35 @@ void IteratorModeling::checkBind(SVal Loc, SVal Val, const Stmt *S,
}
}
+void IteratorModeling::checkPostStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ UnaryOperatorKind OK = UO->getOpcode();
+ if (!isIncrementOperator(OK) && !isDecrementOperator(OK))
+ return;
+
+ auto &SVB = C.getSValBuilder();
+ handlePtrIncrOrDecr(C, UO->getSubExpr(),
+ isIncrementOperator(OK) ? OO_Plus : OO_Minus,
+ SVB.makeArrayIndex(1));
+}
+
+void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+
+ if (isSimpleComparisonOperator(BO->getOpcode())) {
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+ SVal Result = State->getSVal(BO, C.getLocationContext());
+ handleComparison(C, BO, Result, LVal, RVal,
+ BinaryOperator::getOverloadedOperator(OK));
+ } else if (isRandomIncrOrDecrOperator(OK)) {
+ handlePtrIncrOrDecr(C, BO->getLHS(),
+ BinaryOperator::getOverloadedOperator(OK), RVal);
+ }
+}
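
A minimal sketch (not from this patch) of plain-pointer iterator code that the new checkPostStmt callbacks can now follow; it assumes the pointers were bound to a container position earlier (for example by the container modeling):

int secondOrFirstPositive(const int *begin, const int *end) {
  if (begin == end)                   // built-in '==' goes to handleComparison
    return 0;
  const int *second = begin + 1;      // built-in pointer '+' goes to
                                      // handlePtrIncrOrDecr
  for (const int *it = second; it != end; ++it) // '!=' and '++' are modeled
    if (*it > 0)                                // via the new callbacks too
      return *it;
  return *begin;
}
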
+
void IteratorModeling::checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const {
/* Transfer iterator state to temporary objects */
@@ -426,8 +290,7 @@ void IteratorModeling::checkPostStmt(const MaterializeTemporaryExpr *MTE,
void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
- // Keep symbolic expressions of iterator positions, container begins and ends
- // alive
+ // Keep symbolic expressions of iterator positions alive
auto RegionMap = State->get<IteratorRegionMap>();
for (const auto &Reg : RegionMap) {
const auto Offset = Reg.second.getOffset();
@@ -444,20 +307,6 @@ void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SR.markLive(*i);
}
- auto ContMap = State->get<ContainerMap>();
- for (const auto &Cont : ContMap) {
- const auto CData = Cont.second;
- if (CData.getBegin()) {
- SR.markLive(CData.getBegin());
- if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getBegin()))
- SR.markLive(SIE->getLHS());
- }
- if (CData.getEnd()) {
- SR.markLive(CData.getEnd());
- if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getEnd()))
- SR.markLive(SIE->getLHS());
- }
- }
}
void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
@@ -484,18 +333,92 @@ void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
}
}
- auto ContMap = State->get<ContainerMap>();
- for (const auto &Cont : ContMap) {
- if (!SR.isLiveRegion(Cont.first)) {
- // We must keep the container data while it has live iterators to be able
- // to compare them to the begin and the end of the container.
- if (!hasLiveIterators(State, Cont.first)) {
- State = State->remove<ContainerMap>(Cont.first);
+ C.addTransition(State);
+}
+
+void
+IteratorModeling::handleOverloadedOperator(CheckerContext &C,
+ const CallEvent &Call,
+ OverloadedOperatorKind Op) const {
+ if (isSimpleComparisonOperator(Op)) {
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleComparison(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
+ return;
}
+
+ handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getArgSVal(1), Op);
+ return;
+ } else if (isRandomIncrOrDecrOperator(Op)) {
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (Call.getNumArgs() >= 1 &&
+ Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
+ handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ return;
+ }
+ } else {
+ if (Call.getNumArgs() >= 2 &&
+ Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
+ handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
+ }
+ }
+ } else if (isIncrementOperator(Op)) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ return;
+ }
+
+ handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
+ } else if (isDecrementOperator(Op)) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ return;
+ }
+
+ handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
}
+}
+
+void
+IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
+ const CallEvent &Call,
+ const Expr *OrigExpr,
+ const AdvanceFn *Handler) const {
+ if (!C.wasInlined) {
+ (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
}
- C.addTransition(State);
+ // If std::advance() was inlined, but a non-standard function it calls inside
+ // was not, then we have to model it explicitly
+ const auto *IdInfo = cast<FunctionDecl>(Call.getDecl())->getIdentifier();
+ if (IdInfo) {
+ if (IdInfo->getName() == "advance") {
+ if (noChangeInAdvance(C, Call.getArgSVal(0), OrigExpr)) {
+ (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ }
}
void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
@@ -518,7 +441,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
if (!Cont)
return;
- // At least one of the iterators have recorded positions. If one of them has
+ // At least one of the iterators has recorded positions. If one of them does
// not then create a new symbol for the offset.
SymbolRef Sym;
if (!LPos || !RPos) {
@@ -538,7 +461,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
RPos = getIteratorPosition(State, RVal);
}
- // We cannot make assumpotions on `UnknownVal`. Let us conjure a symbol
+ // We cannot make assumptions on `UnknownVal`. Let us conjure a symbol
// instead.
if (RetVal.isUnknown()) {
auto &SymMgr = C.getSymbolManager();
@@ -574,7 +497,7 @@ void IteratorModeling::processComparison(CheckerContext &C,
StateTrue = StateTrue->assume(*ConditionVal, true);
C.addTransition(StateTrue);
}
-
+
if (auto StateFalse = relateSymbols(State, Sym1, Sym2, Op != OO_EqualEqual)) {
StateFalse = StateFalse->assume(*ConditionVal, false);
C.addTransition(StateFalse);
@@ -648,481 +571,139 @@ void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C,
return;
const auto *value = &RHS;
+ SVal val;
if (auto loc = RHS.getAs<Loc>()) {
- const auto val = State->getRawSVal(*loc);
+ val = State->getRawSVal(*loc);
value = &val;
}
auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
- auto NewState =
- advancePosition(State, LHS, Op, *value);
- if (NewState) {
- const auto *NewPos = getIteratorPosition(NewState, LHS);
+ // `AdvancedState` is a state where the position of `LHS` is advanced. We
+ // only need this state to retrieve the new position; we do not always want
+ // to change the position of `LHS` itself (only for `+=` and `-=`).
+ auto AdvancedState = advancePosition(State, LHS, Op, *value);
+ if (AdvancedState) {
+ const auto *NewPos = getIteratorPosition(AdvancedState, LHS);
assert(NewPos &&
"Iterator should have position after successful advancement");
- State = setIteratorPosition(NewState, TgtVal, *NewPos);
+ State = setIteratorPosition(State, TgtVal, *NewPos);
C.addTransition(State);
} else {
assignToContainer(C, CE, TgtVal, Pos->getContainer());
}
}
-void IteratorModeling::handleBegin(CheckerContext &C, const Expr *CE,
- const SVal &RetVal, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // If the container already has a begin symbol then use it. Otherwise first
- // create a new one.
- auto State = C.getState();
- auto BeginSym = getContainerBegin(State, ContReg);
- if (!BeginSym) {
- State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
- C.getLocationContext(), C.blockCount());
- BeginSym = getContainerBegin(State, ContReg);
- }
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(ContReg, BeginSym));
- C.addTransition(State);
-}
-
-void IteratorModeling::handleEnd(CheckerContext &C, const Expr *CE,
- const SVal &RetVal, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // If the container already has an end symbol then use it. Otherwise first
- // create a new one.
- auto State = C.getState();
- auto EndSym = getContainerEnd(State, ContReg);
- if (!EndSym) {
- State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
- C.getLocationContext(), C.blockCount());
- EndSym = getContainerEnd(State, ContReg);
- }
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(ContReg, EndSym));
- C.addTransition(State);
-}
-
-void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
- const SVal &RetVal,
- const MemRegion *Cont) const {
- Cont = Cont->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- auto &SymMgr = C.getSymbolManager();
- auto Sym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, Sym, 4);
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(Cont, Sym));
- C.addTransition(State);
-}
-
-void IteratorModeling::handleAssign(CheckerContext &C, const SVal &Cont,
- const Expr *CE, const SVal &OldCont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // Assignment of a new value to a container always invalidates all its
- // iterators
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (CData) {
- State = invalidateAllIteratorPositions(State, ContReg);
- }
-
- // In case of move, iterators of the old container (except the past-end
- // iterators) remain valid but refer to the new container
- if (!OldCont.isUndef()) {
- const auto *OldContReg = OldCont.getAsRegion();
- if (OldContReg) {
- OldContReg = OldContReg->getMostDerivedObjectRegion();
- const auto OldCData = getContainerData(State, OldContReg);
- if (OldCData) {
- if (const auto OldEndSym = OldCData->getEnd()) {
- // If we already assigned an "end" symbol to the old container, then
- // first reassign all iterator positions to the new container which
- // are not past the container (thus not greater or equal to the
- // current "end" symbol).
- State = reassignAllIteratorPositionsUnless(State, OldContReg, ContReg,
- OldEndSym, BO_GE);
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- // Then generate and assign a new "end" symbol for the new container.
- auto NewEndSym =
- SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, NewEndSym, 4);
- if (CData) {
- State = setContainerData(State, ContReg, CData->newEnd(NewEndSym));
- } else {
- State = setContainerData(State, ContReg,
- ContainerData::fromEnd(NewEndSym));
- }
- // Finally, replace the old "end" symbol in the already reassigned
- // iterator positions with the new "end" symbol.
- State = rebaseSymbolInIteratorPositionsIf(
- State, SVB, OldEndSym, NewEndSym, OldEndSym, BO_LT);
- } else {
- // There was no "end" symbol assigned yet to the old container,
- // so reassign all iterator positions to the new container.
- State = reassignAllIteratorPositions(State, OldContReg, ContReg);
- }
- if (const auto OldBeginSym = OldCData->getBegin()) {
- // If we already assigned a "begin" symbol to the old container, then
- // assign it to the new container and remove it from the old one.
- if (CData) {
- State =
- setContainerData(State, ContReg, CData->newBegin(OldBeginSym));
- } else {
- State = setContainerData(State, ContReg,
- ContainerData::fromBegin(OldBeginSym));
- }
- State =
- setContainerData(State, OldContReg, OldCData->newEnd(nullptr));
- }
- } else {
- // There was neither "begin" nor "end" symbol assigned yet to the old
- // container, so reassign all iterator positions to the new container.
- State = reassignAllIteratorPositions(State, OldContReg, ContReg);
- }
- }
- }
- C.addTransition(State);
-}
-
-void IteratorModeling::handleClear(CheckerContext &C, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // The clear() operation invalidates all the iterators, except the past-end
- // iterators of list-like containers
- auto State = C.getState();
- if (!hasSubscriptOperator(State, ContReg) ||
- !backModifiable(State, ContReg)) {
- const auto CData = getContainerData(State, ContReg);
- if (CData) {
- if (const auto EndSym = CData->getEnd()) {
- State =
- invalidateAllIteratorPositionsExcept(State, ContReg, EndSym, BO_GE);
- C.addTransition(State);
- return;
- }
- }
- }
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
-}
-
-void IteratorModeling::handlePushBack(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
+void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
+ const Expr *Iterator,
+ OverloadedOperatorKind OK,
+ SVal Offset) const {
+ QualType PtrType = Iterator->getType();
+ if (!PtrType->isPointerType())
return;
+ QualType ElementType = PtrType->getPointeeType();
- ContReg = ContReg->getMostDerivedObjectRegion();
+ ProgramStateRef State = C.getState();
+ SVal OldVal = State->getSVal(Iterator, C.getLocationContext());
- // For deque-like containers invalidate all iterator positions
- auto State = C.getState();
- if (hasSubscriptOperator(State, ContReg) && frontModifiable(State, ContReg)) {
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
+ const IteratorPosition *OldPos = getIteratorPosition(State, OldVal);
+ if (!OldPos)
return;
- }
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
+ SVal NewVal;
+ if (OK == OO_Plus || OK == OO_PlusEqual)
+ NewVal = State->getLValue(ElementType, Offset, OldVal);
+ else {
+ const llvm::APSInt &OffsetInt =
+ Offset.castAs<nonloc::ConcreteInt>().getValue();
+ auto &BVF = C.getSymbolManager().getBasicVals();
+ SVal NegatedOffset = nonloc::ConcreteInt(BVF.getValue(-OffsetInt));
+ NewVal = State->getLValue(ElementType, NegatedOffset, OldVal);
+ }
+
+ // `AdvancedState` is a state where the position of `OldVal` is advanced. We
+ // only need this state to retrieve the new position; we never want to
+ // change the position of `OldVal` itself.
+ auto AdvancedState = advancePosition(State, OldVal, OK, Offset);
+ if (AdvancedState) {
+ const IteratorPosition *NewPos = getIteratorPosition(AdvancedState, OldVal);
+ assert(NewPos &&
+ "Iterator should have position after successful advancement");
- // For vector-like containers invalidate the past-end iterator positions
- if (const auto EndSym = CData->getEnd()) {
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- }
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newEndSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(EndSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(EndSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ ProgramStateRef NewState = setIteratorPosition(State, NewVal, *NewPos);
+ C.addTransition(NewState);
+ } else {
+ assignToContainer(C, Iterator, NewVal, OldPos->getContainer());
}
- C.addTransition(State);
}
-void IteratorModeling::handlePopBack(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- if (const auto EndSym = CData->getEnd()) {
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto BackSym =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(EndSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(EndSym)).getAsSymbol();
- // For vector-like and deque-like containers invalidate the last and the
- // past-end iterator positions. For list-like containers only invalidate
- // the last position
- if (hasSubscriptOperator(State, ContReg) &&
- backModifiable(State, ContReg)) {
- State = invalidateIteratorPositions(State, BackSym, BO_GE);
- State = setContainerData(State, ContReg, CData->newEnd(nullptr));
- } else {
- State = invalidateIteratorPositions(State, BackSym, BO_EQ);
- }
- auto newEndSym = BackSym;
- State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
- C.addTransition(State);
- }
+void IteratorModeling::handleAdvance(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter,
+ SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_PlusEqual, RetVal, Iter, Amount);
}
-void IteratorModeling::handlePushFront(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // For deque-like containers invalidate all iterator positions
- auto State = C.getState();
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
- } else {
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- if (const auto BeginSym = CData->getBegin()) {
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newBeginSym =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(BeginSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(BeginSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
- C.addTransition(State);
- }
- }
+void IteratorModeling::handlePrev(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter, SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_Minus, RetVal, Iter, Amount);
}
-void IteratorModeling::handlePopFront(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- // For deque-like containers invalidate all iterator positions. For list-like
- // iterators only invalidate the first position
- if (const auto BeginSym = CData->getBegin()) {
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateIteratorPositions(State, BeginSym, BO_LE);
- } else {
- State = invalidateIteratorPositions(State, BeginSym, BO_EQ);
- }
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newBeginSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(BeginSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(BeginSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
- C.addTransition(State);
- }
+void IteratorModeling::handleNext(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter, SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_Plus, RetVal, Iter, Amount);
}
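
A usage sketch (not from this patch) of the calls that the AdvanceLikeFunctions map routes to the three handlers above; it assumes the iterator already carries a modeled position:

#include <iterator>
#include <list>

bool roundTrip(const std::list<int> &l) {
  if (l.size() < 3)
    return false;
  auto it = l.begin();
  std::advance(it, 2);            // handleAdvance: modeled as 'it += 2'
  auto back = std::prev(it, 2);   // handlePrev:    modeled as 'it - 2'
  auto fwd = std::next(back, 2);  // handleNext:    modeled as 'back + 2'
  return back == l.begin() && fwd == it;
}
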
-void IteratorModeling::handleInsert(CheckerContext &C, const SVal &Iter) const {
- auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
-
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions after the insertion.
- const auto *Cont = Pos->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- C.addTransition(State);
- }
-}
+void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
+ const SVal &RetVal,
+ const MemRegion *Cont) const {
+ Cont = Cont->getMostDerivedObjectRegion();
-void IteratorModeling::handleErase(CheckerContext &C, const SVal &Iter) const {
auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
+ const auto *LCtx = C.getLocationContext();
+ State = createIteratorPosition(State, RetVal, Cont, CE, LCtx, C.blockCount());
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions at and after the
- // deletion. For list-like containers only invalidate the deleted position.
- const auto *Cont = Pos->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_EQ);
- }
C.addTransition(State);
}
-void IteratorModeling::handleErase(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const {
- auto State = C.getState();
- const auto *Pos1 = getIteratorPosition(State, Iter1);
- const auto *Pos2 = getIteratorPosition(State, Iter2);
- if (!Pos1 || !Pos2)
- return;
+bool IteratorModeling::noChangeInAdvance(CheckerContext &C, SVal Iter,
+ const Expr *CE) const {
+ // Compare the iterator position before and after the call. (To be called
+ // from `checkPostCall()`.)
+ const auto StateAfter = C.getState();
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions at and after the
- // deletion range. For list-like containers only invalidate the deleted
- // position range [first..last].
- const auto *Cont = Pos1->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- } else {
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE,
- Pos2->getOffset(), BO_LT);
- }
- C.addTransition(State);
-}
+ const auto *PosAfter = getIteratorPosition(StateAfter, Iter);
+ // If we have no position after the call of `std::advance`, then we are not
+ // interested. (Modeling of an inlined `std::advance()` should not remove the
+ // position in any case.)
+ if (!PosAfter)
+ return false;
-void IteratorModeling::handleEraseAfter(CheckerContext &C,
- const SVal &Iter) const {
- auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
+ const ExplodedNode *N = findCallEnter(C.getPredecessor(), CE);
+ assert(N && "Any call should have a `CallEnter` node.");
- // Invalidate the deleted iterator position, which is the position of the
- // parameter plus one.
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto NextSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(Pos->getOffset()),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(Pos->getOffset())).getAsSymbol();
- State = invalidateIteratorPositions(State, NextSym, BO_EQ);
- C.addTransition(State);
-}
+ const auto StateBefore = N->getState();
+ const auto *PosBefore = getIteratorPosition(StateBefore, Iter);
-void IteratorModeling::handleEraseAfter(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const {
- auto State = C.getState();
- const auto *Pos1 = getIteratorPosition(State, Iter1);
- const auto *Pos2 = getIteratorPosition(State, Iter2);
- if (!Pos1 || !Pos2)
- return;
+  assert(PosBefore && "`std::advance()` should not create a new iterator "
+                      "position but only change existing ones");
- // Invalidate the deleted iterator position range (first..last)
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GT,
- Pos2->getOffset(), BO_LT);
- C.addTransition(State);
+ return PosBefore->getOffset() == PosAfter->getOffset();
}
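Illustrative sketch, not part of the patch: the kind of caller `noChangeInAdvance()` is written for. When the inlined body of `std::advance()` did not move the tracked position, `IteratorModeling` falls back to modeling the call itself. The function name and container below are hypothetical, and the list is assumed to hold at least three elements.

  #include <iterator>
  #include <list>

  int thirdElement(const std::list<int> &L) {
    auto It = L.begin();
    std::advance(It, 2);  // the position must end up two steps past begin();
                          // if modeling of the inlined call changed nothing,
                          // the checker applies its own advance handling
    return *It;
  }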
void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
-
- auto ContMap = State->get<ContainerMap>();
-
- if (!ContMap.isEmpty()) {
- Out << Sep << "Container Data :" << NL;
- for (const auto &Cont : ContMap) {
- Cont.first->dumpToStream(Out);
- Out << " : [ ";
- const auto CData = Cont.second;
- if (CData.getBegin())
- CData.getBegin()->dumpToStream(Out);
- else
- Out << "<Unknown>";
- Out << " .. ";
- if (CData.getEnd())
- CData.getEnd()->dumpToStream(Out);
- else
- Out << "<Unknown>";
- Out << " ]" << NL;
- }
- }
-
auto SymbolMap = State->get<IteratorSymbolMap>();
auto RegionMap = State->get<IteratorRegionMap>();
+ // Use a counter to add newlines before every line except the first one.
+ unsigned Count = 0;
if (!SymbolMap.isEmpty() || !RegionMap.isEmpty()) {
Out << Sep << "Iterator Positions :" << NL;
for (const auto &Sym : SymbolMap) {
+ if (Count++)
+ Out << NL;
+
Sym.first->dumpToStream(Out);
Out << " : ";
const auto Pos = Sym.second;
@@ -1133,6 +714,9 @@ void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
}
for (const auto &Reg : RegionMap) {
+ if (Count++)
+ Out << NL;
+
Reg.first->dumpToStream(Out);
Out << " : ";
const auto Pos = Reg.second;
@@ -1144,229 +728,14 @@ void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
}
}
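The separator handling above, shown in isolation as a sketch (`Entries`, `Out`, `NL`, and `printEntry` are placeholders): a newline is emitted before every entry except the first, so the dump carries no trailing newline after the last entry.

  unsigned Count = 0;
  for (const auto &Entry : Entries) {
    if (Count++)      // skipped only for the very first entry
      Out << NL;
    printEntry(Out, Entry);
  }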
-
namespace {
-const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
- const MemRegion *Reg);
-
-bool isBeginCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- return IdInfo->getName().endswith_lower("begin");
-}
-
-bool isEndCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- return IdInfo->getName().endswith_lower("end");
-}
-
-bool isAssignCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 2)
- return false;
- return IdInfo->getName() == "assign";
-}
-
-bool isClearCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "clear";
-}
-
-bool isPushBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() != 1)
- return false;
- return IdInfo->getName() == "push_back";
-}
-
-bool isEmplaceBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() < 1)
- return false;
- return IdInfo->getName() == "emplace_back";
-}
-
-bool isPopBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "pop_back";
-}
-
-bool isPushFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() != 1)
- return false;
- return IdInfo->getName() == "push_front";
-}
-
-bool isEmplaceFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() < 1)
- return false;
- return IdInfo->getName() == "emplace_front";
-}
-
-bool isPopFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "pop_front";
-}
-
-bool isAssignmentOperator(OverloadedOperatorKind OK) { return OK == OO_Equal; }
-
bool isSimpleComparisonOperator(OverloadedOperatorKind OK) {
return OK == OO_EqualEqual || OK == OO_ExclaimEqual;
}
-bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->isOverloadedOperator())
- continue;
- const auto OPK = Method->getOverloadedOperator();
- if (OPK == OO_Subscript) {
- return true;
- }
- }
- return false;
-}
-
-bool frontModifiable(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->getDeclName().isIdentifier())
- continue;
- if (Method->getName() == "push_front" || Method->getName() == "pop_front") {
- return true;
- }
- }
- return false;
-}
-
-bool backModifiable(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->getDeclName().isIdentifier())
- continue;
- if (Method->getName() == "push_back" || Method->getName() == "pop_back") {
- return true;
- }
- }
- return false;
-}
-
-const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
- const MemRegion *Reg) {
- auto TI = getDynamicTypeInfo(State, Reg);
- if (!TI.isValid())
- return nullptr;
-
- auto Type = TI.getType();
- if (const auto *RefT = Type->getAs<ReferenceType>()) {
- Type = RefT->getPointeeType();
- }
-
- return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
-}
-
-SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
- const auto *CDataPtr = getContainerData(State, Cont);
- if (!CDataPtr)
- return nullptr;
-
- return CDataPtr->getBegin();
-}
-
-SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
- const auto *CDataPtr = getContainerData(State, Cont);
- if (!CDataPtr)
- return nullptr;
-
- return CDataPtr->getEnd();
-}
-
-ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
- unsigned BlockCount) {
- // Only create if it does not exist
- const auto *CDataPtr = getContainerData(State, Cont);
- if (CDataPtr && CDataPtr->getBegin())
- return State;
-
- auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "begin");
- State = assumeNoOverflow(State, Sym, 4);
-
- if (CDataPtr) {
- const auto CData = CDataPtr->newBegin(Sym);
- return setContainerData(State, Cont, CData);
- }
-
- const auto CData = ContainerData::fromBegin(Sym);
- return setContainerData(State, Cont, CData);
-}
-
-ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
- const LocationContext *LCtx,
- unsigned BlockCount) {
- // Only create if it does not exist
- const auto *CDataPtr = getContainerData(State, Cont);
- if (CDataPtr && CDataPtr->getEnd())
- return State;
-
- auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "end");
- State = assumeNoOverflow(State, Sym, 4);
-
- if (CDataPtr) {
- const auto CData = CDataPtr->newEnd(Sym);
- return setContainerData(State, Cont, CData);
- }
-
- const auto CData = ContainerData::fromEnd(Sym);
- return setContainerData(State, Cont, CData);
-}
-
-ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
- const ContainerData &CData) {
- return State->set<ContainerMap>(Cont, CData);
+bool isSimpleComparisonOperator(BinaryOperatorKind OK) {
+ return OK == BO_EQ || OK == BO_NE;
}
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
@@ -1381,47 +750,6 @@ ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
return nullptr;
}
-// This function tells the analyzer's engine that symbols produced by our
-// checker, most notably iterator positions, are relatively small.
-// A distance between items in the container should not be very large.
-// By assuming that it is within around 1/8 of the address space,
-// we can help the analyzer perform operations on these symbols
-// without being afraid of integer overflows.
-// FIXME: Should we provide it as an API, so that all checkers could use it?
-ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
- long Scale) {
- SValBuilder &SVB = State->getStateManager().getSValBuilder();
- BasicValueFactory &BV = SVB.getBasicValueFactory();
-
- QualType T = Sym->getType();
- assert(T->isSignedIntegerOrEnumerationType());
- APSIntType AT = BV.getAPSIntType(T);
-
- ProgramStateRef NewState = State;
-
- llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
- SVal IsCappedFromAbove =
- SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Max), SVB.getConditionType());
- if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- llvm::APSInt Min = -Max;
- SVal IsCappedFromBelow =
- SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Min), SVB.getConditionType());
- if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- return NewState;
-}
-
ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SymbolRef Sym2, bool Equal) {
auto &SVB = State->getStateManager().getSValBuilder();
@@ -1454,22 +782,6 @@ ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
return NewState;
}
-bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont) {
- auto RegionMap = State->get<IteratorRegionMap>();
- for (const auto &Reg : RegionMap) {
- if (Reg.second.getContainer() == Cont)
- return true;
- }
-
- auto SymbolMap = State->get<IteratorSymbolMap>();
- for (const auto &Sym : SymbolMap) {
- if (Sym.second.getContainer() == Cont)
- return true;
- }
-
- return false;
-}
-
bool isBoundThroughLazyCompoundVal(const Environment &Env,
const MemRegion *Reg) {
for (const auto &Binding : Env) {
@@ -1482,150 +794,18 @@ bool isBoundThroughLazyCompoundVal(const Environment &Env,
return false;
}
-template <typename Condition, typename Process>
-ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
- Process Proc) {
- auto &RegionMapFactory = State->get_context<IteratorRegionMap>();
- auto RegionMap = State->get<IteratorRegionMap>();
- bool Changed = false;
- for (const auto &Reg : RegionMap) {
- if (Cond(Reg.second)) {
- RegionMap = RegionMapFactory.add(RegionMap, Reg.first, Proc(Reg.second));
- Changed = true;
+const ExplodedNode *findCallEnter(const ExplodedNode *Node, const Expr *Call) {
+ while (Node) {
+ ProgramPoint PP = Node->getLocation();
+ if (auto Enter = PP.getAs<CallEnter>()) {
+ if (Enter->getCallExpr() == Call)
+ break;
}
- }
-
- if (Changed)
- State = State->set<IteratorRegionMap>(RegionMap);
- auto &SymbolMapFactory = State->get_context<IteratorSymbolMap>();
- auto SymbolMap = State->get<IteratorSymbolMap>();
- Changed = false;
- for (const auto &Sym : SymbolMap) {
- if (Cond(Sym.second)) {
- SymbolMap = SymbolMapFactory.add(SymbolMap, Sym.first, Proc(Sym.second));
- Changed = true;
- }
+ Node = Node->getFirstPred();
}
- if (Changed)
- State = State->set<IteratorSymbolMap>(SymbolMap);
-
- return State;
-}
-
-ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont) {
- auto MatchCont = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont;
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, MatchCont, Invalidate);
-}
-
-ProgramStateRef
-invalidateAllIteratorPositionsExcept(ProgramStateRef State,
- const MemRegion *Cont, SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont &&
- !compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, MatchContAndCompare, Invalidate);
-}
-
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto Compare = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, Compare, Invalidate);
-}
-
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset1,
- BinaryOperator::Opcode Opc1,
- SymbolRef Offset2,
- BinaryOperator::Opcode Opc2) {
- auto Compare = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), Offset1, Opc1) &&
- compare(State, Pos.getOffset(), Offset2, Opc2);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, Compare, Invalidate);
-}
-
-ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont) {
- auto MatchCont = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont;
- };
- auto ReAssign = [&](const IteratorPosition &Pos) {
- return Pos.reAssign(NewCont);
- };
- return processIteratorPositions(State, MatchCont, ReAssign);
-}
-
-ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont &&
- !compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto ReAssign = [&](const IteratorPosition &Pos) {
- return Pos.reAssign(NewCont);
- };
- return processIteratorPositions(State, MatchContAndCompare, ReAssign);
-}
-
-// This function rebases symbolic expression `OldSym + Int` to `NewSym + Int`,
-// `OldSym - Int` to `NewSym - Int` and `OldSym` to `NewSym` in any iterator
-// position offsets where `CondSym` is true.
-ProgramStateRef rebaseSymbolInIteratorPositionsIf(
- ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
- SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc) {
- auto LessThanEnd = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), CondSym, Opc);
- };
- auto RebaseSymbol = [&](const IteratorPosition &Pos) {
- return Pos.setTo(rebaseSymbol(State, SVB, Pos.getOffset(), OldSym,
- NewSym));
- };
- return processIteratorPositions(State, LessThanEnd, RebaseSymbol);
-}
-
-// This function rebases symbolic expression `OldExpr + Int` to `NewExpr + Int`,
-// `OldExpr - Int` to `NewExpr - Int` and `OldExpr` to `NewExpr` in expression
-// `OrigExpr`.
-SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
- SymbolRef OrigExpr, SymbolRef OldExpr,
- SymbolRef NewSym) {
- auto &SymMgr = SVB.getSymbolManager();
- auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
- nonloc::SymbolVal(OldExpr),
- SymMgr.getType(OrigExpr));
-
- const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
- if (!DiffInt)
- return OrigExpr;
-
- return SVB.evalBinOpNN(State, BO_Add, *DiffInt, nonloc::SymbolVal(NewSym),
- SymMgr.getType(OrigExpr)).getAsSymbol();
+ return Node;
}
} // namespace
@@ -1634,6 +814,6 @@ void ento::registerIteratorModeling(CheckerManager &mgr) {
mgr.registerChecker<IteratorModeling>();
}
-bool ento::shouldRegisterIteratorModeling(const LangOptions &LO) {
+bool ento::shouldRegisterIteratorModeling(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index bd8b84d464b6..df8e379d1f20 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -27,22 +27,41 @@ using namespace iterator;
namespace {
class IteratorRangeChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<UnaryOperator>,
+ check::PreStmt<BinaryOperator>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PreStmt<MemberExpr>> {
std::unique_ptr<BugType> OutOfRangeBugType;
- void verifyDereference(CheckerContext &C, const SVal &Val) const;
- void verifyIncrement(CheckerContext &C, const SVal &Iter) const;
- void verifyDecrement(CheckerContext &C, const SVal &Iter) const;
+ void verifyDereference(CheckerContext &C, SVal Val) const;
+ void verifyIncrement(CheckerContext &C, SVal Iter) const;
+ void verifyDecrement(CheckerContext &C, SVal Iter) const;
void verifyRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
- const SVal &LHS, const SVal &RHS) const;
- void reportBug(const StringRef &Message, const SVal &Val,
- CheckerContext &C, ExplodedNode *ErrNode) const;
+ SVal LHS, SVal RHS) const;
+ void verifyAdvance(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void verifyPrev(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void verifyNext(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void reportBug(const StringRef &Message, SVal Val, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+
public:
IteratorRangeChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
-
+ void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
+ void checkPreStmt(const ArraySubscriptExpr *ASE, CheckerContext &C) const;
+ void checkPreStmt(const MemberExpr *ME, CheckerContext &C) const;
+
+ using AdvanceFn = void (IteratorRangeChecker::*)(CheckerContext &, SVal,
+ SVal) const;
+
+ CallDescriptionMap<AdvanceFn> AdvanceFunctions = {
+ {{{"std", "advance"}, 2}, &IteratorRangeChecker::verifyAdvance},
+ {{{"std", "prev"}, 2}, &IteratorRangeChecker::verifyPrev},
+ {{{"std", "next"}, 2}, &IteratorRangeChecker::verifyNext},
+ };
};
bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
@@ -107,11 +126,73 @@ void IteratorRangeChecker::checkPreCall(const CallEvent &Call,
verifyDereference(C, Call.getArgSVal(0));
}
}
+ } else {
+ const AdvanceFn *Verifier = AdvanceFunctions.lookup(Call);
+ if (Verifier) {
+ if (Call.getNumArgs() > 1) {
+ (this->**Verifier)(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ } else {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ (this->**Verifier)(
+ C, Call.getArgSVal(0),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+ }
+ }
+ }
+}
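A hypothetical snippet, not taken from the patch, of the pattern the new `AdvanceFunctions` verification is meant to flag: `std::next()` steps beyond the past-the-end position, so the later dereference is out of range. Names and values are invented.

  #include <cstddef>
  #include <iterator>
  #include <vector>

  int pastTheEnd(const std::vector<int> &V) {
    // verifyNext() -> verifyRandomIncrOrDecr(OO_Plus, begin, size + 1)
    auto It = std::next(V.begin(), static_cast<std::ptrdiff_t>(V.size()) + 1);
    return *It; // dereference of an iterator ahead of end()
  }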
+
+void IteratorRangeChecker::checkPreStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ if (isa<CXXThisExpr>(UO->getSubExpr()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ UnaryOperatorKind OK = UO->getOpcode();
+ SVal SubVal = State->getSVal(UO->getSubExpr(), C.getLocationContext());
+
+ if (isDereferenceOperator(OK)) {
+ verifyDereference(C, SubVal);
+ } else if (isIncrementOperator(OK)) {
+ verifyIncrement(C, SubVal);
+ } else if (isDecrementOperator(OK)) {
+ verifyDecrement(C, SubVal);
+ }
+}
+
+void IteratorRangeChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+
+ if (isDereferenceOperator(OK)) {
+ verifyDereference(C, LVal);
+ } else if (isRandomIncrOrDecrOperator(OK)) {
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ verifyRandomIncrOrDecr(C, BinaryOperator::getOverloadedOperator(OK), LVal,
+ RVal);
}
}
+void IteratorRangeChecker::checkPreStmt(const ArraySubscriptExpr *ASE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(ASE->getLHS(), C.getLocationContext());
+ verifyDereference(C, LVal);
+}
+
+void IteratorRangeChecker::checkPreStmt(const MemberExpr *ME,
+ CheckerContext &C) const {
+ if (!ME->isArrow() || ME->isImplicitAccess())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal BaseVal = State->getSVal(ME->getBase(), C.getLocationContext());
+ verifyDereference(C, BaseVal);
+}
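A sketch of why the plain-statement callbacks matter: when a container's iterators are raw pointers, `++`, `*`, `[]`, and `->` are built-in operators rather than overloaded calls, so they are only visible through these `checkPreStmt()` hooks. `Node` and the function are invented; assume `End` is a past-the-end iterator already tracked by the modeling checker.

  struct Node { int Value; };

  int pastEndAccess(Node *End) {
    Node *It = End;   // past-the-end position
    ++It;             // built-in increment: checkPreStmt(UnaryOperator)
    return It->Value; // built-in arrow: checkPreStmt(MemberExpr)
  }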
+
void IteratorRangeChecker::verifyDereference(CheckerContext &C,
- const SVal &Val) const {
+ SVal Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
if (Pos && isPastTheEnd(State, *Pos)) {
@@ -123,24 +204,21 @@ void IteratorRangeChecker::verifyDereference(CheckerContext &C,
}
}
-void IteratorRangeChecker::verifyIncrement(CheckerContext &C,
- const SVal &Iter) const {
+void IteratorRangeChecker::verifyIncrement(CheckerContext &C, SVal Iter) const {
auto &BVF = C.getSValBuilder().getBasicValueFactory();
verifyRandomIncrOrDecr(C, OO_Plus, Iter,
nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
}
-void IteratorRangeChecker::verifyDecrement(CheckerContext &C,
- const SVal &Iter) const {
+void IteratorRangeChecker::verifyDecrement(CheckerContext &C, SVal Iter) const {
auto &BVF = C.getSValBuilder().getBasicValueFactory();
verifyRandomIncrOrDecr(C, OO_Minus, Iter,
nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
}
void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
- OverloadedOperatorKind Op,
- const SVal &LHS,
- const SVal &RHS) const {
+ OverloadedOperatorKind Op,
+ SVal LHS, SVal RHS) const {
auto State = C.getState();
auto Value = RHS;
@@ -180,12 +258,32 @@ void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
}
}
-void IteratorRangeChecker::reportBug(const StringRef &Message,
- const SVal &Val, CheckerContext &C,
- ExplodedNode *ErrNode) const {
+void IteratorRangeChecker::verifyAdvance(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_PlusEqual, LHS, RHS);
+}
+
+void IteratorRangeChecker::verifyPrev(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_Minus, LHS, RHS);
+}
+
+void IteratorRangeChecker::verifyNext(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_Plus, LHS, RHS);
+}
+
+void IteratorRangeChecker::reportBug(const StringRef &Message, SVal Val,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
auto R = std::make_unique<PathSensitiveBugReport>(*OutOfRangeBugType, Message,
ErrNode);
+
+ const auto *Pos = getIteratorPosition(C.getState(), Val);
+ assert(Pos && "Iterator without known position cannot be out-of-range.");
+
R->markInteresting(Val);
+ R->markInteresting(Pos->getContainer());
C.emitReport(std::move(R));
}
@@ -268,6 +366,6 @@ void ento::registerIteratorRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<IteratorRangeChecker>();
}
-bool ento::shouldRegisterIteratorRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterIteratorRangeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 0d64fbd6f62e..3e6756efe0e6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -739,7 +739,7 @@ void ento::registerIvarInvalidationModeling(CheckerManager &mgr) {
mgr.registerChecker<IvarInvalidationChecker>();
}
-bool ento::shouldRegisterIvarInvalidationModeling(const LangOptions &LO) {
+bool ento::shouldRegisterIvarInvalidationModeling(const CheckerManager &mgr) {
return true;
}
@@ -751,7 +751,7 @@ bool ento::shouldRegisterIvarInvalidationModeling(const LangOptions &LO) {
checker->Filter.checkName_##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(InstanceVariableInvalidation)
REGISTER_CHECKER(MissingInvalidationMethod)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 7522fdd0a99b..1f3d8844d330 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -314,6 +314,6 @@ void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
mgr.registerChecker<LLVMConventionsChecker>();
}
-bool ento::shouldRegisterLLVMConventionsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterLLVMConventionsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 79de1844e745..252377f24bd7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -1403,7 +1403,7 @@ void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
checker, "AggressiveReport");
}
-bool ento::shouldRegisterNonLocalizedStringChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonLocalizedStringChecker(const CheckerManager &mgr) {
return true;
}
@@ -1412,7 +1412,7 @@ void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
}
bool ento::shouldRegisterEmptyLocalizationContextChecker(
- const LangOptions &LO) {
+ const CheckerManager &mgr) {
return true;
}
@@ -1420,6 +1420,6 @@ void ento::registerPluralMisuseChecker(CheckerManager &mgr) {
mgr.registerChecker<PluralMisuseChecker>();
}
-bool ento::shouldRegisterPluralMisuseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPluralMisuseChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
index d73e2eb92d42..837213875a60 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -210,15 +210,16 @@ void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
if (!PVD || State->contains<RefCountedParameters>(PVD))
return;
- const NoteTag *T = C.getNoteTag([this, PVD](BugReport &BR) -> std::string {
- if (&BR.getBugType() != &BT)
- return "";
- SmallString<64> Str;
- llvm::raw_svector_ostream OS(Str);
- OS << "Value passed through parameter '" << PVD->getName()
- << "\' is deallocated";
- return OS.str();
- });
+ const NoteTag *T =
+ C.getNoteTag([this, PVD](PathSensitiveBugReport &BR) -> std::string {
+ if (&BR.getBugType() != &BT)
+ return "";
+ SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "Value passed through parameter '" << PVD->getName()
+ << "\' is deallocated";
+ return std::string(OS.str());
+ });
C.addTransition(State->set<ReleasedParameter>(true), T);
}
@@ -292,6 +293,6 @@ void ento::registerMIGChecker(CheckerManager &Mgr) {
Mgr.registerChecker<MIGChecker>();
}
-bool ento::shouldRegisterMIGChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMIGChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 7f9ba0de1dc2..7ac7a38dacf3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -16,6 +16,7 @@
#include "MPIChecker.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
namespace clang {
namespace ento {
@@ -146,7 +147,7 @@ void MPIChecker::allRegionsUsedByWait(
llvm::SmallVector<const MemRegion *, 2> &ReqRegions,
const MemRegion *const MR, const CallEvent &CE, CheckerContext &Ctx) const {
- MemRegionManager *const RegionManager = MR->getMemRegionManager();
+ MemRegionManager &RegionManager = MR->getMemRegionManager();
if (FuncClassifier->isMPI_Waitall(CE.getCalleeIdentifier())) {
const SubRegion *SuperRegion{nullptr};
@@ -160,15 +161,16 @@ void MPIChecker::allRegionsUsedByWait(
return;
}
- const auto &Size = Ctx.getStoreManager().getSizeInElements(
- Ctx.getState(), SuperRegion,
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ Ctx.getState(), SuperRegion, Ctx.getSValBuilder(),
CE.getArgExpr(1)->getType()->getPointeeType());
- const llvm::APSInt &ArrSize = Size.getAs<nonloc::ConcreteInt>()->getValue();
+ const llvm::APSInt &ArrSize =
+ ElementCount.getAs<nonloc::ConcreteInt>()->getValue();
for (size_t i = 0; i < ArrSize; ++i) {
const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
- const ElementRegion *const ER = RegionManager->getElementRegion(
+ const ElementRegion *const ER = RegionManager.getElementRegion(
CE.getArgExpr(1)->getType()->getPointeeType(), Idx, SuperRegion,
Ctx.getASTContext());
@@ -188,6 +190,6 @@ void clang::ento::registerMPIChecker(CheckerManager &MGR) {
MGR.registerChecker<clang::ento::mpi::MPIChecker>();
}
-bool clang::ento::shouldRegisterMPIChecker(const LangOptions &LO) {
+bool clang::ento::shouldRegisterMPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index e064ca6bd88f..87477e96d2d1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -66,7 +66,7 @@ public:
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const;
+ const char *NL, const char *Sep) const override;
private:
typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
@@ -667,6 +667,6 @@ void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSKeychainAPIChecker>();
}
-bool ento::shouldRegisterMacOSKeychainAPIChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMacOSKeychainAPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 410721d8b6ff..04e7f8dec8d7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -176,6 +176,6 @@ void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSXAPIChecker>();
}
-bool ento::shouldRegisterMacOSXAPIChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMacOSXAPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 09306383d53f..d5b0a5b2220f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -44,34 +44,49 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "AllocationState.h"
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
-#include "AllocationState.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include <climits>
+#include <functional>
#include <utility>
using namespace clang;
using namespace ento;
+using namespace std::placeholders;
//===----------------------------------------------------------------------===//
-// The types of allocation we're modeling.
+// The types of allocation we're modeling. This is used to check whether a
+// dynamically allocated object is deallocated with the matching function:
+// for example, memory created by malloc() must not be released with operator
+// delete, and alloca regions must never be deallocated manually.
//===----------------------------------------------------------------------===//
namespace {
@@ -87,26 +102,16 @@ enum AllocationFamily {
AF_InnerBuffer
};
-struct MemFunctionInfoTy;
-
} // end of anonymous namespace
-/// Determine family of a deallocation expression.
-static AllocationFamily
-getAllocationFamily(const MemFunctionInfoTy &MemFunctionInfo, CheckerContext &C,
- const Stmt *S);
-
/// Print names of allocators and deallocators.
///
/// \returns true on success.
-static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
- const Expr *E);
+static bool printMemFnName(raw_ostream &os, CheckerContext &C, const Expr *E);
-/// Print expected name of an allocator based on the deallocator's
-/// family derived from the DeallocExpr.
-static void printExpectedAllocName(raw_ostream &os,
- const MemFunctionInfoTy &MemFunctionInfo,
- CheckerContext &C, const Expr *E);
+/// Print expected name of an allocator based on the deallocator's family
+/// derived from the DeallocExpr.
+static void printExpectedAllocName(raw_ostream &os, AllocationFamily Family);
/// Print expected name of a deallocator based on the allocator's
/// family.
@@ -207,7 +212,7 @@ static bool isReleased(SymbolRef Sym, CheckerContext &C);
/// value; if unspecified, the value of expression \p E is used.
static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc,
+ AllocationFamily Family,
Optional<SVal> RetVal = None);
//===----------------------------------------------------------------------===//
@@ -265,60 +270,14 @@ struct ReallocPair {
REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
-//===----------------------------------------------------------------------===//
-// Kinds of memory operations, information about resource managing functions.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-enum class MemoryOperationKind { MOK_Allocate, MOK_Free, MOK_Any };
-
-struct MemFunctionInfoTy {
- /// The value of the MallocChecker:Optimistic is stored in this variable.
- ///
- /// In pessimistic mode, the checker assumes that it does not know which
- /// functions might free the memory.
- /// In optimistic mode, the checker assumes that all user-defined functions
- /// which might free a pointer are annotated.
- DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
-
- // TODO: Change these to CallDescription, and get rid of lazy initialization.
- mutable IdentifierInfo *II_alloca = nullptr, *II_win_alloca = nullptr,
- *II_malloc = nullptr, *II_free = nullptr,
- *II_realloc = nullptr, *II_calloc = nullptr,
- *II_valloc = nullptr, *II_reallocf = nullptr,
- *II_strndup = nullptr, *II_strdup = nullptr,
- *II_win_strdup = nullptr, *II_kmalloc = nullptr,
- *II_if_nameindex = nullptr,
- *II_if_freenameindex = nullptr, *II_wcsdup = nullptr,
- *II_win_wcsdup = nullptr, *II_g_malloc = nullptr,
- *II_g_malloc0 = nullptr, *II_g_realloc = nullptr,
- *II_g_try_malloc = nullptr,
- *II_g_try_malloc0 = nullptr,
- *II_g_try_realloc = nullptr, *II_g_free = nullptr,
- *II_g_memdup = nullptr, *II_g_malloc_n = nullptr,
- *II_g_malloc0_n = nullptr, *II_g_realloc_n = nullptr,
- *II_g_try_malloc_n = nullptr,
- *II_g_try_malloc0_n = nullptr, *II_kfree = nullptr,
- *II_g_try_realloc_n = nullptr;
-
- void initIdentifierInfo(ASTContext &C) const;
-
- ///@{
- /// Check if this is one of the functions which can allocate/reallocate
- /// memory pointed to by one of its arguments.
- bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
- bool isCMemFunction(const FunctionDecl *FD, ASTContext &C,
- AllocationFamily Family,
- MemoryOperationKind MemKind) const;
-
- /// Tells if the callee is one of the builtin new/delete operators, including
- /// placement operators and other standard overloads.
- bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
- ///@}
-};
-
-} // end of anonymous namespace
+/// Tells if the callee is one of the builtin new/delete operators, including
+/// placement operators and other standard overloads.
+static bool isStandardNewDelete(const FunctionDecl *FD);
+static bool isStandardNewDelete(const CallEvent &Call) {
+ if (!Call.getDecl() || !isa<FunctionDecl>(Call.getDecl()))
+ return false;
+ return isStandardNewDelete(cast<FunctionDecl>(Call.getDecl()));
+}
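For orientation (a sketch, not exhaustive and not taken from the patch): the "standard" operators meant here include the usual replaceable and placement forms; the precise set accepted is whatever isStandardNewDelete(const FunctionDecl *) decides.

  //   void *operator new(std::size_t);
  //   void *operator new(std::size_t, std::align_val_t);                // C++17
  //   void *operator new(std::size_t, const std::nothrow_t &) noexcept;
  //   void *operator new(std::size_t, void *) noexcept;                 // placement
  //   void  operator delete(void *) noexcept;
  //   void  operator delete(void *, std::size_t) noexcept;              // sized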
//===----------------------------------------------------------------------===//
// Definition of the MallocChecker class.
@@ -329,13 +288,15 @@ namespace {
class MallocChecker
: public Checker<check::DeadSymbols, check::PointerEscape,
check::ConstPointerEscape, check::PreStmt<ReturnStmt>,
- check::EndFunction, check::PreCall,
- check::PostStmt<CallExpr>, check::PostStmt<CXXNewExpr>,
- check::NewAllocator, check::PreStmt<CXXDeleteExpr>,
- check::PostStmt<BlockExpr>, check::PostObjCMessage,
- check::Location, eval::Assume> {
+ check::EndFunction, check::PreCall, check::PostCall,
+ check::NewAllocator, check::PostStmt<BlockExpr>,
+ check::PostObjCMessage, check::Location, eval::Assume> {
public:
- MemFunctionInfoTy MemFunctionInfo;
+ /// In pessimistic mode, the checker assumes that it does not know which
+ /// functions might free the memory.
+ /// In optimistic mode, the checker assumes that all user-defined functions
+ /// which might free a pointer are annotated.
+ DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
/// Many checkers are essentially built into this one, so enabling them will
/// make MallocChecker perform additional modeling and reporting.
@@ -357,11 +318,8 @@ public:
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
- void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
- CheckerContext &C) const;
- void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkNewAllocator(const CXXAllocatorCall &Call, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
@@ -395,23 +353,107 @@ private:
mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds];
+#define CHECK_FN(NAME) \
+ void NAME(const CallEvent &Call, CheckerContext &C) const;
+
+ CHECK_FN(checkFree)
+ CHECK_FN(checkIfNameIndex)
+ CHECK_FN(checkBasicAlloc)
+ CHECK_FN(checkKernelMalloc)
+ CHECK_FN(checkCalloc)
+ CHECK_FN(checkAlloca)
+ CHECK_FN(checkStrdup)
+ CHECK_FN(checkIfFreeNameIndex)
+ CHECK_FN(checkCXXNewOrCXXDelete)
+ CHECK_FN(checkGMalloc0)
+ CHECK_FN(checkGMemdup)
+ CHECK_FN(checkGMallocN)
+ CHECK_FN(checkGMallocN0)
+ CHECK_FN(checkReallocN)
+ CHECK_FN(checkOwnershipAttr)
+
+ void checkRealloc(const CallEvent &Call, CheckerContext &C,
+ bool ShouldFreeOnFail) const;
+
+ using CheckFn = std::function<void(const MallocChecker *,
+ const CallEvent &Call, CheckerContext &C)>;
+
+ const CallDescriptionMap<CheckFn> FreeingMemFnMap{
+ {{"free", 1}, &MallocChecker::checkFree},
+ {{"if_freenameindex", 1}, &MallocChecker::checkIfFreeNameIndex},
+ {{"kfree", 1}, &MallocChecker::checkFree},
+ {{"g_free", 1}, &MallocChecker::checkFree},
+ };
+
+ bool isFreeingCall(const CallEvent &Call) const;
+
+ CallDescriptionMap<CheckFn> AllocatingMemFnMap{
+ {{"alloca", 1}, &MallocChecker::checkAlloca},
+ {{"_alloca", 1}, &MallocChecker::checkAlloca},
+ {{"malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"malloc", 3}, &MallocChecker::checkKernelMalloc},
+ {{"calloc", 2}, &MallocChecker::checkCalloc},
+ {{"valloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{CDF_MaybeBuiltin, "strndup", 2}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, "strdup", 1}, &MallocChecker::checkStrdup},
+ {{"_strdup", 1}, &MallocChecker::checkStrdup},
+ {{"kmalloc", 2}, &MallocChecker::checkKernelMalloc},
+ {{"if_nameindex", 1}, &MallocChecker::checkIfNameIndex},
+ {{CDF_MaybeBuiltin, "wcsdup", 1}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, "_wcsdup", 1}, &MallocChecker::checkStrdup},
+ {{"g_malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"g_malloc0", 1}, &MallocChecker::checkGMalloc0},
+ {{"g_try_malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"g_try_malloc0", 1}, &MallocChecker::checkGMalloc0},
+ {{"g_memdup", 2}, &MallocChecker::checkGMemdup},
+ {{"g_malloc_n", 2}, &MallocChecker::checkGMallocN},
+ {{"g_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
+ {{"g_try_malloc_n", 2}, &MallocChecker::checkGMallocN},
+ {{"g_try_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
+ };
+
+ CallDescriptionMap<CheckFn> ReallocatingMemFnMap{
+ {{"realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"reallocf", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, true)},
+ {{"g_realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"g_try_realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"g_realloc_n", 3}, &MallocChecker::checkReallocN},
+ {{"g_try_realloc_n", 3}, &MallocChecker::checkReallocN},
+ };
+
+ bool isMemCall(const CallEvent &Call) const;
+
  // TODO: Remove mutable by moving the initialization to the registry function.
mutable Optional<uint64_t> KernelZeroFlagVal;
+ using KernelZeroSizePtrValueTy = Optional<int>;
+  /// Store the value of the macro called `ZERO_SIZE_PTR`.
+  /// The value is initialized at first use: before that, the outer Optional
+  /// is empty; afterwards it contains another Optional that indicates whether
+  /// the macro value could be determined and, if so, the value itself.
+ mutable Optional<KernelZeroSizePtrValueTy> KernelZeroSizePtrValue;
+
/// Process C++ operator new()'s allocation, which is the part of C++
/// new-expression that goes before the constructor.
- void processNewAllocation(const CXXNewExpr *NE, CheckerContext &C,
- SVal Target) const;
+ LLVM_NODISCARD
+ ProgramStateRef processNewAllocation(const CXXAllocatorCall &Call,
+ CheckerContext &C,
+ AllocationFamily Family) const;
/// Perform a zero-allocation check.
///
- /// \param [in] E The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] IndexOfSizeArg Index of the argument that specifies the size
/// of the memory that needs to be allocated. E.g. for malloc, this would be
/// 0.
/// \param [in] RetVal Specifies the newly allocated pointer value;
/// if unspecified, the value of expression \p E is used.
- static ProgramStateRef ProcessZeroAllocCheck(CheckerContext &C, const Expr *E,
+ LLVM_NODISCARD
+ static ProgramStateRef ProcessZeroAllocCheck(const CallEvent &Call,
const unsigned IndexOfSizeArg,
ProgramStateRef State,
Optional<SVal> RetVal = None);
@@ -428,50 +470,54 @@ private:
/// - first: name of the resource (e.g. 'malloc')
/// - (OPTIONAL) second: size of the allocated region
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] Att The ownership_returns attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
- const CallExpr *CE,
- const OwnershipAttr* Att,
+ LLVM_NODISCARD
+ ProgramStateRef MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
+ const OwnershipAttr *Att,
ProgramStateRef State) const;
/// Models memory allocation.
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] SizeEx Size of the memory that needs to be allocated.
/// \param [in] Init The value the allocated memory needs to be initialized.
/// with. For example, \c calloc initializes the allocated memory to 0,
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc);
+ AllocationFamily Family);
/// Models memory allocation.
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] Size Size of the memory that needs to be allocated.
  /// \param [in] Init The value the allocated memory needs to be initialized
/// with. For example, \c calloc initializes the allocated memory to 0,
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
SVal Size, SVal Init,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc);
+ AllocationFamily Family);
+ LLVM_NODISCARD
static ProgramStateRef addExtentSize(CheckerContext &C, const CXXNewExpr *NE,
ProgramStateRef State, SVal Target);
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
+ LLVM_NODISCARD
llvm::Optional<ProgramStateRef>
- performKernelMalloc(const CallExpr *CE, CheckerContext &C,
+ performKernelMalloc(const CallEvent &Call, CheckerContext &C,
const ProgramStateRef &State) const;
/// Model functions with the ownership_takes and ownership_holds attributes.
@@ -487,17 +533,18 @@ private:
/// - first: name of the resource (e.g. 'malloc')
/// - second: index of the parameter the attribute applies to
///
- /// \param [in] CE The expression that frees memory.
+ /// \param [in] Call The expression that frees memory.
/// \param [in] Att The ownership_takes or ownership_holds attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after deallocation.
- ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att,
+ LLVM_NODISCARD
+ ProgramStateRef FreeMemAttr(CheckerContext &C, const CallEvent &Call,
+ const OwnershipAttr *Att,
ProgramStateRef State) const;
/// Models memory deallocation.
///
- /// \param [in] CE The expression that frees memory.
+ /// \param [in] Call The expression that frees memory.
/// \param [in] State The \c ProgramState right before allocation.
/// \param [in] Num Index of the argument that needs to be freed. This is
/// normally 0, but for custom free functions it may be different.
@@ -514,15 +561,17 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
- ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ ProgramStateRef FreeMemAux(CheckerContext &C, const CallEvent &Call,
ProgramStateRef State, unsigned Num, bool Hold,
bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure = false) const;
/// Models memory deallocation.
///
  /// \param [in] ArgExpr The variable whose pointee needs to be freed.
- /// \param [in] ParentExpr The expression that frees the memory.
+ /// \param [in] Call The expression that frees the memory.
/// \param [in] State The \c ProgramState right before allocation.
/// \param [in] Hold Whether the parameter at \p Index has the ownership_holds
@@ -538,9 +587,11 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
+ LLVM_NODISCARD
ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *ArgExpr,
- const Expr *ParentExpr, ProgramStateRef State,
+ const CallEvent &Call, ProgramStateRef State,
bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure = false) const;
// TODO: Needs some refactoring, as all other deallocation modeling
@@ -549,15 +600,17 @@ private:
//
/// Models memory reallocation.
///
- /// \param [in] CE The expression that reallocated memory
+ /// \param [in] Call The expression that reallocated memory
/// \param [in] ShouldFreeOnFail Whether if reallocation fails, the supplied
/// memory should be freed.
/// \param [in] State The \c ProgramState right before reallocation.
/// \param [in] SuffixWithN Whether the reallocation function we're modeling
/// has an '_n' suffix, such as g_realloc_n.
/// \returns The ProgramState right after reallocation.
- ProgramStateRef ReallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ ProgramStateRef ReallocMemAux(CheckerContext &C, const CallEvent &Call,
bool ShouldFreeOnFail, ProgramStateRef State,
+ AllocationFamily Family,
bool SuffixWithN = false) const;
/// Evaluates the buffer size that needs to be allocated.
@@ -565,20 +618,22 @@ private:
/// \param [in] Blocks The amount of blocks that needs to be allocated.
/// \param [in] BlockBytes The size of a block.
/// \returns The symbolic value of \p Blocks * \p BlockBytes.
+ LLVM_NODISCARD
static SVal evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
const Expr *BlockBytes);
/// Models zero initialized array allocation.
///
- /// \param [in] CE The expression that reallocated memory
+  /// \param [in] Call The expression that allocates the memory.
  /// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef CallocMem(CheckerContext &C, const CallEvent &Call,
ProgramStateRef State);
/// See if deallocation happens in a suspicious context. If so, escape the
/// pointers that otherwise would have been deallocated and return true.
- bool suppressDeallocationsInSuspiciousContexts(const CallExpr *CE,
+ bool suppressDeallocationsInSuspiciousContexts(const CallEvent &Call,
CheckerContext &C) const;
/// If in \p S \p Sym is used, check whether \p Sym was already freed.
@@ -607,6 +662,7 @@ private:
SymbolRef &EscapingSymbol) const;
/// Implementation of the checkPointerEscape callbacks.
+ LLVM_NODISCARD
ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
@@ -622,44 +678,53 @@ private:
/// family/call/symbol.
Optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
bool IsALeakCheck = false) const;
- Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
- const Stmt *AllocDeallocStmt,
- bool IsALeakCheck = false) const;
+
Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck = false) const;
///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
- void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
- const Expr *DeallocExpr) const;
- void ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+ void HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr,
+ AllocationFamily Family) const;
+
+ void HandleFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const;
- void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
+
+ void HandleMismatchedDealloc(CheckerContext &C, SourceRange Range,
const Expr *DeallocExpr, const RefState *RS,
SymbolRef Sym, bool OwnershipTransferred) const;
- void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
- const Expr *DeallocExpr,
+
+ void HandleOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr, AllocationFamily Family,
const Expr *AllocExpr = nullptr) const;
- void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+
+ void HandleUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const;
- void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
+
+ void HandleDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
SymbolRef Sym, SymbolRef PrevSym) const;
- void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
+ void HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
- void ReportUseZeroAllocated(CheckerContext &C, SourceRange Range,
- SymbolRef Sym) const;
+ void HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const;
- void ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range, const Expr *FreeExpr) const;
+ void HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *FreeExpr,
+ AllocationFamily Family) const;
/// Find the location of the allocation for Sym on the path leading to the
/// exploded node N.
static LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
CheckerContext &C);
- void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+ void HandleLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+  /// Test if the value in ArgVal equals the value of the macro
+  /// `ZERO_SIZE_PTR`.
+ bool isArgZERO_SIZE_PTR(ProgramStateRef State, CheckerContext &C,
+ SVal ArgVal) const;
};
//===----------------------------------------------------------------------===//
@@ -782,7 +847,7 @@ private:
os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
<< " parameter failed";
- return os.str();
+ return std::string(os.str());
}
std::string getMessageForReturn(const CallExpr *CallExpr) override {
@@ -800,6 +865,7 @@ REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
namespace {
class StopTrackingCallback final : public SymbolVisitor {
ProgramStateRef state;
+
public:
StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
ProgramStateRef getState() const { return state; }
@@ -811,160 +877,57 @@ public:
};
} // end anonymous namespace
-//===----------------------------------------------------------------------===//
-// Methods of MemFunctionInfoTy.
-//===----------------------------------------------------------------------===//
-
-void MemFunctionInfoTy::initIdentifierInfo(ASTContext &Ctx) const {
- if (II_malloc)
- return;
- II_alloca = &Ctx.Idents.get("alloca");
- II_malloc = &Ctx.Idents.get("malloc");
- II_free = &Ctx.Idents.get("free");
- II_realloc = &Ctx.Idents.get("realloc");
- II_reallocf = &Ctx.Idents.get("reallocf");
- II_calloc = &Ctx.Idents.get("calloc");
- II_valloc = &Ctx.Idents.get("valloc");
- II_strdup = &Ctx.Idents.get("strdup");
- II_strndup = &Ctx.Idents.get("strndup");
- II_wcsdup = &Ctx.Idents.get("wcsdup");
- II_kmalloc = &Ctx.Idents.get("kmalloc");
- II_kfree = &Ctx.Idents.get("kfree");
- II_if_nameindex = &Ctx.Idents.get("if_nameindex");
- II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
-
- //MSVC uses `_`-prefixed instead, so we check for them too.
- II_win_strdup = &Ctx.Idents.get("_strdup");
- II_win_wcsdup = &Ctx.Idents.get("_wcsdup");
- II_win_alloca = &Ctx.Idents.get("_alloca");
-
- // Glib
- II_g_malloc = &Ctx.Idents.get("g_malloc");
- II_g_malloc0 = &Ctx.Idents.get("g_malloc0");
- II_g_realloc = &Ctx.Idents.get("g_realloc");
- II_g_try_malloc = &Ctx.Idents.get("g_try_malloc");
- II_g_try_malloc0 = &Ctx.Idents.get("g_try_malloc0");
- II_g_try_realloc = &Ctx.Idents.get("g_try_realloc");
- II_g_free = &Ctx.Idents.get("g_free");
- II_g_memdup = &Ctx.Idents.get("g_memdup");
- II_g_malloc_n = &Ctx.Idents.get("g_malloc_n");
- II_g_malloc0_n = &Ctx.Idents.get("g_malloc0_n");
- II_g_realloc_n = &Ctx.Idents.get("g_realloc_n");
- II_g_try_malloc_n = &Ctx.Idents.get("g_try_malloc_n");
- II_g_try_malloc0_n = &Ctx.Idents.get("g_try_malloc0_n");
- II_g_try_realloc_n = &Ctx.Idents.get("g_try_realloc_n");
-}
-
-bool MemFunctionInfoTy::isMemFunction(const FunctionDecl *FD,
- ASTContext &C) const {
- if (isCMemFunction(FD, C, AF_Malloc, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isCMemFunction(FD, C, AF_Alloca, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isStandardNewDelete(FD, C))
- return true;
-
- return false;
-}
-
-bool MemFunctionInfoTy::isCMemFunction(const FunctionDecl *FD, ASTContext &C,
- AllocationFamily Family,
- MemoryOperationKind MemKind) const {
+static bool isStandardNewDelete(const FunctionDecl *FD) {
if (!FD)
return false;
- bool CheckFree = (MemKind == MemoryOperationKind::MOK_Any ||
- MemKind == MemoryOperationKind::MOK_Free);
- bool CheckAlloc = (MemKind == MemoryOperationKind::MOK_Any ||
- MemKind == MemoryOperationKind::MOK_Allocate);
-
- if (FD->getKind() == Decl::Function) {
- const IdentifierInfo *FunI = FD->getIdentifier();
- initIdentifierInfo(C);
-
- if (Family == AF_Malloc && CheckFree) {
- if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf ||
- FunI == II_g_free || FunI == II_kfree)
- return true;
- }
-
- if (Family == AF_Malloc && CheckAlloc) {
- if (FunI == II_malloc || FunI == II_realloc || FunI == II_reallocf ||
- FunI == II_calloc || FunI == II_valloc || FunI == II_strdup ||
- FunI == II_win_strdup || FunI == II_strndup || FunI == II_wcsdup ||
- FunI == II_win_wcsdup || FunI == II_kmalloc ||
- FunI == II_g_malloc || FunI == II_g_malloc0 ||
- FunI == II_g_realloc || FunI == II_g_try_malloc ||
- FunI == II_g_try_malloc0 || FunI == II_g_try_realloc ||
- FunI == II_g_memdup || FunI == II_g_malloc_n ||
- FunI == II_g_malloc0_n || FunI == II_g_realloc_n ||
- FunI == II_g_try_malloc_n || FunI == II_g_try_malloc0_n ||
- FunI == II_g_try_realloc_n)
- return true;
- }
-
- if (Family == AF_IfNameIndex && CheckFree) {
- if (FunI == II_if_freenameindex)
- return true;
- }
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind != OO_New && Kind != OO_Array_New && Kind != OO_Delete &&
+ Kind != OO_Array_Delete)
+ return false;
- if (Family == AF_IfNameIndex && CheckAlloc) {
- if (FunI == II_if_nameindex)
- return true;
- }
+ // This is standard if and only if it's not defined in a user file.
+ SourceLocation L = FD->getLocation();
+  // If the header for operator delete is not included, the operator is still
+  // implicitly declared, but at an invalid source location. Check to make sure
+  // we don't crash on it.
+ return !L.isValid() ||
+ FD->getASTContext().getSourceManager().isInSystemHeader(L);
+}
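+// A hedged illustration of the distinction above (assumed example, not part
+// of this patch): the replaceable global form
+//   void *operator new(std::size_t);              // declared in <new>
+// is treated as standard because <new> is a system header, while a user
+// overload such as
+//   void *operator new(std::size_t, MyArena &);   // declared in user code
+// is not, because its location falls outside a system header.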
- if (Family == AF_Alloca && CheckAlloc) {
- if (FunI == II_alloca || FunI == II_win_alloca)
- return true;
- }
- }
+//===----------------------------------------------------------------------===//
+// Methods of MallocChecker and MallocBugVisitor.
+//===----------------------------------------------------------------------===//
- if (Family != AF_Malloc)
- return false;
+bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
+ return true;
- if (ShouldIncludeOwnershipAnnotatedFunctions && FD->hasAttrs()) {
- for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+ const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
+ if (Func && Func->hasAttrs()) {
+ for (const auto *I : Func->specific_attrs<OwnershipAttr>()) {
OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
- if(OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds) {
- if (CheckFree)
- return true;
- } else if (OwnKind == OwnershipAttr::Returns) {
- if (CheckAlloc)
- return true;
- }
+ if (OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds)
+ return true;
}
}
-
return false;
}
-bool MemFunctionInfoTy::isStandardNewDelete(const FunctionDecl *FD,
- ASTContext &C) const {
- if (!FD)
- return false;
- OverloadedOperatorKind Kind = FD->getOverloadedOperator();
- if (Kind != OO_New && Kind != OO_Array_New &&
- Kind != OO_Delete && Kind != OO_Array_Delete)
+bool MallocChecker::isMemCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || AllocatingMemFnMap.lookup(Call) ||
+ ReallocatingMemFnMap.lookup(Call))
+ return true;
+
+ if (!ShouldIncludeOwnershipAnnotatedFunctions)
return false;
- // This is standard if and only if it's not defined in a user file.
- SourceLocation L = FD->getLocation();
- // If the header for operator delete is not included, it's still defined
- // in an invalid source location. Check to make sure we don't crash.
- return !L.isValid() || C.getSourceManager().isInSystemHeader(L);
+ const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
+ return Func && Func->hasAttr<OwnershipAttr>();
}
-//===----------------------------------------------------------------------===//
-// Methods of MallocChecker and MallocBugVisitor.
-//===----------------------------------------------------------------------===//
-
-llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
- const CallExpr *CE, CheckerContext &C, const ProgramStateRef &State) const {
+llvm::Optional<ProgramStateRef>
+MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
+ const ProgramStateRef &State) const {
// 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
//
// void *malloc(unsigned long size, struct malloc_type *mtp, int flags);
@@ -1006,10 +969,10 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
  // We treat the last argument as the flags argument, and callers fall back to
// normal malloc on a None return. This works for the FreeBSD kernel malloc
// as well as Linux kmalloc.
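  // Illustrative call sites for this pattern (assumed examples, not code from
  // this patch):
  //   buf = malloc(len, M_TEMP, M_ZERO | M_WAITOK);          // BSD kernels
  //   obj = kmalloc(sizeof(*obj), GFP_KERNEL | __GFP_ZERO);  // Linux
  // When the zeroing flag (M_ZERO / __GFP_ZERO) is known to be set, the
  // allocation is modeled like calloc() below; otherwise it falls back to the
  // plain malloc() modeling.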
- if (CE->getNumArgs() < 2)
+ if (Call.getNumArgs() < 2)
return None;
- const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
+ const Expr *FlagsEx = Call.getArgExpr(Call.getNumArgs() - 1);
const SVal V = C.getSVal(FlagsEx);
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
@@ -1035,7 +998,8 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
// If M_ZERO is set, treat this like calloc (initialized).
if (TrueState && !FalseState) {
SVal ZeroVal = C.getSValBuilder().makeZeroVal(Ctx.CharTy);
- return MallocMemAux(C, CE, CE->getArg(0), ZeroVal, TrueState);
+ return MallocMemAux(C, Call, Call.getArgExpr(0), ZeroVal, TrueState,
+ AF_Malloc);
}
return None;
@@ -1052,161 +1016,234 @@ SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
return TotalSize;
}
-void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
- if (C.wasInlined)
+void MallocChecker::checkBasicAlloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkKernelMalloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ llvm::Optional<ProgramStateRef> MaybeState =
+ performKernelMalloc(Call, C, State);
+ if (MaybeState.hasValue())
+ State = MaybeState.getValue();
+ else
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Malloc);
+ C.addTransition(State);
+}
+
+static bool isStandardRealloc(const CallEvent &Call) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Call.getDecl());
+ assert(FD);
+ ASTContext &AC = FD->getASTContext();
+
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
+ AC.getSizeType();
+}
+
+static bool isGRealloc(const CallEvent &Call) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Call.getDecl());
+ assert(FD);
+ ASTContext &AC = FD->getASTContext();
+
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
+ AC.UnsignedLongTy;
+}
+
+void MallocChecker::checkRealloc(const CallEvent &Call, CheckerContext &C,
+ bool ShouldFreeOnFail) const {
+ // HACK: CallDescription currently recognizes non-standard realloc functions
+  // as standard because it doesn't check the type, or whether it's a non-method
+ // function. This should be solved by making CallDescription smarter.
+  // Note that this came from a bug report, and all the other modeled functions
+  // suffer from the same problem.
+ // https://bugs.llvm.org/show_bug.cgi?id=46253
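+  // A hypothetical lookalike (assumed example, not taken from the report or
+  // from this patch) that CallDescription alone would have matched:
+  //   class MemoryManager {
+  //     void *realloc(void *ptr, size_t size);  // method, not ::realloc
+  //   };
+  // The return/parameter-type and non-method checks above filter such
+  // declarations out, keeping only the standard realloc and g_realloc shapes.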
+ if (!isStandardRealloc(Call) && !isGRealloc(Call))
return;
+ ProgramStateRef State = C.getState();
+ State = ReallocMemAux(C, Call, ShouldFreeOnFail, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
+void MallocChecker::checkCalloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = CallocMem(C, Call, State);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkFree(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocatedMemory = false;
+ if (suppressDeallocationsInSuspiciousContexts(Call, C))
return;
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_Malloc);
+ C.addTransition(State);
+}
+void MallocChecker::checkAlloca(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Alloca);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkStrdup(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+ State = MallocUpdateRefState(C, CE, State, AF_Malloc);
+
+ C.addTransition(State);
+}
+
+void MallocChecker::checkIfNameIndex(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ // Should we model this differently? We can allocate a fixed number of
+ // elements with zeros in the last one.
+ State =
+ MallocMemAux(C, Call, UnknownVal(), UnknownVal(), State, AF_IfNameIndex);
+
+ C.addTransition(State);
+}
+
+void MallocChecker::checkIfFreeNameIndex(const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
bool IsKnownToBeAllocatedMemory = false;
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_IfNameIndex);
+ C.addTransition(State);
+}
- if (FD->getKind() == Decl::Function) {
- MemFunctionInfo.initIdentifierInfo(C.getASTContext());
- IdentifierInfo *FunI = FD->getIdentifier();
+void MallocChecker::checkCXXNewOrCXXDelete(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocatedMemory = false;
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
- if (FunI == MemFunctionInfo.II_malloc ||
- FunI == MemFunctionInfo.II_g_malloc ||
- FunI == MemFunctionInfo.II_g_try_malloc) {
- switch (CE->getNumArgs()) {
- default:
- return;
- case 1:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case 2:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- break;
- case 3:
- llvm::Optional<ProgramStateRef> MaybeState =
- performKernelMalloc(CE, C, State);
- if (MaybeState.hasValue())
- State = MaybeState.getValue();
- else
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- break;
- }
- } else if (FunI == MemFunctionInfo.II_kmalloc) {
- if (CE->getNumArgs() < 1)
- return;
- llvm::Optional<ProgramStateRef> MaybeState =
- performKernelMalloc(CE, C, State);
- if (MaybeState.hasValue())
- State = MaybeState.getValue();
- else
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- } else if (FunI == MemFunctionInfo.II_valloc) {
- if (CE->getNumArgs() < 1)
- return;
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (FunI == MemFunctionInfo.II_realloc ||
- FunI == MemFunctionInfo.II_g_realloc ||
- FunI == MemFunctionInfo.II_g_try_realloc) {
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ false, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_reallocf) {
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ true, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_calloc) {
- State = CallocMem(C, CE, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_free ||
- FunI == MemFunctionInfo.II_g_free ||
- FunI == MemFunctionInfo.II_kfree) {
- if (suppressDeallocationsInSuspiciousContexts(CE, C))
- return;
+ assert(isStandardNewDelete(Call));
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- } else if (FunI == MemFunctionInfo.II_strdup ||
- FunI == MemFunctionInfo.II_win_strdup ||
- FunI == MemFunctionInfo.II_wcsdup ||
- FunI == MemFunctionInfo.II_win_wcsdup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (FunI == MemFunctionInfo.II_strndup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (FunI == MemFunctionInfo.II_alloca ||
- FunI == MemFunctionInfo.II_win_alloca) {
- if (CE->getNumArgs() < 1)
- return;
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_Alloca);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (MemFunctionInfo.isStandardNewDelete(FD, C.getASTContext())) {
- // Process direct calls to operator new/new[]/delete/delete[] functions
- // as distinct from new/new[]/delete/delete[] expressions that are
- // processed by the checkPostStmt callbacks for CXXNewExpr and
- // CXXDeleteExpr.
- switch (FD->getOverloadedOperator()) {
- case OO_New:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_CXXNew);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case OO_Array_New:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_CXXNewArray);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case OO_Delete:
- case OO_Array_Delete:
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- break;
- default:
- llvm_unreachable("not a new/delete operator");
- }
- } else if (FunI == MemFunctionInfo.II_if_nameindex) {
- // Should we model this differently? We can allocate a fixed number of
- // elements with zeros in the last one.
- State = MallocMemAux(C, CE, UnknownVal(), UnknownVal(), State,
- AF_IfNameIndex);
- } else if (FunI == MemFunctionInfo.II_if_freenameindex) {
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- } else if (FunI == MemFunctionInfo.II_g_malloc0 ||
- FunI == MemFunctionInfo.II_g_try_malloc0) {
- if (CE->getNumArgs() < 1)
- return;
- SValBuilder &svalBuilder = C.getSValBuilder();
- SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- State = MallocMemAux(C, CE, CE->getArg(0), zeroVal, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (FunI == MemFunctionInfo.II_g_memdup) {
- if (CE->getNumArgs() < 2)
- return;
- State = MallocMemAux(C, CE, CE->getArg(1), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_g_malloc_n ||
- FunI == MemFunctionInfo.II_g_try_malloc_n ||
- FunI == MemFunctionInfo.II_g_malloc0_n ||
- FunI == MemFunctionInfo.II_g_try_malloc0_n) {
- if (CE->getNumArgs() < 2)
- return;
- SVal Init = UndefinedVal();
- if (FunI == MemFunctionInfo.II_g_malloc0_n ||
- FunI == MemFunctionInfo.II_g_try_malloc0_n) {
- SValBuilder &SB = C.getSValBuilder();
- Init = SB.makeZeroVal(SB.getContext().CharTy);
- }
- SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
- State = MallocMemAux(C, CE, TotalSize, Init, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_g_realloc_n ||
- FunI == MemFunctionInfo.II_g_try_realloc_n) {
- if (CE->getNumArgs() < 3)
- return;
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ false, State,
- /*SuffixWithN*/ true);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- State = ProcessZeroAllocCheck(C, CE, 2, State);
- }
+ // Process direct calls to operator new/new[]/delete/delete[] functions
+ // as distinct from new/new[]/delete/delete[] expressions that are
+ // processed by the checkPostStmt callbacks for CXXNewExpr and
+ // CXXDeleteExpr.
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ switch (FD->getOverloadedOperator()) {
+ case OO_New:
+ State =
+ MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State, AF_CXXNew);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ break;
+ case OO_Array_New:
+ State = MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNewArray);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ break;
+ case OO_Delete:
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_CXXNew);
+ break;
+ case OO_Array_Delete:
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_CXXNewArray);
+ break;
+ default:
+ llvm_unreachable("not a new/delete operator");
}
- if (MemFunctionInfo.ShouldIncludeOwnershipAnnotatedFunctions ||
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMalloc0(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), zeroVal, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMemdup(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(1), UndefinedVal(), State,
+ AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMallocN(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal Init = UndefinedVal();
+  SVal TotalSize =
+      evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
+ State = MallocMemAux(C, Call, TotalSize, Init, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMallocN0(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SB = C.getSValBuilder();
+ SVal Init = SB.makeZeroVal(SB.getContext().CharTy);
+  SVal TotalSize =
+      evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
+ State = MallocMemAux(C, Call, TotalSize, Init, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkReallocN(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = ReallocMemAux(C, Call, /*ShouldFreeOnFail=*/false, State, AF_Malloc,
+ /*SuffixWithN=*/true);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ State = ProcessZeroAllocCheck(Call, 2, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkOwnershipAttr(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+ if (ShouldIncludeOwnershipAnnotatedFunctions ||
ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
// Check all the attributes, if there are any.
// There can be multiple of these attributes.
@@ -1214,11 +1251,11 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
switch (I->getOwnKind()) {
case OwnershipAttr::Returns:
- State = MallocMemReturnsAttr(C, CE, I, State);
+ State = MallocMemReturnsAttr(C, Call, I, State);
break;
case OwnershipAttr::Takes:
case OwnershipAttr::Holds:
- State = FreeMemAttr(C, CE, I, State);
+ State = FreeMemAttr(C, Call, I, State);
break;
}
}
@@ -1226,40 +1263,73 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
C.addTransition(State);
}
+void MallocChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+ if (!Call.getOriginExpr())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ if (const CheckFn *Callback = FreeingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (const CheckFn *Callback = AllocatingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (const CheckFn *Callback = ReallocatingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (isStandardNewDelete(Call)) {
+ checkCXXNewOrCXXDelete(Call, C);
+ return;
+ }
+
+ checkOwnershipAttr(Call, C);
+}
+
// Performs a 0-sized allocations check.
ProgramStateRef MallocChecker::ProcessZeroAllocCheck(
- CheckerContext &C, const Expr *E, const unsigned IndexOfSizeArg,
- ProgramStateRef State, Optional<SVal> RetVal) {
+ const CallEvent &Call, const unsigned IndexOfSizeArg, ProgramStateRef State,
+ Optional<SVal> RetVal) {
if (!State)
return nullptr;
if (!RetVal)
- RetVal = C.getSVal(E);
+ RetVal = Call.getReturnValue();
const Expr *Arg = nullptr;
- if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Call.getOriginExpr())) {
Arg = CE->getArg(IndexOfSizeArg);
- }
- else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
- if (NE->isArray())
+ } else if (const CXXNewExpr *NE =
+ dyn_cast<CXXNewExpr>(Call.getOriginExpr())) {
+ if (NE->isArray()) {
Arg = *NE->getArraySize();
- else
+ } else {
return State;
- }
- else
+ }
+ } else
llvm_unreachable("not a CallExpr or CXXNewExpr");
assert(Arg);
- Optional<DefinedSVal> DefArgVal = C.getSVal(Arg).getAs<DefinedSVal>();
+ auto DefArgVal =
+ State->getSVal(Arg, Call.getLocationContext()).getAs<DefinedSVal>();
if (!DefArgVal)
return State;
// Check if the allocation size is 0.
ProgramStateRef TrueState, FalseState;
- SValBuilder &SvalBuilder = C.getSValBuilder();
+ SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
DefinedSVal Zero =
SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
@@ -1330,44 +1400,43 @@ static bool hasNonTrivialConstructorCall(const CXXNewExpr *NE) {
return false;
}
-void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
- CheckerContext &C,
- SVal Target) const {
- if (!MemFunctionInfo.isStandardNewDelete(NE->getOperatorNew(),
- C.getASTContext()))
- return;
+ProgramStateRef
+MallocChecker::processNewAllocation(const CXXAllocatorCall &Call,
+ CheckerContext &C,
+ AllocationFamily Family) const {
+ if (!isStandardNewDelete(Call))
+ return nullptr;
+ const CXXNewExpr *NE = Call.getOriginExpr();
const ParentMap &PM = C.getLocationContext()->getParentMap();
+ ProgramStateRef State = C.getState();
// Non-trivial constructors have a chance to escape 'this', but marking all
// invocations of trivial constructors as escaped would cause too great of
// reduction of true positives, so let's just do that for constructors that
// have an argument of a pointer-to-record type.
if (!PM.isConsumedExpr(NE) && hasNonTrivialConstructorCall(NE))
- return;
+ return State;
- ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
  // value (if any) and we don't want to lose this value. So we call
  // MallocUpdateRefState() instead of MallocMemAux(), which would break the
  // existing binding.
- State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
- : AF_CXXNew, Target);
+ SVal Target = Call.getObjectUnderConstruction();
+ State = MallocUpdateRefState(C, NE, State, Family, Target);
State = addExtentSize(C, NE, State, Target);
- State = ProcessZeroAllocCheck(C, NE, 0, State, Target);
- C.addTransition(State);
-}
-
-void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
- CheckerContext &C) const {
- if (!C.getAnalysisManager().getAnalyzerOptions().MayInlineCXXAllocator)
- processNewAllocation(NE, C, C.getSVal(NE));
+ State = ProcessZeroAllocCheck(Call, 0, State, Target);
+ return State;
}
-void MallocChecker::checkNewAllocator(const CXXNewExpr *NE, SVal Target,
+void MallocChecker::checkNewAllocator(const CXXAllocatorCall &Call,
CheckerContext &C) const {
- if (!C.wasInlined)
- processNewAllocation(NE, C, Target);
+ if (!C.wasInlined) {
+ ProgramStateRef State = processNewAllocation(
+ Call, C,
+ (Call.getOriginExpr()->isArray() ? AF_CXXNewArray : AF_CXXNew));
+ C.addTransition(State);
+ }
}
// Sets the extent value of the MemRegion allocated by
@@ -1402,38 +1471,20 @@ ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
if (ElementCount.getAs<NonLoc>()) {
- DefinedOrUnknownSVal Extent = Region->getExtent(svalBuilder);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(State, Region, svalBuilder);
+
// size in Bytes = ElementCount*TypeSize
SVal SizeInBytes = svalBuilder.evalBinOpNN(
State, BO_Mul, ElementCount.castAs<NonLoc>(),
svalBuilder.makeArrayIndex(TypeSize.getQuantity()),
svalBuilder.getArrayIndexType());
- DefinedOrUnknownSVal extentMatchesSize = svalBuilder.evalEQ(
- State, Extent, SizeInBytes.castAs<DefinedOrUnknownSVal>());
- State = State->assume(extentMatchesSize, true);
+ DefinedOrUnknownSVal DynSizeMatchesSize = svalBuilder.evalEQ(
+ State, DynSize, SizeInBytes.castAs<DefinedOrUnknownSVal>());
+ State = State->assume(DynSizeMatchesSize, true);
}
return State;
}
-void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
- CheckerContext &C) const {
-
- if (!ChecksEnabled[CK_NewDeleteChecker])
- if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
- checkUseAfterFree(Sym, C, DE->getArgument());
-
- if (!MemFunctionInfo.isStandardNewDelete(DE->getOperatorDelete(),
- C.getASTContext()))
- return;
-
- ProgramStateRef State = C.getState();
- bool IsKnownToBeAllocated;
- State = FreeMemAux(C, DE->getArgument(), DE, State,
- /*Hold*/ false, IsKnownToBeAllocated);
-
- C.addTransition(State);
-}
-
static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// If the first selector piece is one of the names below, assume that the
// object takes ownership of the memory, promising to eventually deallocate it
@@ -1474,50 +1525,52 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
bool IsKnownToBeAllocatedMemory;
ProgramStateRef State =
- FreeMemAux(C, Call.getArgExpr(0), Call.getOriginExpr(), C.getState(),
- /*Hold=*/true, IsKnownToBeAllocatedMemory,
+ FreeMemAux(C, Call.getArgExpr(0), Call, C.getState(),
+ /*Hold=*/true, IsKnownToBeAllocatedMemory, AF_Malloc,
/*RetNullOnFailure=*/true);
C.addTransition(State);
}
ProgramStateRef
-MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
+MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
- if (Att->getModule() != MemFunctionInfo.II_malloc)
+ if (Att->getModule()->getName() != "malloc")
return nullptr;
OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
if (I != E) {
- return MallocMemAux(C, CE, CE->getArg(I->getASTIndex()), UndefinedVal(),
- State);
+ return MallocMemAux(C, Call, Call.getArgExpr(I->getASTIndex()),
+ UndefinedVal(), State, AF_Malloc);
}
- return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State);
+ return MallocMemAux(C, Call, UnknownVal(), UndefinedVal(), State, AF_Malloc);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
AllocationFamily Family) {
if (!State)
return nullptr;
- return MallocMemAux(C, CE, C.getSVal(SizeEx), Init, State, Family);
+ assert(SizeEx);
+ return MallocMemAux(C, Call, C.getSVal(SizeEx), Init, State, Family);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
- const CallExpr *CE,
- SVal Size, SVal Init,
- ProgramStateRef State,
- AllocationFamily Family) {
+ const CallEvent &Call, SVal Size,
+ SVal Init, ProgramStateRef State,
+ AllocationFamily Family) {
if (!State)
return nullptr;
+ const Expr *CE = Call.getOriginExpr();
+
// We expect the malloc functions to return a pointer.
if (!Loc::isLocType(CE->getType()))
return nullptr;
@@ -1542,12 +1595,12 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
return nullptr;
if (Optional<DefinedOrUnknownSVal> DefinedSize =
Size.getAs<DefinedOrUnknownSVal>()) {
- SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
- DefinedOrUnknownSVal extentMatchesSize =
- svalBuilder.evalEQ(State, Extent, *DefinedSize);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(State, R, svalBuilder);
+
+ DefinedOrUnknownSVal DynSizeMatchesSize =
+ svalBuilder.evalEQ(State, DynSize, *DefinedSize);
- State = State->assume(extentMatchesSize, true);
+ State = State->assume(DynSizeMatchesSize, true);
assert(State);
}
@@ -1579,39 +1632,42 @@ static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
}
ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
- if (Att->getModule() != MemFunctionInfo.II_malloc)
+ if (Att->getModule()->getName() != "malloc")
return nullptr;
bool IsKnownToBeAllocated = false;
for (const auto &Arg : Att->args()) {
- ProgramStateRef StateI = FreeMemAux(
- C, CE, State, Arg.getASTIndex(),
- Att->getOwnKind() == OwnershipAttr::Holds, IsKnownToBeAllocated);
+ ProgramStateRef StateI =
+ FreeMemAux(C, Call, State, Arg.getASTIndex(),
+ Att->getOwnKind() == OwnershipAttr::Holds,
+ IsKnownToBeAllocated, AF_Malloc);
if (StateI)
State = StateI;
}
return State;
}
-ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const CallEvent &Call,
ProgramStateRef State, unsigned Num,
bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
- if (CE->getNumArgs() < (Num + 1))
+ if (Call.getNumArgs() < (Num + 1))
return nullptr;
- return FreeMemAux(C, CE->getArg(Num), CE, State, Hold, IsKnownToBeAllocated,
- ReturnsNullOnFailure);
+ return FreeMemAux(C, Call.getArgExpr(Num), Call, State, Hold,
+ IsKnownToBeAllocated, Family, ReturnsNullOnFailure);
}
/// Checks if the previous call to free on the given symbol failed - if free
@@ -1629,58 +1685,7 @@ static bool didPreviousFreeFail(ProgramStateRef State,
return false;
}
-static AllocationFamily
-getAllocationFamily(const MemFunctionInfoTy &MemFunctionInfo, CheckerContext &C,
- const Stmt *S) {
-
- if (!S)
- return AF_None;
-
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
-
- if (!FD)
- FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
-
- ASTContext &Ctx = C.getASTContext();
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Malloc,
- MemoryOperationKind::MOK_Any))
- return AF_Malloc;
-
- if (MemFunctionInfo.isStandardNewDelete(FD, Ctx)) {
- OverloadedOperatorKind Kind = FD->getOverloadedOperator();
- if (Kind == OO_New || Kind == OO_Delete)
- return AF_CXXNew;
- else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
- return AF_CXXNewArray;
- }
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_IfNameIndex,
- MemoryOperationKind::MOK_Any))
- return AF_IfNameIndex;
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Alloca,
- MemoryOperationKind::MOK_Any))
- return AF_Alloca;
-
- return AF_None;
- }
-
- if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
- return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
-
- if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
- return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
-
- if (isa<ObjCMessageExpr>(S))
- return AF_Malloc;
-
- return AF_None;
-}
-
-static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
- const Expr *E) {
+static bool printMemFnName(raw_ostream &os, CheckerContext &C, const Expr *E) {
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
// FIXME: This doesn't handle indirect calls.
const FunctionDecl *FD = CE->getDirectCallee();
@@ -1719,10 +1724,7 @@ static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
return false;
}
-static void printExpectedAllocName(raw_ostream &os,
- const MemFunctionInfoTy &MemFunctionInfo,
- CheckerContext &C, const Expr *E) {
- AllocationFamily Family = getAllocationFamily(MemFunctionInfo, C, E);
+static void printExpectedAllocName(raw_ostream &os, AllocationFamily Family) {
switch(Family) {
case AF_Malloc: os << "malloc()"; return;
@@ -1747,12 +1749,10 @@ static void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) {
}
}
-ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
- const Expr *ArgExpr,
- const Expr *ParentExpr,
- ProgramStateRef State, bool Hold,
- bool &IsKnownToBeAllocated,
- bool ReturnsNullOnFailure) const {
+ProgramStateRef MallocChecker::FreeMemAux(
+ CheckerContext &C, const Expr *ArgExpr, const CallEvent &Call,
+ ProgramStateRef State, bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family, bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
@@ -1778,11 +1778,28 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
return nullptr;
const MemRegion *R = ArgVal.getAsRegion();
+ const Expr *ParentExpr = Call.getOriginExpr();
+
+ // NOTE: We detected a bug, but the checker under whose name we would emit the
+ // error could be disabled. Generally speaking, the MallocChecker family is an
+ // integral part of the Static Analyzer, and disabling any part of it should
+ // only be done under exceptional circumstances, such as frequent false
+ // positives. If this is the case, we can reasonably believe that there are
+ // serious faults in our understanding of the source code, and even if we
+  // don't emit a warning, we should terminate further analysis with a sink
+ // node.
// Nonlocs can't be freed, of course.
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ // Exception:
+    // If the macro ZERO_SIZE_PTR is defined, this could be kernel source code.
+    // In that case, ZERO_SIZE_PTR defines a special value used for a
+ // zero-sized memory block which is allowed to be freed, despite not being a
+ // null pointer.
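+    // A minimal sketch of the kernel idiom this exception targets (assumed;
+    // the constant below is the usual Linux definition, not something this
+    // checker defines):
+    //   #define ZERO_SIZE_PTR ((void *)16)
+    //   void *p = kmalloc(0, GFP_KERNEL);  // may return ZERO_SIZE_PTR
+    //   kfree(p);                          // valid, despite p != NULL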
+ if (Family != AF_Malloc || !isArgZERO_SIZE_PTR(State, C, ArgVal))
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1790,7 +1807,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1808,9 +1826,10 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// False negatives are better than false positives.
if (isa<AllocaRegion>(R))
- ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ HandleFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
else
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1832,14 +1851,14 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Memory returned by alloca() shouldn't be freed.
if (RsBase->getAllocationFamily() == AF_Alloca) {
- ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ HandleFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
return nullptr;
}
// Check for double free first.
if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
!didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
- ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
+ HandleDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
SymBase, PreviousRetStatusSymbol);
return nullptr;
@@ -1849,12 +1868,10 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
RsBase->isEscaped()) {
// Check if an expected deallocation function matches the real one.
- bool DeallocMatchesAlloc =
- RsBase->getAllocationFamily() ==
- getAllocationFamily(MemFunctionInfo, C, ParentExpr);
+ bool DeallocMatchesAlloc = RsBase->getAllocationFamily() == Family;
if (!DeallocMatchesAlloc) {
- ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
- ParentExpr, RsBase, SymBase, Hold);
+ HandleMismatchedDealloc(C, ArgExpr->getSourceRange(), ParentExpr,
+ RsBase, SymBase, Hold);
return nullptr;
}
@@ -1865,15 +1882,16 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
!Offset.hasSymbolicOffset() &&
Offset.getOffset() != 0) {
const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
- ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
- AllocExpr);
+ HandleOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family, AllocExpr);
return nullptr;
}
}
}
if (SymBase->getType()->isFunctionPointerType()) {
- ReportFunctionPointerFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleFunctionPtrFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1891,9 +1909,12 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
}
}
- AllocationFamily Family =
- RsBase ? RsBase->getAllocationFamily()
- : getAllocationFamily(MemFunctionInfo, C, ParentExpr);
+ // If we don't know anything about this symbol, a free on it may be totally
+  // valid. If this is the case, let's assume that the allocation family of the
+  // freeing function is the same as the symbol's allocation family, and go with
+ // that.
+  assert(!RsBase || RsBase->getAllocationFamily() == Family);
+
// Normal free.
if (Hold)
return State->set<RegionState>(SymBase,
@@ -1940,14 +1961,6 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
}
Optional<MallocChecker::CheckKind>
-MallocChecker::getCheckIfTracked(CheckerContext &C,
- const Stmt *AllocDeallocStmt,
- bool IsALeakCheck) const {
- return getCheckIfTracked(
- getAllocationFamily(MemFunctionInfo, C, AllocDeallocStmt), IsALeakCheck);
-}
-
-Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck) const {
if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym))
@@ -2045,16 +2058,17 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
}
}
-void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range,
- const Expr *DeallocExpr) const {
+void MallocChecker::HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *DeallocExpr,
+ AllocationFamily Family) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind =
- getCheckIfTracked(C, DeallocExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2071,7 +2085,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
MR = ER->getSuperRegion();
os << "Argument to ";
- if (!printAllocDeallocName(os, C, DeallocExpr))
+ if (!printMemFnName(os, C, DeallocExpr))
os << "deallocator";
os << " is ";
@@ -2082,7 +2096,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
else
os << "not memory allocated by ";
- printExpectedAllocName(os, MemFunctionInfo, C, DeallocExpr);
+ printExpectedAllocName(os, Family);
auto R = std::make_unique<PathSensitiveBugReport>(*BT_BadFree[*CheckKind],
os.str(), N);
@@ -2092,7 +2106,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+void MallocChecker::HandleFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const {
Optional<MallocChecker::CheckKind> CheckKind;
@@ -2101,8 +2115,10 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
CheckKind = CK_MallocChecker;
else if (ChecksEnabled[CK_MismatchedDeallocatorChecker])
CheckKind = CK_MismatchedDeallocatorChecker;
- else
+ else {
+ C.addSink();
return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_FreeAlloca[*CheckKind])
@@ -2118,15 +2134,16 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
+void MallocChecker::HandleMismatchedDealloc(CheckerContext &C,
SourceRange Range,
const Expr *DeallocExpr,
- const RefState *RS,
- SymbolRef Sym,
+ const RefState *RS, SymbolRef Sym,
bool OwnershipTransferred) const {
- if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
+ if (!ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
+ C.addSink();
return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_MismatchedDealloc)
@@ -2144,25 +2161,25 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
llvm::raw_svector_ostream DeallocOs(DeallocBuf);
if (OwnershipTransferred) {
- if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+ if (printMemFnName(DeallocOs, C, DeallocExpr))
os << DeallocOs.str() << " cannot";
else
os << "Cannot";
os << " take ownership of memory";
- if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ if (printMemFnName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
} else {
os << "Memory";
- if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ if (printMemFnName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
os << " should be deallocated by ";
printExpectedDeallocName(os, RS->getAllocationFamily());
- if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
- os << ", not " << DeallocOs.str();
+ if (printMemFnName(DeallocOs, C, DeallocExpr))
+ os << ", not " << DeallocOs.str();
}
auto R = std::make_unique<PathSensitiveBugReport>(*BT_MismatchedDealloc,
@@ -2174,17 +2191,17 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
}
}
-void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
+void MallocChecker::HandleOffsetFree(CheckerContext &C, SVal ArgVal,
SourceRange Range, const Expr *DeallocExpr,
+ AllocationFamily Family,
const Expr *AllocExpr) const {
-
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind =
- getCheckIfTracked(C, AllocExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2213,14 +2230,14 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
os << "Argument to ";
- if (!printAllocDeallocName(os, C, DeallocExpr))
+ if (!printMemFnName(os, C, DeallocExpr))
os << "deallocator";
os << " is offset by "
<< offsetBytes
<< " "
<< ((abs(offsetBytes) > 1) ? "bytes" : "byte")
<< " from the start of ";
- if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
+ if (AllocExpr && printMemFnName(AllocNameOs, C, AllocExpr))
os << "memory allocated by " << AllocNameOs.str();
else
os << "allocated memory";
@@ -2232,13 +2249,14 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
C.emitReport(std::move(R));
}
-void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker] &&
- !ChecksEnabled[CK_InnerPointerChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker] &&
+ !ChecksEnabled[CK_InnerPointerChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2270,13 +2288,14 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
}
}
-void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
+void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
bool Released, SymbolRef Sym,
SymbolRef PrevSym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2301,10 +2320,12 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
}
}
-void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
+void MallocChecker::HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
- if (!ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2325,13 +2346,13 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
}
}
-void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
- SourceRange Range,
- SymbolRef Sym) const {
+void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
@@ -2356,13 +2377,16 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
}
}
-void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range,
- const Expr *FreeExpr) const {
- if (!ChecksEnabled[CK_MallocChecker])
+void MallocChecker::HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *FreeExpr,
+ AllocationFamily Family) const {
+ if (!ChecksEnabled[CK_MallocChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, FreeExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2379,7 +2403,7 @@ void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
MR = ER->getSuperRegion();
Os << "Argument to ";
- if (!printAllocDeallocName(Os, C, FreeExpr))
+ if (!printMemFnName(Os, C, FreeExpr))
Os << "deallocator";
Os << " is a function pointer";
@@ -2392,14 +2416,15 @@ void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
}
}
-ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
- const CallExpr *CE,
- bool ShouldFreeOnFail,
- ProgramStateRef State,
- bool SuffixWithN) const {
+ProgramStateRef
+MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
+ bool ShouldFreeOnFail, ProgramStateRef State,
+ AllocationFamily Family, bool SuffixWithN) const {
if (!State)
return nullptr;
+ const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
+
if (SuffixWithN && CE->getNumArgs() < 3)
return nullptr;
else if (CE->getNumArgs() < 2)
@@ -2443,21 +2468,15 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
// If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
if (PrtIsNull && !SizeIsZero) {
- ProgramStateRef stateMalloc = MallocMemAux(C, CE, TotalSize,
- UndefinedVal(), StatePtrIsNull);
+ ProgramStateRef stateMalloc = MallocMemAux(
+ C, Call, TotalSize, UndefinedVal(), StatePtrIsNull, Family);
return stateMalloc;
}
if (PrtIsNull && SizeIsZero)
return State;
- // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
assert(!PrtIsNull);
- SymbolRef FromPtr = arg0Val.getAsSymbol();
- SVal RetVal = C.getSVal(CE);
- SymbolRef ToPtr = RetVal.getAsSymbol();
- if (!FromPtr || !ToPtr)
- return nullptr;
bool IsKnownToBeAllocated = false;
@@ -2467,16 +2486,16 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
// If size was equal to 0, either NULL or a pointer suitable to be passed
// to free() is returned. We just free the input pointer and do not add
// any constrains on the output pointer.
- if (ProgramStateRef stateFree =
- FreeMemAux(C, CE, StateSizeIsZero, 0, false, IsKnownToBeAllocated))
+ if (ProgramStateRef stateFree = FreeMemAux(
+ C, Call, StateSizeIsZero, 0, false, IsKnownToBeAllocated, Family))
return stateFree;
// Default behavior.
if (ProgramStateRef stateFree =
- FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocated)) {
+ FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocated, Family)) {
- ProgramStateRef stateRealloc = MallocMemAux(C, CE, TotalSize,
- UnknownVal(), stateFree);
+ ProgramStateRef stateRealloc =
+ MallocMemAux(C, Call, TotalSize, UnknownVal(), stateFree, Family);
if (!stateRealloc)
return nullptr;
@@ -2486,6 +2505,14 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
else if (!IsKnownToBeAllocated)
Kind = OAR_DoNotTrackAfterFailure;
+ // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+ SymbolRef FromPtr = arg0Val.getLocSymbolInBase();
+ SVal RetVal = C.getSVal(CE);
+ SymbolRef ToPtr = RetVal.getAsSymbol();
+ assert(FromPtr && ToPtr &&
+ "By this point, FreeMemAux and MallocMemAux should have checked "
+ "whether the argument or the return value is symbolic!");
+
// Record the info about the reallocated symbol so that we could properly
// process failed reallocation.
stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
@@ -2497,19 +2524,21 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
return nullptr;
}
-ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C,
+ const CallEvent &Call,
ProgramStateRef State) {
if (!State)
return nullptr;
- if (CE->getNumArgs() < 2)
+ if (Call.getNumArgs() < 2)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
+ SVal TotalSize =
+ evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
- return MallocMemAux(C, CE, TotalSize, zeroVal, State);
+ return MallocMemAux(C, Call, TotalSize, zeroVal, State, AF_Malloc);
}
MallocChecker::LeakInfo MallocChecker::getAllocationSite(const ExplodedNode *N,
@@ -2553,7 +2582,7 @@ MallocChecker::LeakInfo MallocChecker::getAllocationSite(const ExplodedNode *N,
return LeakInfo(AllocNode, ReferenceRegion);
}
-void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
CheckerContext &C) const {
if (!ChecksEnabled[CK_MallocChecker] &&
@@ -2669,7 +2698,7 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (N) {
for (SmallVectorImpl<SymbolRef>::iterator
I = Errors.begin(), E = Errors.end(); I != E; ++I) {
- reportLeak(*I, N, C);
+ HandleLeak(*I, N, C);
}
}
}
@@ -2680,7 +2709,27 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void MallocChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (const CXXDestructorCall *DC = dyn_cast<CXXDestructorCall>(&Call)) {
+ if (const auto *DC = dyn_cast<CXXDeallocatorCall>(&Call)) {
+ const CXXDeleteExpr *DE = DC->getOriginExpr();
+
+ if (!ChecksEnabled[CK_NewDeleteChecker])
+ if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
+ checkUseAfterFree(Sym, C, DE->getArgument());
+
+ if (!isStandardNewDelete(DC->getDecl()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocated;
+ State = FreeMemAux(C, DE->getArgument(), Call, State,
+ /*Hold*/ false, IsKnownToBeAllocated,
+ (DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew));
+
+ C.addTransition(State);
+ return;
+ }
+
+ if (const auto *DC = dyn_cast<CXXDestructorCall>(&Call)) {
SymbolRef Sym = DC->getCXXThisVal().getAsSymbol();
if (!Sym || checkDoubleDelete(Sym, C))
return;
@@ -2692,12 +2741,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
if (!FD)
return;
- ASTContext &Ctx = C.getASTContext();
- if (ChecksEnabled[CK_MallocChecker] &&
- (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Malloc,
- MemoryOperationKind::MOK_Free) ||
- MemFunctionInfo.isCMemFunction(FD, Ctx, AF_IfNameIndex,
- MemoryOperationKind::MOK_Free)))
+ if (ChecksEnabled[CK_MallocChecker] && isFreeingCall(Call))
return;
}
@@ -2807,8 +2851,8 @@ static bool isReleased(SymbolRef Sym, CheckerContext &C) {
}
bool MallocChecker::suppressDeallocationsInSuspiciousContexts(
- const CallExpr *CE, CheckerContext &C) const {
- if (CE->getNumArgs() == 0)
+ const CallEvent &Call, CheckerContext &C) const {
+ if (Call.getNumArgs() == 0)
return false;
StringRef FunctionStr = "";
@@ -2826,7 +2870,7 @@ bool MallocChecker::suppressDeallocationsInSuspiciousContexts(
ProgramStateRef State = C.getState();
- for (const Expr *Arg : CE->arguments())
+ for (const Expr *Arg : cast<CallExpr>(Call.getOriginExpr())->arguments())
if (SymbolRef Sym = C.getSVal(Arg).getAsSymbol())
if (const RefState *RS = State->get<RegionState>(Sym))
State = State->set<RegionState>(Sym, RefState::getEscaped(RS));
@@ -2839,7 +2883,7 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
if (isReleased(Sym, C)) {
- ReportUseAfterFree(C, S->getSourceRange(), Sym);
+ HandleUseAfterFree(C, S->getSourceRange(), Sym);
return true;
}
@@ -2852,17 +2896,17 @@ void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
if (const RefState *RS = C.getState()->get<RegionState>(Sym)) {
if (RS->isAllocatedOfSizeZero())
- ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
+ HandleUseZeroAlloc(C, RS->getStmt()->getSourceRange(), Sym);
}
else if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym)) {
- ReportUseZeroAllocated(C, S->getSourceRange(), Sym);
+ HandleUseZeroAlloc(C, S->getSourceRange(), Sym);
}
}
bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
if (isReleased(Sym, C)) {
- ReportDoubleDelete(C, Sym);
+ HandleDoubleDelete(C, Sym);
return true;
}
return false;
@@ -2994,11 +3038,9 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
if (!FD)
return true;
- ASTContext &ASTC = State->getStateManager().getContext();
-
// If it's one of the allocation functions we can reason about, we model
// its behavior explicitly.
- if (MemFunctionInfo.isMemFunction(FD, ASTC))
+ if (isMemCall(*Call))
return false;
// If it's not a system call, assume it frees memory.
@@ -3142,6 +3184,18 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(
return State;
}
+bool MallocChecker::isArgZERO_SIZE_PTR(ProgramStateRef State, CheckerContext &C,
+ SVal ArgVal) const {
+ if (!KernelZeroSizePtrValue)
+ KernelZeroSizePtrValue =
+ tryExpandAsInteger("ZERO_SIZE_PTR", C.getPreprocessor());
+
+ const llvm::APSInt *ArgValKnown =
+ C.getSValBuilder().getKnownValue(State, ArgVal);
+ return ArgValKnown && *KernelZeroSizePtrValue &&
+ ArgValKnown->getSExtValue() == **KernelZeroSizePtrValue;
+}
+
static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
ProgramStateRef prevState) {
ReallocPairsTy currMap = currState->get<ReallocPairs>();
@@ -3404,11 +3458,11 @@ void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
void ento::registerDynamicMemoryModeling(CheckerManager &mgr) {
auto *checker = mgr.registerChecker<MallocChecker>();
- checker->MemFunctionInfo.ShouldIncludeOwnershipAnnotatedFunctions =
+ checker->ShouldIncludeOwnershipAnnotatedFunctions =
mgr.getAnalyzerOptions().getCheckerBooleanOption(checker, "Optimistic");
}
-bool ento::shouldRegisterDynamicMemoryModeling(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicMemoryModeling(const CheckerManager &mgr) {
return true;
}
@@ -3420,7 +3474,7 @@ bool ento::shouldRegisterDynamicMemoryModeling(const LangOptions &LO) {
mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(MallocChecker)
REGISTER_CHECKER(NewDeleteChecker)
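The registration functions above follow an interface change applied throughout this diff: every shouldRegister* predicate now takes const CheckerManager & instead of const LangOptions &, and language gating goes through mgr.getLangOpts(). A minimal sketch of the new shape, using a hypothetical checker name that is not part of this patch:

    #include "clang/StaticAnalyzer/Core/CheckerManager.h"

    namespace clang {
    namespace ento {
    // Hypothetical predicate; the signature and the getLangOpts() access match
    // the pattern introduced across the checkers in this diff.
    bool shouldRegisterMyExampleChecker(const CheckerManager &mgr) {
      // Only enable the (hypothetical) checker for C++ translation units.
      return mgr.getLangOpts().CPlusPlus;
    }
    } // namespace ento
    } // namespace clang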
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index 4fd06f24c5bc..e31630f63b5a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -337,6 +337,6 @@ void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocOverflowSecurityChecker>();
}
-bool ento::shouldRegisterMallocOverflowSecurityChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMallocOverflowSecurityChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index b5881a9e6533..71f593cb2b56 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -250,6 +250,6 @@ void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocSizeofChecker>();
}
-bool ento::shouldRegisterMallocSizeofChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMallocSizeofChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
index 143910588959..1960873599f7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
@@ -28,7 +28,7 @@ using namespace iterator;
namespace {
class MismatchedIteratorChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<BinaryOperator>> {
std::unique_ptr<BugType> MismatchedBugType;
@@ -47,6 +47,7 @@ public:
MismatchedIteratorChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
};
@@ -141,7 +142,7 @@ void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
// Example:
// template<typename I1, typename I2>
// void f(I1 first1, I1 last1, I2 first2, I2 last2);
- //
+ //
// In this case the first two arguments to f() must be iterators that belong
// to the same container, and the last two must also belong to the same
// container, but not necessarily to the same one as the first two.
@@ -188,6 +189,17 @@ void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
}
}
+void MismatchedIteratorChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ if (!BO->isComparisonOp())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ verifyMatch(C, LVal, RVal);
+}
+
void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
const MemRegion *Cont) const {
// Verify match between a container and the container of an iterator
@@ -290,6 +302,6 @@ void ento::registerMismatchedIteratorChecker(CheckerManager &mgr) {
mgr.registerChecker<MismatchedIteratorChecker>();
}
-bool ento::shouldRegisterMismatchedIteratorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMismatchedIteratorChecker(const CheckerManager &mgr) {
return true;
}
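The new check::PreStmt<BinaryOperator> callback added above extends the checker from call arguments to direct iterator comparisons. A small, hypothetical illustration (not taken from the patch) of the code pattern it is meant to flag:

    #include <vector>

    void compare(std::vector<int> &a, std::vector<int> &b) {
      auto it1 = a.begin();
      auto it2 = b.begin();
      // Comparing iterators of two different containers; with the new
      // checkPreStmt callback, verifyMatch() is run on both operands.
      if (it1 == it2) {
        // ...
      }
    }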
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index ceea62160545..5d63d6efd234 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -88,6 +88,6 @@ void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
.getCheckerIntegerOption(Mwec, "MmapProtRead");
}
-bool ento::shouldRegisterMmapWriteExecChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMmapWriteExecChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 40eb113e3f8e..7f0519c695b0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -757,6 +757,6 @@ void ento::registerMoveChecker(CheckerManager &mgr) {
mgr.getAnalyzerOptions().getCheckerStringOption(chk, "WarnOn"), mgr);
}
-bool ento::shouldRegisterMoveChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMoveChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 41b7fe5e43b6..be17e401fb53 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -80,6 +80,7 @@ void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
-bool ento::shouldRegisterNSAutoreleasePoolChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::NonGC;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 85370bf133cd..90c5583d8969 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -95,6 +95,15 @@ public:
};
}
+static bool hasReservedReturnType(const FunctionDecl *D) {
+ if (isa<CXXConstructorDecl>(D))
+ return true;
+
+ // operators delete and delete[] are required to have 'void' return type
+ auto OperatorKind = D->getOverloadedOperator();
+ return OperatorKind == OO_Delete || OperatorKind == OO_Array_Delete;
+}
+
void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
AnalysisManager &mgr,
BugReporter &BR) const {
@@ -102,6 +111,8 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
return;
if (!D->getReturnType()->isVoidType())
return;
+ if (hasReservedReturnType(D))
+ return;
if (!II)
II = &D->getASTContext().Idents.get("CFErrorRef");
@@ -133,14 +144,14 @@ namespace {
class NSErrorDerefBug : public BugType {
public:
- NSErrorDerefBug(const CheckerBase *Checker)
+ NSErrorDerefBug(const CheckerNameRef Checker)
: BugType(Checker, "NSError** null dereference",
"Coding conventions (Apple)") {}
};
class CFErrorDerefBug : public BugType {
public:
- CFErrorDerefBug(const CheckerBase *Checker)
+ CFErrorDerefBug(const CheckerNameRef Checker)
: BugType(Checker, "CFErrorRef* null dereference",
"Coding conventions (Apple)") {}
};
@@ -155,9 +166,9 @@ class NSOrCFErrorDerefChecker
mutable std::unique_ptr<NSErrorDerefBug> NSBT;
mutable std::unique_ptr<CFErrorDerefBug> CFBT;
public:
- bool ShouldCheckNSError, ShouldCheckCFError;
- NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr),
- ShouldCheckNSError(0), ShouldCheckCFError(0) { }
+ DefaultBool ShouldCheckNSError, ShouldCheckCFError;
+ CheckerNameRef NSErrorName, CFErrorName;
+ NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr) {}
void checkLocation(SVal loc, bool isLoad, const Stmt *S,
CheckerContext &C) const;
@@ -265,12 +276,12 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
BugType *bug = nullptr;
if (isNSError) {
if (!NSBT)
- NSBT.reset(new NSErrorDerefBug(this));
+ NSBT.reset(new NSErrorDerefBug(NSErrorName));
bug = NSBT.get();
}
else {
if (!CFBT)
- CFBT.reset(new CFErrorDerefBug(this));
+ CFBT.reset(new CFErrorDerefBug(CFErrorName));
bug = CFBT.get();
}
BR.emitReport(
@@ -312,7 +323,7 @@ void ento::registerNSOrCFErrorDerefChecker(CheckerManager &mgr) {
mgr.registerChecker<NSOrCFErrorDerefChecker>();
}
-bool ento::shouldRegisterNSOrCFErrorDerefChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSOrCFErrorDerefChecker(const CheckerManager &mgr) {
return true;
}
@@ -320,9 +331,10 @@ void ento::registerNSErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<NSErrorMethodChecker>();
NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckNSError = true;
+ checker->NSErrorName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterNSErrorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSErrorChecker(const CheckerManager &mgr) {
return true;
}
@@ -330,8 +342,9 @@ void ento::registerCFErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<CFErrorFunctionChecker>();
NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckCFError = true;
+ checker->CFErrorName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterCFErrorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFErrorChecker(const CheckerManager &mgr) {
return true;
}
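The hasReservedReturnType() guard added above covers declarations whose 'void' (or absent) return type is mandated by the language rather than chosen by the author. A hypothetical sketch of the two cases it skips; the typedef is only a stand-in so the snippet is self-contained:

    // Stand-in for the CoreFoundation typedef.
    typedef struct __CFError *CFErrorRef;

    struct Widget {
      // Constructor: has no return type by definition.
      Widget(CFErrorRef *error);
      // Placement operator delete: required to return 'void'.
      void operator delete(void *ptr, CFErrorRef *error);
    };

Both declarations take a CFErrorRef* parameter, yet neither is a candidate for returning an error through the return value, so the checker now leaves them alone.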
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index fc34255bf6c9..af208e867318 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -143,6 +143,6 @@ void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<NoReturnFunctionChecker>();
}
-bool ento::shouldRegisterNoReturnFunctionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNoReturnFunctionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 6ffc89745365..534b5d68434f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -14,57 +14,97 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
+#include "clang/Analysis/AnyCall.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
namespace {
class NonNullParamChecker
- : public Checker< check::PreCall, EventDispatcher<ImplicitNullDerefEvent> > {
+ : public Checker<check::PreCall, check::BeginFunction,
+ EventDispatcher<ImplicitNullDerefEvent>> {
mutable std::unique_ptr<BugType> BTAttrNonNull;
mutable std::unique_ptr<BugType> BTNullRefArg;
public:
-
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkBeginFunction(CheckerContext &C) const;
std::unique_ptr<PathSensitiveBugReport>
- genReportNullAttrNonNull(const ExplodedNode *ErrorN,
- const Expr *ArgE,
+ genReportNullAttrNonNull(const ExplodedNode *ErrorN, const Expr *ArgE,
unsigned IdxOfArg) const;
std::unique_ptr<PathSensitiveBugReport>
genReportReferenceToNullPointer(const ExplodedNode *ErrorN,
const Expr *ArgE) const;
};
-} // end anonymous namespace
-/// \return Bitvector marking non-null attributes.
-static llvm::SmallBitVector getNonNullAttrs(const CallEvent &Call) {
+template <class CallType>
+void setBitsAccordingToFunctionAttributes(const CallType &Call,
+ llvm::SmallBitVector &AttrNonNull) {
const Decl *FD = Call.getDecl();
- unsigned NumArgs = Call.getNumArgs();
- llvm::SmallBitVector AttrNonNull(NumArgs);
+
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
if (!NonNull->args_size()) {
- AttrNonNull.set(0, NumArgs);
+ // Lack of attribute parameters means that all of the parameters are
+ // implicitly marked as non-null.
+ AttrNonNull.set();
break;
}
+
for (const ParamIdx &Idx : NonNull->args()) {
+ // 'nonnull' attribute's parameters are 1-based and should be adjusted to
+ // match actual AST parameter/argument indices.
unsigned IdxAST = Idx.getASTIndex();
- if (IdxAST >= NumArgs)
+ if (IdxAST >= AttrNonNull.size())
continue;
AttrNonNull.set(IdxAST);
}
}
+}
+
+template <class CallType>
+void setBitsAccordingToParameterAttributes(const CallType &Call,
+ llvm::SmallBitVector &AttrNonNull) {
+ for (const ParmVarDecl *Parameter : Call.parameters()) {
+ unsigned ParameterIndex = Parameter->getFunctionScopeIndex();
+ if (ParameterIndex == AttrNonNull.size())
+ break;
+
+ if (Parameter->hasAttr<NonNullAttr>())
+ AttrNonNull.set(ParameterIndex);
+ }
+}
+
+template <class CallType>
+llvm::SmallBitVector getNonNullAttrsImpl(const CallType &Call,
+ unsigned ExpectedSize) {
+ llvm::SmallBitVector AttrNonNull(ExpectedSize);
+
+ setBitsAccordingToFunctionAttributes(Call, AttrNonNull);
+ setBitsAccordingToParameterAttributes(Call, AttrNonNull);
+
return AttrNonNull;
}
+/// \return Bitvector marking non-null attributes.
+llvm::SmallBitVector getNonNullAttrs(const CallEvent &Call) {
+ return getNonNullAttrsImpl(Call, Call.getNumArgs());
+}
+
+/// \return Bitvector marking non-null attributes.
+llvm::SmallBitVector getNonNullAttrs(const AnyCall &Call) {
+ return getNonNullAttrsImpl(Call, Call.param_size());
+}
+} // end anonymous namespace
+
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (!Call.getDecl())
@@ -74,7 +114,7 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
unsigned NumArgs = Call.getNumArgs();
ProgramStateRef state = C.getState();
- ArrayRef<ParmVarDecl*> parms = Call.parameters();
+ ArrayRef<ParmVarDecl *> parms = Call.parameters();
for (unsigned idx = 0; idx < NumArgs; ++idx) {
// For vararg functions, a corresponding parameter decl may not exist.
@@ -82,15 +122,11 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
// Check if the parameter is a reference. We want to report when a reference
// to a null pointer is passed as a parameter.
- bool haveRefTypeParam =
+ bool HasRefTypeParam =
HasParam ? parms[idx]->getType()->isReferenceType() : false;
- bool haveAttrNonNull = AttrNonNull[idx];
+ bool ExpectedToBeNonNull = AttrNonNull.test(idx);
- // Check if the parameter is also marked 'nonnull'.
- if (!haveAttrNonNull && HasParam)
- haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
-
- if (!haveAttrNonNull && !haveRefTypeParam)
+ if (!ExpectedToBeNonNull && !HasRefTypeParam)
continue;
// If the value is unknown or undefined, we can't perform this check.
@@ -100,10 +136,10 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!DV)
continue;
- assert(!haveRefTypeParam || DV->getAs<Loc>());
+ assert(!HasRefTypeParam || DV->getAs<Loc>());
// Process the case when the argument is not a location.
- if (haveAttrNonNull && !DV->getAs<Loc>()) {
+ if (ExpectedToBeNonNull && !DV->getAs<Loc>()) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
@@ -144,9 +180,9 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
std::unique_ptr<BugReport> R;
- if (haveAttrNonNull)
+ if (ExpectedToBeNonNull)
R = genReportNullAttrNonNull(errorNode, ArgE, idx + 1);
- else if (haveRefTypeParam)
+ else if (HasRefTypeParam)
R = genReportReferenceToNullPointer(errorNode, ArgE);
// Highlight the range of the argument that was null.
@@ -163,8 +199,8 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (stateNull) {
if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
ImplicitNullDerefEvent event = {
- V, false, N, &C.getBugReporter(),
- /*IsDirectDereference=*/haveRefTypeParam};
+ V, false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/HasRefTypeParam};
dispatchEvent(event);
}
}
@@ -179,6 +215,65 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
C.addTransition(state);
}
+/// We want to trust developer annotations and consider all 'nonnull' parameters
+/// as non-null indeed. Each marked parameter will get a corresponding
+/// constraint.
+///
+/// This approach will not only help us to get rid of some false positives, but
+/// remove duplicates and shorten warning traces as well.
+///
+/// \code
+/// void foo(int *x) [[gnu::nonnull]] {
+/// // . . .
+/// *x = 42; // we don't want to consider this as an error...
+/// // . . .
+/// }
+///
+/// foo(nullptr); // ...and report here instead
+/// \endcode
+void NonNullParamChecker::checkBeginFunction(CheckerContext &Context) const {
+ // Planned assumption makes sense only for top-level functions.
+ // Inlined functions will get similar constraints as part of 'checkPreCall'.
+ if (!Context.inTopFrame())
+ return;
+
+ const LocationContext *LocContext = Context.getLocationContext();
+
+ const Decl *FD = LocContext->getDecl();
+ // AnyCall helps us here to avoid checking for FunctionDecl and ObjCMethodDecl
+ // separately and aggregates interfaces of these classes.
+ auto AbstractCall = AnyCall::forDecl(FD);
+ if (!AbstractCall)
+ return;
+
+ ProgramStateRef State = Context.getState();
+ llvm::SmallBitVector ParameterNonNullMarks = getNonNullAttrs(*AbstractCall);
+
+ for (const ParmVarDecl *Parameter : AbstractCall->parameters()) {
+ // 1. Check parameter if it is annotated as non-null
+ if (!ParameterNonNullMarks.test(Parameter->getFunctionScopeIndex()))
+ continue;
+
+ // 2. Check that parameter is a pointer.
+ // Nonnull attribute can be applied to non-pointers (by default
+    // __attribute__((nonnull)) implies "all parameters").
+ if (!Parameter->getType()->isPointerType())
+ continue;
+
+ Loc ParameterLoc = State->getLValue(Parameter, LocContext);
+ // We never consider top-level function parameters undefined.
+ auto StoredVal =
+ State->getSVal(ParameterLoc).castAs<DefinedOrUnknownSVal>();
+
+ // 3. Assume that it is indeed non-null
+ if (ProgramStateRef NewState = State->assume(StoredVal, true)) {
+ State = NewState;
+ }
+ }
+
+ Context.addTransition(State);
+}
+
std::unique_ptr<PathSensitiveBugReport>
NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
const Expr *ArgE,
@@ -226,6 +321,6 @@ void ento::registerNonNullParamChecker(CheckerManager &mgr) {
mgr.registerChecker<NonNullParamChecker>();
}
-bool ento::shouldRegisterNonNullParamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonNullParamChecker(const CheckerManager &mgr) {
return true;
}
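As a hedged illustration of the attribute forms the helpers above interpret (hypothetical declarations, not part of this diff): an index-less 'nonnull' marks every parameter, an explicit list uses 1-based indices, and with the new checkBeginFunction callback a top-level definition has its annotated pointer parameters assumed non-null on entry:

    // Hypothetical declarations, illustration only.
    __attribute__((nonnull)) void copy_all(char *dst, const char *src);
    // Only the second parameter ('src') is marked; attribute indices are 1-based.
    __attribute__((nonnull(2))) void copy_from(char *dst, const char *src);

    // When analyzed as the top frame, 'p' is assumed non-null on entry, so the
    // store below is not reported; a null argument is reported at the call site.
    __attribute__((nonnull)) void store(int *p) { *p = 42; }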
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 6efba433eed2..80b705fb7392 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -147,6 +147,6 @@ void ento::registerNonnullGlobalConstantsChecker(CheckerManager &Mgr) {
Mgr.registerChecker<NonnullGlobalConstantsChecker>();
}
-bool ento::shouldRegisterNonnullGlobalConstantsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonnullGlobalConstantsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 922048733c7c..bc7a8a3b12a1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -81,8 +81,7 @@ class NullabilityChecker
: public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
check::PostCall, check::PostStmt<ExplicitCastExpr>,
check::PostObjCMessage, check::DeadSymbols,
- check::Event<ImplicitNullDerefEvent>> {
- mutable std::unique_ptr<BugType> BT;
+ check::Location, check::Event<ImplicitNullDerefEvent>> {
public:
// If true, the checker will not diagnose nullability issues for calls
@@ -101,25 +100,32 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
void checkEvent(ImplicitNullDerefEvent Event) const;
+ void checkLocation(SVal Location, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
- struct NullabilityChecksFilter {
- DefaultBool CheckNullPassedToNonnull;
- DefaultBool CheckNullReturnedFromNonnull;
- DefaultBool CheckNullableDereferenced;
- DefaultBool CheckNullablePassedToNonnull;
- DefaultBool CheckNullableReturnedFromNonnull;
-
- CheckerNameRef CheckNameNullPassedToNonnull;
- CheckerNameRef CheckNameNullReturnedFromNonnull;
- CheckerNameRef CheckNameNullableDereferenced;
- CheckerNameRef CheckNameNullablePassedToNonnull;
- CheckerNameRef CheckNameNullableReturnedFromNonnull;
+ enum CheckKind {
+ CK_NullPassedToNonnull,
+ CK_NullReturnedFromNonnull,
+ CK_NullableDereferenced,
+ CK_NullablePassedToNonnull,
+ CK_NullableReturnedFromNonnull,
+ CK_NumCheckKinds
};
- NullabilityChecksFilter Filter;
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BTs[CK_NumCheckKinds];
+
+ const std::unique_ptr<BugType> &getBugType(CheckKind Kind) const {
+ if (!BTs[Kind])
+ BTs[Kind].reset(new BugType(CheckNames[Kind], "Nullability",
+ categories::MemoryError));
+ return BTs[Kind];
+ }
+
// When set to false no nullability information will be tracked in
// NullabilityMap. It is possible to catch errors like passing a null pointer
// to a callee that expects nonnull argument without the information that is
@@ -151,18 +157,16 @@ private:
///
/// When \p SuppressPath is set to true, no more bugs will be reported on this
/// path by this checker.
- void reportBugIfInvariantHolds(StringRef Msg, ErrorKind Error,
+ void reportBugIfInvariantHolds(StringRef Msg, ErrorKind Error, CheckKind CK,
ExplodedNode *N, const MemRegion *Region,
CheckerContext &C,
const Stmt *ValueExpr = nullptr,
- bool SuppressPath = false) const;
+ bool SuppressPath = false) const;
- void reportBug(StringRef Msg, ErrorKind Error, ExplodedNode *N,
+ void reportBug(StringRef Msg, ErrorKind Error, CheckKind CK, ExplodedNode *N,
const MemRegion *Region, BugReporter &BR,
const Stmt *ValueExpr = nullptr) const {
- if (!BT)
- BT.reset(new BugType(this, "Nullability", categories::MemoryError));
-
+ const std::unique_ptr<BugType> &BT = getBugType(CK);
auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
if (Region) {
R->markInteresting(Region);
@@ -430,9 +434,10 @@ static bool checkInvariantViolation(ProgramStateRef State, ExplodedNode *N,
return false;
}
-void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
- ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
- CheckerContext &C, const Stmt *ValueExpr, bool SuppressPath) const {
+void NullabilityChecker::reportBugIfInvariantHolds(
+ StringRef Msg, ErrorKind Error, CheckKind CK, ExplodedNode *N,
+ const MemRegion *Region, CheckerContext &C, const Stmt *ValueExpr,
+ bool SuppressPath) const {
ProgramStateRef OriginalState = N->getState();
if (checkInvariantViolation(OriginalState, N, C))
@@ -442,7 +447,7 @@ void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
N = C.addTransition(OriginalState, N);
}
- reportBug(Msg, Error, N, Region, C.getBugReporter(), ValueExpr);
+ reportBug(Msg, Error, CK, N, Region, C.getBugReporter(), ValueExpr);
}
/// Cleaning up the program state.
@@ -487,34 +492,76 @@ void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
if (!TrackedNullability)
return;
- if (Filter.CheckNullableDereferenced &&
+ if (ChecksEnabled[CK_NullableDereferenced] &&
TrackedNullability->getValue() == Nullability::Nullable) {
BugReporter &BR = *Event.BR;
// Do not suppress errors on defensive code paths, because dereferencing
// a nullable pointer is always an error.
if (Event.IsDirectDereference)
reportBug("Nullable pointer is dereferenced",
- ErrorKind::NullableDereferenced, Event.SinkNode, Region, BR);
+ ErrorKind::NullableDereferenced, CK_NullableDereferenced,
+ Event.SinkNode, Region, BR);
else {
reportBug("Nullable pointer is passed to a callee that requires a "
- "non-null", ErrorKind::NullablePassedToNonnull,
+ "non-null",
+ ErrorKind::NullablePassedToNonnull, CK_NullableDereferenced,
Event.SinkNode, Region, BR);
}
}
}
+// Whenever we see a load from a typed memory region that's been annotated as
+// 'nonnull', we want to trust the user on that and assume that it is indeed
+// non-null.
+//
+// We do so even if the value is known to have been assigned to null.
+// The user should be warned on assigning the null value to a non-null pointer
+// as opposed to warning on the later dereference of this pointer.
+//
+// \code
+// int * _Nonnull var = 0; // we want to warn the user here...
+// // . . .
+// *var = 42; // ...and not here
+// \endcode
+void NullabilityChecker::checkLocation(SVal Location, bool IsLoad,
+ const Stmt *S,
+ CheckerContext &Context) const {
+ // We should care only about loads.
+ // The main idea is to add a constraint whenever we're loading a value from
+ // an annotated pointer type.
+ if (!IsLoad)
+ return;
+
+ // Annotations that we want to consider make sense only for types.
+ const auto *Region =
+ dyn_cast_or_null<TypedValueRegion>(Location.getAsRegion());
+ if (!Region)
+ return;
+
+ ProgramStateRef State = Context.getState();
+
+ auto StoredVal = State->getSVal(Region).getAs<loc::MemRegionVal>();
+ if (!StoredVal)
+ return;
+
+ Nullability NullabilityOfTheLoadedValue =
+ getNullabilityAnnotation(Region->getValueType());
+
+ if (NullabilityOfTheLoadedValue == Nullability::Nonnull) {
+ // It doesn't matter what we think about this particular pointer, it should
+ // be considered non-null as annotated by the developer.
+ if (ProgramStateRef NewState = State->assume(*StoredVal, true)) {
+ Context.addTransition(NewState);
+ }
+ }
+}
+
/// Find the outermost subexpression of E that is not an implicit cast.
/// This looks through the implicit casts to _Nonnull that ARC adds to
/// return expressions of ObjC types when the return type of the function or
/// method is non-null but the expression is not.
static const Expr *lookThroughImplicitCasts(const Expr *E) {
- assert(E);
-
- while (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- E = ICE->getSubExpr();
- }
-
- return E;
+ return E->IgnoreImpCasts();
}
/// This method check when nullable pointer or null value is returned from a
@@ -572,11 +619,9 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
bool NullReturnedFromNonNull = (RequiredNullability == Nullability::Nonnull &&
Nullness == NullConstraint::IsNull);
- if (Filter.CheckNullReturnedFromNonnull &&
- NullReturnedFromNonNull &&
+ if (ChecksEnabled[CK_NullReturnedFromNonnull] && NullReturnedFromNonNull &&
RetExprTypeLevelNullability != Nullability::Nonnull &&
- !InSuppressedMethodFamily &&
- C.getLocationContext()->inTopFrame()) {
+ !InSuppressedMethodFamily && C.getLocationContext()->inTopFrame()) {
static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
ExplodedNode *N = C.generateErrorNode(State, &Tag);
if (!N)
@@ -587,8 +632,8 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
OS << (RetExpr->getType()->isObjCObjectPointerType() ? "nil" : "Null");
OS << " returned from a " << C.getDeclDescription(D) <<
" that is expected to return a non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NilReturnedToNonnull, N, nullptr, C,
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilReturnedToNonnull,
+ CK_NullReturnedFromNonnull, N, nullptr, C,
RetExpr);
return;
}
@@ -609,7 +654,7 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
State->get<NullabilityMap>(Region);
if (TrackedNullability) {
Nullability TrackedNullabValue = TrackedNullability->getValue();
- if (Filter.CheckNullableReturnedFromNonnull &&
+ if (ChecksEnabled[CK_NullableReturnedFromNonnull] &&
Nullness != NullConstraint::IsNotNull &&
TrackedNullabValue == Nullability::Nullable &&
RequiredNullability == Nullability::Nonnull) {
@@ -621,9 +666,8 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
OS << "Nullable pointer is returned from a " << C.getDeclDescription(D) <<
" that is expected to return a non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NullableReturnedToNonnull, N,
- Region, C);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NullableReturnedToNonnull,
+ CK_NullableReturnedFromNonnull, N, Region, C);
}
return;
}
@@ -674,7 +718,8 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
unsigned ParamIdx = Param->getFunctionScopeIndex() + 1;
- if (Filter.CheckNullPassedToNonnull && Nullness == NullConstraint::IsNull &&
+ if (ChecksEnabled[CK_NullPassedToNonnull] &&
+ Nullness == NullConstraint::IsNull &&
ArgExprTypeLevelNullability != Nullability::Nonnull &&
RequiredNullability == Nullability::Nonnull &&
isDiagnosableCall(Call)) {
@@ -687,9 +732,9 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
OS << (Param->getType()->isObjCObjectPointerType() ? "nil" : "Null");
OS << " passed to a callee that requires a non-null " << ParamIdx
<< llvm::getOrdinalSuffix(ParamIdx) << " parameter";
- reportBugIfInvariantHolds(OS.str(), ErrorKind::NilPassedToNonnull, N,
- nullptr, C,
- ArgExpr, /*SuppressPath=*/false);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilPassedToNonnull,
+ CK_NullPassedToNonnull, N, nullptr, C, ArgExpr,
+ /*SuppressPath=*/false);
return;
}
@@ -705,7 +750,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
TrackedNullability->getValue() != Nullability::Nullable)
continue;
- if (Filter.CheckNullablePassedToNonnull &&
+ if (ChecksEnabled[CK_NullablePassedToNonnull] &&
RequiredNullability == Nullability::Nonnull &&
isDiagnosableCall(Call)) {
ExplodedNode *N = C.addTransition(State);
@@ -713,17 +758,18 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
llvm::raw_svector_ostream OS(SBuf);
OS << "Nullable pointer is passed to a callee that requires a non-null "
<< ParamIdx << llvm::getOrdinalSuffix(ParamIdx) << " parameter";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NullablePassedToNonnull, N,
- Region, C, ArgExpr, /*SuppressPath=*/true);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NullablePassedToNonnull,
+ CK_NullablePassedToNonnull, N, Region, C,
+ ArgExpr, /*SuppressPath=*/true);
return;
}
- if (Filter.CheckNullableDereferenced &&
+ if (ChecksEnabled[CK_NullableDereferenced] &&
Param->getType()->isReferenceType()) {
ExplodedNode *N = C.addTransition(State);
reportBugIfInvariantHolds("Nullable pointer is dereferenced",
- ErrorKind::NullableDereferenced, N, Region,
- C, ArgExpr, /*SuppressPath=*/true);
+ ErrorKind::NullableDereferenced,
+ CK_NullableDereferenced, N, Region, C,
+ ArgExpr, /*SuppressPath=*/true);
return;
}
continue;
@@ -1083,8 +1129,7 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
bool NullAssignedToNonNull = (LocNullability == Nullability::Nonnull &&
RhsNullness == NullConstraint::IsNull);
- if (Filter.CheckNullPassedToNonnull &&
- NullAssignedToNonNull &&
+ if (ChecksEnabled[CK_NullPassedToNonnull] && NullAssignedToNonNull &&
ValNullability != Nullability::Nonnull &&
ValueExprTypeLevelNullability != Nullability::Nonnull &&
!isARCNilInitializedLocal(C, S)) {
@@ -1102,9 +1147,8 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
llvm::raw_svector_ostream OS(SBuf);
OS << (LocType->isObjCObjectPointerType() ? "nil" : "Null");
OS << " assigned to a pointer which is expected to have non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NilAssignedToNonnull, N, nullptr, C,
- ValueStmt);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilAssignedToNonnull,
+ CK_NullPassedToNonnull, N, nullptr, C, ValueStmt);
return;
}
@@ -1130,14 +1174,14 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (RhsNullness == NullConstraint::IsNotNull ||
TrackedNullability->getValue() != Nullability::Nullable)
return;
- if (Filter.CheckNullablePassedToNonnull &&
+ if (ChecksEnabled[CK_NullablePassedToNonnull] &&
LocNullability == Nullability::Nonnull) {
static CheckerProgramPointTag Tag(this, "NullablePassedToNonnull");
ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
reportBugIfInvariantHolds("Nullable pointer is assigned to a pointer "
"which is expected to have non-null value",
- ErrorKind::NullableAssignedToNonnull, N,
- ValueRegion, C);
+ ErrorKind::NullableAssignedToNonnull,
+ CK_NullablePassedToNonnull, N, ValueRegion, C);
}
return;
}
@@ -1188,15 +1232,16 @@ void ento::registerNullabilityBase(CheckerManager &mgr) {
mgr.registerChecker<NullabilityChecker>();
}
-bool ento::shouldRegisterNullabilityBase(const LangOptions &LO) {
+bool ento::shouldRegisterNullabilityBase(const CheckerManager &mgr) {
return true;
}
#define REGISTER_CHECKER(name, trackingRequired) \
void ento::register##name##Checker(CheckerManager &mgr) { \
NullabilityChecker *checker = mgr.getChecker<NullabilityChecker>(); \
- checker->Filter.Check##name = true; \
- checker->Filter.CheckName##name = mgr.getCurrentCheckerName(); \
+ checker->ChecksEnabled[NullabilityChecker::CK_##name] = true; \
+ checker->CheckNames[NullabilityChecker::CK_##name] = \
+ mgr.getCurrentCheckerName(); \
checker->NeedTracking = checker->NeedTracking || trackingRequired; \
checker->NoDiagnoseCallsToSystemHeaders = \
checker->NoDiagnoseCallsToSystemHeaders || \
@@ -1204,7 +1249,7 @@ bool ento::shouldRegisterNullabilityBase(const LangOptions &LO) {
checker, "NoDiagnoseCallsToSystemHeaders", true); \
} \
\
- bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ bool ento::shouldRegister##name##Checker(const CheckerManager &mgr) { \
return true; \
}
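The checkLocation callback added earlier in this file trusts _Nonnull annotations on loads. A minimal hypothetical example, mirroring the comment in the patch and assuming Clang's nullability extension, of where the diagnostic is expected to land:

    void demo() {
      int *_Nonnull p = 0; // the null assignment is what should be reported
      *p = 42;             // the load of 'p' is assumed non-null: no report here
    }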
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index 1053424ae6fa..df01cc760e7e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -57,7 +57,7 @@ public:
Callback(const NumberObjectConversionChecker *C,
BugReporter &BR, AnalysisDeclContext *ADC)
: C(C), BR(BR), ADC(ADC) {}
- virtual void run(const MatchFinder::MatchResult &Result);
+ void run(const MatchFinder::MatchResult &Result) override;
};
} // end of anonymous namespace
@@ -338,7 +338,7 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
MatchFinder F;
Callback CB(this, BR, AM.getAnalysisDeclContext(D));
- F.addMatcher(stmt(forEachDescendant(FinalM)), &CB);
+ F.addMatcher(traverse(TK_AsIs, stmt(forEachDescendant(FinalM))), &CB);
F.match(*D->getBody(), AM.getASTContext());
}
@@ -349,6 +349,6 @@ void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
Mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk, "Pedantic");
}
-bool ento::shouldRegisterNumberObjectConversionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNumberObjectConversionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
index 5b9895c338d8..53ed0e187a4c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
@@ -55,8 +55,7 @@ static void emitDiagnostics(const BoundNodes &Nodes,
CE->getSourceRange());
}
-static auto hasTypePointingTo(DeclarationMatcher DeclM)
- -> decltype(hasType(pointerType())) {
+static decltype(auto) hasTypePointingTo(DeclarationMatcher DeclM) {
return hasType(pointerType(pointee(hasDeclaration(DeclM))));
}
@@ -85,6 +84,6 @@ void ento::registerOSObjectCStyleCast(CheckerManager &Mgr) {
Mgr.registerChecker<OSObjectCStyleCastChecker>();
}
-bool ento::shouldRegisterOSObjectCStyleCast(const LangOptions &LO) {
+bool ento::shouldRegisterOSObjectCStyleCast(const CheckerManager &mgr) {
return true;
}
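The helper above was simplified from a trailing-return-type declaration to decltype(auto). A standalone sketch of the language feature involved, unrelated to the analyzer itself:

    #include <vector>

    // Old spelling: the expression is repeated in a trailing return type.
    static auto frontOld(const std::vector<int> &V) -> decltype(V.front()) {
      return V.front();
    }

    // New spelling: decltype(auto) deduces the same 'const int &' from the body.
    static decltype(auto) frontNew(const std::vector<int> &V) { return V.front(); }

    int main() {
      std::vector<int> V{1, 2, 3};
      return frontOld(V) + frontNew(V) - 2; // returns 0
    }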
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 0e25817c8793..43af4bb14286 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -91,6 +91,7 @@ void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCAtSyncChecker>();
}
-bool ento::shouldRegisterObjCAtSyncChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCAtSyncChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.ObjC;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index d2371fe60d21..7fd6e2abef4c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -30,6 +30,7 @@
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/Twine.h"
@@ -44,6 +45,7 @@ const char *ProblematicWriteBind = "problematicwrite";
const char *CapturedBind = "capturedbind";
const char *ParamBind = "parambind";
const char *IsMethodBind = "ismethodbind";
+const char *IsARPBind = "isautoreleasepoolbind";
class ObjCAutoreleaseWriteChecker : public Checker<check::ASTCodeBody> {
public:
@@ -100,8 +102,7 @@ static inline std::vector<llvm::StringRef> toRefs(std::vector<std::string> V) {
return std::vector<llvm::StringRef>(V.begin(), V.end());
}
-static auto callsNames(std::vector<std::string> FunctionNames)
- -> decltype(callee(functionDecl())) {
+static decltype(auto) callsNames(std::vector<std::string> FunctionNames) {
return callee(functionDecl(hasAnyName(toRefs(FunctionNames))));
}
@@ -129,21 +130,39 @@ static void emitDiagnostics(BoundNodes &Match, const Decl *D, BugReporter &BR,
SourceRange Range = MarkedStmt->getSourceRange();
PathDiagnosticLocation Location = PathDiagnosticLocation::createBegin(
MarkedStmt, BR.getSourceManager(), ADC);
+
bool IsMethod = Match.getNodeAs<ObjCMethodDecl>(IsMethodBind) != nullptr;
- const char *Name = IsMethod ? "method" : "function";
-
- BR.EmitBasicReport(
- ADC->getDecl(), Checker,
- /*Name=*/(llvm::Twine(ActionMsg)
- + " autoreleasing out parameter inside autorelease pool").str(),
- /*BugCategory=*/"Memory",
- (llvm::Twine(ActionMsg) + " autoreleasing out parameter " +
- (IsCapture ? "'" + PVD->getName() + "'" + " " : "") + "inside " +
- "autorelease pool that may exit before " + Name + " returns; consider "
- "writing first to a strong local variable declared outside of the block")
- .str(),
- Location,
- Range);
+ const char *FunctionDescription = IsMethod ? "method" : "function";
+ bool IsARP = Match.getNodeAs<ObjCAutoreleasePoolStmt>(IsARPBind) != nullptr;
+
+ llvm::SmallString<128> BugNameBuf;
+ llvm::raw_svector_ostream BugName(BugNameBuf);
+ BugName << ActionMsg
+ << " autoreleasing out parameter inside autorelease pool";
+
+ llvm::SmallString<128> BugMessageBuf;
+ llvm::raw_svector_ostream BugMessage(BugMessageBuf);
+ BugMessage << ActionMsg << " autoreleasing out parameter ";
+ if (IsCapture)
+ BugMessage << "'" + PVD->getName() + "' ";
+
+ BugMessage << "inside ";
+ if (IsARP)
+ BugMessage << "locally-scoped autorelease pool;";
+ else
+ BugMessage << "autorelease pool that may exit before "
+ << FunctionDescription << " returns;";
+
+ BugMessage << " consider writing first to a strong local variable"
+ " declared outside ";
+ if (IsARP)
+ BugMessage << "of the autorelease pool";
+ else
+ BugMessage << "of the block";
+
+ BR.EmitBasicReport(ADC->getDecl(), Checker, BugName.str(),
+ categories::MemoryRefCount, BugMessage.str(), Location,
+ Range);
}
void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
@@ -189,9 +208,16 @@ void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
WritesOrCapturesInBlockM))
));
- auto HasParamAndWritesInMarkedFuncM = allOf(
- hasAnyParameter(DoublePointerParamM),
- forEachDescendant(BlockPassedToMarkedFuncM));
+ // WritesIntoM happens inside an explicit @autoreleasepool.
+ auto WritesOrCapturesInPoolM =
+ autoreleasePoolStmt(
+ forEachDescendant(stmt(anyOf(WritesIntoM, CapturedInParamM))))
+ .bind(IsARPBind);
+
+ auto HasParamAndWritesInMarkedFuncM =
+ allOf(hasAnyParameter(DoublePointerParamM),
+ anyOf(forEachDescendant(BlockPassedToMarkedFuncM),
+ forEachDescendant(WritesOrCapturesInPoolM)));
auto MatcherM = decl(anyOf(
objcMethodDecl(HasParamAndWritesInMarkedFuncM).bind(IsMethodBind),
@@ -207,6 +233,6 @@ void ento::registerAutoreleaseWriteChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCAutoreleaseWriteChecker>();
}
-bool ento::shouldRegisterAutoreleaseWriteChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAutoreleaseWriteChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 4450c464f89d..8428b2294ba6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -172,6 +172,6 @@ void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersASTChecker>();
}
-bool ento::shouldRegisterObjCContainersASTChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCContainersASTChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 8abb926d4862..8c2008a7ceb4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -58,7 +58,7 @@ public:
PointerEscapeKind Kind) const;
void printState(raw_ostream &OS, ProgramStateRef State,
- const char *NL, const char *Sep) const;
+ const char *NL, const char *Sep) const override;
};
} // end anonymous namespace
@@ -188,6 +188,6 @@ void ento::registerObjCContainersChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersChecker>();
}
-bool ento::shouldRegisterObjCContainersChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCContainersChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 1870c08432de..24e2a4dea922 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -221,7 +221,7 @@ void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperCallChecker>();
}
-bool ento::shouldRegisterObjCSuperCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSuperCallChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
index 9a49200545e3..4636fd160511 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -79,6 +79,6 @@ void ento::registerObjCPropertyChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCPropertyChecker>();
}
-bool ento::shouldRegisterObjCPropertyChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCPropertyChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 344285750f0e..17d3c042ac40 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -437,6 +437,6 @@ void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCSelfInitChecker>();
}
-bool ento::shouldRegisterObjCSelfInitChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSelfInitChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 0575be845374..3547b7bb61a2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -116,13 +116,14 @@ void ObjCSuperDeallocChecker::checkPostObjCMessage(const ObjCMethodCall &M,
return;
ProgramStateRef State = C.getState();
- SymbolRef ReceiverSymbol = M.getSelfSVal().getAsSymbol();
- assert(ReceiverSymbol && "No receiver symbol at call to [super dealloc]?");
+ const LocationContext *LC = C.getLocationContext();
+ SymbolRef SelfSymbol = State->getSelfSVal(LC).getAsSymbol();
+ assert(SelfSymbol && "No receiver symbol at call to [super dealloc]?");
// We add this transition in checkPostObjCMessage to avoid warning when
// we inline a call to [super dealloc] where the inlined call itself
// calls [super dealloc].
- State = State->add<CalledSuperDealloc>(ReceiverSymbol);
+ State = State->add<CalledSuperDealloc>(SelfSymbol);
C.addTransition(State);
}
@@ -284,6 +285,6 @@ void ento::registerObjCSuperDeallocChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperDeallocChecker>();
}
-bool ento::shouldRegisterObjCSuperDeallocChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSuperDeallocChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index cb4770451572..c9828c36a06a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -186,6 +186,6 @@ void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCUnusedIvarsChecker>();
}
-bool ento::shouldRegisterObjCUnusedIvarsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCUnusedIvarsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 4a3c2b8cd40e..0b00664c7c10 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -353,6 +353,6 @@ void ento::registerPaddingChecker(CheckerManager &Mgr) {
Checker, "AllowedPad", "a non-negative value");
}
-bool ento::shouldRegisterPaddingChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPaddingChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 259f23abdc95..d3e2849a0ce6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -343,6 +343,6 @@ void ento::registerPointerArithChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerArithChecker>();
}
-bool ento::shouldRegisterPointerArithChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerArithChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
index 307e59b8eebc..8aca6d009cdb 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
@@ -95,6 +95,7 @@ void ento::registerPointerIterationChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PointerIterationChecker>();
}
-bool ento::shouldRegisterPointerIterationChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerIterationChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
index 586d9d3af2a6..25d87f4acfc9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
@@ -54,7 +54,7 @@ static void emitDiagnostics(const BoundNodes &Match, const Decl *D,
OS.str(), Location, Range);
}
-auto callsName(const char *FunctionName) -> decltype(callee(functionDecl())) {
+decltype(auto) callsName(const char *FunctionName) {
return callee(functionDecl(hasName(FunctionName)));
}
@@ -86,8 +86,9 @@ auto matchSortWithPointers() -> decltype(decl()) {
)))
))));
- auto PointerSortM = stmt(callExpr(allOf(SortFuncM, IteratesPointerEltsM))
- ).bind(WarnAtNode);
+ auto PointerSortM = traverse(
+ TK_AsIs,
+ stmt(callExpr(allOf(SortFuncM, IteratesPointerEltsM))).bind(WarnAtNode));
return decl(forEachDescendant(PointerSortM));
}
@@ -108,6 +109,7 @@ void ento::registerPointerSortingChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PointerSortingChecker>();
}
-bool ento::shouldRegisterPointerSortingChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerSortingChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 88d0eb2ae748..81c19d9a0940 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -74,6 +74,6 @@ void ento::registerPointerSubChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerSubChecker>();
}
-bool ento::shouldRegisterPointerSubChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerSubChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 8649b8b96dd0..285d2da104f1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -6,8 +6,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines PthreadLockChecker, a simple lock -> unlock checker.
-// Also handles XNU locks, which behave similarly enough to share code.
+// This file defines:
+// * PthreadLockChecker, a simple lock -> unlock checker.
+// It also handles XNU locks, which behave similarly enough to share
+// code.
+// * FuchsiaLockChecker, which is also rather similar.
+// * C11LockChecker, which also closely follows Pthread semantics.
+//
+// TODO: Path notes.
//
//===----------------------------------------------------------------------===//
@@ -15,8 +21,8 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
using namespace clang;
using namespace ento;
@@ -46,9 +52,7 @@ public:
return LockState(UnlockedAndPossiblyDestroyed);
}
- bool operator==(const LockState &X) const {
- return K == X.K;
- }
+ bool operator==(const LockState &X) const { return K == X.K; }
bool isLocked() const { return K == Locked; }
bool isUnlocked() const { return K == Unlocked; }
@@ -60,40 +64,182 @@ public:
return K == UnlockedAndPossiblyDestroyed;
}
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(K);
- }
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
};
-class PthreadLockChecker
- : public Checker<check::PostStmt<CallExpr>, check::DeadSymbols> {
- mutable std::unique_ptr<BugType> BT_doublelock;
- mutable std::unique_ptr<BugType> BT_doubleunlock;
- mutable std::unique_ptr<BugType> BT_destroylock;
- mutable std::unique_ptr<BugType> BT_initlock;
- mutable std::unique_ptr<BugType> BT_lor;
- enum LockingSemantics {
- NotApplicable = 0,
- PthreadSemantics,
- XNUSemantics
- };
+class PthreadLockChecker : public Checker<check::PostCall, check::DeadSymbols,
+ check::RegionChanges> {
public:
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const override;
+ enum LockingSemantics { NotApplicable = 0, PthreadSemantics, XNUSemantics };
+ enum CheckerKind {
+ CK_PthreadLockChecker,
+ CK_FuchsiaLockChecker,
+ CK_C11LockChecker,
+ CK_NumCheckKinds
+ };
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+
+private:
+ typedef void (PthreadLockChecker::*FnCheck)(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkkind) const;
+ CallDescriptionMap<FnCheck> PThreadCallbacks = {
+ // Init.
+ {{"pthread_mutex_init", 2}, &PthreadLockChecker::InitAnyLock},
+ // TODO: pthread_rwlock_init(2 arguments).
+ // TODO: lck_mtx_init(3 arguments).
+ // TODO: lck_mtx_alloc_init(2 arguments) => returns the mutex.
+ // TODO: lck_rw_init(3 arguments).
+ // TODO: lck_rw_alloc_init(2 arguments) => returns the mutex.
+
+ // Acquire.
+ {{"pthread_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"pthread_rwlock_rdlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"pthread_rwlock_wrlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"lck_mtx_lock", 1}, &PthreadLockChecker::AcquireXNULock},
+ {{"lck_rw_lock_exclusive", 1}, &PthreadLockChecker::AcquireXNULock},
+ {{"lck_rw_lock_shared", 1}, &PthreadLockChecker::AcquireXNULock},
+
+ // Try.
+ {{"pthread_mutex_trylock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"pthread_rwlock_tryrdlock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"pthread_rwlock_trywrlock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"lck_mtx_try_lock", 1}, &PthreadLockChecker::TryXNULock},
+ {{"lck_rw_try_lock_exclusive", 1}, &PthreadLockChecker::TryXNULock},
+ {{"lck_rw_try_lock_shared", 1}, &PthreadLockChecker::TryXNULock},
+
+ // Release.
+ {{"pthread_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"pthread_rwlock_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_unlock_exclusive", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_unlock_shared", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_done", 1}, &PthreadLockChecker::ReleaseAnyLock},
+
+ // Destroy.
+ {{"pthread_mutex_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
+ {{"lck_mtx_destroy", 2}, &PthreadLockChecker::DestroyXNULock},
+ // TODO: pthread_rwlock_destroy(1 argument).
+ // TODO: lck_rw_destroy(2 arguments).
+ };
- void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
- bool isTryLock, enum LockingSemantics semantics) const;
+ CallDescriptionMap<FnCheck> FuchsiaCallbacks = {
+ // Init.
+ {{"spin_lock_init", 1}, &PthreadLockChecker::InitAnyLock},
+
+ // Acquire.
+ {{"spin_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"spin_lock_save", 3}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"sync_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"sync_mutex_lock_with_waiter", 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+
+ // Try.
+ {{"spin_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{"sync_mutex_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{"sync_mutex_timedlock", 2}, &PthreadLockChecker::TryFuchsiaLock},
+
+ // Release.
+ {{"spin_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"spin_unlock_restore", 3}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"sync_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ };
+
+ CallDescriptionMap<FnCheck> C11Callbacks = {
+ // Init.
+ {{"mtx_init", 2}, &PthreadLockChecker::InitAnyLock},
+
+ // Acquire.
+ {{"mtx_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+
+ // Try.
+ {{"mtx_trylock", 1}, &PthreadLockChecker::TryC11Lock},
+ {{"mtx_timedlock", 2}, &PthreadLockChecker::TryC11Lock},
+
+ // Release.
+ {{"mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+
+ // Destroy
+ {{"mtx_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
+ };
- void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
- void DestroyLock(CheckerContext &C, const CallExpr *CE, SVal Lock,
- enum LockingSemantics semantics) const;
- void InitLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
- void reportUseDestroyedBug(CheckerContext &C, const CallExpr *CE) const;
ProgramStateRef resolvePossiblyDestroyedMutex(ProgramStateRef state,
const MemRegion *lockR,
const SymbolRef *sym) const;
+ void reportUseDestroyedBug(const CallEvent &Call, CheckerContext &C,
+ unsigned ArgNo, CheckerKind checkKind) const;
+
+ // Init.
+ void InitAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void InitLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal Lock, CheckerKind checkkind) const;
+
+ // Lock, Try-lock.
+ void AcquirePthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void AcquireXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryPthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryFuchsiaLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryC11Lock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void AcquireLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal lock, bool isTryLock, LockingSemantics semantics,
+ CheckerKind checkkind) const;
+
+ // Release.
+ void ReleaseAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void ReleaseLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal lock, CheckerKind checkkind) const;
+
+ // Destroy.
+ void DestroyPthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void DestroyXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void DestroyLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal Lock, LockingSemantics semantics,
+ CheckerKind checkkind) const;
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State, const InvalidatedSymbols *Symbols,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+private:
+ mutable std::unique_ptr<BugType> BT_doublelock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_doubleunlock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_destroylock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_initlock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_lor[CK_NumCheckKinds];
+
+ void initBugType(CheckerKind checkKind) const {
+ if (BT_doublelock[checkKind])
+ return;
+ BT_doublelock[checkKind].reset(
+ new BugType{CheckNames[checkKind], "Double locking", "Lock checker"});
+ BT_doubleunlock[checkKind].reset(
+ new BugType{CheckNames[checkKind], "Double unlocking", "Lock checker"});
+ BT_destroylock[checkKind].reset(new BugType{
+ CheckNames[checkKind], "Use destroyed lock", "Lock checker"});
+ BT_initlock[checkKind].reset(new BugType{
+ CheckNames[checkKind], "Init invalid lock", "Lock checker"});
+ BT_lor[checkKind].reset(new BugType{CheckNames[checkKind],
+ "Lock order reversal", "Lock checker"});
+ }
};
} // end anonymous namespace
@@ -106,43 +252,23 @@ REGISTER_MAP_WITH_PROGRAMSTATE(LockMap, const MemRegion *, LockState)
// Return values for unresolved calls to pthread_mutex_destroy().
REGISTER_MAP_WITH_PROGRAMSTATE(DestroyRetVal, const MemRegion *, SymbolRef)
-void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
+void PthreadLockChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- StringRef FName = C.getCalleeName(CE);
- if (FName.empty())
+ // An additional umbrella check that all functions modeled by this checker
+ // are global C functions.
+ // TODO: Maybe make this the default behavior of CallDescription
+ // with exactly one identifier?
+ // FIXME: Try to handle cases when the implementation was inlined rather
+ // than just giving up.
+ if (!Call.isGlobalCFunction() || C.wasInlined)
return;
- if (CE->getNumArgs() != 1 && CE->getNumArgs() != 2)
- return;
-
- if (FName == "pthread_mutex_lock" ||
- FName == "pthread_rwlock_rdlock" ||
- FName == "pthread_rwlock_wrlock")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, PthreadSemantics);
- else if (FName == "lck_mtx_lock" ||
- FName == "lck_rw_lock_exclusive" ||
- FName == "lck_rw_lock_shared")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, XNUSemantics);
- else if (FName == "pthread_mutex_trylock" ||
- FName == "pthread_rwlock_tryrdlock" ||
- FName == "pthread_rwlock_trywrlock")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)),
- true, PthreadSemantics);
- else if (FName == "lck_mtx_try_lock" ||
- FName == "lck_rw_try_lock_exclusive" ||
- FName == "lck_rw_try_lock_shared")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), true, XNUSemantics);
- else if (FName == "pthread_mutex_unlock" ||
- FName == "pthread_rwlock_unlock" ||
- FName == "lck_mtx_unlock" ||
- FName == "lck_rw_done")
- ReleaseLock(C, CE, C.getSVal(CE->getArg(0)));
- else if (FName == "pthread_mutex_destroy")
- DestroyLock(C, CE, C.getSVal(CE->getArg(0)), PthreadSemantics);
- else if (FName == "lck_mtx_destroy")
- DestroyLock(C, CE, C.getSVal(CE->getArg(0)), XNUSemantics);
- else if (FName == "pthread_mutex_init")
- InitLock(C, CE, C.getSVal(CE->getArg(0)));
+ if (const FnCheck *Callback = PThreadCallbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_PthreadLockChecker);
+ else if (const FnCheck *Callback = FuchsiaCallbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_FuchsiaLockChecker);
+ else if (const FnCheck *Callback = C11Callbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_C11LockChecker);
}
// When a lock is destroyed, in some semantics(like PthreadSemantics) we are not
@@ -204,7 +330,7 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
LockSetTy LS = State->get<LockSet>();
if (!LS.isEmpty()) {
Out << Sep << "Mutex lock order:" << NL;
- for (auto I: LS) {
+ for (auto I : LS) {
I->dumpToStream(Out);
Out << NL;
}
@@ -213,9 +339,53 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
// TODO: Dump destroyed mutex symbols?
}
-void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
- SVal lock, bool isTryLock,
- enum LockingSemantics semantics) const {
+void PthreadLockChecker::AcquirePthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::AcquireXNULock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, XNUSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryPthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryFuchsiaLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryC11Lock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal lock, bool isTryLock,
+ enum LockingSemantics semantics,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
@@ -226,27 +396,19 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (sym)
state = resolvePossiblyDestroyedMutex(state, lockR, sym);
- SVal X = C.getSVal(CE);
- if (X.isUnknownOrUndef())
- return;
-
- DefinedSVal retVal = X.castAs<DefinedSVal>();
-
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isLocked()) {
- if (!BT_doublelock)
- BT_doublelock.reset(new BugType(this, "Double locking",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_doublelock, "This lock has already been acquired", N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_doublelock[checkKind], "This lock has already been acquired", N);
+ report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(report));
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(C, CE);
+ reportUseDestroyedBug(Call, C, ArgNo, checkKind);
return;
}
}
@@ -254,25 +416,35 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
ProgramStateRef lockSucc = state;
if (isTryLock) {
// Bifurcate the state, and allow a mode where the lock acquisition fails.
- ProgramStateRef lockFail;
- switch (semantics) {
- case PthreadSemantics:
- std::tie(lockFail, lockSucc) = state->assume(retVal);
- break;
- case XNUSemantics:
- std::tie(lockSucc, lockFail) = state->assume(retVal);
- break;
- default:
- llvm_unreachable("Unknown tryLock locking semantics");
+ SVal RetVal = Call.getReturnValue();
+ if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
+ ProgramStateRef lockFail;
+ switch (semantics) {
+ case PthreadSemantics:
+ std::tie(lockFail, lockSucc) = state->assume(*DefinedRetVal);
+ break;
+ case XNUSemantics:
+ std::tie(lockSucc, lockFail) = state->assume(*DefinedRetVal);
+ break;
+ default:
+ llvm_unreachable("Unknown tryLock locking semantics");
+ }
+ assert(lockFail && lockSucc);
+ C.addTransition(lockFail);
}
- assert(lockFail && lockSucc);
- C.addTransition(lockFail);
-
+ // We might want to handle the case when the mutex lock function was inlined
+ // and returned an Unknown or Undefined value.
} else if (semantics == PthreadSemantics) {
// Assume that the return value was 0.
- lockSucc = state->assume(retVal, false);
- assert(lockSucc);
-
+ SVal RetVal = Call.getReturnValue();
+ if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
+ // FIXME: If the lock function was inlined and returned true,
+ // we need to behave sanely - at least generate sink.
+ lockSucc = state->assume(*DefinedRetVal, false);
+ assert(lockSucc);
+ }
+ // We might want to handle the case when the mutex lock function was inlined
+ // and returned an Unknown or Undefined value.
} else {
// XNU locking semantics return void on non-try locks
assert((semantics == XNUSemantics) && "Unknown locking semantics");
@@ -285,8 +457,18 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
C.addTransition(lockSucc);
}
-void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
- SVal lock) const {
+void PthreadLockChecker::ReleaseAnyLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ ReleaseLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+}
+
+void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal lock,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
@@ -299,39 +481,37 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isUnlocked()) {
- if (!BT_doubleunlock)
- BT_doubleunlock.reset(new BugType(this, "Double unlocking",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_doubleunlock, "This lock has already been unlocked", N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_doubleunlock[checkKind], "This lock has already been unlocked",
+ N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(C, CE);
+ reportUseDestroyedBug(Call, C, ArgNo, checkKind);
return;
}
}
LockSetTy LS = state->get<LockSet>();
- // FIXME: Better analysis requires IPA for wrappers.
-
if (!LS.isEmpty()) {
const MemRegion *firstLockR = LS.getHead();
if (firstLockR != lockR) {
- if (!BT_lor)
- BT_lor.reset(new BugType(this, "Lock order reversal", "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_lor, "This was not the most recently acquired lock. Possible "
- "lock order reversal", N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_lor[checkKind],
+ "This was not the most recently acquired lock. Possible "
+ "lock order reversal",
+ N);
+ report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(report));
return;
}
@@ -343,9 +523,25 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
C.addTransition(state);
}
-void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
- SVal Lock,
- enum LockingSemantics semantics) const {
+void PthreadLockChecker::DestroyPthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ DestroyLockAux(Call, C, 0, Call.getArgSVal(0), PthreadSemantics, checkKind);
+}
+
+void PthreadLockChecker::DestroyXNULock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ DestroyLockAux(Call, C, 0, Call.getArgSVal(0), XNUSemantics, checkKind);
+}
+
+void PthreadLockChecker::DestroyLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal Lock,
+ enum LockingSemantics semantics,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *LockR = Lock.getAsRegion();
if (!LockR)
@@ -362,7 +558,7 @@ void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
// PthreadSemantics
if (semantics == PthreadSemantics) {
if (!LState || LState->isUnlocked()) {
- SymbolRef sym = C.getSVal(CE).getAsSymbol();
+ SymbolRef sym = Call.getReturnValue().getAsSymbol();
if (!sym) {
State = State->remove<LockMap>(LockR);
C.addTransition(State);
@@ -393,20 +589,26 @@ void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
Message = "This lock has already been destroyed";
}
- if (!BT_destroylock)
- BT_destroylock.reset(new BugType(this, "Destroy invalid lock",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_destroylock, Message, N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ initBugType(checkKind);
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ *BT_destroylock[checkKind], Message, N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
-void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
- SVal Lock) const {
+void PthreadLockChecker::InitAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ InitLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+}
+
+void PthreadLockChecker::InitLockAux(const CallEvent &Call, CheckerContext &C,
+ unsigned ArgNo, SVal Lock,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *LockR = Lock.getAsRegion();
if (!LockR)
@@ -433,29 +635,27 @@ void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
Message = "This lock has already been initialized";
}
- if (!BT_initlock)
- BT_initlock.reset(new BugType(this, "Init invalid lock",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_initlock, Message, N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ initBugType(checkKind);
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ *BT_initlock[checkKind], Message, N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
-void PthreadLockChecker::reportUseDestroyedBug(CheckerContext &C,
- const CallExpr *CE) const {
- if (!BT_destroylock)
- BT_destroylock.reset(new BugType(this, "Use destroyed lock",
- "Lock checker"));
+void PthreadLockChecker::reportUseDestroyedBug(const CallEvent &Call,
+ CheckerContext &C,
+ unsigned ArgNo,
+ CheckerKind checkKind) const {
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_destroylock, "This lock has already been destroyed", N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_destroylock[checkKind], "This lock has already been destroyed", N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
@@ -463,26 +663,80 @@ void PthreadLockChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- // TODO: Clean LockMap when a mutex region dies.
-
- DestroyRetValTy TrackedSymbols = State->get<DestroyRetVal>();
- for (DestroyRetValTy::iterator I = TrackedSymbols.begin(),
- E = TrackedSymbols.end();
- I != E; ++I) {
- const SymbolRef Sym = I->second;
- const MemRegion *lockR = I->first;
- bool IsSymDead = SymReaper.isDead(Sym);
- // Remove the dead symbol from the return value symbols map.
- if (IsSymDead)
- State = resolvePossiblyDestroyedMutex(State, lockR, &Sym);
+ for (auto I : State->get<DestroyRetVal>()) {
+ // Once the return value symbol dies, no more checks can be performed
+ // against it. See if the return value was checked before this point.
+ // This would remove the symbol from the map as well.
+ if (SymReaper.isDead(I.second))
+ State = resolvePossiblyDestroyedMutex(State, I.first, &I.second);
+ }
+
+ for (auto I : State->get<LockMap>()) {
+ // Stop tracking dead mutex regions as well.
+ if (!SymReaper.isLiveRegion(I.first))
+ State = State->remove<LockMap>(I.first);
}
+
+ // TODO: We probably need to clean up the lock stack as well.
+ // It is tricky though: even if the mutex cannot be unlocked anymore,
+ // it can still participate in lock order reversal resolution.
+
C.addTransition(State);
}
-void ento::registerPthreadLockChecker(CheckerManager &mgr) {
- mgr.registerChecker<PthreadLockChecker>();
+ProgramStateRef PthreadLockChecker::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Symbols,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
+
+ bool IsLibraryFunction = false;
+ if (Call && Call->isGlobalCFunction()) {
+ // Avoid invalidating mutex state when a known supported function is called.
+ if (PThreadCallbacks.lookup(*Call) || FuchsiaCallbacks.lookup(*Call) ||
+ C11Callbacks.lookup(*Call))
+ return State;
+
+ if (Call->isInSystemHeader())
+ IsLibraryFunction = true;
+ }
+
+ for (auto R : Regions) {
+    // We assume that a system library function wouldn't touch the mutex unless
+ // it takes the mutex explicitly as an argument.
+ // FIXME: This is a bit quadratic.
+ if (IsLibraryFunction &&
+ std::find(ExplicitRegions.begin(), ExplicitRegions.end(), R) ==
+ ExplicitRegions.end())
+ continue;
+
+ State = State->remove<LockMap>(R);
+ State = State->remove<DestroyRetVal>(R);
+
+ // TODO: We need to invalidate the lock stack as well. This is tricky
+ // to implement correctly and efficiently though, because the effects
+ // of mutex escapes on lock order may be fairly varied.
+ }
+
+ return State;
}
-bool ento::shouldRegisterPthreadLockChecker(const LangOptions &LO) {
- return true;
+void ento::registerPthreadLockBase(CheckerManager &mgr) {
+ mgr.registerChecker<PthreadLockChecker>();
}
+
+bool ento::shouldRegisterPthreadLockBase(const CheckerManager &mgr) { return true; }
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ PthreadLockChecker *checker = mgr.getChecker<PthreadLockChecker>(); \
+ checker->ChecksEnabled[PthreadLockChecker::CK_##name] = true; \
+ checker->CheckNames[PthreadLockChecker::CK_##name] = \
+ mgr.getCurrentCheckerName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+
+REGISTER_CHECKER(PthreadLockChecker)
+REGISTER_CHECKER(FuchsiaLockChecker)
+REGISTER_CHECKER(C11LockChecker)
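
The rewrite above replaces the old string-matching checkPostStmt body with CallDescriptionMap tables of pointer-to-member callbacks and splits one modeling class into three user-facing checkers (Pthread, Fuchsia, C11) whose diagnostics are gated by ChecksEnabled flags set in the REGISTER_CHECKER macro. The sketch below is a standalone illustration of that dispatch-plus-gating pattern in plain C++; the FakeCall and LockModel names are invented for the example, and none of it uses the analyzer's real API.

#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct FakeCall {                // invented stand-in for a call event
  std::string Name;
  unsigned NumArgs;
};

class LockModel {
public:
  enum Kind { PthreadKind, FuchsiaKind, NumKinds };
  bool Enabled[NumKinds] = {false, false};

  void handle(const FakeCall &Call) {
    auto It = Callbacks.find({Call.Name, Call.NumArgs});
    if (It == Callbacks.end())
      return;                              // not a modeled function
    (this->*(It->second.first))(Call, It->second.second);
  }

private:
  using Handler = void (LockModel::*)(const FakeCall &, Kind);
  // The real checker keeps one table per API family; collapsed here.
  std::map<std::pair<std::string, unsigned>, std::pair<Handler, Kind>>
      Callbacks = {
          {{"pthread_mutex_lock", 1}, {&LockModel::acquire, PthreadKind}},
          {{"spin_lock", 1}, {&LockModel::acquire, FuchsiaKind}},
      };

  void acquire(const FakeCall &Call, Kind K) {
    if (!Enabled[K])                       // modeling runs, diagnostics are gated
      return;
    std::printf("acquire via %s\n", Call.Name.c_str());
  }
};

int main() {
  LockModel M;
  M.Enabled[LockModel::PthreadKind] = true;
  M.handle({"pthread_mutex_lock", 1});     // enabled: reports the acquisition
  M.handle({"spin_lock", 1});              // modeled, but diagnostics disabled
}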
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 6f8cb1432bb1..3f3267ff9391 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "RetainCountChecker.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-using llvm::StrInStrNoCase;
REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
@@ -701,7 +701,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
for (ProgramStateRef St : Out) {
if (DeallocSent) {
- C.addTransition(St, C.getPredecessor(), &DeallocSentTag);
+ C.addTransition(St, C.getPredecessor(), &getDeallocSentTag());
} else {
C.addTransition(St);
}
@@ -844,13 +844,13 @@ RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const {
switch (ErrorKind) {
case RefVal::ErrorUseAfterRelease:
- return useAfterRelease;
+ return *UseAfterRelease;
case RefVal::ErrorReleaseNotOwned:
- return releaseNotOwned;
+ return *ReleaseNotOwned;
case RefVal::ErrorDeallocNotOwned:
if (Sym->getType()->getPointeeCXXRecordDecl())
- return freeNotOwned;
- return deallocNotOwned;
+ return *FreeNotOwned;
+ return *DeallocNotOwned;
default:
llvm_unreachable("Unhandled error.");
}
@@ -946,7 +946,7 @@ bool RetainCountChecker::evalCall(const CallEvent &Call,
// Assume that output is zero on the other branch.
NullOutputState = NullOutputState->BindExpr(
CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
- C.addTransition(NullOutputState, &CastFailTag);
+ C.addTransition(NullOutputState, &getCastFailTag());
// And on the original branch assume that both input and
// output are non-zero.
@@ -1095,7 +1095,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
auto R =
- std::make_unique<RefLeakReport>(leakAtReturn, LOpts, N, Sym, C);
+ std::make_unique<RefLeakReport>(*LeakAtReturn, LOpts, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1120,7 +1120,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
if (N) {
auto R = std::make_unique<RefCountReport>(
- returnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ *ReturnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
C.emitReport(std::move(R));
}
return N;
@@ -1273,8 +1273,8 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
os << "has a +" << V.getCount() << " retain count";
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- auto R = std::make_unique<RefCountReport>(overAutorelease, LOpts, N, Sym,
- os.str());
+ auto R = std::make_unique<RefCountReport>(*OverAutorelease, LOpts, N, Sym,
+ os.str());
Ctx.emitReport(std::move(R));
}
@@ -1320,7 +1320,7 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
if (N) {
for (SymbolRef L : Leaked) {
- const RefCountBug &BT = Pred ? leakWithinFunction : leakAtReturn;
+ const RefCountBug &BT = Pred ? *LeakWithinFunction : *LeakAtReturn;
Ctx.emitReport(std::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx));
}
}
@@ -1473,48 +1473,73 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
// Checker registration.
//===----------------------------------------------------------------------===//
+std::unique_ptr<CheckerProgramPointTag> RetainCountChecker::DeallocSentTag;
+std::unique_ptr<CheckerProgramPointTag> RetainCountChecker::CastFailTag;
+
void ento::registerRetainCountBase(CheckerManager &Mgr) {
- Mgr.registerChecker<RetainCountChecker>();
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ Chk->DeallocSentTag =
+ std::make_unique<CheckerProgramPointTag>(Chk, "DeallocSent");
+ Chk->CastFailTag =
+ std::make_unique<CheckerProgramPointTag>(Chk, "DynamicCastFail");
}
-bool ento::shouldRegisterRetainCountBase(const LangOptions &LO) {
+bool ento::shouldRegisterRetainCountBase(const CheckerManager &mgr) {
return true;
}
-
-// FIXME: remove this, hack for backwards compatibility:
-// it should be possible to enable the NS/CF retain count checker as
-// osx.cocoa.RetainCount, and it should be possible to disable
-// osx.OSObjectRetainCount using osx.cocoa.RetainCount:CheckOSObject=false.
-static bool getOption(AnalyzerOptions &Options,
- StringRef Postfix,
- StringRef Value) {
- auto I = Options.Config.find(
- (StringRef("osx.cocoa.RetainCount:") + Postfix).str());
- if (I != Options.Config.end())
- return I->getValue() == Value;
- return false;
-}
-
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
Chk->TrackObjCAndCFObjects = true;
- Chk->TrackNSCFStartParam = getOption(Mgr.getAnalyzerOptions(),
- "TrackNSCFStartParam",
- "true");
+ Chk->TrackNSCFStartParam = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Mgr.getCurrentCheckerName(), "TrackNSCFStartParam");
+
+#define INIT_BUGTYPE(KIND) \
+ Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
+ RefCountBug::KIND);
+ // TODO: Ideally, we should have a checker for each of these bug types.
+ INIT_BUGTYPE(UseAfterRelease)
+ INIT_BUGTYPE(ReleaseNotOwned)
+ INIT_BUGTYPE(DeallocNotOwned)
+ INIT_BUGTYPE(FreeNotOwned)
+ INIT_BUGTYPE(OverAutorelease)
+ INIT_BUGTYPE(ReturnNotOwnedForOwned)
+ INIT_BUGTYPE(LeakWithinFunction)
+ INIT_BUGTYPE(LeakAtReturn)
+#undef INIT_BUGTYPE
}
-bool ento::shouldRegisterRetainCountChecker(const LangOptions &LO) {
+bool ento::shouldRegisterRetainCountChecker(const CheckerManager &mgr) {
return true;
}
void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- if (!getOption(Mgr.getAnalyzerOptions(),
- "CheckOSObject",
- "false"))
- Chk->TrackOSObjects = true;
+ Chk->TrackOSObjects = true;
+
+ // FIXME: We want bug reports to always have the same checker name associated
+ // with them, yet here, if RetainCountChecker is disabled but
+ // OSObjectRetainCountChecker is enabled, the checker names will be different.
+ // This hack will make it so that the checker name depends on which checker is
+ // enabled rather than on the registration order.
+ // For the most part, we want **non-hidden checkers** to be associated with
+ // diagnostics, and **hidden checker options** with the fine-tuning of
+ // modeling. Following this logic, OSObjectRetainCountChecker should be the
+ // latter, but we can't just remove it for backward compatibility reasons.
+#define LAZY_INIT_BUGTYPE(KIND) \
+ if (!Chk->KIND) \
+ Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
+ RefCountBug::KIND);
+ LAZY_INIT_BUGTYPE(UseAfterRelease)
+ LAZY_INIT_BUGTYPE(ReleaseNotOwned)
+ LAZY_INIT_BUGTYPE(DeallocNotOwned)
+ LAZY_INIT_BUGTYPE(FreeNotOwned)
+ LAZY_INIT_BUGTYPE(OverAutorelease)
+ LAZY_INIT_BUGTYPE(ReturnNotOwnedForOwned)
+ LAZY_INIT_BUGTYPE(LeakWithinFunction)
+ LAZY_INIT_BUGTYPE(LeakAtReturn)
+#undef LAZY_INIT_BUGTYPE
}
-bool ento::shouldRegisterOSObjectRetainCountChecker(const LangOptions &LO) {
+bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &mgr) {
return true;
}
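
The registration changes above turn the eagerly constructed RefCountBug members into unique_ptr-owned descriptors created with the name of whichever front-end checker is enabled: registerRetainCountChecker initializes them unconditionally (INIT_BUGTYPE), while registerOSObjectRetainCountChecker only fills in the ones still missing (LAZY_INIT_BUGTYPE), so the cocoa checker's name wins whenever both are on. A minimal standalone sketch of that init-versus-lazy-init pattern, with invented BugDescriptor and RefModel names and no Clang APIs:

#include <iostream>
#include <memory>
#include <string>

struct BugDescriptor {
  std::string CheckerName;
  std::string Kind;
};

struct RefModel {
  std::unique_ptr<BugDescriptor> UseAfterRelease;

  // Unconditional init: the primary checker always (re)creates the descriptor.
  void registerPrimary(const std::string &Name) {
    UseAfterRelease = std::make_unique<BugDescriptor>(
        BugDescriptor{Name, "Use-after-release"});
  }

  // Lazy init: the secondary checker only fills in descriptors that are still
  // missing, so the primary checker's name wins regardless of ordering.
  void registerSecondary(const std::string &Name) {
    if (!UseAfterRelease)
      UseAfterRelease = std::make_unique<BugDescriptor>(
          BugDescriptor{Name, "Use-after-release"});
  }
};

int main() {
  RefModel M;
  M.registerSecondary("osx.OSObjectRetainCount");
  M.registerPrimary("osx.cocoa.RetainCount");   // overrides the lazy value
  std::cout << M.UseAfterRelease->CheckerName << '\n';
}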
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index dd79bbef321c..223e28c2c5b8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -251,20 +251,20 @@ class RetainCountChecker
eval::Assume,
eval::Call > {
- RefCountBug useAfterRelease{this, RefCountBug::UseAfterRelease};
- RefCountBug releaseNotOwned{this, RefCountBug::ReleaseNotOwned};
- RefCountBug deallocNotOwned{this, RefCountBug::DeallocNotOwned};
- RefCountBug freeNotOwned{this, RefCountBug::FreeNotOwned};
- RefCountBug overAutorelease{this, RefCountBug::OverAutorelease};
- RefCountBug returnNotOwnedForOwned{this, RefCountBug::ReturnNotOwnedForOwned};
- RefCountBug leakWithinFunction{this, RefCountBug::LeakWithinFunction};
- RefCountBug leakAtReturn{this, RefCountBug::LeakAtReturn};
-
- CheckerProgramPointTag DeallocSentTag{this, "DeallocSent"};
- CheckerProgramPointTag CastFailTag{this, "DynamicCastFail"};
+public:
+ std::unique_ptr<RefCountBug> UseAfterRelease;
+ std::unique_ptr<RefCountBug> ReleaseNotOwned;
+ std::unique_ptr<RefCountBug> DeallocNotOwned;
+ std::unique_ptr<RefCountBug> FreeNotOwned;
+ std::unique_ptr<RefCountBug> OverAutorelease;
+ std::unique_ptr<RefCountBug> ReturnNotOwnedForOwned;
+ std::unique_ptr<RefCountBug> LeakWithinFunction;
+ std::unique_ptr<RefCountBug> LeakAtReturn;
mutable std::unique_ptr<RetainSummaryManager> Summaries;
-public:
+
+ static std::unique_ptr<CheckerProgramPointTag> DeallocSentTag;
+ static std::unique_ptr<CheckerProgramPointTag> CastFailTag;
/// Track Objective-C and CoreFoundation objects.
bool TrackObjCAndCFObjects = false;
@@ -360,13 +360,11 @@ public:
CheckerContext &Ctx,
ExplodedNode *Pred = nullptr) const;
- const CheckerProgramPointTag &getDeallocSentTag() const {
- return DeallocSentTag;
+ static const CheckerProgramPointTag &getDeallocSentTag() {
+ return *DeallocSentTag;
}
- const CheckerProgramPointTag &getCastFailTag() const {
- return CastFailTag;
- }
+ static const CheckerProgramPointTag &getCastFailTag() { return *CastFailTag; }
private:
/// Perform the necessary checks and state adjustments at the end of the
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 9853758f7f2c..1d8ed90f7590 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -18,7 +18,7 @@ using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugType BT) {
+StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugKind BT) {
switch (BT) {
case UseAfterRelease:
return "Use-after-release";
@@ -37,7 +37,7 @@ StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugType BT) {
case LeakAtReturn:
return "Leak of returned object";
}
- llvm_unreachable("Unknown RefCountBugType");
+ llvm_unreachable("Unknown RefCountBugKind");
}
StringRef RefCountBug::getDescription() const {
@@ -60,13 +60,14 @@ StringRef RefCountBug::getDescription() const {
case LeakAtReturn:
return "";
}
- llvm_unreachable("Unknown RefCountBugType");
+ llvm_unreachable("Unknown RefCountBugKind");
}
-RefCountBug::RefCountBug(const CheckerBase *Checker, RefCountBugType BT)
+RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT)
: BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount,
- /*SuppressOnSink=*/BT == LeakWithinFunction || BT == LeakAtReturn),
- BT(BT), Checker(Checker) {}
+ /*SuppressOnSink=*/BT == LeakWithinFunction ||
+ BT == LeakAtReturn),
+ BT(BT) {}
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
@@ -84,7 +85,7 @@ static std::string getPrettyTypeName(QualType QT) {
QualType PT = QT->getPointeeType();
if (!PT.isNull() && !QT->getAs<TypedefType>())
if (const auto *RD = PT->getAsCXXRecordDecl())
- return RD->getName();
+ return std::string(RD->getName());
return QT.getAsString();
}
@@ -453,8 +454,6 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
const auto &BT = static_cast<const RefCountBug&>(BR.getBugType());
- const auto *Checker =
- static_cast<const RetainCountChecker *>(BT.getChecker());
bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned ||
BT.getBugType() == RefCountBug::DeallocNotOwned;
@@ -545,11 +544,11 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
const ProgramPointTag *Tag = N->getLocation().getTag();
- if (Tag == &Checker->getCastFailTag()) {
+ if (Tag == &RetainCountChecker::getCastFailTag()) {
os << "Assuming dynamic cast returns null due to type mismatch";
}
- if (Tag == &Checker->getDeallocSentTag()) {
+ if (Tag == &RetainCountChecker::getDeallocSentTag()) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index e9e277754054..286a8ae2ef7d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -26,7 +26,7 @@ namespace retaincountchecker {
class RefCountBug : public BugType {
public:
- enum RefCountBugType {
+ enum RefCountBugKind {
UseAfterRelease,
ReleaseNotOwned,
DeallocNotOwned,
@@ -36,21 +36,14 @@ public:
LeakWithinFunction,
LeakAtReturn,
};
- RefCountBug(const CheckerBase *checker, RefCountBugType BT);
+ RefCountBug(CheckerNameRef Checker, RefCountBugKind BT);
StringRef getDescription() const;
- RefCountBugType getBugType() const {
- return BT;
- }
-
- const CheckerBase *getChecker() const {
- return Checker;
- }
+ RefCountBugKind getBugType() const { return BT; }
private:
- RefCountBugType BT;
- const CheckerBase *Checker;
- static StringRef bugTypeToName(RefCountBugType BT);
+ RefCountBugKind BT;
+ static StringRef bugTypeToName(RefCountBugKind BT);
};
class RefCountReport : public PathSensitiveBugReport {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index abd1a074b487..599d4f306aa1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
@@ -51,15 +52,14 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
// pointer casts.
if (Idx.isZeroConstant())
return;
+
// FIXME: All of this out-of-bounds checking should eventually be refactored
// into a common place.
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- DefinedOrUnknownSVal NumElements
- = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
- ER->getValueType());
-
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
@@ -91,6 +91,6 @@ void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnPointerRangeChecker>();
}
-bool ento::shouldRegisterReturnPointerRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnPointerRangeChecker(const CheckerManager &mgr) {
return true;
}
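
The hunk above swaps the StoreManager-based getSizeInElements query for getDynamicElementCount and then, as before, asks the constraint manager to split the state into an in-bounds and an out-of-bounds branch for the returned index; a warning is emitted only when the out-of-bounds branch is the sole survivor. A toy standalone sketch of that split, using concrete integers instead of symbolic values (the Split and assumeInBound names here are invented for the example):

#include <cstdio>
#include <optional>

struct Split {
  std::optional<bool> InBound;   // present if the in-bounds state is feasible
  std::optional<bool> OutBound;  // present if the out-of-bounds state is feasible
};

// For a concrete index the split is degenerate: exactly one branch survives.
// The checker warns when only the out-of-bounds branch remains.
Split assumeInBound(long Idx, long ElementCount) {
  Split S;
  if (Idx >= 0 && Idx < ElementCount)
    S.InBound = true;
  else
    S.OutBound = true;
  return S;
}

int main() {
  Split S = assumeInBound(12, 10);
  if (S.OutBound && !S.InBound)
    std::puts("would report: returned pointer points out of bounds");
}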
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index fbd15d864424..5266cbf86b44 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -122,6 +122,6 @@ void ento::registerReturnUndefChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnUndefChecker>();
}
-bool ento::shouldRegisterReturnUndefChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnUndefChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
index 103208d8b5a5..14ecede17083 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -99,13 +99,13 @@ void ReturnValueChecker::checkPostCall(const CallEvent &Call,
std::string Name = getName(Call);
const NoteTag *CallTag = C.getNoteTag(
- [Name, ExpectedValue](BugReport &) -> std::string {
+ [Name, ExpectedValue](PathSensitiveBugReport &) -> std::string {
SmallString<128> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << '\'' << Name << "' returns "
<< (ExpectedValue ? "true" : "false");
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/true);
@@ -154,7 +154,7 @@ void ReturnValueChecker::checkEndFunction(const ReturnStmt *RS,
Out << '\'' << Name << "' returns "
<< (ExpectedValue ? "false" : "true");
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/false);
@@ -165,6 +165,6 @@ void ento::registerReturnValueChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ReturnValueChecker>();
}
-bool ento::shouldRegisterReturnValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index 5e305aa709b6..d9dc72ddaa21 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -203,6 +203,6 @@ void ento::registerRunLoopAutoreleaseLeakChecker(CheckerManager &mgr) {
mgr.registerChecker<RunLoopAutoreleaseLeakChecker>();
}
-bool ento::shouldRegisterRunLoopAutoreleaseLeakChecker(const LangOptions &LO) {
+bool ento::shouldRegisterRunLoopAutoreleaseLeakChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
new file mode 100644
index 000000000000..933e0146ff59
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -0,0 +1,180 @@
+//===-- STLAlgorithmModeling.cpp -----------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Models STL algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+#include "Iterator.h"
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class STLAlgorithmModeling : public Checker<eval::Call> {
+ bool evalFind(CheckerContext &C, const CallExpr *CE) const;
+
+ void Find(CheckerContext &C, const CallExpr *CE, unsigned paramNum) const;
+
+ using FnCheck = bool (STLAlgorithmModeling::*)(CheckerContext &,
+ const CallExpr *) const;
+
+ const CallDescriptionMap<FnCheck> Callbacks = {
+ {{{"std", "find"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if_not"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if_not"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "lower_bound"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "lower_bound"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "upper_bound"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "upper_bound"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 6}, &STLAlgorithmModeling::evalFind},
+ };
+
+public:
+ STLAlgorithmModeling() = default;
+
+ bool AggressiveStdFindModeling;
+
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+bool STLAlgorithmModeling::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ const FnCheck *Handler = Callbacks.lookup(Call);
+ if (!Handler)
+ return false;
+
+ return (this->**Handler)(C, CE);
+}
+
+bool STLAlgorithmModeling::evalFind(CheckerContext &C,
+ const CallExpr *CE) const {
+ // std::find()-like functions either take their primary range in the first
+ // two parameters, or if the first parameter is "execution policy" then in
+ // the second and third. This means that the second parameter must always be
+ // an iterator.
+ if (!isIteratorType(CE->getArg(1)->getType()))
+ return false;
+
+ // If no "execution policy" parameter is used then the first argument is the
+ // beginning of the range.
+ if (isIteratorType(CE->getArg(0)->getType())) {
+ Find(C, CE, 0);
+ return true;
+ }
+
+ // If "execution policy" parameter is used then the second argument is the
+ // beginning of the range.
+ if (isIteratorType(CE->getArg(2)->getType())) {
+ Find(C, CE, 1);
+ return true;
+ }
+
+ return false;
+}
+
+void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
+ unsigned paramNum) const {
+ auto State = C.getState();
+ auto &SVB = C.getSValBuilder();
+ const auto *LCtx = C.getLocationContext();
+
+ SVal RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ SVal Param = State->getSVal(CE->getArg(paramNum), LCtx);
+
+ auto StateFound = State->BindExpr(CE, LCtx, RetVal);
+
+ // If we have an iterator position for the range-begin argument then we can
+ // assume that in case of successful search the position of the found element
+ // is not ahead of it.
+ // FIXME: Reverse iterators
+ const auto *Pos = getIteratorPosition(State, Param);
+ if (Pos) {
+ StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
+ CE, LCtx, C.blockCount());
+ const auto *NewPos = getIteratorPosition(StateFound, RetVal);
+ assert(NewPos && "Failed to create new iterator position.");
+
+ SVal GreaterOrEqual = SVB.evalBinOp(StateFound, BO_GE,
+ nonloc::SymbolVal(NewPos->getOffset()),
+ nonloc::SymbolVal(Pos->getOffset()),
+ SVB.getConditionType());
+ assert(GreaterOrEqual.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
+ StateFound = StateFound->assume(GreaterOrEqual.castAs<DefinedSVal>(), true);
+ }
+
+ Param = State->getSVal(CE->getArg(paramNum + 1), LCtx);
+
+ // If we have an iterator position for the range-end argument then we can
+ // assume that in case of successful search the position of the found element
+ // is ahead of it.
+ // FIXME: Reverse iterators
+ Pos = getIteratorPosition(State, Param);
+ if (Pos) {
+ StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
+ CE, LCtx, C.blockCount());
+ const auto *NewPos = getIteratorPosition(StateFound, RetVal);
+ assert(NewPos && "Failed to create new iterator position.");
+
+ SVal Less = SVB.evalBinOp(StateFound, BO_LT,
+ nonloc::SymbolVal(NewPos->getOffset()),
+ nonloc::SymbolVal(Pos->getOffset()),
+ SVB.getConditionType());
+ assert(Less.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
+ StateFound = StateFound->assume(Less.castAs<DefinedSVal>(), true);
+ }
+
+ C.addTransition(StateFound);
+
+ if (AggressiveStdFindModeling) {
+ auto StateNotFound = State->BindExpr(CE, LCtx, Param);
+ C.addTransition(StateNotFound);
+ }
+}
+
+} // namespace
+
+void ento::registerSTLAlgorithmModeling(CheckerManager &Mgr) {
+ auto *Checker = Mgr.registerChecker<STLAlgorithmModeling>();
+ Checker->AggressiveStdFindModeling =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker,
+ "AggressiveStdFindModeling");
+}
+
+bool ento::shouldRegisterSTLAlgorithmModeling(const CheckerManager &mgr) {
+ return true;
+}
+
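STLAlgorithmModeling conjures a symbol for the result of std::find-like calls and, when iterator positions are known for the range arguments, constrains it to lie in [begin, end); with AggressiveStdFindModeling it also explores a branch where nothing is found. A small example of the kind of client code this is meant to help the iterator checkers reason about (the firstPositive helper is invented for the example, not part of the patch):

#include <algorithm>
#include <vector>

int firstPositive(const std::vector<int> &v) {
  auto it = std::find_if(v.begin(), v.end(), [](int x) { return x > 0; });
  if (it == v.end())
    return -1;   // "not found" branch, explored when aggressive modeling is on
  return *it;    // the found position is constrained to lie within the range
}

int main() { return firstPositive({-3, 0, 7}) == 7 ? 0 : 1; }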
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 8193bcbef4cd..8d380ed1b93d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -271,6 +271,6 @@ void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
}
// This checker should be enabled regardless of how language options are set.
-bool ento::shouldRegisterSimpleStreamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSimpleStreamChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
new file mode 100644
index 000000000000..ec43a23e30a9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -0,0 +1,33 @@
+//=== SmartPtr.h - Tracking smart pointer state. -------------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the inter-checker API for smart pointer modeling. It allows
+// dependent checkers to figure out whether a smart pointer is null or not.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+namespace clang {
+namespace ento {
+namespace smartptr {
+
+/// Returns true if the event call is on a smart pointer.
+bool isStdSmartPtrCall(const CallEvent &Call);
+
+/// Returns whether the smart pointer is null or not.
+bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
+
+} // namespace smartptr
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
new file mode 100644
index 000000000000..7bb25f397d01
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
@@ -0,0 +1,80 @@
+// SmartPtrChecker.cpp - Check for smart pointer dereference - C++ --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks for null dereferences of C++ smart
+// pointers.
+//
+//===----------------------------------------------------------------------===//
+#include "SmartPtr.h"
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SmartPtrChecker : public Checker<check::PreCall> {
+ BugType NullDereferenceBugType{this, "Null SmartPtr dereference",
+ "C++ Smart Pointer"};
+
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ void reportBug(CheckerContext &C, const CallEvent &Call) const;
+};
+} // end of anonymous namespace
+
+void SmartPtrChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!smartptr::isStdSmartPtrCall(Call))
+ return;
+ ProgramStateRef State = C.getState();
+ const auto *OC = dyn_cast<CXXMemberOperatorCall>(&Call);
+ if (!OC)
+ return;
+ const MemRegion *ThisRegion = OC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ OverloadedOperatorKind OOK = OC->getOverloadedOperator();
+ if (OOK == OO_Star || OOK == OO_Arrow) {
+ if (smartptr::isNullSmartPtr(State, ThisRegion))
+ reportBug(C, Call);
+ }
+}
+
+void SmartPtrChecker::reportBug(CheckerContext &C,
+ const CallEvent &Call) const {
+ ExplodedNode *ErrNode = C.generateErrorNode();
+ if (!ErrNode)
+ return;
+
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ NullDereferenceBugType, "Dereference of null smart pointer", ErrNode);
+ C.emitReport(std::move(R));
+}
+
+void ento::registerSmartPtrChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<SmartPtrChecker>();
+}
+
+bool ento::shouldRegisterSmartPtrChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
+ return LO.CPlusPlus;
+}
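
A hedged example of analyzed code that this checker, combined with the SmartPtrModeling changes in the next file, reports on (names are illustrative; the warning text matches the bug report constructed above):

    #include <memory>

    struct Widget { int Value = 0; };

    int useWidget() {
      std::unique_ptr<Widget> P; // default construction: inner pointer modeled as null
      return P->Value;           // operator-> on a null smart pointer:
                                 //   "Dereference of null smart pointer"
    }
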
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index fd372aafa50d..bcc7d4103c1c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -12,27 +12,81 @@
//===----------------------------------------------------------------------===//
#include "Move.h"
+#include "SmartPtr.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
using namespace clang;
using namespace ento;
namespace {
-class SmartPtrModeling : public Checker<eval::Call> {
+class SmartPtrModeling : public Checker<eval::Call, check::DeadSymbols> {
+
bool isNullAfterMoveMethod(const CallEvent &Call) const;
public:
+  // Whether the checker should model null dereferences of smart pointers.
+ DefaultBool ModelSmartPtrDereference;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+private:
+ ProgramStateRef updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisValRegion) const;
+ void handleReset(const CallEvent &Call, CheckerContext &C) const;
+ void handleRelease(const CallEvent &Call, CheckerContext &C) const;
+ void handleSwap(const CallEvent &Call, CheckerContext &C) const;
+
+ using SmartPtrMethodHandlerFn =
+ void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
+ CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
+ {{"reset"}, &SmartPtrModeling::handleReset},
+ {{"release"}, &SmartPtrModeling::handleRelease},
+ {{"swap", 1}, &SmartPtrModeling::handleSwap}};
};
} // end of anonymous namespace
+REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, SVal)
+
+// Define the inter-checker API.
+namespace clang {
+namespace ento {
+namespace smartptr {
+bool isStdSmartPtrCall(const CallEvent &Call) {
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
+ if (!MethodDecl || !MethodDecl->getParent())
+ return false;
+
+ const auto *RecordDecl = MethodDecl->getParent();
+ if (!RecordDecl || !RecordDecl->getDeclContext()->isStdNamespace())
+ return false;
+
+ if (RecordDecl->getDeclName().isIdentifier()) {
+ StringRef Name = RecordDecl->getName();
+ return Name == "shared_ptr" || Name == "unique_ptr" || Name == "weak_ptr";
+ }
+ return false;
+}
+
+bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion) {
+ const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisRegion);
+ return InnerPointVal && InnerPointVal->isZeroConstant();
+}
+} // namespace smartptr
+} // namespace ento
+} // namespace clang
+
bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
// TODO: Update CallDescription to support anonymous calls?
// TODO: Handle other methods, such as .get() or .release().
@@ -44,29 +98,136 @@ bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
bool SmartPtrModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!isNullAfterMoveMethod(Call))
+
+ if (!smartptr::isStdSmartPtrCall(Call))
return false;
- ProgramStateRef State = C.getState();
- const MemRegion *ThisR =
- cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+ if (isNullAfterMoveMethod(Call)) {
+ ProgramStateRef State = C.getState();
+ const MemRegion *ThisR =
+ cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+
+ if (!move::isMovedFrom(State, ThisR)) {
+ // TODO: Model this case as well. At least, avoid invalidation of globals.
+ return false;
+ }
+
+ // TODO: Add a note to bug reports describing this decision.
+ C.addTransition(
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeZeroVal(Call.getResultType())));
+ return true;
+ }
- if (!move::isMovedFrom(State, ThisR)) {
- // TODO: Model this case as well. At least, avoid invalidation of globals.
+ if (!ModelSmartPtrDereference)
return false;
+
+ if (const auto *CC = dyn_cast<CXXConstructorCall>(&Call)) {
+ if (CC->getDecl()->isCopyOrMoveConstructor())
+ return false;
+
+ const MemRegion *ThisValRegion = CC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return false;
+
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+ C.addTransition(State);
+ return true;
+ }
+
+ const SmartPtrMethodHandlerFn *Handler = SmartPtrMethodHandlers.lookup(Call);
+ if (!Handler)
+ return false;
+ (this->**Handler)(Call, C);
+
+ return C.isDifferent();
+}
+
+void SmartPtrModeling::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ // Clean up dead regions from the region map.
+ TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
+ for (auto E : TrackedRegions) {
+ const MemRegion *Region = E.first;
+ bool IsRegDead = !SymReaper.isLiveRegion(Region);
+
+ if (IsRegDead)
+ State = State->remove<TrackedRegionMap>(Region);
+ }
+ C.addTransition(State);
+}
+
+void SmartPtrModeling::handleReset(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return;
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+ C.addTransition(State);
+  // TODO: Make sure to invalidate the region in the Store if we don't have
+  // time to model all methods.
+}
+
+void SmartPtrModeling::handleRelease(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return;
+
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+
+ const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisValRegion);
+ if (InnerPointVal) {
+ State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ *InnerPointVal);
+ }
+ C.addTransition(State);
+ // TODO: Add support to enable MallocChecker to start tracking the raw
+ // pointer.
+}
+
+void SmartPtrModeling::handleSwap(const CallEvent &Call,
+ CheckerContext &C) const {
+ // TODO: Add support to handle swap method.
+}
+
+ProgramStateRef
+SmartPtrModeling::updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisValRegion) const {
+  // TODO: Refactor and clean up; this currently handles too many things.
+ ProgramStateRef State = C.getState();
+ auto NumArgs = Call.getNumArgs();
+
+ if (NumArgs == 0) {
+ auto NullSVal = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(ThisValRegion, NullSVal);
+ } else if (NumArgs == 1) {
+ auto ArgVal = Call.getArgSVal(0);
+ assert(Call.getArgExpr(0)->getType()->isPointerType() &&
+ "Adding a non pointer value to TrackedRegionMap");
+ State = State->set<TrackedRegionMap>(ThisValRegion, ArgVal);
}
- // TODO: Add a note to bug reports describing this decision.
- C.addTransition(
- State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
- C.getSValBuilder().makeZeroVal(Call.getResultType())));
- return true;
+ return State;
}
void ento::registerSmartPtrModeling(CheckerManager &Mgr) {
- Mgr.registerChecker<SmartPtrModeling>();
+ auto *Checker = Mgr.registerChecker<SmartPtrModeling>();
+ Checker->ModelSmartPtrDereference =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Checker, "ModelSmartPtrDereference");
}
-bool ento::shouldRegisterSmartPtrModeling(const LangOptions &LO) {
+bool ento::shouldRegisterSmartPtrModeling(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
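
A hedged sketch of the method semantics modeled above, in terms of analyzed code (illustrative names; only reset, release, and swap have handlers, and swap is still a TODO):

    #include <memory>

    void exerciseHandlers(std::unique_ptr<int> P, std::unique_ptr<int> Other) {
      P.reset(new int(1));    // handleReset: tracked inner pointer becomes the argument
      P.reset();              // handleReset: tracked inner pointer becomes null
      int *Raw = P.release(); // handleRelease: the call's value is bound to the
                              // tracked inner pointer
      (void)Raw;
      P.swap(Other);          // handleSwap: not modeled yet (TODO above)
    }
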
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 7285d27495a7..b5c9356322fc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -43,6 +43,7 @@ public:
};
DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
@@ -156,7 +157,8 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C,
return;
if (!BT_returnstack)
BT_returnstack = std::make_unique<BuiltinBug>(
- this, "Return of address to stack-allocated memory");
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Return of address to stack-allocated memory");
// Generate a report for this bug.
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
@@ -195,7 +197,8 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
continue;
if (!BT_capturedstackasync)
BT_capturedstackasync = std::make_unique<BuiltinBug>(
- this, "Address of stack-allocated memory is captured");
+ CheckNames[CK_StackAddrAsyncEscapeChecker],
+ "Address of stack-allocated memory is captured");
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
SourceRange Range = genName(Out, Region, C.getASTContext());
@@ -218,7 +221,8 @@ void StackAddrEscapeChecker::checkReturnedBlockCaptures(
continue;
if (!BT_capturedstackret)
BT_capturedstackret = std::make_unique<BuiltinBug>(
- this, "Address of stack-allocated memory is captured");
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Address of stack-allocated memory is captured");
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
SourceRange Range = genName(Out, Region, C.getASTContext());
@@ -277,7 +281,7 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
// The CK_CopyAndAutoreleaseBlockObject cast causes the block to be copied
// so the stack address is not escaping here.
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
if (isa<BlockDataRegion>(R) &&
ICE->getCastKind() == CK_CopyAndAutoreleaseBlockObject) {
return;
@@ -333,7 +337,8 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
if (!BT_stackleak)
BT_stackleak = std::make_unique<BuiltinBug>(
- this, "Stack address stored into global variable",
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Stack address stored into global variable",
"Stack address was saved into a global variable. "
"This is dangerous because the address will become "
"invalid after returning from the function");
@@ -365,20 +370,19 @@ void ento::registerStackAddrEscapeBase(CheckerManager &mgr) {
mgr.registerChecker<StackAddrEscapeChecker>();
}
-bool ento::shouldRegisterStackAddrEscapeBase(const LangOptions &LO) {
+bool ento::shouldRegisterStackAddrEscapeBase(const CheckerManager &mgr) {
return true;
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &Mgr) { \
- StackAddrEscapeChecker *Chk = \
- Mgr.getChecker<StackAddrEscapeChecker>(); \
+ StackAddrEscapeChecker *Chk = Mgr.getChecker<StackAddrEscapeChecker>(); \
Chk->ChecksEnabled[StackAddrEscapeChecker::CK_##name] = true; \
+ Chk->CheckNames[StackAddrEscapeChecker::CK_##name] = \
+ Mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { \
- return true; \
- }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(StackAddrEscapeChecker)
REGISTER_CHECKER(StackAddrAsyncEscapeChecker)
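
The diagnostics renamed above fire on code like the following hedged examples (illustrative functions; the warning strings appear verbatim in the hunks above):

    int *Global;

    int *returnStackAddress() {
      int Local = 42;
      return &Local; // "Return of address to stack-allocated memory"
    }

    void storeStackAddress() {
      int Local = 0;
      Global = &Local;
    } // at function end: "Stack address stored into global variable"
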
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 2cdee8da375e..8b575f4f4759 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -7,9 +7,8 @@
//===----------------------------------------------------------------------===//
//
// This checker improves modeling of a few simple library functions.
-// It does not generate warnings.
//
-// This checker provides a specification format - `FunctionSummaryTy' - and
+// This checker provides a specification format - `Summary' - and
// contains descriptions of some library functions in this format. Each
// specification contains a list of branches for splitting the program state
// upon call, and range constraints on argument and return-value symbols that
@@ -21,7 +20,7 @@
// consider standard C function `ispunct(int x)', which returns a non-zero value
// iff `x' is a punctuation character, that is, when `x' is in range
// ['!', '/'] [':', '@'] U ['[', '\`'] U ['{', '~'].
-// `FunctionSummaryTy' provides only two branches for this function. However,
+// `Summary' provides only two branches for this function. However,
// any attempt to describe this range with if-statements in the body farm
// would result in many more branches. Because each branch needs to be analyzed
// independently, this significantly reduces performance. Additionally,
@@ -30,13 +29,13 @@
// which may lead to false positives because considering this particular path
// was not consciously intended, and therefore it might have been unreachable.
//
-// This checker uses eval::Call for modeling "pure" functions, for which
-// their `FunctionSummaryTy' is a precise model. This avoids unnecessary
-// invalidation passes. Conflicts with other checkers are unlikely because
-// if the function has no other effects, other checkers would probably never
-// want to improve upon the modeling done by this checker.
+// This checker uses eval::Call for modeling pure functions (functions without
+// side effects), for which their `Summary' is a precise model. This avoids
+// unnecessary invalidation passes. Conflicts with other checkers are unlikely
+// because if the function has no other effects, other checkers would probably
+// never want to improve upon the modeling done by this checker.
//
-// Non-"pure" functions, for which only partial improvement over the default
+// Non-pure functions, for which only partial improvement over the default
// behavior is expected, are modeled via check::PostCall, non-intrusively.
//
// The following standard C functions are currently supported:
@@ -51,203 +50,461 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace clang::ento;
namespace {
-class StdLibraryFunctionsChecker : public Checker<check::PostCall, eval::Call> {
- /// Below is a series of typedefs necessary to define function specs.
- /// We avoid nesting types here because each additional qualifier
- /// would need to be repeated in every function spec.
- struct FunctionSummaryTy;
+class StdLibraryFunctionsChecker
+ : public Checker<check::PreCall, check::PostCall, eval::Call> {
+
+ class Summary;
/// Specify how much the analyzer engine should entrust modeling this function
   /// to us. If it doesn't, it performs additional invalidations.
- enum InvalidationKindTy { NoEvalCall, EvalCallAsPure };
-
- /// A pair of ValueRangeKindTy and IntRangeVectorTy would describe a range
- /// imposed on a particular argument or return value symbol.
- ///
- /// Given a range, should the argument stay inside or outside this range?
- /// The special `ComparesToArgument' value indicates that we should
- /// impose a constraint that involves other argument or return value symbols.
- enum ValueRangeKindTy { OutOfRange, WithinRange, ComparesToArgument };
+ enum InvalidationKind { NoEvalCall, EvalCallAsPure };
// The universal integral type to use in value range descriptions.
// Unsigned to make sure overflows are well-defined.
- typedef uint64_t RangeIntTy;
+ typedef uint64_t RangeInt;
/// Normally, describes a single range constraint, eg. {{0, 1}, {3, 4}} is
/// a non-negative integer, which less than 5 and not equal to 2. For
/// `ComparesToArgument', holds information about how exactly to compare to
/// the argument.
- typedef std::vector<std::pair<RangeIntTy, RangeIntTy>> IntRangeVectorTy;
+ typedef std::vector<std::pair<RangeInt, RangeInt>> IntRangeVector;
/// A reference to an argument or return value by its number.
/// ArgNo in CallExpr and CallEvent is defined as Unsigned, but
/// obviously uint32_t should be enough for all practical purposes.
- typedef uint32_t ArgNoTy;
- static const ArgNoTy Ret = std::numeric_limits<ArgNoTy>::max();
-
- /// Incapsulates a single range on a single symbol within a branch.
- class ValueRange {
- ArgNoTy ArgNo; // Argument to which we apply the range.
- ValueRangeKindTy Kind; // Kind of range definition.
- IntRangeVectorTy Args; // Polymorphic arguments.
-
+ typedef uint32_t ArgNo;
+ static const ArgNo Ret;
+
+ class ValueConstraint;
+
+  // Pointer to the ValueConstraint. We need a copyable, polymorphic and
+  // default-initializable type (std::vector needs that). A raw pointer would
+  // do, however we cannot default-initialize it. unique_ptr makes the Summary
+  // class non-copyable, therefore it is not an option. Relaxing the
+  // copyability requirement would render the initialization of the Summary
+  // map infeasible.
+ using ValueConstraintPtr = std::shared_ptr<ValueConstraint>;
+
+ /// Polymorphic base class that represents a constraint on a given argument
+  /// (or return value) of a function. Derived classes implement different
+  /// kinds of constraints, e.g. range constraints or correlations between two
+  /// arguments.
+ class ValueConstraint {
public:
- ValueRange(ArgNoTy ArgNo, ValueRangeKindTy Kind,
- const IntRangeVectorTy &Args)
- : ArgNo(ArgNo), Kind(Kind), Args(Args) {}
-
- ArgNoTy getArgNo() const { return ArgNo; }
- ValueRangeKindTy getKind() const { return Kind; }
-
- BinaryOperator::Opcode getOpcode() const {
- assert(Kind == ComparesToArgument);
- assert(Args.size() == 1);
- BinaryOperator::Opcode Op =
- static_cast<BinaryOperator::Opcode>(Args[0].first);
- assert(BinaryOperator::isComparisonOp(Op) &&
- "Only comparison ops are supported for ComparesToArgument");
- return Op;
+ ValueConstraint(ArgNo ArgN) : ArgN(ArgN) {}
+ virtual ~ValueConstraint() {}
+ /// Apply the effects of the constraint on the given program state. If null
+ /// is returned then the constraint is not feasible.
+ virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const = 0;
+ virtual ValueConstraintPtr negate() const {
+ llvm_unreachable("Not implemented");
+ };
+
+ // Check whether the constraint is malformed or not. It is malformed if the
+ // specified argument has a mismatch with the given FunctionDecl (e.g. the
+    // arg number is out of range for the function's argument list).
+ bool checkValidity(const FunctionDecl *FD) const {
+ const bool ValidArg = ArgN == Ret || ArgN < FD->getNumParams();
+ assert(ValidArg && "Arg out of range!");
+ if (!ValidArg)
+ return false;
+ // Subclasses may further refine the validation.
+ return checkSpecificValidity(FD);
}
+ ArgNo getArgNo() const { return ArgN; }
- ArgNoTy getOtherArgNo() const {
- assert(Kind == ComparesToArgument);
- assert(Args.size() == 1);
- return static_cast<ArgNoTy>(Args[0].second);
+ protected:
+ ArgNo ArgN; // Argument to which we apply the constraint.
+
+    /// Do a polymorphic sanity check on the constraint.
+ virtual bool checkSpecificValidity(const FunctionDecl *FD) const {
+ return true;
}
+ };
+
+ /// Given a range, should the argument stay inside or outside this range?
+ enum RangeKind { OutOfRange, WithinRange };
+
+ /// Encapsulates a single range on a single symbol within a branch.
+ class RangeConstraint : public ValueConstraint {
+ RangeKind Kind; // Kind of range definition.
+ IntRangeVector Args; // Polymorphic arguments.
- const IntRangeVectorTy &getRanges() const {
- assert(Kind != ComparesToArgument);
+ public:
+ RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Args)
+ : ValueConstraint(ArgN), Kind(Kind), Args(Args) {}
+
+ const IntRangeVector &getRanges() const {
return Args;
}
- // We avoid creating a virtual apply() method because
- // it makes initializer lists harder to write.
private:
- ProgramStateRef
- applyAsOutOfRange(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
- ProgramStateRef
- applyAsWithinRange(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
- ProgramStateRef
- applyAsComparesToArgument(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
-
+ ProgramStateRef applyAsOutOfRange(ProgramStateRef State,
+ const CallEvent &Call,
+ const Summary &Summary) const;
+ ProgramStateRef applyAsWithinRange(ProgramStateRef State,
+ const CallEvent &Call,
+ const Summary &Summary) const;
public:
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary,
+ CheckerContext &C) const override {
switch (Kind) {
case OutOfRange:
return applyAsOutOfRange(State, Call, Summary);
case WithinRange:
return applyAsWithinRange(State, Call, Summary);
- case ComparesToArgument:
- return applyAsComparesToArgument(State, Call, Summary);
}
- llvm_unreachable("Unknown ValueRange kind!");
+ llvm_unreachable("Unknown range kind!");
+ }
+
+ ValueConstraintPtr negate() const override {
+ RangeConstraint Tmp(*this);
+ switch (Kind) {
+ case OutOfRange:
+ Tmp.Kind = WithinRange;
+ break;
+ case WithinRange:
+ Tmp.Kind = OutOfRange;
+ break;
+ }
+ return std::make_shared<RangeConstraint>(Tmp);
+ }
+
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg =
+ getArgType(FD, ArgN)->isIntegralType(FD->getASTContext());
+ assert(ValidArg &&
+ "This constraint should be applied on an integral type");
+ return ValidArg;
}
};
- /// The complete list of ranges that defines a single branch.
- typedef std::vector<ValueRange> ValueRangeSet;
+ class ComparisonConstraint : public ValueConstraint {
+ BinaryOperator::Opcode Opcode;
+ ArgNo OtherArgN;
+
+ public:
+ ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
+ ArgNo OtherArgN)
+ : ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
+ ArgNo getOtherArgNo() const { return OtherArgN; }
+ BinaryOperator::Opcode getOpcode() const { return Opcode; }
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+ };
+
+ class NotNullConstraint : public ValueConstraint {
+ using ValueConstraint::ValueConstraint;
+    // This flag is flipped when the constraint is negated.
+ bool CannotBeNull = true;
- /// Includes information about function prototype (which is necessary to
- /// ensure we're modeling the right function and casting values properly),
- /// approach to invalidation, and a list of branches - essentially, a list
- /// of list of ranges - essentially, a list of lists of lists of segments.
- struct FunctionSummaryTy {
- const std::vector<QualType> ArgTypes;
- const QualType RetType;
- const InvalidationKindTy InvalidationKind;
- const std::vector<ValueRangeSet> Ranges;
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!L.getAs<Loc>())
+ return State;
+
+ return State->assume(L, CannotBeNull);
+ }
+
+ ValueConstraintPtr negate() const override {
+ NotNullConstraint Tmp(*this);
+ Tmp.CannotBeNull = !this->CannotBeNull;
+ return std::make_shared<NotNullConstraint>(Tmp);
+ }
+
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
+ assert(ValidArg &&
+ "This constraint should be applied only on a pointer type");
+ return ValidArg;
+ }
+ };
+
+ // Represents a buffer argument with an additional size argument.
+ // E.g. the first two arguments here:
+ // ctime_s(char *buffer, rsize_t bufsz, const time_t *time);
+ // Another example:
+ // size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream);
+ // // Here, ptr is the buffer, and its minimum size is `size * nmemb`.
+ class BufferSizeConstraint : public ValueConstraint {
+ // The argument which holds the size of the buffer.
+ ArgNo SizeArgN;
+    // The argument which is a multiplier to size. This is set for `fread`-like
+    // functions where the size is computed as a multiplication of
+ // two arguments.
+ llvm::Optional<ArgNo> SizeMultiplierArgN;
+ // The operator we use in apply. This is negated in negate().
+ BinaryOperator::Opcode Op = BO_LE;
+
+ public:
+ BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize)
+ : ValueConstraint(Buffer), SizeArgN(BufSize) {}
+
+ BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize, ArgNo BufSizeMultiplier)
+ : ValueConstraint(Buffer), SizeArgN(BufSize),
+ SizeMultiplierArgN(BufSizeMultiplier) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ // The buffer argument.
+ SVal BufV = getArgSVal(Call, getArgNo());
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(SizeArgN));
+ }
+      // The dynamic size of the buffer argument, obtained from the analyzer
+      // engine.
+ SVal BufDynSize = getDynamicSizeWithOffset(State, BufV);
+
+ SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
+ SvalBuilder.getContext().BoolTy);
+ if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
+ return State->assume(*F, true);
+
+ // We can get here only if the size argument or the dynamic size is
+ // undefined. But the dynamic size should never be undefined, only
+ // unknown. So, here, the size of the argument is undefined, i.e. we
+ // cannot apply the constraint. Actually, other checkers like
+ // CallAndMessage should catch this situation earlier, because we call a
+ // function with an uninitialized argument.
+ llvm_unreachable("Size argument or the dynamic size is Undefined");
+ }
+
+ ValueConstraintPtr negate() const override {
+ BufferSizeConstraint Tmp(*this);
+ Tmp.Op = BinaryOperator::negateComparisonOp(Op);
+ return std::make_shared<BufferSizeConstraint>(Tmp);
+ }
+ };
+
+ /// The complete list of constraints that defines a single branch.
+ typedef std::vector<ValueConstraintPtr> ConstraintSet;
+
+ using ArgTypes = std::vector<QualType>;
+
+  // A placeholder type; we use it whenever we do not care about the concrete
+ // type in a Signature.
+ const QualType Irrelevant{};
+ bool static isIrrelevant(QualType T) { return T.isNull(); }
+
+  // The signature of a function we want to describe with a summary. This is a
+  // lenient (concessive) signature: it may contain irrelevant types that are
+  // not checked against the concrete types of a candidate function.
+ struct Signature {
+ const ArgTypes ArgTys;
+ const QualType RetTy;
+ Signature(ArgTypes ArgTys, QualType RetTy) : ArgTys(ArgTys), RetTy(RetTy) {
+ assertRetTypeSuitableForSignature(RetTy);
+ for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
+ QualType ArgTy = ArgTys[I];
+ assertArgTypeSuitableForSignature(ArgTy);
+ }
+ }
+ bool matches(const FunctionDecl *FD) const;
private:
- static void assertTypeSuitableForSummary(QualType T) {
- assert(!T->isVoidType() &&
- "We should have had no significant void types in the spec");
- assert(T.isCanonical() &&
+ static void assertArgTypeSuitableForSignature(QualType T) {
+ assert((T.isNull() || !T->isVoidType()) &&
+ "We should have no void types in the spec");
+ assert((T.isNull() || T.isCanonical()) &&
+ "We should only have canonical types in the spec");
+ }
+ static void assertRetTypeSuitableForSignature(QualType T) {
+ assert((T.isNull() || T.isCanonical()) &&
"We should only have canonical types in the spec");
- // FIXME: lift this assert (but not the ones above!)
- assert(T->isIntegralOrEnumerationType() &&
- "We only support integral ranges in the spec");
}
+ };
+
+ static QualType getArgType(const FunctionDecl *FD, ArgNo ArgN) {
+ assert(FD && "Function must be set");
+ QualType T = (ArgN == Ret)
+ ? FD->getReturnType().getCanonicalType()
+ : FD->getParamDecl(ArgN)->getType().getCanonicalType();
+ return T;
+ }
+
+ using Cases = std::vector<ConstraintSet>;
+
+ /// A summary includes information about
+ /// * function prototype (signature)
+ /// * approach to invalidation,
+  ///   * a list of branches - a list of lists of ranges -
+ /// A branch represents a path in the exploded graph of a function (which
+ /// is a tree). So, a branch is a series of assumptions. In other words,
+ /// branches represent split states and additional assumptions on top of
+ /// the splitting assumption.
+ /// For example, consider the branches in `isalpha(x)`
+ /// Branch 1)
+ /// x is in range ['A', 'Z'] or in ['a', 'z']
+ /// then the return value is not 0. (I.e. out-of-range [0, 0])
+ /// Branch 2)
+ /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
+ /// then the return value is 0.
+  ///   * a list of argument constraints that must hold on every branch.
+  ///     If these constraints are not satisfied, that indicates a fatal error,
+  ///     usually resulting in undefined behaviour.
+ ///
+ /// Application of a summary:
+ /// The signature and argument constraints together contain information
+ /// about which functions are handled by the summary. The signature can use
+ /// "wildcards", i.e. Irrelevant types. Irrelevant type of a parameter in
+ /// a signature means that type is not compared to the type of the parameter
+ /// in the found FunctionDecl. Argument constraints may specify additional
+ /// rules for the given parameter's type, those rules are checked once the
+ /// signature is matched.
+ class Summary {
+ const Signature Sign;
+ const InvalidationKind InvalidationKd;
+ Cases CaseConstraints;
+ ConstraintSet ArgConstraints;
+
+    // The function to which the summary applies. This is set after lookup,
+    // once the signature has been matched.
+ const FunctionDecl *FD = nullptr;
public:
- QualType getArgType(ArgNoTy ArgNo) const {
- QualType T = (ArgNo == Ret) ? RetType : ArgTypes[ArgNo];
- assertTypeSuitableForSummary(T);
- return T;
+ Summary(ArgTypes ArgTys, QualType RetTy, InvalidationKind InvalidationKd)
+ : Sign(ArgTys, RetTy), InvalidationKd(InvalidationKd) {}
+
+ Summary &Case(ConstraintSet&& CS) {
+ CaseConstraints.push_back(std::move(CS));
+ return *this;
+ }
+ Summary &ArgConstraint(ValueConstraintPtr VC) {
+ ArgConstraints.push_back(VC);
+ return *this;
}
- /// Try our best to figure out if the call expression is the call of
- /// *the* library function to which this specification applies.
- bool matchesCall(const CallExpr *CE) const;
- };
+ InvalidationKind getInvalidationKd() const { return InvalidationKd; }
+ const Cases &getCaseConstraints() const { return CaseConstraints; }
+ const ConstraintSet &getArgConstraints() const { return ArgConstraints; }
- // The same function (as in, function identifier) may have different
- // summaries assigned to it, with different argument and return value types.
- // We call these "variants" of the function. This can be useful for handling
- // C++ function overloads, and also it can be used when the same function
- // may have different definitions on different platforms.
- typedef std::vector<FunctionSummaryTy> FunctionVariantsTy;
+ QualType getArgType(ArgNo ArgN) const {
+ return StdLibraryFunctionsChecker::getArgType(FD, ArgN);
+ }
+
+    // Returns true if the summary should be applied to the given function,
+    // and if so, stores the function declaration.
+ bool matchesAndSet(const FunctionDecl *FD) {
+ bool Result = Sign.matches(FD) && validateByConstraints(FD);
+ if (Result) {
+ assert(!this->FD && "FD must not be set more than once");
+ this->FD = FD;
+ }
+ return Result;
+ }
+
+ private:
+    // Once we know the exact type of the function, do a sanity check on all
+    // the given constraints.
+ bool validateByConstraints(const FunctionDecl *FD) const {
+ for (const ConstraintSet &Case : CaseConstraints)
+ for (const ValueConstraintPtr &Constraint : Case)
+ if (!Constraint->checkValidity(FD))
+ return false;
+ for (const ValueConstraintPtr &Constraint : ArgConstraints)
+ if (!Constraint->checkValidity(FD))
+ return false;
+ return true;
+ }
+ };
// The map of all functions supported by the checker. It is initialized
// lazily, and it doesn't change after initialization.
- typedef llvm::StringMap<FunctionVariantsTy> FunctionSummaryMapTy;
- mutable FunctionSummaryMapTy FunctionSummaryMap;
+ using FunctionSummaryMapType = llvm::DenseMap<const FunctionDecl *, Summary>;
+ mutable FunctionSummaryMapType FunctionSummaryMap;
- // Auxiliary functions to support ArgNoTy within all structures
- // in a unified manner.
- static QualType getArgType(const FunctionSummaryTy &Summary, ArgNoTy ArgNo) {
- return Summary.getArgType(ArgNo);
- }
- static QualType getArgType(const CallEvent &Call, ArgNoTy ArgNo) {
- return ArgNo == Ret ? Call.getResultType().getCanonicalType()
- : Call.getArgExpr(ArgNo)->getType().getCanonicalType();
- }
- static QualType getArgType(const CallExpr *CE, ArgNoTy ArgNo) {
- return ArgNo == Ret ? CE->getType().getCanonicalType()
- : CE->getArg(ArgNo)->getType().getCanonicalType();
- }
- static SVal getArgSVal(const CallEvent &Call, ArgNoTy ArgNo) {
- return ArgNo == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgNo);
+ mutable std::unique_ptr<BugType> BT_InvalidArg;
+
+ static SVal getArgSVal(const CallEvent &Call, ArgNo ArgN) {
+ return ArgN == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgN);
}
public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
-private:
- Optional<FunctionSummaryTy> findFunctionSummary(const FunctionDecl *FD,
- const CallExpr *CE,
- CheckerContext &C) const;
+ enum CheckKind {
+ CK_StdCLibraryFunctionArgsChecker,
+ CK_StdCLibraryFunctionsTesterChecker,
+ CK_NumCheckKinds
+ };
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+
+ bool DisplayLoadedSummaries = false;
+ bool ModelPOSIX = false;
- void initFunctionSummaries(BasicValueFactory &BVF) const;
+private:
+ Optional<Summary> findFunctionSummary(const FunctionDecl *FD,
+ CheckerContext &C) const;
+ Optional<Summary> findFunctionSummary(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void initFunctionSummaries(CheckerContext &C) const;
+
+ void reportBug(const CallEvent &Call, ExplodedNode *N,
+ CheckerContext &C) const {
+ if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
+ return;
+ // TODO Add detailed diagnostic.
+ StringRef Msg = "Function argument constraint is not satisfied";
+ if (!BT_InvalidArg)
+ BT_InvalidArg = std::make_unique<BugType>(
+ CheckNames[CK_StdCLibraryFunctionArgsChecker],
+ "Unsatisfied argument constraints", categories::LogicError);
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *R);
+ C.emitReport(std::move(R));
+ }
};
+
+const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
+ std::numeric_limits<ArgNo>::max();
+
} // end of anonymous namespace
-ProgramStateRef StdLibraryFunctionsChecker::ValueRange::applyAsOutOfRange(
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVectorTy &R = getRanges();
+ const IntRangeVector &R = getRanges();
size_t E = R.size();
for (size_t I = 0; I != E; ++I) {
const llvm::APSInt &Min = BVF.getValue(R[I].first, T);
@@ -262,23 +519,28 @@ ProgramStateRef StdLibraryFunctionsChecker::ValueRange::applyAsOutOfRange(
return State;
}
-ProgramStateRef
-StdLibraryFunctionsChecker::ValueRange::applyAsWithinRange(
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsWithinRange(
ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
// "WithinRange R" is treated as "outside [T_MIN, T_MAX] \ R".
// We cut off [T_MIN, min(R) - 1] and [max(R) + 1, T_MAX] if necessary,
// and then cut away all holes in R one by one.
+ //
+ // E.g. consider a range list R as [A, B] and [C, D]
+ // -------+--------+------------------+------------+----------->
+ // A B C D
+ // Then we assume that the value is not in [-inf, A - 1],
+ // then not in [D + 1, +inf], then not in [B + 1, C - 1]
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVectorTy &R = getRanges();
+ const IntRangeVector &R = getRanges();
size_t E = R.size();
const llvm::APSInt &MinusInf = BVF.getMinValue(T);
@@ -303,31 +565,31 @@ StdLibraryFunctionsChecker::ValueRange::applyAsWithinRange(
for (size_t I = 1; I != E; ++I) {
const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, T);
const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, T);
- assert(Min <= Max);
- State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- return nullptr;
+ if (Min <= Max) {
+ State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
+ if (!State)
+ return nullptr;
+ }
}
}
return State;
}
-ProgramStateRef
-StdLibraryFunctionsChecker::ValueRange::applyAsComparesToArgument(
- ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
QualType CondT = SVB.getConditionType();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
BinaryOperator::Opcode Op = getOpcode();
- ArgNoTy OtherArg = getOtherArgNo();
+ ArgNo OtherArg = getOtherArgNo();
SVal OtherV = getArgSVal(Call, OtherArg);
- QualType OtherT = getArgType(Call, OtherArg);
+ QualType OtherT = Summary.getArgType(OtherArg);
// Note: we avoid integral promotion for comparison.
OtherV = SVB.evalCast(OtherV, T, OtherT);
if (auto CompV = SVB.evalBinOp(State, Op, V, OtherV, CondT)
@@ -336,28 +598,53 @@ StdLibraryFunctionsChecker::ValueRange::applyAsComparesToArgument(
return State;
}
-void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD)
+void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
+ if (!FoundSummary)
return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
+ const Summary &Summary = *FoundSummary;
+ ProgramStateRef State = C.getState();
+
+ ProgramStateRef NewState = State;
+ for (const ValueConstraintPtr &Constraint : Summary.getArgConstraints()) {
+ ProgramStateRef SuccessSt = Constraint->apply(NewState, Call, Summary, C);
+ ProgramStateRef FailureSt =
+ Constraint->negate()->apply(NewState, Call, Summary, C);
+ // The argument constraint is not satisfied.
+ if (FailureSt && !SuccessSt) {
+ if (ExplodedNode *N = C.generateErrorNode(NewState))
+ reportBug(Call, N, C);
+ break;
+ } else {
+ // We will apply the constraint even if we cannot reason about the
+ // argument. This means both SuccessSt and FailureSt can be true. If we
+      // weren't applying the constraint, symbolic execution would continue on
+      // code whose behaviour is undefined.
+ assert(SuccessSt);
+ NewState = SuccessSt;
+ }
+ }
+ if (NewState && NewState != State)
+ C.addTransition(NewState);
+}
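
To make the success/failure logic above concrete, a hedged sketch of analyzed code, assuming some summary attaches a NotNullConstraint to the first argument of a function f (f and its summary are assumptions; the concrete specifications are registered later in initFunctionSummaries):

    extern void f(int *Buf); // assumed: summary with NotNullConstraint on arg 0

    void caller(int *P) {
      f(nullptr); // SuccessSt infeasible, FailureSt feasible:
                  //   "Function argument constraint is not satisfied"
      f(P);       // nothing known about P: both states feasible, so the
                  // constraint is applied and P is assumed non-null afterwards
    }
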
- Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return;
- // Now apply ranges.
- const FunctionSummaryTy &Summary = *FoundSummary;
+ // Now apply the constraints.
+ const Summary &Summary = *FoundSummary;
ProgramStateRef State = C.getState();
- for (const auto &VRS: Summary.Ranges) {
+ // Apply case/branch specifications.
+ for (const ConstraintSet &Case : Summary.getCaseConstraints()) {
ProgramStateRef NewState = State;
- for (const auto &VR: VRS) {
- NewState = VR.apply(NewState, Call, Summary);
+ for (const ValueConstraintPtr &Constraint : Case) {
+ NewState = Constraint->apply(NewState, Call, Summary, C);
if (!NewState)
break;
}
@@ -369,23 +656,16 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD)
- return false;
-
- const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return false;
-
- Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return false;
- const FunctionSummaryTy &Summary = *FoundSummary;
- switch (Summary.InvalidationKind) {
+ const Summary &Summary = *FoundSummary;
+ switch (Summary.getInvalidationKd()) {
case EvalCallAsPure: {
ProgramStateRef State = C.getState();
const LocationContext *LC = C.getLocationContext();
+ const auto *CE = cast_or_null<CallExpr>(Call.getOriginExpr());
SVal V = C.getSValBuilder().conjureSymbolVal(
CE, LC, CE->getType().getCanonicalType(), C.blockCount());
State = State->BindExpr(CE, LC, V);
@@ -400,79 +680,86 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
llvm_unreachable("Unknown invalidation kind!");
}
-bool StdLibraryFunctionsChecker::FunctionSummaryTy::matchesCall(
- const CallExpr *CE) const {
+bool StdLibraryFunctionsChecker::Signature::matches(
+ const FunctionDecl *FD) const {
// Check number of arguments:
- if (CE->getNumArgs() != ArgTypes.size())
+ if (FD->param_size() != ArgTys.size())
return false;
- // Check return type if relevant:
- if (!RetType.isNull() && RetType != CE->getType().getCanonicalType())
- return false;
+ // Check return type.
+ if (!isIrrelevant(RetTy))
+ if (RetTy != FD->getReturnType().getCanonicalType())
+ return false;
- // Check argument types when relevant:
- for (size_t I = 0, E = ArgTypes.size(); I != E; ++I) {
- QualType FormalT = ArgTypes[I];
- // Null type marks irrelevant arguments.
- if (FormalT.isNull())
+ // Check argument types.
+ for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
+ QualType ArgTy = ArgTys[I];
+ if (isIrrelevant(ArgTy))
continue;
-
- assertTypeSuitableForSummary(FormalT);
-
- QualType ActualT = StdLibraryFunctionsChecker::getArgType(CE, I);
- assert(ActualT.isCanonical());
- if (ActualT != FormalT)
+ if (ArgTy != FD->getParamDecl(I)->getType().getCanonicalType())
return false;
}
return true;
}
-Optional<StdLibraryFunctionsChecker::FunctionSummaryTy>
+Optional<StdLibraryFunctionsChecker::Summary>
StdLibraryFunctionsChecker::findFunctionSummary(const FunctionDecl *FD,
- const CallExpr *CE,
CheckerContext &C) const {
- // Note: we cannot always obtain FD from CE
- // (eg. virtual call, or call by pointer).
- assert(CE);
-
if (!FD)
return None;
- SValBuilder &SVB = C.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
- initFunctionSummaries(BVF);
+ initFunctionSummaries(C);
- IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return None;
- StringRef Name = II->getName();
- if (Name.empty() || !C.isCLibraryFunction(FD, Name))
+ auto FSMI = FunctionSummaryMap.find(FD->getCanonicalDecl());
+ if (FSMI == FunctionSummaryMap.end())
return None;
+ return FSMI->second;
+}
- auto FSMI = FunctionSummaryMap.find(Name);
- if (FSMI == FunctionSummaryMap.end())
+Optional<StdLibraryFunctionsChecker::Summary>
+StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD)
return None;
+ return findFunctionSummary(FD, C);
+}
- // Verify that function signature matches the spec in advance.
- // Otherwise we might be modeling the wrong function.
- // Strict checking is important because we will be conducting
- // very integral-type-sensitive operations on arguments and
- // return values.
- const FunctionVariantsTy &SpecVariants = FSMI->second;
- for (const FunctionSummaryTy &Spec : SpecVariants)
- if (Spec.matchesCall(CE))
- return Spec;
+static llvm::Optional<QualType> lookupType(StringRef Name,
+ const ASTContext &ACtx) {
+ IdentifierInfo &II = ACtx.Idents.get(Name);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ if (LookupRes.size() == 0)
+ return None;
+  // Prioritize typedef declarations.
+ // This is needed in case of C struct typedefs. E.g.:
+ // typedef struct FILE FILE;
+ // In this case, we have a RecordDecl 'struct FILE' with the name 'FILE' and
+ // we have a TypedefDecl with the name 'FILE'.
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
+
+ // Find the first TypeDecl.
+  // There may be cases when a function has the same name as a struct.
+ // E.g. in POSIX: `struct stat` and the function `stat()`:
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypeDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
return None;
}
void StdLibraryFunctionsChecker::initFunctionSummaries(
- BasicValueFactory &BVF) const {
+ CheckerContext &C) const {
if (!FunctionSummaryMap.empty())
return;
- ASTContext &ACtx = BVF.getContext();
+ SValBuilder &SVB = C.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ const ASTContext &ACtx = BVF.getContext();
   // These types are useful for writing specifications quickly.
// New specifications should probably introduce more types.
@@ -481,15 +768,105 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// of function summary for common cases (eg. ssize_t could be int or long
// or long long, so three summary variants would be enough).
// Of course, function variants are also useful for C++ overloads.
- QualType Irrelevant; // A placeholder, whenever we do not care about the type.
- QualType IntTy = ACtx.IntTy;
- QualType LongTy = ACtx.LongTy;
- QualType LongLongTy = ACtx.LongLongTy;
- QualType SizeTy = ACtx.getSizeType();
+ const QualType VoidTy = ACtx.VoidTy;
+ const QualType IntTy = ACtx.IntTy;
+ const QualType UnsignedIntTy = ACtx.UnsignedIntTy;
+ const QualType LongTy = ACtx.LongTy;
+ const QualType LongLongTy = ACtx.LongLongTy;
+ const QualType SizeTy = ACtx.getSizeType();
+
+ const QualType VoidPtrTy = ACtx.VoidPtrTy; // void *
+ const QualType IntPtrTy = ACtx.getPointerType(IntTy); // int *
+ const QualType UnsignedIntPtrTy =
+ ACtx.getPointerType(UnsignedIntTy); // unsigned int *
+ const QualType VoidPtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(VoidPtrTy) // void *restrict
+ : VoidPtrTy;
+ const QualType ConstVoidPtrTy =
+ ACtx.getPointerType(ACtx.VoidTy.withConst()); // const void *
+ const QualType CharPtrTy = ACtx.getPointerType(ACtx.CharTy); // char *
+ const QualType CharPtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(CharPtrTy) // char *restrict
+ : CharPtrTy;
+ const QualType ConstCharPtrTy =
+ ACtx.getPointerType(ACtx.CharTy.withConst()); // const char *
+ const QualType ConstCharPtrRestrictTy =
+ ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(ConstCharPtrTy) // const char *restrict
+ : ConstCharPtrTy;
+ const QualType Wchar_tPtrTy = ACtx.getPointerType(ACtx.WCharTy); // wchar_t *
+ const QualType ConstWchar_tPtrTy =
+ ACtx.getPointerType(ACtx.WCharTy.withConst()); // const wchar_t *
+ const QualType ConstVoidPtrRestrictTy =
+ ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(ConstVoidPtrTy) // const void *restrict
+ : ConstVoidPtrTy;
+
+ const RangeInt IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
+ const RangeInt UnsignedIntMax =
+ BVF.getMaxValue(UnsignedIntTy).getLimitedValue();
+ const RangeInt LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
+ const RangeInt LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
+ const RangeInt SizeMax = BVF.getMaxValue(SizeTy).getLimitedValue();
+
+  // Set UCharRangeMax to the minimum of the int and unsigned char maximum
+  // values. The C standard states that the arguments of functions like
+  // isalpha must be representable as an unsigned char. Their type is 'int',
+  // so the max value of the argument should be min(UCharMax, IntMax). This
+  // just happens to be true for commonly used and well-tested instruction set
+  // architectures, but not for others.
+ const RangeInt UCharRangeMax =
+ std::min(BVF.getMaxValue(ACtx.UnsignedCharTy).getLimitedValue(), IntMax);
+
+  // The platform-dependent value of EOF.
+  // Try our best to parse this from the Preprocessor; otherwise fall back to -1.
+ const auto EOFv = [&C]() -> RangeInt {
+ if (const llvm::Optional<int> OptInt =
+ tryExpandAsInteger("EOF", C.getPreprocessor()))
+ return *OptInt;
+ return -1;
+ }();
+
+ // Auxiliary class to aid adding summaries to the summary map.
+ struct AddToFunctionSummaryMap {
+ const ASTContext &ACtx;
+ FunctionSummaryMapType &Map;
+ bool DisplayLoadedSummaries;
+ AddToFunctionSummaryMap(const ASTContext &ACtx, FunctionSummaryMapType &FSM,
+ bool DisplayLoadedSummaries)
+ : ACtx(ACtx), Map(FSM), DisplayLoadedSummaries(DisplayLoadedSummaries) {
+ }
- RangeIntTy IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
- RangeIntTy LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
- RangeIntTy LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
+ // Add a summary to a FunctionDecl found by lookup. The lookup is performed
+ // by the given Name, and in the global scope. The summary will be attached
+ // to the found FunctionDecl only if the signatures match.
+ void operator()(StringRef Name, Summary S) {
+ IdentifierInfo &II = ACtx.Idents.get(Name);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ if (LookupRes.size() == 0)
+ return;
+ for (Decl *D : LookupRes) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (S.matchesAndSet(FD)) {
+ auto Res = Map.insert({FD->getCanonicalDecl(), S});
+ assert(Res.second && "Function already has a summary set!");
+ (void)Res;
+ if (DisplayLoadedSummaries) {
+ llvm::errs() << "Loaded summary for: ";
+ FD->print(llvm::errs());
+ llvm::errs() << "\n";
+ }
+ return;
+ }
+ }
+ }
+ }
+ // Add several summaries for the given name.
+ void operator()(StringRef Name, const std::vector<Summary> &Summaries) {
+ for (const Summary &S : Summaries)
+ operator()(Name, S);
+ }
+ } addToFunctionSummaryMap(ACtx, FunctionSummaryMap, DisplayLoadedSummaries);
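
As a hedged sketch (not the checker's literal table), a single specification could be registered through the machinery above as follows, mirroring the semantics of the old macro-based isascii entry removed further below; the real specifications that follow may use additional convenience helpers not shown here:

    // Sketch only: isascii(x) returns non-zero iff x is in [0, 127].
    addToFunctionSummaryMap(
        "isascii",
        Summary(ArgTypes{IntTy}, IntTy, EvalCallAsPure)
            // x in [0, 127] implies a non-zero return value.
            .Case({std::make_shared<RangeConstraint>(ArgNo(0), WithinRange,
                                                     IntRangeVector{{0, 127}}),
                   std::make_shared<RangeConstraint>(Ret, OutOfRange,
                                                     IntRangeVector{{0, 0}})})
            // x outside [0, 127] implies a zero return value.
            .Case({std::make_shared<RangeConstraint>(ArgNo(0), OutOfRange,
                                                     IntRangeVector{{0, 127}}),
                   std::make_shared<RangeConstraint>(Ret, WithinRange,
                                                     IntRangeVector{{0, 0}})}));
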
// We are finally ready to define specifications for all supported functions.
//
@@ -516,550 +893,876 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// return value, however the correct range is [-1, 10].
//
// Please update the list of functions in the header after editing!
- //
- // The format is as follows:
- //
- //{ "function name",
- // { spec:
- // { argument types list, ... },
- // return type, purity, { range set list:
- // { range list:
- // { argument index, within or out of, {{from, to}, ...} },
- // { argument index, compares to argument, {{how, which}} },
- // ...
- // }
- // }
- // }
- //}
-
-#define SUMMARY_WITH_VARIANTS(identifier) {#identifier, {
-#define END_SUMMARY_WITH_VARIANTS }},
-#define VARIANT(argument_types, return_type, invalidation_approach) \
- { argument_types, return_type, invalidation_approach, {
-#define END_VARIANT } },
-#define SUMMARY(identifier, argument_types, return_type, \
- invalidation_approach) \
- { #identifier, { { argument_types, return_type, invalidation_approach, {
-#define END_SUMMARY } } } },
-#define ARGUMENT_TYPES(...) { __VA_ARGS__ }
-#define RETURN_TYPE(x) x
-#define INVALIDATION_APPROACH(x) x
-#define CASE {
-#define END_CASE },
-#define ARGUMENT_CONDITION(argument_number, condition_kind) \
- { argument_number, condition_kind, {
-#define END_ARGUMENT_CONDITION }},
-#define RETURN_VALUE_CONDITION(condition_kind) \
- { Ret, condition_kind, {
-#define END_RETURN_VALUE_CONDITION }},
-#define ARG_NO(x) x##U
-#define RANGE(x, y) { x, y },
-#define SINGLE_VALUE(x) RANGE(x, x)
-#define IS_LESS_THAN(arg) { BO_LE, arg }
-
- FunctionSummaryMap = {
- // The isascii() family of functions.
- SUMMARY(isalnum, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Boils down to isupper() or islower() or isdigit()
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- RANGE('A', 'Z')
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- // No post-condition. We are completely unaware of
- // locale-specific return values.
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- RANGE('A', 'Z')
- RANGE('a', 'z')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isalpha, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // isupper() or islower(). Note that 'Z' is less than 'a'.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('A', 'Z')
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Other.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('A', 'Z')
- RANGE('a', 'z')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isascii, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is ASCII.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isblank, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- SINGLE_VALUE('\t')
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- SINGLE_VALUE('\t')
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(iscntrl, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // 0..31 or 127
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 32)
- SINGLE_VALUE(127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 32)
- SINGLE_VALUE(127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is a digit.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isgraph, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(33, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(33, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(islower, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is certainly lowercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // Is ascii but not lowercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Is not an unsigned char.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isprint, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(32, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(32, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(ispunct, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('!', '/')
- RANGE(':', '@')
- RANGE('[', '`')
- RANGE('{', '~')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('!', '/')
- RANGE(':', '@')
- RANGE('[', '`')
- RANGE('{', '~')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isspace, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Space, '\f', '\n', '\r', '\t', '\v'.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(9, 13)
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(9, 13)
- SINGLE_VALUE(' ')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isupper, ARGUMENT_TYPES(IntTy), RETURN_TYPE (IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is certainly uppercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('A', 'Z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Other.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('A', 'Z') RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isxdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- RANGE('A', 'F')
- RANGE('a', 'f')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- RANGE('A', 'F')
- RANGE('a', 'f')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // The getc() family of functions that returns either a char or an EOF.
- SUMMARY(getc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(fgetc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(getchar, ARGUMENT_TYPES(), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // read()-like functions that never return more than buffer size.
- // We are not sure how ssize_t is defined on every platform, so we provide
- // three variants that should cover common cases.
- SUMMARY_WITH_VARIANTS(read)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY_WITH_VARIANTS(write)
- // Again, due to elusive nature of ssize_t, we have duplicate
- // our summaries to cover different variants.
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY(fread,
- ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
- RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(fwrite,
- ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
- RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // getline()-like functions either fail or read at least the delimiter.
- SUMMARY_WITH_VARIANTS(getline)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY_WITH_VARIANTS(getdelim)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
+
+  // Below are helper functions to create the summaries.
+ auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind,
+ IntRangeVector Ranges) {
+ return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges);
+ };
+ auto BufferSize = [](auto... Args) {
+ return std::make_shared<BufferSizeConstraint>(Args...);
+ };
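+  // ReturnValueCondition is overloaded: given a RangeKind it constrains the
+  // range of the return value, and given a comparison opcode it relates the
+  // return value to another argument of the call.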
+ struct {
+ auto operator()(RangeKind Kind, IntRangeVector Ranges) {
+ return std::make_shared<RangeConstraint>(Ret, Kind, Ranges);
+ }
+ auto operator()(BinaryOperator::Opcode Op, ArgNo OtherArgN) {
+ return std::make_shared<ComparisonConstraint>(Ret, Op, OtherArgN);
+ }
+ } ReturnValueCondition;
+ auto Range = [](RangeInt b, RangeInt e) {
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{b, e}};
+ };
+ auto SingleValue = [](RangeInt v) {
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{v, v}};
};
+ auto LessThanOrEq = BO_LE;
+ auto NotNull = [&](ArgNo ArgN) {
+ return std::make_shared<NotNullConstraint>(ArgN);
+ };
+
+ Optional<QualType> FileTy = lookupType("FILE", ACtx);
+ Optional<QualType> FilePtrTy, FilePtrRestrictTy;
+ if (FileTy) {
+ // FILE *
+ FilePtrTy = ACtx.getPointerType(*FileTy);
+ // FILE *restrict
+ FilePtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(*FilePtrTy) : *FilePtrTy;
+ }
+
+ using RetType = QualType;
+ // Templates for summaries that are reused by many functions.
+ auto Getc = [&]() {
+ return Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}})});
+ };
+ auto Read = [&](RetType R, RangeInt Max) {
+ return Summary(ArgTypes{Irrelevant, Irrelevant, SizeTy}, RetType{R},
+ NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Max))});
+ };
+ auto Fread = [&]() {
+ return Summary(
+ ArgTypes{VoidPtrRestrictTy, SizeTy, SizeTy, *FilePtrRestrictTy},
+ RetType{SizeTy}, NoEvalCall)
+ .Case({
+ ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ })
+ .ArgConstraint(NotNull(ArgNo(0)));
+ };
+ auto Fwrite = [&]() {
+ return Summary(ArgTypes{ConstVoidPtrRestrictTy, SizeTy, SizeTy,
+ *FilePtrRestrictTy},
+ RetType{SizeTy}, NoEvalCall)
+ .Case({
+ ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ })
+ .ArgConstraint(NotNull(ArgNo(0)));
+ };
+ auto Getline = [&](RetType R, RangeInt Max) {
+ return Summary(ArgTypes{Irrelevant, Irrelevant, Irrelevant}, RetType{R},
+ NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, {{-1, -1}, {1, Max}})});
+ };
+
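+  // In the summaries below, each Case lists conditions that hold together on
+  // one modeled branch of the function: argument conditions are assumed on
+  // that branch and return-value conditions describe the result there. An
+  // ArgConstraint describes a precondition on an argument of the call.
+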
+ // The isascii() family of functions.
+  // The behavior is undefined if the value of the argument is neither
+  // representable as unsigned char nor equal to EOF. See e.g. C99
+  // 7.4.1.2 The isalpha function (p: 181-182).
+ addToFunctionSummaryMap(
+ "isalnum",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Boils down to isupper() or islower() or isdigit().
+ .Case({ArgumentCondition(0U, WithinRange,
+ {{'0', '9'}, {'A', 'Z'}, {'a', 'z'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ // No post-condition. We are completely unaware of
+ // locale-specific return values.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case(
+ {ArgumentCondition(
+ 0U, OutOfRange,
+ {{'0', '9'}, {'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))})
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ addToFunctionSummaryMap(
+ "isalpha",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{'A', 'Z'}, {'a', 'z'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(
+ 0U, OutOfRange,
+ {{'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isascii",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(0, 127)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isblank",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{'\t', '\t'}, {' ', ' '}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, {{'\t', '\t'}, {' ', ' '}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "iscntrl",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{0, 32}, {127, 127}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, {{0, 32}, {127, 127}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isdigit",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range('0', '9')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range('0', '9')),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isgraph",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(33, 126)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(33, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "islower",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Is certainly lowercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range('a', 'z')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // Is ascii but not lowercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
+ ArgumentCondition(0U, OutOfRange, Range('a', 'z')),
+ ReturnValueCondition(WithinRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ // Is not an unsigned char.
+ .Case({ArgumentCondition(0U, OutOfRange, Range(0, UCharRangeMax)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isprint",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(32, 126)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(32, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "ispunct",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(
+ 0U, WithinRange,
+ {{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(
+ 0U, OutOfRange,
+ {{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isspace",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Space, '\f', '\n', '\r', '\t', '\v'.
+ .Case({ArgumentCondition(0U, WithinRange, {{9, 13}, {' ', ' '}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{9, 13}, {' ', ' '}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isupper",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Is certainly uppercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range('A', 'Z')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ // Other.
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{'A', 'Z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isxdigit",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange,
+ {{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+
+  // The getc() family of functions, which return either a character or EOF.
+ if (FilePtrTy) {
+ addToFunctionSummaryMap("getc", Getc());
+ addToFunctionSummaryMap("fgetc", Getc());
+ }
+ addToFunctionSummaryMap(
+ "getchar", Summary(ArgTypes{}, RetType{IntTy}, NoEvalCall)
+ .Case({ReturnValueCondition(
+ WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+
+ // read()-like functions that never return more than buffer size.
+ if (FilePtrRestrictTy) {
+ addToFunctionSummaryMap("fread", Fread());
+ addToFunctionSummaryMap("fwrite", Fwrite());
+ }
+
+ // We are not sure how ssize_t is defined on every platform, so we
+ // provide three variants that should cover common cases.
+  // FIXME: these are actually defined by POSIX, not by the C standard; we
+  // should handle them together with the rest of the POSIX functions.
+ addToFunctionSummaryMap("read", {Read(IntTy, IntMax), Read(LongTy, LongMax),
+ Read(LongLongTy, LongLongMax)});
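+  // write() returns at most the byte count passed in argument 2, or -1 on
+  // error, which is the same shape as read(); hence the Read summaries are
+  // reused for it.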
+ addToFunctionSummaryMap("write", {Read(IntTy, IntMax), Read(LongTy, LongMax),
+ Read(LongLongTy, LongLongMax)});
+
+ // getline()-like functions either fail or read at least the delimiter.
+  // FIXME: these are actually defined by POSIX, not by the C standard; we
+  // should handle them together with the rest of the POSIX functions.
+ addToFunctionSummaryMap("getline",
+ {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
+ Getline(LongLongTy, LongLongMax)});
+ addToFunctionSummaryMap("getdelim",
+ {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
+ Getline(LongLongTy, LongLongMax)});
+
+ if (ModelPOSIX) {
+
+ // long a64l(const char *str64);
+ addToFunctionSummaryMap(
+ "a64l", Summary(ArgTypes{ConstCharPtrTy}, RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *l64a(long value);
+ addToFunctionSummaryMap(
+ "l64a", Summary(ArgTypes{LongTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, LongMax))));
+
+ // int access(const char *pathname, int amode);
+ addToFunctionSummaryMap("access", Summary(ArgTypes{ConstCharPtrTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int faccessat(int dirfd, const char *pathname, int mode, int flags);
+ addToFunctionSummaryMap(
+ "faccessat", Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int dup(int fildes);
+ addToFunctionSummaryMap(
+ "dup", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // int dup2(int fildes1, int filedes2);
+ addToFunctionSummaryMap(
+ "dup2",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, IntMax))));
+
+ // int fdatasync(int fildes);
+ addToFunctionSummaryMap(
+ "fdatasync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+
+ // int fnmatch(const char *pattern, const char *string, int flags);
+ addToFunctionSummaryMap(
+ "fnmatch", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fsync(int fildes);
+ addToFunctionSummaryMap(
+ "fsync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ Optional<QualType> Off_tTy = lookupType("off_t", ACtx);
+
+ if (Off_tTy)
+ // int truncate(const char *path, off_t length);
+ addToFunctionSummaryMap("truncate",
+ Summary(ArgTypes{ConstCharPtrTy, *Off_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int symlink(const char *oldpath, const char *newpath);
+ addToFunctionSummaryMap("symlink",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int symlinkat(const char *oldpath, int newdirfd, const char *newpath);
+ addToFunctionSummaryMap(
+ "symlinkat",
+ Summary(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+
+ if (Off_tTy)
+ // int lockf(int fd, int cmd, off_t len);
+ addToFunctionSummaryMap(
+ "lockf",
+ Summary(ArgTypes{IntTy, IntTy, *Off_tTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ Optional<QualType> Mode_tTy = lookupType("mode_t", ACtx);
+
+ if (Mode_tTy)
+ // int creat(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap("creat",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // unsigned int sleep(unsigned int seconds);
+ addToFunctionSummaryMap(
+ "sleep",
+ Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
+
+ Optional<QualType> DirTy = lookupType("DIR", ACtx);
+ Optional<QualType> DirPtrTy;
+ if (DirTy)
+ DirPtrTy = ACtx.getPointerType(*DirTy);
+
+ if (DirPtrTy)
+ // int dirfd(DIR *dirp);
+ addToFunctionSummaryMap(
+ "dirfd", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // unsigned int alarm(unsigned int seconds);
+ addToFunctionSummaryMap(
+ "alarm",
+ Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
+
+ if (DirPtrTy)
+ // int closedir(DIR *dir);
+ addToFunctionSummaryMap(
+ "closedir", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *strdup(const char *s);
+ addToFunctionSummaryMap("strdup", Summary(ArgTypes{ConstCharPtrTy},
+ RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *strndup(const char *s, size_t n);
+ addToFunctionSummaryMap(
+ "strndup", Summary(ArgTypes{ConstCharPtrTy, SizeTy}, RetType{CharPtrTy},
+ NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(1, WithinRange,
+ Range(0, SizeMax))));
+
+ // wchar_t *wcsdup(const wchar_t *s);
+ addToFunctionSummaryMap("wcsdup", Summary(ArgTypes{ConstWchar_tPtrTy},
+ RetType{Wchar_tPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mkstemp(char *template);
+ addToFunctionSummaryMap(
+ "mkstemp", Summary(ArgTypes{CharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *mkdtemp(char *template);
+ addToFunctionSummaryMap(
+ "mkdtemp", Summary(ArgTypes{CharPtrTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *getcwd(char *buf, size_t size);
+ addToFunctionSummaryMap(
+ "getcwd",
+ Summary(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
+
+ if (Mode_tTy) {
+ // int mkdir(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap("mkdir",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mkdirat(int dirfd, const char *pathname, mode_t mode);
+ addToFunctionSummaryMap(
+ "mkdirat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+ }
+
+ Optional<QualType> Dev_tTy = lookupType("dev_t", ACtx);
+
+ if (Mode_tTy && Dev_tTy) {
+ // int mknod(const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap(
+ "mknod", Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy, *Dev_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap("mknodat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
+ *Mode_tTy, *Dev_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+ }
+
+ if (Mode_tTy) {
+ // int chmod(const char *path, mode_t mode);
+ addToFunctionSummaryMap("chmod",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
+ addToFunctionSummaryMap(
+ "fchmodat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fchmod(int fildes, mode_t mode);
+ addToFunctionSummaryMap(
+ "fchmod",
+ Summary(ArgTypes{IntTy, *Mode_tTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ }
+
+ Optional<QualType> Uid_tTy = lookupType("uid_t", ACtx);
+ Optional<QualType> Gid_tTy = lookupType("gid_t", ACtx);
+
+ if (Uid_tTy && Gid_tTy) {
+ // int fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group,
+ // int flags);
+ addToFunctionSummaryMap(
+ "fchownat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, *Uid_tTy, *Gid_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int chown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "chown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int lchown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "lchown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fchown(int fildes, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "fchown", Summary(ArgTypes{IntTy, *Uid_tTy, *Gid_tTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+ }
+
+ // int rmdir(const char *pathname);
+ addToFunctionSummaryMap(
+ "rmdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int chdir(const char *path);
+ addToFunctionSummaryMap(
+ "chdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int link(const char *oldpath, const char *newpath);
+ addToFunctionSummaryMap("link",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int linkat(int fd1, const char *path1, int fd2, const char *path2,
+ // int flag);
+ addToFunctionSummaryMap(
+ "linkat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // int unlink(const char *pathname);
+ addToFunctionSummaryMap(
+ "unlink", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int unlinkat(int fd, const char *path, int flag);
+ addToFunctionSummaryMap(
+ "unlinkat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ Optional<QualType> StructStatTy = lookupType("stat", ACtx);
+ Optional<QualType> StructStatPtrTy, StructStatPtrRestrictTy;
+ if (StructStatTy) {
+ StructStatPtrTy = ACtx.getPointerType(*StructStatTy);
+ StructStatPtrRestrictTy = ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(*StructStatPtrTy)
+ : *StructStatPtrTy;
+ }
+
+ if (StructStatPtrTy)
+ // int fstat(int fd, struct stat *statbuf);
+ addToFunctionSummaryMap(
+ "fstat",
+ Summary(ArgTypes{IntTy, *StructStatPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ if (StructStatPtrRestrictTy) {
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "stat",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int lstat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "lstat",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fstatat(int fd, const char *restrict path,
+ // struct stat *restrict buf, int flag);
+ addToFunctionSummaryMap(
+ "fstatat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
+ *StructStatPtrRestrictTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+ }
+
+ if (DirPtrTy) {
+ // DIR *opendir(const char *name);
+ addToFunctionSummaryMap("opendir", Summary(ArgTypes{ConstCharPtrTy},
+ RetType{*DirPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // DIR *fdopendir(int fd);
+ addToFunctionSummaryMap(
+ "fdopendir", Summary(ArgTypes{IntTy}, RetType{*DirPtrTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+ }
+
+ // int isatty(int fildes);
+ addToFunctionSummaryMap(
+ "isatty", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ if (FilePtrTy) {
+ // FILE *popen(const char *command, const char *type);
+ addToFunctionSummaryMap("popen",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{*FilePtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int pclose(FILE *stream);
+ addToFunctionSummaryMap(
+ "pclose", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ // int close(int fildes);
+ addToFunctionSummaryMap(
+ "close", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // long fpathconf(int fildes, int name);
+ addToFunctionSummaryMap(
+ "fpathconf",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // long pathconf(const char *path, int name);
+ addToFunctionSummaryMap("pathconf", Summary(ArgTypes{ConstCharPtrTy, IntTy},
+ RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ if (FilePtrTy)
+ // FILE *fdopen(int fd, const char *mode);
+ addToFunctionSummaryMap(
+ "fdopen", Summary(ArgTypes{IntTy, ConstCharPtrTy},
+ RetType{*FilePtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ if (DirPtrTy) {
+ // void rewinddir(DIR *dir);
+ addToFunctionSummaryMap(
+ "rewinddir", Summary(ArgTypes{*DirPtrTy}, RetType{VoidTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // void seekdir(DIR *dirp, long loc);
+ addToFunctionSummaryMap("seekdir", Summary(ArgTypes{*DirPtrTy, LongTy},
+ RetType{VoidTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ // int rand_r(unsigned int *seedp);
+ addToFunctionSummaryMap("rand_r", Summary(ArgTypes{UnsignedIntPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int strcasecmp(const char *s1, const char *s2);
+ addToFunctionSummaryMap("strcasecmp",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int strncasecmp(const char *s1, const char *s2, size_t n);
+ addToFunctionSummaryMap(
+ "strncasecmp", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, SizeTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ArgumentCondition(
+ 2, WithinRange, Range(0, SizeMax))));
+
+ if (FilePtrTy && Off_tTy) {
+
+ // int fileno(FILE *stream);
+ addToFunctionSummaryMap(
+ "fileno", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fseeko(FILE *stream, off_t offset, int whence);
+ addToFunctionSummaryMap("fseeko",
+ Summary(ArgTypes{*FilePtrTy, *Off_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // off_t ftello(FILE *stream);
+ addToFunctionSummaryMap(
+ "ftello", Summary(ArgTypes{*FilePtrTy}, RetType{*Off_tTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ if (Off_tTy) {
+ Optional<RangeInt> Off_tMax = BVF.getMaxValue(*Off_tTy).getLimitedValue();
+
+ // void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ // off_t offset);
+ addToFunctionSummaryMap(
+ "mmap",
+ Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off_tTy},
+ RetType{VoidPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(0, *Off_tMax))));
+ }
+
+ Optional<QualType> Off64_tTy = lookupType("off64_t", ACtx);
+ Optional<RangeInt> Off64_tMax;
+ if (Off64_tTy) {
+    Off64_tMax = BVF.getMaxValue(*Off64_tTy).getLimitedValue();
+ // void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
+ // off64_t offset);
+ addToFunctionSummaryMap(
+ "mmap64",
+ Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off64_tTy},
+ RetType{VoidPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(0, *Off64_tMax))));
+ }
+
+ // int pipe(int fildes[2]);
+ addToFunctionSummaryMap(
+ "pipe", Summary(ArgTypes{IntPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ if (Off_tTy)
+ // off_t lseek(int fildes, off_t offset, int whence);
+ addToFunctionSummaryMap(
+ "lseek", Summary(ArgTypes{IntTy, *Off_tTy, IntTy}, RetType{*Off_tTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+
+ Optional<QualType> Ssize_tTy = lookupType("ssize_t", ACtx);
+
+ if (Ssize_tTy) {
+ // ssize_t readlink(const char *restrict path, char *restrict buf,
+ // size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlink",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
+ RetType{*Ssize_tTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2)))
+ .ArgConstraint(
+ ArgumentCondition(2, WithinRange, Range(0, SizeMax))));
+
+ // ssize_t readlinkat(int fd, const char *restrict path,
+ // char *restrict buf, size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlinkat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
+ CharPtrRestrictTy, SizeTy},
+ RetType{*Ssize_tTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
+ /*BufSize=*/ArgNo(3)))
+ .ArgConstraint(ArgumentCondition(
+ 3, WithinRange, Range(0, SizeMax))));
+ }
+
+ // int renameat(int olddirfd, const char *oldpath, int newdirfd, const char
+ // *newpath);
+ addToFunctionSummaryMap("renameat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
+ IntTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // char *realpath(const char *restrict file_name,
+ // char *restrict resolved_name);
+ addToFunctionSummaryMap(
+ "realpath", Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
+ RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ QualType CharPtrConstPtr = ACtx.getPointerType(CharPtrTy.withConst());
+
+ // int execv(const char *path, char *const argv[]);
+ addToFunctionSummaryMap("execv",
+ Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int execvp(const char *file, char *const argv[]);
+ addToFunctionSummaryMap("execvp",
+ Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int getopt(int argc, char * const argv[], const char *optstring);
+ addToFunctionSummaryMap(
+ "getopt",
+ Summary(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+ }
+
+ // Functions for testing.
+ if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
+ addToFunctionSummaryMap(
+ "__two_constrained_args",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(1)))
+ .ArgConstraint(ArgumentCondition(1U, WithinRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__arg_constrained_twice",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1)))
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(2))));
+ addToFunctionSummaryMap(
+ "__defaultparam",
+ Summary(ArgTypes{Irrelevant, IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap("__variadic",
+ Summary(ArgTypes{VoidPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "__buf_size_arg_constraint",
+ Summary(ArgTypes{ConstVoidPtrTy, SizeTy}, RetType{IntTy},
+ EvalCallAsPure)
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1))));
+ addToFunctionSummaryMap(
+ "__buf_size_arg_constraint_mul",
+ Summary(ArgTypes{ConstVoidPtrTy, SizeTy, SizeTy}, RetType{IntTy},
+ EvalCallAsPure)
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
+ /*BufSizeMultiplier=*/ArgNo(2))));
+ }
}
void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
- // If this checker grows large enough to support C++, Objective-C, or other
- // standard libraries, we could use multiple register...Checker() functions,
- // which would register various checkers with the help of the same Checker
- // class, turning on different function summaries.
- mgr.registerChecker<StdLibraryFunctionsChecker>();
+ auto *Checker = mgr.registerChecker<StdLibraryFunctionsChecker>();
+ Checker->DisplayLoadedSummaries =
+ mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Checker, "DisplayLoadedSummaries");
+ Checker->ModelPOSIX =
+ mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker, "ModelPOSIX");
}
-bool ento::shouldRegisterStdCLibraryFunctionsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterStdCLibraryFunctionsChecker(const CheckerManager &mgr) {
return true;
}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ StdLibraryFunctionsChecker *checker = \
+ mgr.getChecker<StdLibraryFunctionsChecker>(); \
+ checker->ChecksEnabled[StdLibraryFunctionsChecker::CK_##name] = true; \
+ checker->CheckNames[StdLibraryFunctionsChecker::CK_##name] = \
+ mgr.getCurrentCheckerName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+
+REGISTER_CHECKER(StdCLibraryFunctionArgsChecker)
+REGISTER_CHECKER(StdCLibraryFunctionsTesterChecker)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 47099f2afb6a..f6abbe4f8f03 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -27,142 +27,453 @@ using namespace std::placeholders;
namespace {
+struct FnDescription;
+
+/// State of the stream error flags.
+/// Sometimes it is not known to the checker what error flags are set.
+/// This is indicated by setting more than one flag to true.
+/// This is an optimization to avoid state splits.
+/// A stream can either be in FEOF or FERROR but not both at the same time.
+/// Multiple flags are set to handle the corresponding states together.
+struct StreamErrorState {
+  /// The stream can be in a state where none of the error flags is set.
+  bool NoError = true;
+  /// The stream can be in a state where the EOF indicator is set.
+  bool FEof = false;
+  /// The stream can be in a state where the error indicator is set.
+  bool FError = false;
+
+ bool isNoError() const { return NoError && !FEof && !FError; }
+ bool isFEof() const { return !NoError && FEof && !FError; }
+ bool isFError() const { return !NoError && !FEof && FError; }
+
+ bool operator==(const StreamErrorState &ES) const {
+ return NoError == ES.NoError && FEof == ES.FEof && FError == ES.FError;
+ }
+
+ bool operator!=(const StreamErrorState &ES) const { return !(*this == ES); }
+
+ StreamErrorState operator|(const StreamErrorState &E) const {
+ return {NoError || E.NoError, FEof || E.FEof, FError || E.FError};
+ }
+
+ StreamErrorState operator&(const StreamErrorState &E) const {
+ return {NoError && E.NoError, FEof && E.FEof, FError && E.FError};
+ }
+
+ StreamErrorState operator~() const { return {!NoError, !FEof, !FError}; }
+
+  /// Returns whether the StreamErrorState is a valid object.
+ operator bool() const { return NoError || FEof || FError; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddBoolean(NoError);
+ ID.AddBoolean(FEof);
+ ID.AddBoolean(FError);
+ }
+};
+
+const StreamErrorState ErrorNone{true, false, false};
+const StreamErrorState ErrorFEof{false, true, false};
+const StreamErrorState ErrorFError{false, false, true};
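+// A stream that is known to have failed, without knowing which flag is set,
+// can be represented by combining these, e.g. (ErrorFEof | ErrorFError).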
+
+/// Full state information about a stream pointer.
struct StreamState {
- enum Kind { Opened, Closed, OpenFailed, Escaped } K;
+  /// The last file operation called on the stream.
+ const FnDescription *LastOperation;
- StreamState(Kind k) : K(k) {}
+ /// State of a stream symbol.
+  /// FIXME: We may need an "escaped" state later.
+ enum KindTy {
+ Opened, /// Stream is opened.
+ Closed, /// Closed stream (an invalid stream pointer after it was closed).
+ OpenFailed /// The last open operation has failed.
+ } State;
- bool isOpened() const { return K == Opened; }
- bool isClosed() const { return K == Closed; }
- //bool isOpenFailed() const { return K == OpenFailed; }
- //bool isEscaped() const { return K == Escaped; }
+ /// State of the error flags.
+ /// Ignored in non-opened stream state but must be NoError.
+ StreamErrorState const ErrorState;
- bool operator==(const StreamState &X) const { return K == X.K; }
+  /// Indicates whether the file has an "indeterminate file position
+  /// indicator".
+  /// This can be set by a failing read, write, or seek operation.
+  /// If it is set, no further read or write is allowed.
+  /// This value is independent of the stream error flags:
+  /// the error flag may be cleared with `clearerr` but the file position
+  /// remains indeterminate.
+  /// This value applies to all error states in ErrorState except FEOF.
+  /// An EOF+indeterminate state is the same as the EOF state.
+ bool const FilePositionIndeterminate = false;
- static StreamState getOpened() { return StreamState(Opened); }
- static StreamState getClosed() { return StreamState(Closed); }
- static StreamState getOpenFailed() { return StreamState(OpenFailed); }
- static StreamState getEscaped() { return StreamState(Escaped); }
+ StreamState(const FnDescription *L, KindTy S, const StreamErrorState &ES,
+ bool IsFilePositionIndeterminate)
+ : LastOperation(L), State(S), ErrorState(ES),
+ FilePositionIndeterminate(IsFilePositionIndeterminate) {
+ assert((!ES.isFEof() || !IsFilePositionIndeterminate) &&
+ "FilePositionIndeterminate should be false in FEof case.");
+ assert((State == Opened || ErrorState.isNoError()) &&
+ "ErrorState should be None in non-opened stream state.");
+ }
+
+ bool isOpened() const { return State == Opened; }
+ bool isClosed() const { return State == Closed; }
+ bool isOpenFailed() const { return State == OpenFailed; }
+
+ bool operator==(const StreamState &X) const {
+    // In a non-opened state the error state is always NoError, so the
+    // comparison here is not a problem.
+ return LastOperation == X.LastOperation && State == X.State &&
+ ErrorState == X.ErrorState &&
+ FilePositionIndeterminate == X.FilePositionIndeterminate;
+ }
+
+ static StreamState getOpened(const FnDescription *L,
+ const StreamErrorState &ES = ErrorNone,
+ bool IsFilePositionIndeterminate = false) {
+ return StreamState{L, Opened, ES, IsFilePositionIndeterminate};
+ }
+ static StreamState getClosed(const FnDescription *L) {
+ return StreamState{L, Closed, {}, false};
+ }
+ static StreamState getOpenFailed(const FnDescription *L) {
+ return StreamState{L, OpenFailed, {}, false};
+ }
void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(K);
+ ID.AddPointer(LastOperation);
+ ID.AddInteger(State);
+ ID.AddInteger(ErrorState);
+ ID.AddBoolean(FilePositionIndeterminate);
}
};
-class StreamChecker : public Checker<eval::Call,
- check::DeadSymbols > {
- mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
- BT_doubleclose, BT_ResourceLeak;
+class StreamChecker;
+using FnCheck = std::function<void(const StreamChecker *, const FnDescription *,
+ const CallEvent &, CheckerContext &)>;
+
+using ArgNoTy = unsigned int;
+static const ArgNoTy ArgNone = std::numeric_limits<ArgNoTy>::max();
+
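+/// Description of a modeled stream function: an optional pre-call check, an
+/// optional call evaluation, and the index of the stream (FILE *) argument of
+/// the call (ArgNone if the function takes no stream argument).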
+struct FnDescription {
+ FnCheck PreFn;
+ FnCheck EvalFn;
+ ArgNoTy StreamArgNo;
+};
+
+/// Get the value of the stream argument out of the passed call event.
+/// The call should contain a function that is described by Desc.
+SVal getStreamArg(const FnDescription *Desc, const CallEvent &Call) {
+ assert(Desc && Desc->StreamArgNo != ArgNone &&
+ "Try to get a non-existing stream argument.");
+ return Call.getArgSVal(Desc->StreamArgNo);
+}
+
+/// Create a conjured symbol return value for a call expression.
+DefinedSVal makeRetVal(CheckerContext &C, const CallExpr *CE) {
+ assert(CE && "Expecting a call expression.");
+
+ const LocationContext *LCtx = C.getLocationContext();
+ return C.getSValBuilder()
+ .conjureSymbolVal(nullptr, CE, LCtx, C.blockCount())
+ .castAs<DefinedSVal>();
+}
+
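+/// Bind a newly conjured value to the call expression and assume it to be
+/// true (non-zero) in the returned state.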
+ProgramStateRef bindAndAssumeTrue(ProgramStateRef State, CheckerContext &C,
+ const CallExpr *CE) {
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ State = State->assume(RetVal, true);
+ assert(State && "Assumption on new value should not fail.");
+ return State;
+}
+
+ProgramStateRef bindInt(uint64_t Value, ProgramStateRef State,
+ CheckerContext &C, const CallExpr *CE) {
+ State = State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeIntVal(Value, false));
+ return State;
+}
+
+class StreamChecker : public Checker<check::PreCall, eval::Call,
+ check::DeadSymbols, check::PointerEscape> {
+ BugType BT_FileNull{this, "NULL stream pointer", "Stream handling error"};
+ BugType BT_UseAfterClose{this, "Closed stream", "Stream handling error"};
+ BugType BT_UseAfterOpenFailed{this, "Invalid stream",
+ "Stream handling error"};
+ BugType BT_IndeterminatePosition{this, "Invalid stream state",
+ "Stream handling error"};
+ BugType BT_IllegalWhence{this, "Illegal whence argument",
+ "Stream handling error"};
+ BugType BT_StreamEof{this, "Stream already in EOF", "Stream handling error"};
+ BugType BT_ResourceLeak{this, "Resource leak", "Stream handling error"};
public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+
+ /// If true, evaluate special testing stream functions.
+ bool TestMode = false;
private:
- using FnCheck = std::function<void(const StreamChecker *, const CallEvent &,
- CheckerContext &)>;
-
- CallDescriptionMap<FnCheck> Callbacks = {
- {{"fopen"}, &StreamChecker::evalFopen},
- {{"freopen", 3}, &StreamChecker::evalFreopen},
- {{"tmpfile"}, &StreamChecker::evalFopen},
- {{"fclose", 1}, &StreamChecker::evalFclose},
+ CallDescriptionMap<FnDescription> FnDescriptions = {
+ {{"fopen"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{"freopen", 3},
+ {&StreamChecker::preFreopen, &StreamChecker::evalFreopen, 2}},
+ {{"tmpfile"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{"fclose", 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalFclose, 0}},
{{"fread", 4},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 3)},
+ {&StreamChecker::preFread,
+ std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, true), 3}},
{{"fwrite", 4},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 3)},
- {{"fseek", 3}, &StreamChecker::evalFseek},
- {{"ftell", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"rewind", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fgetpos", 2},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fsetpos", 2},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preFwrite,
+ std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, false), 3}},
+ {{"fseek", 3}, {&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
+ {{"ftell", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"rewind", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"fgetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"fsetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
{{"clearerr", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault, &StreamChecker::evalClearerr, 0}},
{{"feof", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault,
+ std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFEof),
+ 0}},
{{"ferror", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fileno", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault,
+ std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFError),
+ 0}},
+ {{"fileno", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ };
+
+ CallDescriptionMap<FnDescription> FnTestDescriptions = {
+ {{"StreamTesterChecker_make_feof_stream", 1},
+ {nullptr,
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4, ErrorFEof),
+ 0}},
+ {{"StreamTesterChecker_make_ferror_stream", 1},
+ {nullptr,
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4,
+ ErrorFError),
+ 0}},
+ };
+
+ void evalFopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+ void evalFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFclose(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFread(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFreadFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool IsFread) const;
+
+ void preFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+ void evalFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preDefault(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalClearerr(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFeofFerror(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C,
+ const StreamErrorState &ErrorKind) const;
+
+ void evalSetFeofFerror(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C,
+ const StreamErrorState &ErrorKind) const;
+
+ /// Check that the stream (in StreamVal) is not NULL.
+ /// If it can only be NULL, a fatal error is emitted and nullptr is returned.
+ /// Otherwise the return value is a new state where the stream is constrained
+ /// to be non-null.
+ ProgramStateRef ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check that the stream is in the opened state.
+ /// If the stream is known not to be opened, an error is generated and
+ /// nullptr is returned; otherwise the original state is returned.
+ ProgramStateRef ensureStreamOpened(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check that the stream does not have an invalid ("indeterminate") file
+ /// position, and generate a warning if it does.
+ /// (EOF is not an invalid position.)
+ /// The returned state can be nullptr if a fatal error was generated.
+ /// A non-null state is returned if the stream does not have an invalid
+ /// position, or if there is an execution path with a valid position.
+ ProgramStateRef
+ ensureNoFilePositionIndeterminate(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check the legality of the 'whence' argument of 'fseek'.
+ /// Generate an error and return nullptr if it is found to be illegal;
+ /// otherwise return the state.
+ /// (The state is not changed here because the "whence" value is already known.)
+ ProgramStateRef ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Generate a warning about a stream in EOF state.
+ /// There will always be a state transition into the passed State,
+ /// either by the new non-fatal error node or (if creating it fails) by a
+ /// normal transition, to ensure uniform handling.
+ void reportFEofWarning(CheckerContext &C, ProgramStateRef State) const;
+
+ /// Find the description data of the function called by a call event.
+ /// Returns nullptr if no function is recognized.
+ const FnDescription *lookupFn(const CallEvent &Call) const {
+ // Recognize "global C functions" with only integral or pointer arguments
+ // (and matching name) as stream functions.
+ if (!Call.isGlobalCFunction())
+ return nullptr;
+ for (auto P : Call.parameters()) {
+ QualType T = P->getType();
+ if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
+ return nullptr;
+ }
+
+ return FnDescriptions.lookup(Call);
+ }
+
+ /// Generate a message for BugReporterVisitor if the stored symbol is
+ /// marked as interesting by the current bug report.
+ struct NoteFn {
+ const CheckerNameRef CheckerName;
+ SymbolRef StreamSym;
+ std::string Message;
+
+ std::string operator()(PathSensitiveBugReport &BR) const {
+ if (BR.isInteresting(StreamSym) &&
+ CheckerName == BR.getBugType().getCheckerName())
+ return Message;
+
+ return "";
+ }
};
- void evalFopen(const CallEvent &Call, CheckerContext &C) const;
- void evalFreopen(const CallEvent &Call, CheckerContext &C) const;
- void evalFclose(const CallEvent &Call, CheckerContext &C) const;
- void evalFseek(const CallEvent &Call, CheckerContext &C) const;
-
- void checkArgNullStream(const CallEvent &Call, CheckerContext &C,
- unsigned ArgI) const;
- bool checkNullStream(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const;
- void checkFseekWhence(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const;
- bool checkDoubleClose(const CallEvent &Call, CheckerContext &C,
- ProgramStateRef &State) const;
+ const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
+ const std::string &Message) const {
+ return C.getNoteTag(NoteFn{getCheckerName(), StreamSym, Message});
+ }
+
+ /// Searches for the ExplodedNode where the file descriptor was acquired for
+ /// StreamSym.
+ static const ExplodedNode *getAcquisitionSite(const ExplodedNode *N,
+ SymbolRef StreamSym,
+ CheckerContext &C);
};
} // end anonymous namespace
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+inline void assertStreamStateOpened(const StreamState *SS) {
+ assert(SS->isOpened() &&
+ "Previous create of error node for non-opened stream failed?");
+}
-bool StreamChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD || FD->getKind() != Decl::Function)
- return false;
+const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
+ SymbolRef StreamSym,
+ CheckerContext &C) {
+ ProgramStateRef State = N->getState();
+ // When the bug type is a resource leak, the exploded node N may not have
+ // state info for the leaked file descriptor, but its predecessor should.
+ if (!State->get<StreamMap>(StreamSym))
+ N = N->getFirstPred();
- // Recognize "global C functions" with only integral or pointer arguments
- // (and matching name) as stream functions.
- if (!Call.isGlobalCFunction())
- return false;
- for (auto P : Call.parameters()) {
- QualType T = P->getType();
- if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
- return false;
+ const ExplodedNode *Pred = N;
+ while (N) {
+ State = N->getState();
+ if (!State->get<StreamMap>(StreamSym))
+ return Pred;
+ Pred = N;
+ N = N->getFirstPred();
}
- const FnCheck *Callback = Callbacks.lookup(Call);
- if (!Callback)
+ return nullptr;
+}
+
+void StreamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const FnDescription *Desc = lookupFn(Call);
+ if (!Desc || !Desc->PreFn)
+ return;
+
+ Desc->PreFn(this, Desc, Call, C);
+}
+
+bool StreamChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ const FnDescription *Desc = lookupFn(Call);
+ if (!Desc && TestMode)
+ Desc = FnTestDescriptions.lookup(Call);
+ if (!Desc || !Desc->EvalFn)
return false;
- (*Callback)(this, Call, C);
+ Desc->EvalFn(this, Desc, Call, C);
return C.isDifferent();
}
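A hedged C sketch (not part of this patch; file name and buffer are illustrative) of how the table-driven dispatch behaves for typical caller code: checkPreCall runs the registered PreFn and evalCall runs the EvalFn from the FnDescriptions entries above.

    #include <stdio.h>

    void dispatch_sketch(void) {
      char Buf[8];
      FILE *F = fopen("a.txt", "r"); /* no PreFn; evalFopen models the result */
      if (!F)
        return;
      fread(Buf, 1, 8, F);  /* preFread, then evalFreadFwrite with IsFread=true */
      (void)ftell(F);       /* preDefault only; the return value is not modeled */
      fclose(F);            /* preDefault, then evalFclose */
    }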
-void StreamChecker::evalFopen(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
- const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+void StreamChecker::evalFopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
if (!CE)
return;
- DefinedSVal RetVal =
- svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount())
- .castAs<DefinedSVal>();
- state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ SymbolRef RetSym = RetVal.getAsSymbol();
+ assert(RetSym && "RetVal must be a symbol here.");
+
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
- ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
- ProgramStateRef stateNotNull, stateNull;
- std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+ ProgramStateRef StateNotNull, StateNull;
+ std::tie(StateNotNull, StateNull) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ StateNotNull =
+ StateNotNull->set<StreamMap>(RetSym, StreamState::getOpened(Desc));
+ StateNull =
+ StateNull->set<StreamMap>(RetSym, StreamState::getOpenFailed(Desc));
+
+ C.addTransition(StateNotNull,
+ constructNoteTag(C, RetSym, "Stream opened here"));
+ C.addTransition(StateNull);
+}
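A minimal caller-side example (assumed, not from this patch) of what the null branch enables: on the path where fopen returned NULL, the pointer is tracked as OpenFailed and constrained to be null, so a later use without a check is reported by ensureStreamNonNull.

    #include <stdio.h>

    void missing_null_check(void) {
      FILE *F = fopen("a.txt", "r");
      fseek(F, 0L, SEEK_SET); /* warning on the null branch:
                                 "Stream pointer might be NULL." */
      fclose(F);
    }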
- SymbolRef Sym = RetVal.getAsSymbol();
- assert(Sym && "RetVal must be a symbol here.");
- stateNotNull = stateNotNull->set<StreamMap>(Sym, StreamState::getOpened());
- stateNull = stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed());
+void StreamChecker::preFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ // Do not allow NULL as the passed stream pointer, but allow a closed stream.
+ ProgramStateRef State = C.getState();
+ State = ensureStreamNonNull(getStreamArg(Desc, Call), C, State);
+ if (!State)
+ return;
- C.addTransition(stateNotNull);
- C.addTransition(stateNull);
+ C.addTransition(State);
}
-void StreamChecker::evalFreopen(const CallEvent &Call,
+void StreamChecker::evalFreopen(const FnDescription *Desc,
+ const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -170,21 +481,21 @@ void StreamChecker::evalFreopen(const CallEvent &Call,
if (!CE)
return;
- Optional<DefinedSVal> StreamVal = Call.getArgSVal(2).getAs<DefinedSVal>();
+ Optional<DefinedSVal> StreamVal =
+ getStreamArg(Desc, Call).getAs<DefinedSVal>();
if (!StreamVal)
return;
- // Do not allow NULL as passed stream pointer.
- // This is not specified in the man page but may crash on some system.
- checkNullStream(*StreamVal, C, State);
- // Check if error was generated.
- if (C.isDifferent())
- return;
SymbolRef StreamSym = StreamVal->getAsSymbol();
- // Do not care about special values for stream ("(FILE *)0x12345"?).
+ // Do not care about concrete values for the stream ("(FILE *)0x12345"?).
+ // FIXME: Can such values be stdin, stdout, or stderr?
if (!StreamSym)
return;
+ // Do not handle an untracked stream; it has probably escaped.
+ if (!State->get<StreamMap>(StreamSym))
+ return;
+
// Generate state for non-failed case.
// Return value is the passed stream pointer.
// According to the documentation, the stream is closed first
@@ -197,129 +508,452 @@ void StreamChecker::evalFreopen(const CallEvent &Call,
C.getSValBuilder().makeNull());
StateRetNotNull =
- StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened());
+ StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
StateRetNull =
- StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed());
+ StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed(Desc));
- C.addTransition(StateRetNotNull);
+ C.addTransition(StateRetNotNull,
+ constructNoteTag(C, StreamSym, "Stream reopened here"));
C.addTransition(StateRetNull);
}
-void StreamChecker::evalFclose(const CallEvent &Call, CheckerContext &C) const {
+void StreamChecker::evalFclose(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
- if (checkDoubleClose(Call, C, State))
+ SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!Sym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ // Close the stream.
+ // Regardless of whether the close fails, the stream becomes "closed"
+ // and cannot be used any more.
+ State = State->set<StreamMap>(Sym, StreamState::getClosed(Desc));
+
+ C.addTransition(State);
+}
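A hypothetical test case (names assumed) for the Closed state set here; the second call is caught by ensureStreamOpened in preDefault.

    #include <stdio.h>

    void double_close(void) {
      FILE *F = fopen("a.txt", "r");
      if (!F)
        return;
      fclose(F);
      fclose(F); /* warning: "Stream might be already closed." */
    }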
+
+void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
+ if (!State)
+ return;
+
+ SymbolRef Sym = StreamVal.getAsSymbol();
+ if (Sym && State->get<StreamMap>(Sym)) {
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (SS->ErrorState & ErrorFEof)
+ reportFEofWarning(C, State);
+ } else {
C.addTransition(State);
+ }
}
-void StreamChecker::evalFseek(const CallEvent &Call, CheckerContext &C) const {
- const Expr *AE2 = Call.getArgExpr(2);
- if (!AE2)
+void StreamChecker::preFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ bool IsFread) const {
ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ Optional<NonLoc> SizeVal = Call.getArgSVal(1).getAs<NonLoc>();
+ if (!SizeVal)
+ return;
+ Optional<NonLoc> NMembVal = Call.getArgSVal(2).getAs<NonLoc>();
+ if (!NMembVal)
+ return;
- bool StateChanged = checkNullStream(Call.getArgSVal(0), C, State);
- // Check if error was generated.
- if (C.isDifferent())
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
return;
- // Check the legality of the 'whence' argument of 'fseek'.
- checkFseekWhence(State->getSVal(AE2, C.getLocationContext()), C, State);
+ assertStreamStateOpened(SS);
+
+ // C99 standard, §7.19.8.1.3, the return value of fread:
+ // The fread function returns the number of elements successfully read, which
+ // may be less than nmemb if a read error or end-of-file is encountered. If
+ // size or nmemb is zero, fread returns zero and the contents of the array and
+ // the state of the stream remain unchanged.
- if (!C.isDifferent() && StateChanged)
+ if (State->isNull(*SizeVal).isConstrainedTrue() ||
+ State->isNull(*NMembVal).isConstrainedTrue()) {
+ // This is the "size or nmemb is zero" case.
+ // Just return 0; do nothing more (do not clear the error flags).
+ State = bindInt(0, State, C, CE);
C.addTransition(State);
+ return;
+ }
- return;
+ // Generate a transition for the success state.
+ // If the stream is already known to be only at FEOF and this is fread, skip the success state.
+ if (!IsFread || (SS->ErrorState != ErrorFEof)) {
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), *NMembVal);
+ if (StateNotFailed) {
+ StateNotFailed = StateNotFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+ }
+ }
+
+ // Add transition for the failed state.
+ Optional<NonLoc> RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ assert(RetVal && "Value should be NonLoc.");
+ ProgramStateRef StateFailed =
+ State->BindExpr(CE, C.getLocationContext(), *RetVal);
+ if (!StateFailed)
+ return;
+ auto Cond = C.getSValBuilder()
+ .evalBinOpNN(State, BO_LT, *RetVal, *NMembVal,
+ C.getASTContext().IntTy)
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ StateFailed = StateFailed->assume(*Cond, true);
+ if (!StateFailed)
+ return;
+
+ StreamErrorState NewES;
+ if (IsFread)
+ NewES = (SS->ErrorState == ErrorFEof) ? ErrorFEof : ErrorFEof | ErrorFError;
+ else
+ NewES = ErrorFError;
+ // If a (non-EOF) error occurs, the resulting value of the file position
+ // indicator for the stream is indeterminate.
+ StreamState NewState = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewState);
+ C.addTransition(StateFailed);
}
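In caller terms the modeled contract looks roughly like the sketch below (assumed file name and sizes): on the failed branch the return value is constrained to be less than nmemb, and the stream carries ErrorFEof and/or ErrorFError, with an indeterminate position for a real error.

    #include <stdio.h>

    void read_block(char *Buf, size_t N) {
      FILE *F = fopen("a.txt", "rb");
      if (!F)
        return;
      size_t R = fread(Buf, 1, N, F);
      if (R < N) {
        /* Short read: end-of-file or read error; the checker keeps both
           possibilities unless EOF was already known before the call. */
        if (ferror(F))
          clearerr(F); /* evalClearerr drops the error flags but keeps the
                          indeterminate-position flag */
      }
      fclose(F);
    }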
-void StreamChecker::checkArgNullStream(const CallEvent &Call, CheckerContext &C,
- unsigned ArgI) const {
+void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
- if (checkNullStream(Call.getArgSVal(ArgI), C, State))
- C.addTransition(State);
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureFseekWhenceCorrect(Call.getArgSVal(2), C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
}
-bool StreamChecker::checkNullStream(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const {
- Optional<DefinedSVal> DV = SV.getAs<DefinedSVal>();
- if (!DV)
- return false;
+void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ // Ignore the call if the stream is not tracked.
+ if (!State->get<StreamMap>(StreamSym))
+ return;
+
+ DefinedSVal RetVal = makeRetVal(C, CE);
+
+ // Bind a new symbolic value as the result of the call expression.
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+
+ // Bifurcate the state into failed and non-failed.
+ // Return zero on success, nonzero on error.
+ ProgramStateRef StateNotFailed, StateFailed;
+ std::tie(StateFailed, StateNotFailed) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ // Reset the state to opened with no error.
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ // Handle the failed case.
+ // It is possible that fseek fails but sets none of the error flags.
+ // If fseek failed, assume that the file position becomes indeterminate in any
+ // case.
+ StateFailed = StateFailed->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(Desc, ErrorNone | ErrorFEof | ErrorFError, true));
+
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
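A hedged sketch (file name assumed) of the intended caller pattern: checking the fseek return value keeps the subsequent read on the non-failed branch, where the position is not indeterminate.

    #include <stdio.h>

    void seek_then_read(char *Buf) {
      FILE *F = fopen("a.txt", "rb");
      if (!F)
        return;
      if (fseek(F, 100L, SEEK_SET) != 0) {
        /* Failed branch: all error flags remain possible and the position is
           indeterminate; an fread here would be reported by
           ensureNoFilePositionIndeterminate. */
        fclose(F);
        return;
      }
      fread(Buf, 1, 10, F); /* reached only on the non-failed branch */
      fclose(F);
    }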
+
+void StreamChecker::evalClearerr(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ // FilePositionIndeterminate is not cleared.
+ State = State->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(Desc, ErrorNone, SS->FilePositionIndeterminate));
+ C.addTransition(State);
+}
+
+void StreamChecker::evalFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ if (SS->ErrorState & ErrorKind) {
+ // Execution path with error of ErrorKind.
+ // Function returns true.
+ // From now on this is the only error state.
+ ProgramStateRef TrueState = bindAndAssumeTrue(State, C, CE);
+ C.addTransition(TrueState->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc, ErrorKind,
+ SS->FilePositionIndeterminate &&
+ !ErrorKind.isFEof())));
+ }
+ if (StreamErrorState NewES = SS->ErrorState & (~ErrorKind)) {
+ // Execution path(s) with ErrorKind not set.
+ // Function returns false.
+ // The new error state is the previous one minus ErrorKind.
+ ProgramStateRef FalseState = bindInt(0, State, C, CE);
+ C.addTransition(FalseState->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(
+ Desc, NewES, SS->FilePositionIndeterminate && !NewES.isFEof())));
+ }
+}
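An assumed example of how the two transitions interact with caller checks after a short read: the feof() branch narrows the error state to ErrorFEof, the ferror() branch to ErrorFError.

    #include <stdio.h>

    void classify_short_read(char *Buf) {
      FILE *F = fopen("a.txt", "rb");
      if (!F)
        return;
      if (fread(Buf, 1, 10, F) < 10) {
        if (feof(F)) {
          /* Only ErrorFEof remains; another fread here would trigger the
             "stream is in EOF state" warning. */
        } else if (ferror(F)) {
          /* Only ErrorFError remains; the file position is still treated
             as indeterminate until e.g. a successful fseek. */
        }
      }
      fclose(F);
    }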
+
+void StreamChecker::preDefault(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ assert(StreamSym && "Operation not permitted on non-symbolic stream value.");
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ assert(SS && "Stream should be tracked by the checker.");
+ State = State->set<StreamMap>(
+ StreamSym, StreamState::getOpened(SS->LastOperation, ErrorKind));
+ C.addTransition(State);
+}
+
+ProgramStateRef
+StreamChecker::ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const {
+ auto Stream = StreamVal.getAs<DefinedSVal>();
+ if (!Stream)
+ return State;
ConstraintManager &CM = C.getConstraintManager();
+
ProgramStateRef StateNotNull, StateNull;
- std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *DV);
+ std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *Stream);
if (!StateNotNull && StateNull) {
if (ExplodedNode *N = C.generateErrorNode(StateNull)) {
- if (!BT_nullfp)
- BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
- "Stream pointer might be NULL."));
C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_nullfp, BT_nullfp->getDescription(), N));
+ BT_FileNull, "Stream pointer might be NULL.", N));
}
- return false;
- }
-
- if (StateNotNull) {
- State = StateNotNull;
- return true;
+ return nullptr;
}
- return false;
+ return StateNotNull;
}
-void StreamChecker::checkFseekWhence(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const {
- Optional<nonloc::ConcreteInt> CI = SV.getAs<nonloc::ConcreteInt>();
- if (!CI)
- return;
+ProgramStateRef StreamChecker::ensureStreamOpened(SVal StreamVal,
+ CheckerContext &C,
+ ProgramStateRef State) const {
+ SymbolRef Sym = StreamVal.getAsSymbol();
+ if (!Sym)
+ return State;
- int64_t X = CI->getValue().getSExtValue();
- if (X >= 0 && X <= 2)
- return;
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (!SS)
+ return State;
- if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
- if (!BT_illegalwhence)
- BT_illegalwhence.reset(
- new BuiltinBug(this, "Illegal whence argument",
- "The whence argument to fseek() should be "
- "SEEK_SET, SEEK_END, or SEEK_CUR."));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_illegalwhence, BT_illegalwhence->getDescription(), N));
+ if (SS->isClosed()) {
+ // Using a stream pointer after 'fclose' causes undefined behavior
+ // according to cppreference.com.
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_UseAfterClose,
+ "Stream might be already closed. Causes undefined behaviour.", N));
+ return nullptr;
+ }
+
+ return State;
}
+
+ if (SS->isOpenFailed()) {
+ // Using a stream that has failed to open is likely to cause problems.
+ // This should usually not occur because the stream pointer is NULL.
+ // But freopen can produce a state where the stream pointer remains non-null
+ // even though the open failed.
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_UseAfterOpenFailed,
+ "Stream might be invalid after "
+ "(re-)opening it has failed. "
+ "Can cause undefined behaviour.",
+ N));
+ return nullptr;
+ }
+ return State;
+ }
+
+ return State;
}
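The OpenFailed branch mostly matters for freopen; a hypothetical example (file names assumed) where the pointer stays non-null although the reopen failed:

    #include <stdio.h>

    void reopen_without_check(char *Buf) {
      FILE *F = fopen("a.txt", "r");
      if (!F)
        return;
      freopen("b.txt", "r", F); /* failure is not checked */
      fread(Buf, 1, 10, F);     /* warning on the failed branch: stream might
                                   be invalid after failed (re-)opening */
      fclose(F);
    }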
-bool StreamChecker::checkDoubleClose(const CallEvent &Call, CheckerContext &C,
- ProgramStateRef &State) const {
- SymbolRef Sym = Call.getArgSVal(0).getAsSymbol();
+ProgramStateRef StreamChecker::ensureNoFilePositionIndeterminate(
+ SVal StreamVal, CheckerContext &C, ProgramStateRef State) const {
+ static const char *BugMessage =
+ "File position of the stream might be 'indeterminate' "
+ "after a failed operation. "
+ "Can cause undefined behavior.";
+
+ SymbolRef Sym = StreamVal.getAsSymbol();
if (!Sym)
- return false;
+ return State;
const StreamState *SS = State->get<StreamMap>(Sym);
-
- // If the file stream is not tracked, return.
if (!SS)
- return false;
+ return State;
+
+ assert(SS->isOpened() && "First ensure that stream is opened.");
+
+ if (SS->FilePositionIndeterminate) {
+ if (SS->ErrorState & ErrorFEof) {
+ // The error is unknown but may be FEOF.
+ // Continue the analysis with the FEOF error state.
+ // Report a warning because of the other possible error states.
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return nullptr;
- // Check: Double close a File Descriptor could cause undefined behaviour.
- // Conforming to man-pages
- if (SS->isClosed()) {
- ExplodedNode *N = C.generateErrorNode();
- if (N) {
- if (!BT_doubleclose)
- BT_doubleclose.reset(new BuiltinBug(
- this, "Double fclose", "Try to close a file Descriptor already"
- " closed. Cause undefined behaviour."));
C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_doubleclose, BT_doubleclose->getDescription(), N));
+ BT_IndeterminatePosition, BugMessage, N));
+ return State->set<StreamMap>(
+ Sym, StreamState::getOpened(SS->LastOperation, ErrorFEof, false));
}
- return false;
+
+ // Known or unknown error state where FEOF is not possible.
+ // Stop analysis, report error.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (N)
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N));
+
+ return nullptr;
}
- // Close the File Descriptor.
- State = State->set<StreamMap>(Sym, StreamState::getClosed());
+ return State;
+}
- return true;
+ProgramStateRef
+StreamChecker::ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
+ ProgramStateRef State) const {
+ Optional<nonloc::ConcreteInt> CI = WhenceVal.getAs<nonloc::ConcreteInt>();
+ if (!CI)
+ return State;
+
+ int64_t X = CI->getValue().getSExtValue();
+ if (X >= 0 && X <= 2)
+ return State;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_IllegalWhence,
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR.",
+ N));
+ return nullptr;
+ }
+
+ return State;
+}
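A trivial assumed example of the whence check; the value 3 is outside SEEK_SET/SEEK_CUR/SEEK_END on common C libraries.

    #include <stdio.h>

    void bad_whence(FILE *F) {
      fseek(F, 0L, 3); /* warning: the whence argument should be SEEK_SET,
                          SEEK_END, or SEEK_CUR */
    }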
+
+void StreamChecker::reportFEofWarning(CheckerContext &C,
+ ProgramStateRef State) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_StreamEof,
+ "Read function called when stream is in EOF state. "
+ "Function has no effect.",
+ N));
+ return;
+ }
+ C.addTransition(State);
}
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
@@ -328,7 +962,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// TODO: Clean up the state.
const StreamMapTy &Map = State->get<StreamMap>();
- for (const auto &I: Map) {
+ for (const auto &I : Map) {
SymbolRef Sym = I.first;
const StreamState &SS = I.second;
if (!SymReaper.isDead(Sym) || !SS.isOpened())
@@ -338,19 +972,77 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (!N)
continue;
- if (!BT_ResourceLeak)
- BT_ResourceLeak.reset(
- new BuiltinBug(this, "Resource Leak",
- "Opened File never closed. Potential Resource leak."));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
+ // Do not warn for a non-closed stream at program exit.
+ ExplodedNode *Pred = C.getPredecessor();
+ if (Pred && Pred->getCFGBlock() &&
+ Pred->getCFGBlock()->hasNoReturnElement())
+ continue;
+
+ // Resource leaks can result in multiple warnings that describe the same kind
+ // of programming error:
+ //   void f() {
+ //     FILE *F = fopen("a.txt", "r");
+ //     if (rand()) // state split
+ //       return; // warning
+ //   } // warning
+ // While this isn't necessarily true (leaking the same stream could result
+ // from different kinds of errors), the reduction in redundant reports
+ // makes this a worthwhile heuristic.
+ // FIXME: Add a checker option to turn this uniqueing feature off.
+
+ const ExplodedNode *StreamOpenNode = getAcquisitionSite(N, Sym, C);
+ assert(StreamOpenNode && "Could not find place of stream opening.");
+ PathDiagnosticLocation LocUsedForUniqueing =
+ PathDiagnosticLocation::createBegin(
+ StreamOpenNode->getStmtForDiagnostics(), C.getSourceManager(),
+ StreamOpenNode->getLocationContext());
+
+ std::unique_ptr<PathSensitiveBugReport> R =
+ std::make_unique<PathSensitiveBugReport>(
+ BT_ResourceLeak,
+ "Opened stream never closed. Potential resource leak.", N,
+ LocUsedForUniqueing,
+ StreamOpenNode->getLocationContext()->getDecl());
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
+ }
+}
+
+ProgramStateRef StreamChecker::checkPointerEscape(
+ ProgramStateRef State, const InvalidatedSymbols &Escaped,
+ const CallEvent *Call, PointerEscapeKind Kind) const {
+ // Check for a file-handling system call that is not handled by the checker.
+ // FIXME: The checker should be updated to handle all system calls that take
+ // a 'FILE*' argument. These are currently ignored.
+ if (Kind == PSK_DirectEscapeOnCall && Call->isInSystemHeader())
+ return State;
+
+ for (SymbolRef Sym : Escaped) {
+ // The symbol escaped.
+ // From now on the stream can be manipulated in ways unknown to the checker,
+ // so it is not possible to handle it any more.
+ // Optimistically, assume that the corresponding file handle will be closed
+ // somewhere else.
+ // Remove the symbol from the state so that subsequent stream calls on this
+ // symbol are not handled by the checker.
+ State = State->remove<StreamMap>(Sym);
}
+ return State;
+}
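Caller-side consequence of the escape handling, as a hedged sketch; store_stream is a hypothetical function that is not declared in a system header.

    #include <stdio.h>

    void store_stream(FILE *F); /* unknown non-system function */

    void no_leak_after_escape(void) {
      FILE *F = fopen("a.txt", "r");
      if (!F)
        return;
      /* The stream symbol escapes here, so the checker stops tracking it and
         no resource-leak warning is emitted despite the missing fclose. */
      store_stream(F);
    }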
+
+void ento::registerStreamChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<StreamChecker>();
+}
+
+bool ento::shouldRegisterStreamChecker(const CheckerManager &Mgr) {
+ return true;
}
-void ento::registerStreamChecker(CheckerManager &mgr) {
- mgr.registerChecker<StreamChecker>();
+void ento::registerStreamTesterChecker(CheckerManager &Mgr) {
+ auto *Checker = Mgr.getChecker<StreamChecker>();
+ Checker->TestMode = true;
}
-bool ento::shouldRegisterStreamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterStreamTesterChecker(const CheckerManager &Mgr) {
return true;
}
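The test-only entry points are meant to be driven from analyzer regression tests; a hedged sketch of such a test (the declarations below are assumptions, not part of this diff), which only takes effect when the StreamTester checker has enabled TestMode.

    #include <stdio.h>

    /* Assumed test helpers, matched by FnTestDescriptions in test mode. */
    void StreamTesterChecker_make_feof_stream(FILE *Stream);
    void StreamTesterChecker_make_ferror_stream(FILE *Stream);

    void eof_read_is_reported(char *Buf) {
      FILE *F = fopen("a.txt", "r");
      if (!F)
        return;
      StreamTesterChecker_make_feof_stream(F); /* forces ErrorFEof on F */
      fread(Buf, 1, 10, F); /* warning: read called when stream is in EOF state */
      fclose(F);
    }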
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index f81705304f3a..916977c10c0c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -63,6 +63,6 @@ void ento::registerTaintTesterChecker(CheckerManager &mgr) {
mgr.registerChecker<TaintTesterChecker>();
}
-bool ento::shouldRegisterTaintTesterChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTaintTesterChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 3663b0963692..eeec807ccee4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -261,6 +261,6 @@ void ento::registerTestAfterDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<TestAfterDivZeroChecker>();
}
-bool ento::shouldRegisterTestAfterDivZeroChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTestAfterDivZeroChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index 73183aa468f6..2f316bd3b20d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -64,7 +64,7 @@ void ento::registerTraversalDumper(CheckerManager &mgr) {
mgr.registerChecker<TraversalDumper>();
}
-bool ento::shouldRegisterTraversalDumper(const LangOptions &LO) {
+bool ento::shouldRegisterTraversalDumper(const CheckerManager &mgr) {
return true;
}
@@ -116,6 +116,6 @@ void ento::registerCallDumper(CheckerManager &mgr) {
mgr.registerChecker<CallDumper>();
}
-bool ento::shouldRegisterCallDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCallDumper(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index 62a4c2ab0209..5cc713172527 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -252,6 +252,6 @@ void ento::registerTrustNonnullChecker(CheckerManager &Mgr) {
Mgr.registerChecker<TrustNonnullChecker>(Mgr.getASTContext());
}
-bool ento::shouldRegisterTrustNonnullChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTrustNonnullChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 247cba7dc933..3e0caaf79ca0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -110,6 +110,6 @@ void ento::registerUndefBranchChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefBranchChecker>();
}
-bool ento::shouldRegisterUndefBranchChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefBranchChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 7b581bef3900..e457513d8de4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -101,6 +101,6 @@ void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefCapturedBlockVarChecker>();
}
-bool ento::shouldRegisterUndefCapturedBlockVarChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefCapturedBlockVarChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index a2f3e0da13fb..392da4818098 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -50,10 +51,10 @@ static bool isArrayIndexOutOfBounds(CheckerContext &C, const Expr *Ex) {
return false;
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal NumElements = C.getStoreManager().getSizeInElements(
- state, ER->getSuperRegion(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
return StOutBound && !StInBound;
}
@@ -186,6 +187,6 @@ void ento::registerUndefResultChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefResultChecker>();
}
-bool ento::shouldRegisterUndefResultChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefResultChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 2f075eaeb03b..fdefe75e8201 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -62,6 +62,6 @@ void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedArraySubscriptChecker>();
}
-bool ento::shouldRegisterUndefinedArraySubscriptChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefinedArraySubscriptChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 277a8a143328..05f8f6084c0b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -120,6 +120,6 @@ void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedAssignmentChecker>();
}
-bool ento::shouldRegisterUndefinedAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefinedAssignmentChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 020df8a1bb8c..4182b51c02b0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -541,14 +541,11 @@ static bool hasUnguardedAccess(const FieldDecl *FD, ProgramStateRef State) {
auto FieldAccessM = memberExpr(hasDeclaration(equalsNode(FD))).bind("access");
auto AssertLikeM = callExpr(callee(functionDecl(
- anyOf(hasName("exit"), hasName("panic"), hasName("error"),
- hasName("Assert"), hasName("assert"), hasName("ziperr"),
- hasName("assfail"), hasName("db_error"), hasName("__assert"),
- hasName("__assert2"), hasName("_wassert"), hasName("__assert_rtn"),
- hasName("__assert_fail"), hasName("dtrace_assfail"),
- hasName("yy_fatal_error"), hasName("_XCAssertionFailureHandler"),
- hasName("_DTAssertionFailureHandler"),
- hasName("_TSAssertionFailureHandler")))));
+ hasAnyName("exit", "panic", "error", "Assert", "assert", "ziperr",
+ "assfail", "db_error", "__assert", "__assert2", "_wassert",
+ "__assert_rtn", "__assert_fail", "dtrace_assfail",
+ "yy_fatal_error", "_XCAssertionFailureHandler",
+ "_DTAssertionFailureHandler", "_TSAssertionFailureHandler"))));
auto NoReturnFuncM = callExpr(callee(functionDecl(isNoReturn())));
@@ -602,13 +599,13 @@ std::string clang::ento::getVariableName(const FieldDecl *Field) {
llvm_unreachable("No other capture type is expected!");
}
- return Field->getName();
+ return std::string(Field->getName());
}
void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
auto Chk = Mgr.registerChecker<UninitializedObjectChecker>();
- AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
+ const AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
UninitObjCheckerOptions &ChOpts = Chk->Opts;
ChOpts.IsPedantic = AnOpts.getCheckerBooleanOption(Chk, "Pedantic");
@@ -617,7 +614,7 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
ChOpts.CheckPointeeInitialization = AnOpts.getCheckerBooleanOption(
Chk, "CheckPointeeInitialization");
ChOpts.IgnoredRecordsWithFieldPattern =
- AnOpts.getCheckerStringOption(Chk, "IgnoreRecordsWithField");
+ std::string(AnOpts.getCheckerStringOption(Chk, "IgnoreRecordsWithField"));
ChOpts.IgnoreGuardedFields =
AnOpts.getCheckerBooleanOption(Chk, "IgnoreGuardedFields");
@@ -628,6 +625,6 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
"\"" + ErrorMsg + "\"");
}
-bool ento::shouldRegisterUninitializedObjectChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUninitializedObjectChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index f4e225d836f3..381334de068e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -503,7 +504,7 @@ void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
mgr.registerChecker<CHECKERNAME>(); \
} \
\
- bool ento::shouldRegister##CHECKERNAME(const LangOptions &LO) { \
+ bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
return true; \
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 65dd82675df9..74eec81ffb3e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -257,6 +257,6 @@ void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
mgr.registerChecker<UnreachableCodeChecker>();
}
-bool ento::shouldRegisterUnreachableCodeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUnreachableCodeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index b92757312dc6..d76b2a06aba5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -14,12 +14,13 @@
//===----------------------------------------------------------------------===//
#include "Taint.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,9 +30,28 @@ using namespace ento;
using namespace taint;
namespace {
-class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
+class VLASizeChecker
+ : public Checker<check::PreStmt<DeclStmt>,
+ check::PreStmt<UnaryExprOrTypeTraitExpr>> {
mutable std::unique_ptr<BugType> BT;
- enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted, VLA_Negative };
+ enum VLASize_Kind {
+ VLA_Garbage,
+ VLA_Zero,
+ VLA_Tainted,
+ VLA_Negative,
+ VLA_Overflow
+ };
+
+ /// Check a VLA for validity.
+ /// Every dimension of the array and the total size are checked for validity.
+ /// Returns nullptr or a new state where the size is validated.
+ /// 'ArraySize' will contain an SVal that refers to the total size (in chars)
+ /// of the array.
+ ProgramStateRef checkVLA(CheckerContext &C, ProgramStateRef State,
+ const VariableArrayType *VLA, SVal &ArraySize) const;
+ /// Check a single VLA index size expression for validity.
+ ProgramStateRef checkVLAIndexSize(CheckerContext &C, ProgramStateRef State,
+ const Expr *SizeE) const;
void reportBug(VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
CheckerContext &C,
@@ -39,9 +59,155 @@ class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+ void checkPreStmt(const UnaryExprOrTypeTraitExpr *UETTE,
+ CheckerContext &C) const;
};
} // end anonymous namespace
+ProgramStateRef VLASizeChecker::checkVLA(CheckerContext &C,
+ ProgramStateRef State,
+ const VariableArrayType *VLA,
+ SVal &ArraySize) const {
+ assert(VLA && "Function should be called with non-null VLA argument.");
+
+ const VariableArrayType *VLALast = nullptr;
+ llvm::SmallVector<const Expr *, 2> VLASizes;
+
+ // Walk over the VLAs for every dimension until a non-VLA is found.
+ // There is a VariableArrayType for every dimension (fixed or variable) until
+ // the innermost array that is variably modified.
+ // Dimension sizes are collected into 'VLASizes'. 'VLALast' is set to the
+ // innermost VLA that was encountered.
+ // In "int vla[x][2][y][3]" this will be the array for index "y" (with type
+ // int[3]). 'VLASizes' contains 'x', '2', and 'y'.
+ while (VLA) {
+ const Expr *SizeE = VLA->getSizeExpr();
+ State = checkVLAIndexSize(C, State, SizeE);
+ if (!State)
+ return nullptr;
+ VLASizes.push_back(SizeE);
+ VLALast = VLA;
+ VLA = C.getASTContext().getAsVariableArrayType(VLA->getElementType());
+ };
+ assert(VLALast &&
+ "Array should have at least one variably-modified dimension.");
+
+ ASTContext &Ctx = C.getASTContext();
+ SValBuilder &SVB = C.getSValBuilder();
+ CanQualType SizeTy = Ctx.getSizeType();
+ uint64_t SizeMax =
+ SVB.getBasicValueFactory().getMaxValue(SizeTy).getZExtValue();
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLALast->getElementType());
+ NonLoc ArrSize =
+ SVB.makeIntVal(EleSize.getQuantity(), SizeTy).castAs<NonLoc>();
+
+ // Try to calculate the known real size of the array in KnownSize.
+ uint64_t KnownSize = 0;
+ if (const llvm::APSInt *KV = SVB.getKnownValue(State, ArrSize))
+ KnownSize = KV->getZExtValue();
+
+ for (const Expr *SizeE : VLASizes) {
+ auto SizeD = C.getSVal(SizeE).castAs<DefinedSVal>();
+ // Convert the array length to size_t.
+ NonLoc IndexLength =
+ SVB.evalCast(SizeD, SizeTy, SizeE->getType()).castAs<NonLoc>();
+ // Multiply the array length by the element size.
+ SVal Mul = SVB.evalBinOpNN(State, BO_Mul, ArrSize, IndexLength, SizeTy);
+ if (auto MulNonLoc = Mul.getAs<NonLoc>())
+ ArrSize = *MulNonLoc;
+ else
+ // Extent could not be determined.
+ return State;
+
+ if (const llvm::APSInt *IndexLVal = SVB.getKnownValue(State, IndexLength)) {
+ // Check if the array size will overflow.
+ // The size overflow check does not work with symbolic expressions because an
+ // overflow situation cannot be detected easily.
+ uint64_t IndexL = IndexLVal->getZExtValue();
+ // FIXME: See https://reviews.llvm.org/D80903 for a discussion of a
+ // difference between assume and getKnownValue that leads to
+ // unexpected behavior. Just bail out on IndexL == 0 at this point.
+ if (IndexL == 0)
+ return nullptr;
+
+ if (KnownSize <= SizeMax / IndexL) {
+ KnownSize *= IndexL;
+ } else {
+ // Array size does not fit into size_t.
+ reportBug(VLA_Overflow, SizeE, State, C);
+ return nullptr;
+ }
+ } else {
+ KnownSize = 0;
+ }
+ }
+
+ ArraySize = ArrSize;
+
+ return State;
+}
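A hedged numeric illustration of the overflow handling (LP64 target and variable names assumed): the total size is the element size of the innermost non-VLA element multiplied by every collected dimension, and the multiplication is checked against the maximum of size_t only for dimensions whose values are known.

    #include <stddef.h>

    void vla_too_large(void) {
      size_t n = (size_t)1 << 62;
      /* 4-byte elements times 2^62 rows already exceeds the size_t maximum on
         LP64, so the checker reports that the array "has too large size". */
      int vla[n][n];
      (void)vla;
    }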
+
+ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
+ ProgramStateRef State,
+ const Expr *SizeE) const {
+ SVal SizeV = C.getSVal(SizeE);
+
+ if (SizeV.isUndef()) {
+ reportBug(VLA_Garbage, SizeE, State, C);
+ return nullptr;
+ }
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (SizeV.isUnknown())
+ return nullptr;
+
+ // Check if the size is tainted.
+ if (isTainted(State, SizeV)) {
+ reportBug(VLA_Tainted, SizeE, nullptr, C,
+ std::make_unique<TaintBugVisitor>(SizeV));
+ return nullptr;
+ }
+
+ // Check if the size is zero.
+ DefinedSVal SizeD = SizeV.castAs<DefinedSVal>();
+
+ ProgramStateRef StateNotZero, StateZero;
+ std::tie(StateNotZero, StateZero) = State->assume(SizeD);
+
+ if (StateZero && !StateNotZero) {
+ reportBug(VLA_Zero, SizeE, StateZero, C);
+ return nullptr;
+ }
+
+ // From this point on, assume that the size is not zero.
+ State = StateNotZero;
+
+ // Check if the size is negative.
+ SValBuilder &SVB = C.getSValBuilder();
+
+ QualType SizeTy = SizeE->getType();
+ DefinedOrUnknownSVal Zero = SVB.makeZeroVal(SizeTy);
+
+ SVal LessThanZeroVal = SVB.evalBinOp(State, BO_LT, SizeD, Zero, SizeTy);
+ if (Optional<DefinedSVal> LessThanZeroDVal =
+ LessThanZeroVal.getAs<DefinedSVal>()) {
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StatePos, StateNeg;
+
+ std::tie(StateNeg, StatePos) = CM.assumeDual(State, *LessThanZeroDVal);
+ if (StateNeg && !StatePos) {
+ reportBug(VLA_Negative, SizeE, State, C);
+ return nullptr;
+ }
+ State = StatePos;
+ }
+
+ return State;
+}
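Minimal assumed examples for the per-dimension checks above (zero and negative sizes are reported; the tainted case would additionally need attacker-controlled input).

    void vla_zero_size(void) {
      int n = 0;
      int a[n]; /* warning: declared variable-length array has zero size */
      (void)a;
    }

    void vla_negative_size(int m) {
      if (m < 0) {
        int b[m]; /* warning: declared variable-length array has negative size */
        (void)b;
      }
    }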
+
void VLASizeChecker::reportBug(
VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
CheckerContext &C, std::unique_ptr<BugReporterVisitor> Visitor) const {
@@ -70,6 +236,9 @@ void VLASizeChecker::reportBug(
case VLA_Negative:
os << "has negative size";
break;
+ case VLA_Overflow:
+ os << "has too large size";
+ break;
}
auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
@@ -83,108 +252,89 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!DS->isSingleDecl())
return;
- const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (!VD)
- return;
-
ASTContext &Ctx = C.getASTContext();
- const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
- if (!VLA)
- return;
+ SValBuilder &SVB = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+ QualType TypeToCheck;
- // FIXME: Handle multi-dimensional VLAs.
- const Expr *SE = VLA->getSizeExpr();
- ProgramStateRef state = C.getState();
- SVal sizeV = C.getSVal(SE);
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (sizeV.isUndef()) {
- reportBug(VLA_Garbage, SE, state, C);
+ if (VD)
+ TypeToCheck = VD->getType().getCanonicalType();
+ else if (const auto *TND = dyn_cast<TypedefNameDecl>(DS->getSingleDecl()))
+ TypeToCheck = TND->getUnderlyingType().getCanonicalType();
+ else
return;
- }
- // See if the size value is known. It can't be undefined because we would have
- // warned about that already.
- if (sizeV.isUnknown())
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(TypeToCheck);
+ if (!VLA)
return;
- // Check if the size is tainted.
- if (isTainted(state, sizeV)) {
- reportBug(VLA_Tainted, SE, nullptr, C,
- std::make_unique<TaintBugVisitor>(sizeV));
- return;
- }
+ // Check the VLA sizes for validity.
- // Check if the size is zero.
- DefinedSVal sizeD = sizeV.castAs<DefinedSVal>();
+ SVal ArraySize;
- ProgramStateRef stateNotZero, stateZero;
- std::tie(stateNotZero, stateZero) = state->assume(sizeD);
+ State = checkVLA(C, State, VLA, ArraySize);
+ if (!State)
+ return;
- if (stateZero && !stateNotZero) {
- reportBug(VLA_Zero, SE, stateZero, C);
+ auto ArraySizeNL = ArraySize.getAs<NonLoc>();
+ if (!ArraySizeNL) {
+ // The array size could not be determined, but the state may contain new assumptions.
+ C.addTransition(State);
return;
}
- // From this point on, assume that the size is not zero.
- state = stateNotZero;
-
// VLASizeChecker is responsible for defining the extent of the array being
// declared. We do this by multiplying the array length by the element size,
// then matching that with the array region's extent symbol.
- // Check if the size is negative.
- SValBuilder &svalBuilder = C.getSValBuilder();
+ if (VD) {
+ // Assume that the array's size matches the region size.
+ const LocationContext *LC = C.getLocationContext();
+ DefinedOrUnknownSVal DynSize =
+ getDynamicSize(State, State->getRegion(VD, LC), SVB);
- QualType Ty = SE->getType();
- DefinedOrUnknownSVal Zero = svalBuilder.makeZeroVal(Ty);
+ DefinedOrUnknownSVal SizeIsKnown = SVB.evalEQ(State, DynSize, *ArraySizeNL);
+ State = State->assume(SizeIsKnown, true);
- SVal LessThanZeroVal = svalBuilder.evalBinOp(state, BO_LT, sizeD, Zero, Ty);
- if (Optional<DefinedSVal> LessThanZeroDVal =
- LessThanZeroVal.getAs<DefinedSVal>()) {
- ConstraintManager &CM = C.getConstraintManager();
- ProgramStateRef StatePos, StateNeg;
-
- std::tie(StateNeg, StatePos) = CM.assumeDual(state, *LessThanZeroDVal);
- if (StateNeg && !StatePos) {
- reportBug(VLA_Negative, SE, state, C);
- return;
- }
- state = StatePos;
+ // Assume should not fail at this point.
+ assert(State);
}
- // Convert the array length to size_t.
- QualType SizeTy = Ctx.getSizeType();
- NonLoc ArrayLength =
- svalBuilder.evalCast(sizeD, SizeTy, SE->getType()).castAs<NonLoc>();
+ // Remember our assumptions!
+ C.addTransition(State);
+}
- // Get the element size.
- CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
- SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
+void VLASizeChecker::checkPreStmt(const UnaryExprOrTypeTraitExpr *UETTE,
+ CheckerContext &C) const {
+ // Want to check for sizeof.
+ if (UETTE->getKind() != UETT_SizeOf)
+ return;
- // Multiply the array length by the element size.
- SVal ArraySizeVal = svalBuilder.evalBinOpNN(
- state, BO_Mul, ArrayLength, EleSizeVal.castAs<NonLoc>(), SizeTy);
+ // Ensure a type argument.
+ if (!UETTE->isArgumentType())
+ return;
- // Finally, assume that the array's extent matches the given size.
- const LocationContext *LC = C.getLocationContext();
- DefinedOrUnknownSVal Extent =
- state->getRegion(VD, LC)->getExtent(svalBuilder);
- DefinedOrUnknownSVal ArraySize = ArraySizeVal.castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal sizeIsKnown =
- svalBuilder.evalEQ(state, Extent, ArraySize);
- state = state->assume(sizeIsKnown, true);
+ const VariableArrayType *VLA = C.getASTContext().getAsVariableArrayType(
+ UETTE->getTypeOfArgument().getCanonicalType());
+ // Ensure that the type is a VLA.
+ if (!VLA)
+ return;
- // Assume should not fail at this point.
- assert(state);
+ ProgramStateRef State = C.getState();
+ SVal ArraySize;
+ State = checkVLA(C, State, VLA, ArraySize);
+ if (!State)
+ return;
- // Remember our assumptions!
- C.addTransition(state);
+ C.addTransition(State);
}
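The new sizeof handling applies the same checks to the variably-modified type itself; a small assumed example:

    #include <stddef.h>

    size_t sizeof_vla(int n) {
      if (n < 0)
        return sizeof(int[n]); /* reported: the VLA type "has negative size",
                                  even though no array object is declared */
      return sizeof(int[n]);   /* n >= 0: only the non-zero assumption is
                                  recorded */
    }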
void ento::registerVLASizeChecker(CheckerManager &mgr) {
mgr.registerChecker<VLASizeChecker>();
}
-bool ento::shouldRegisterVLASizeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVLASizeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index a3610514a924..dde5912b6d6e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -404,7 +404,7 @@ void ento::registerValistBase(CheckerManager &mgr) {
mgr.registerChecker<ValistChecker>();
}
-bool ento::shouldRegisterValistBase(const LangOptions &LO) {
+bool ento::shouldRegisterValistBase(const CheckerManager &mgr) {
return true;
}
@@ -416,7 +416,7 @@ bool ento::shouldRegisterValistBase(const LangOptions &LO) {
mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ bool ento::shouldRegister##name##Checker(const CheckerManager &mgr) { \
return true; \
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index 6724eead5072..8f147026ae19 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -98,12 +98,13 @@ bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
if (VforkWhitelist.empty()) {
// According to manpage.
const char *ids[] = {
- "_exit",
"_Exit",
+ "_exit",
"execl",
- "execlp",
"execle",
+ "execlp",
"execv",
+ "execve",
"execvp",
"execvpe",
nullptr
@@ -216,6 +217,6 @@ void ento::registerVforkChecker(CheckerManager &mgr) {
mgr.registerChecker<VforkChecker>();
}
-bool ento::shouldRegisterVforkChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVforkChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index fd93fc33115f..f49ee5fa5ad3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -224,14 +224,17 @@ void ento::registerVirtualCallChecker(CheckerManager &Mgr) {
}
}
-bool ento::shouldRegisterVirtualCallModeling(const LangOptions &LO) {
+bool ento::shouldRegisterVirtualCallModeling(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
-bool ento::shouldRegisterPureVirtualCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPureVirtualCallChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
-bool ento::shouldRegisterVirtualCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVirtualCallChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
new file mode 100644
index 000000000000..34c072ac2241
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -0,0 +1,93 @@
+//=======- ASTUtils.cpp ------------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+
+using llvm::Optional;
+namespace clang {
+
+std::pair<const Expr *, bool>
+tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
+ while (E) {
+ if (auto *cast = dyn_cast<CastExpr>(E)) {
+ if (StopAtFirstRefCountedObj) {
+ if (auto *ConversionFunc =
+ dyn_cast_or_null<FunctionDecl>(cast->getConversionFunction())) {
+ if (isCtorOfRefCounted(ConversionFunc))
+ return {E, true};
+ }
+ }
+ // FIXME: This can give false "origin" that would lead to false negatives
+ // in checkers. See https://reviews.llvm.org/D37023 for reference.
+ E = cast->getSubExpr();
+ continue;
+ }
+ if (auto *call = dyn_cast<CallExpr>(E)) {
+ if (auto *memberCall = dyn_cast<CXXMemberCallExpr>(call)) {
+ if (isGetterOfRefCounted(memberCall->getMethodDecl())) {
+ E = memberCall->getImplicitObjectArgument();
+ if (StopAtFirstRefCountedObj) {
+ return {E, true};
+ }
+ continue;
+ }
+ }
+
+ if (auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (operatorCall->getNumArgs() == 1) {
+ E = operatorCall->getArg(0);
+ continue;
+ }
+ }
+
+ if (auto *callee = call->getDirectCallee()) {
+ if (isCtorOfRefCounted(callee)) {
+ if (StopAtFirstRefCountedObj)
+ return {E, true};
+
+ E = call->getArg(0);
+ continue;
+ }
+
+ if (isPtrConversion(callee)) {
+ E = call->getArg(0);
+ continue;
+ }
+ }
+ }
+ if (auto *unaryOp = dyn_cast<UnaryOperator>(E)) {
+ // FIXME: Currently accepts ANY unary operator. Is it OK?
+ E = unaryOp->getSubExpr();
+ continue;
+ }
+
+ break;
+ }
+ // Some other expression.
+ return {E, false};
+}
+
+bool isASafeCallArg(const Expr *E) {
+ assert(E);
+ if (auto *Ref = dyn_cast<DeclRefExpr>(E)) {
+ if (auto *D = dyn_cast_or_null<VarDecl>(Ref->getFoundDecl())) {
+ if (isa<ParmVarDecl>(D) || D->isLocalVarDecl())
+ return true;
+ }
+ }
+
+ // TODO: checker for method calls on non-refcounted objects
+ return isa<CXXThisExpr>(E);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
new file mode 100644
index 000000000000..ed4577755457
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
@@ -0,0 +1,84 @@
+//=======- ASTUtils.h --------------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_ASTUTILS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_ASTUTILS_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/Casting.h"
+
+#include <string>
+#include <utility>
+
+namespace clang {
+class CXXRecordDecl;
+class CXXBaseSpecifier;
+class FunctionDecl;
+class CXXMethodDecl;
+class Expr;
+
+/// This function de facto defines the set of transformations that we consider
+/// safe (in a heuristic sense). These transformations, when given a safe value
+/// as input, should produce a safe value (or an object that provides safe
+/// values).
+///
+/// For more context see the Static Analyzer checkers documentation, specifically
+/// the webkit.UncountedCallArgsChecker checker. Whitelist of transformations:
+/// - constructors of ref-counted types (including factory methods)
+/// - getters of ref-counted types
+/// - member overloaded operators
+/// - casts
+/// - unary operators like ``&`` or ``*``
+///
+/// If the passed expression is of an uncounted pointer/reference type, we try
+/// to find the "origin" of the pointer value.
+/// The origin can be, for example, a local variable, nullptr, a constant or
+/// the this-pointer.
+///
+/// Certain subexpression nodes represent transformations that don't affect
+/// where the memory address originates from. We try to traverse such
+/// subexpressions to get to the relevant child nodes. Whenever we encounter a
+/// subexpression that can't be ignored, whose semantics we don't model, or
+/// that has multiple children, we stop.
+///
+/// \p E is an expression of uncounted pointer/reference type.
+/// If \p StopAtFirstRefCountedObj is true and we encounter a subexpression that
+/// represents a ref-counted object during the traversal, we return the
+/// relevant sub-expression and true.
+///
+/// \returns subexpression that we traversed to and if \p
+/// StopAtFirstRefCountedObj is true we also return whether we stopped early.
+std::pair<const clang::Expr *, bool>
+tryToFindPtrOrigin(const clang::Expr *E, bool StopAtFirstRefCountedObj);
+
+/// For \p E referring to a ref-countable/-counted pointer/reference we return
+/// whether it's a safe call argument. Examples: function parameter or
+/// this-pointer. The logic relies on the set of recursive rules we enforce for
+/// the WebKit codebase.
+///
+/// \returns Whether \p E is a safe call argument.
+bool isASafeCallArg(const clang::Expr *E);
+
+/// \returns name of AST node or empty string.
+template <typename T> std::string safeGetName(const T *ASTNode) {
+ const auto *const ND = llvm::dyn_cast_or_null<clang::NamedDecl>(ASTNode);
+ if (!ND)
+ return "";
+
+  // If ND is, for example, "operator|", the getName() call below would
+  // assert.
+ if (!ND->getDeclName().isIdentifier())
+ return "";
+
+ return ND->getName().str();
+}
+
+} // namespace clang
+
+#endif
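A small sketch of the traversal documented above, using hypothetical stand-ins for the WebKit types (RefPtr, Node and consume are invented names, not part of the patch):

template <typename T> struct RefPtr {
  T *get() const;                            // modeled as a getter of a ref-counted type
};
struct Node { void ref(); void deref(); };   // ref-countable by the model

void consume(Node *);                        // parameter is an uncounted raw pointer

void example(RefPtr<Node> &holder) {
  // tryToFindPtrOrigin(arg, /*StopAtFirstRefCountedObj=*/true) recognizes
  // RefPtr::get() as a getter and stops at 'holder', returning {holder, true};
  // an origin that is a ref-counted object is then treated as safe.
  consume(holder.get());
}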
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h
new file mode 100644
index 000000000000..781a8d746001
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h
@@ -0,0 +1,36 @@
+//=======- DiagOutputUtils.h -------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_DIAGPRINTUTILS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_DIAGPRINTUTILS_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+template <typename NamedDeclDerivedT>
+void printQuotedQualifiedName(llvm::raw_ostream &Os,
+ const NamedDeclDerivedT &D) {
+ Os << "'";
+ D->getNameForDiagnostic(Os, D->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ Os << "'";
+}
+
+template <typename NamedDeclDerivedT>
+void printQuotedName(llvm::raw_ostream &Os, const NamedDeclDerivedT &D) {
+ Os << "'";
+ D->getNameForDiagnostic(Os, D->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/false);
+ Os << "'";
+}
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
new file mode 100644
index 000000000000..3956db933b35
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
@@ -0,0 +1,155 @@
+//=======- NoUncountedMembersChecker.cpp -------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class NoUncountedMemberChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ BugType Bug;
+ mutable BugReporter *BR;
+
+public:
+ NoUncountedMemberChecker()
+ : Bug(this,
+ "Member variable is a raw-poiner/reference to reference-countable "
+ "type",
+ "WebKit coding guidelines") {}
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const NoUncountedMemberChecker *Checker;
+ explicit LocalVisitor(const NoUncountedMemberChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitRecordDecl(const RecordDecl *RD) {
+ Checker->visitRecordDecl(RD);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitRecordDecl(const RecordDecl *RD) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ for (auto Member : RD->fields()) {
+ const Type *MemberType = Member->getType().getTypePtrOrNull();
+ if (!MemberType)
+ continue;
+
+ if (auto *MemberCXXRD = MemberType->getPointeeCXXRecordDecl()) {
+ // If we don't see the definition we just don't know.
+ if (MemberCXXRD->hasDefinition() && isRefCountable(MemberCXXRD))
+ reportBug(Member, MemberType, MemberCXXRD, RD);
+ }
+ }
+ }
+
+ bool shouldSkipDecl(const RecordDecl *RD) const {
+ if (!RD->isThisDeclarationADefinition())
+ return true;
+
+ if (RD->isImplicit())
+ return true;
+
+ if (RD->isLambda())
+ return true;
+
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ const auto RDLocation = RD->getLocation();
+ if (!RDLocation.isValid())
+ return true;
+
+ const auto Kind = RD->getTagKind();
+    // FIXME: Should we check union members too?
+ if (Kind != TTK_Struct && Kind != TTK_Class)
+ return true;
+
+ // Ignore CXXRecords that come from system headers.
+ if (BR->getSourceManager().isInSystemHeader(RDLocation))
+ return true;
+
+    // Ref-counted smart pointers actually have a raw pointer to an uncounted
+    // type as a member, but we trust them to handle it correctly.
+ auto CXXRD = llvm::dyn_cast_or_null<CXXRecordDecl>(RD);
+ if (CXXRD)
+ return isRefCounted(CXXRD);
+
+ return false;
+ }
+
+ void reportBug(const FieldDecl *Member, const Type *MemberType,
+ const CXXRecordDecl *MemberCXXRD,
+ const RecordDecl *ClassCXXRD) const {
+ assert(Member);
+ assert(MemberType);
+ assert(MemberCXXRD);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << "Member variable ";
+ printQuotedName(Os, Member);
+ Os << " in ";
+ printQuotedQualifiedName(Os, ClassCXXRD);
+ Os << " is a "
+ << (isa<PointerType>(MemberType) ? "raw pointer" : "reference")
+ << " to ref-countable type ";
+ printQuotedQualifiedName(Os, MemberCXXRD);
+ Os << "; member variables must be ref-counted.";
+
+ PathDiagnosticLocation BSLoc(Member->getSourceRange().getBegin(),
+ BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(Member->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerNoUncountedMemberChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<NoUncountedMemberChecker>();
+}
+
+bool ento::shouldRegisterNoUncountedMemberChecker(
+ const CheckerManager &Mgr) {
+ return true;
+}
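For illustration, a hypothetical class layout that this checker is meant to diagnose (RefCountable and Holder are invented names):

struct RefCountable {      // ref-countable: public ref() and deref()
  void ref();
  void deref();
};

struct Holder {
  RefCountable *Raw;       // flagged: raw-pointer member to a ref-countable type
  RefCountable &Ref;       // flagged: reference member to a ref-countable type
  // Ref<RefCountable> Counted;  // the guideline-conforming alternative
};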
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
new file mode 100644
index 000000000000..168cfd511170
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -0,0 +1,172 @@
+//=======- PtrTypesSemantics.cpp ---------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PtrTypesSemantics.h"
+#include "ASTUtils.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+
+using llvm::Optional;
+using namespace clang;
+
+namespace {
+
+bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
+ assert(R);
+
+ bool hasRef = false;
+ bool hasDeref = false;
+ for (const CXXMethodDecl *MD : R->methods()) {
+ const auto MethodName = safeGetName(MD);
+
+ if (MethodName == "ref" && MD->getAccess() == AS_public) {
+ if (hasDeref)
+ return true;
+ hasRef = true;
+ } else if (MethodName == "deref" && MD->getAccess() == AS_public) {
+ if (hasRef)
+ return true;
+ hasDeref = true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+namespace clang {
+
+const CXXRecordDecl *isRefCountable(const CXXBaseSpecifier *Base) {
+ assert(Base);
+
+ const Type *T = Base->getType().getTypePtrOrNull();
+ if (!T)
+ return nullptr;
+
+ const CXXRecordDecl *R = T->getAsCXXRecordDecl();
+ if (!R)
+ return nullptr;
+
+ return hasPublicRefAndDeref(R) ? R : nullptr;
+}
+
+bool isRefCountable(const CXXRecordDecl *R) {
+ assert(R);
+
+ R = R->getDefinition();
+ assert(R);
+
+ if (hasPublicRefAndDeref(R))
+ return true;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(const_cast<CXXRecordDecl *>(R));
+
+ const auto isRefCountableBase = [](const CXXBaseSpecifier *Base,
+ CXXBasePath &) {
+ return clang::isRefCountable(Base);
+ };
+
+ return R->lookupInBases(isRefCountableBase, Paths,
+ /*LookupInDependent =*/true);
+}
+
+bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
+ assert(F);
+ const auto &FunctionName = safeGetName(F);
+
+ return FunctionName == "Ref" || FunctionName == "makeRef"
+
+ || FunctionName == "RefPtr" || FunctionName == "makeRefPtr"
+
+ || FunctionName == "UniqueRef" || FunctionName == "makeUniqueRef" ||
+ FunctionName == "makeUniqueRefWithoutFastMallocCheck"
+
+ || FunctionName == "String" || FunctionName == "AtomString" ||
+ FunctionName == "UniqueString"
+ // FIXME: Implement as attribute.
+ || FunctionName == "Identifier";
+}
+
+bool isUncounted(const CXXRecordDecl *Class) {
+ // Keep isRefCounted first as it's cheaper.
+ return !isRefCounted(Class) && isRefCountable(Class);
+}
+
+bool isUncountedPtr(const Type *T) {
+ assert(T);
+
+ if (T->isPointerType() || T->isReferenceType()) {
+ if (auto *CXXRD = T->getPointeeCXXRecordDecl()) {
+ return isUncounted(CXXRD);
+ }
+ }
+ return false;
+}
+
+bool isGetterOfRefCounted(const CXXMethodDecl *M) {
+ assert(M);
+
+ if (isa<CXXMethodDecl>(M)) {
+ const CXXRecordDecl *calleeMethodsClass = M->getParent();
+ auto className = safeGetName(calleeMethodsClass);
+ auto methodName = safeGetName(M);
+
+ if (((className == "Ref" || className == "RefPtr") &&
+ methodName == "get") ||
+ ((className == "String" || className == "AtomString" ||
+ className == "AtomStringImpl" || className == "UniqueString" ||
+ className == "UniqueStringImpl" || className == "Identifier") &&
+ methodName == "impl"))
+ return true;
+
+ // Ref<T> -> T conversion
+ // FIXME: Currently allowing any Ref<T> -> whatever cast.
+ if (className == "Ref" || className == "RefPtr") {
+ if (auto *maybeRefToRawOperator = dyn_cast<CXXConversionDecl>(M)) {
+ if (auto *targetConversionType =
+ maybeRefToRawOperator->getConversionType().getTypePtrOrNull()) {
+ if (isUncountedPtr(targetConversionType)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+bool isRefCounted(const CXXRecordDecl *R) {
+ assert(R);
+ if (auto *TmplR = R->getTemplateInstantiationPattern()) {
+ // FIXME: String/AtomString/UniqueString
+ const auto &ClassName = safeGetName(TmplR);
+ return ClassName == "RefPtr" || ClassName == "Ref";
+ }
+ return false;
+}
+
+bool isPtrConversion(const FunctionDecl *F) {
+ assert(F);
+ if (isCtorOfRefCounted(F))
+ return true;
+
+ // FIXME: check # of params == 1
+ const auto FunctionName = safeGetName(F);
+ if (FunctionName == "getPtr" || FunctionName == "WeakPtr" ||
+ FunctionName == "makeWeakPtr"
+
+ || FunctionName == "downcast" || FunctionName == "bitwise_cast")
+ return true;
+
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
new file mode 100644
index 000000000000..83d9c0bcc13b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -0,0 +1,59 @@
+//=======- PtrTypesSemantics.h -----------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
+
+namespace clang {
+class CXXBaseSpecifier;
+class CXXMethodDecl;
+class CXXRecordDecl;
+class Expr;
+class FunctionDecl;
+class Type;
+
+// Ref-countability of a type is implicitly defined by the Ref<T> and RefPtr<T>
+// implementations. It can be modeled as: type T has public methods ref() and
+// deref().
+
+// In WebKit there are two ref-counted templated smart pointers: RefPtr<T> and
+// Ref<T>.
+
+/// \returns CXXRecordDecl of the base if the type is ref-countable, nullptr if
+/// not.
+const clang::CXXRecordDecl *isRefCountable(const clang::CXXBaseSpecifier *Base);
+
+/// \returns true if \p Class is ref-countable, false if not.
+/// Asserts that \p Class IS a definition.
+bool isRefCountable(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p Class is ref-counted, false if not.
+bool isRefCounted(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p Class is ref-countable AND not ref-counted, false if
+/// not. Asserts that \p Class IS a definition.
+bool isUncounted(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p T is either a raw pointer or reference to an uncounted
+/// class, false if not.
+bool isUncountedPtr(const clang::Type *T);
+
+/// \returns true if \p F creates a ref-countable object from an uncounted
+/// parameter, false if not.
+bool isCtorOfRefCounted(const clang::FunctionDecl *F);
+
+/// \returns true if \p Method is a getter of a ref-counted class, false if
+/// not.
+bool isGetterOfRefCounted(const clang::CXXMethodDecl *Method);
+
+/// \returns true if \p F is a conversion between ref-countable or ref-counted
+/// pointer types.
+bool isPtrConversion(const FunctionDecl *F);
+
+} // namespace clang
+
+#endif
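To make the relationship between these predicates concrete, here is a sketch using an invented type (Widget is not part of the patch):

class Widget {
public:
  void ref();
  void deref();
};
// For Widget's CXXRecordDecl and the type 'Widget *':
//   isRefCountable(Widget)   -> true   (public ref() and deref())
//   isRefCounted(Widget)     -> false  (Widget is not a Ref/RefPtr instantiation)
//   isUncounted(Widget)      -> true   (ref-countable but not ref-counted)
//   isUncountedPtr(Widget *) -> true   (raw pointer to an uncounted class)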
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
new file mode 100644
index 000000000000..81ce284c2dc7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -0,0 +1,167 @@
+//=======- RefCntblBaseVirtualDtorChecker.cpp --------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class RefCntblBaseVirtualDtorChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ BugType Bug;
+ mutable BugReporter *BR;
+
+public:
+ RefCntblBaseVirtualDtorChecker()
+ : Bug(this,
+ "Reference-countable base class doesn't have virtual destructor",
+ "WebKit coding guidelines") {}
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const RefCntblBaseVirtualDtorChecker *Checker;
+ explicit LocalVisitor(const RefCntblBaseVirtualDtorChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitCXXRecordDecl(const CXXRecordDecl *RD) {
+ Checker->visitCXXRecordDecl(RD);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitCXXRecordDecl(const CXXRecordDecl *RD) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(RD);
+
+ const CXXBaseSpecifier *ProblematicBaseSpecifier = nullptr;
+ const CXXRecordDecl *ProblematicBaseClass = nullptr;
+
+ const auto IsPublicBaseRefCntblWOVirtualDtor =
+ [RD, &ProblematicBaseSpecifier,
+ &ProblematicBaseClass](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ const auto AccSpec = Base->getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ return false;
+
+ llvm::Optional<const clang::CXXRecordDecl *> MaybeRefCntblBaseRD =
+ isRefCountable(Base);
+ if (!MaybeRefCntblBaseRD.hasValue())
+ return false;
+
+ const CXXRecordDecl *RefCntblBaseRD = MaybeRefCntblBaseRD.getValue();
+ if (!RefCntblBaseRD)
+ return false;
+
+ const auto *Dtor = RefCntblBaseRD->getDestructor();
+ if (!Dtor || !Dtor->isVirtual()) {
+ ProblematicBaseSpecifier = Base;
+ ProblematicBaseClass = RefCntblBaseRD;
+ return true;
+ }
+
+ return false;
+ };
+
+ if (RD->lookupInBases(IsPublicBaseRefCntblWOVirtualDtor, Paths,
+ /*LookupInDependent =*/true)) {
+ reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ }
+ }
+
+ bool shouldSkipDecl(const CXXRecordDecl *RD) const {
+ if (!RD->isThisDeclarationADefinition())
+ return true;
+
+ if (RD->isImplicit())
+ return true;
+
+ if (RD->isLambda())
+ return true;
+
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ const auto RDLocation = RD->getLocation();
+ if (!RDLocation.isValid())
+ return true;
+
+ const auto Kind = RD->getTagKind();
+ if (Kind != TTK_Struct && Kind != TTK_Class)
+ return true;
+
+ // Ignore CXXRecords that come from system headers.
+ if (BR->getSourceManager().getFileCharacteristic(RDLocation) !=
+ SrcMgr::C_User)
+ return true;
+
+ return false;
+ }
+
+ void reportBug(const CXXRecordDecl *DerivedClass,
+ const CXXBaseSpecifier *BaseSpec,
+ const CXXRecordDecl *ProblematicBaseClass) const {
+ assert(DerivedClass);
+ assert(BaseSpec);
+ assert(ProblematicBaseClass);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << (ProblematicBaseClass->isClass() ? "Class" : "Struct") << " ";
+ printQuotedQualifiedName(Os, ProblematicBaseClass);
+
+ Os << " is used as a base of "
+ << (DerivedClass->isClass() ? "class" : "struct") << " ";
+ printQuotedQualifiedName(Os, DerivedClass);
+
+ Os << " but doesn't have virtual destructor";
+
+ PathDiagnosticLocation BSLoc(BaseSpec->getSourceRange().getBegin(),
+ BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(BaseSpec->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerRefCntblBaseVirtualDtorChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<RefCntblBaseVirtualDtorChecker>();
+}
+
+bool ento::shouldRegisterRefCntblBaseVirtualDtorChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
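A hypothetical pattern this checker reports (type names are invented): a publicly inherited ref-countable base whose destructor is not virtual.

struct RefCountableBase {
  void ref();
  void deref();
  ~RefCountableBase();               // not virtual
};

// The base specifier below is expected to be flagged: the base is
// ref-countable (public ref()/deref()) but lacks a virtual destructor.
struct Derived : RefCountableBase {};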
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
new file mode 100644
index 000000000000..940a1f349831
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -0,0 +1,195 @@
+//=======- UncountedCallArgsChecker.cpp --------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class UncountedCallArgsChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+ BugType Bug{this,
+ "Uncounted call argument for a raw pointer/reference parameter",
+ "WebKit coding guidelines"};
+ mutable BugReporter *BR;
+
+public:
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const UncountedCallArgsChecker *Checker;
+ explicit LocalVisitor(const UncountedCallArgsChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ Checker->visitCallExpr(CE);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitCallExpr(const CallExpr *CE) const {
+ if (shouldSkipCall(CE))
+ return;
+
+ if (auto *F = CE->getDirectCallee()) {
+      // Skip the first argument for overloaded member operators (e.g. lambda
+ // or std::function call operator).
+ unsigned ArgIdx =
+ isa<CXXOperatorCallExpr>(CE) && dyn_cast_or_null<CXXMethodDecl>(F);
+
+ for (auto P = F->param_begin();
+ // FIXME: Also check variadic function parameters.
+ // FIXME: Also check default function arguments. Probably a different
+ // checker. In case there are default arguments the call can have
+ // fewer arguments than the callee has parameters.
+ P < F->param_end() && ArgIdx < CE->getNumArgs(); ++P, ++ArgIdx) {
+ // TODO: attributes.
+ // if ((*P)->hasAttr<SafeRefCntblRawPtrAttr>())
+ // continue;
+
+ const auto *ArgType = (*P)->getType().getTypePtrOrNull();
+ if (!ArgType)
+ continue; // FIXME? Should we bail?
+
+ // FIXME: more complex types (arrays, references to raw pointers, etc)
+ if (!isUncountedPtr(ArgType))
+ continue;
+
+ const auto *Arg = CE->getArg(ArgIdx);
+
+ std::pair<const clang::Expr *, bool> ArgOrigin =
+ tryToFindPtrOrigin(Arg, true);
+
+ // Temporary ref-counted object created as part of the call argument
+ // would outlive the call.
+ if (ArgOrigin.second)
+ continue;
+
+ if (isa<CXXNullPtrLiteralExpr>(ArgOrigin.first)) {
+ // foo(nullptr)
+ continue;
+ }
+ if (isa<IntegerLiteral>(ArgOrigin.first)) {
+ // FIXME: Check the value.
+ // foo(NULL)
+ continue;
+ }
+
+ if (isASafeCallArg(ArgOrigin.first))
+ continue;
+
+ reportBug(Arg, *P);
+ }
+ }
+ }
+
+ bool shouldSkipCall(const CallExpr *CE) const {
+ if (CE->getNumArgs() == 0)
+ return false;
+
+ // If an assignment is problematic we should warn about the sole existence
+    // of the object on the LHS.
+ if (auto *MemberOp = dyn_cast<CXXOperatorCallExpr>(CE)) {
+      // Note: assignment to a built-in type isn't derived from CallExpr.
+ if (MemberOp->isAssignmentOp())
+ return false;
+ }
+
+ const auto *Callee = CE->getDirectCallee();
+ if (!Callee)
+ return false;
+
+ auto overloadedOperatorType = Callee->getOverloadedOperator();
+ if (overloadedOperatorType == OO_EqualEqual ||
+ overloadedOperatorType == OO_ExclaimEqual ||
+ overloadedOperatorType == OO_LessEqual ||
+ overloadedOperatorType == OO_GreaterEqual ||
+ overloadedOperatorType == OO_Spaceship ||
+ overloadedOperatorType == OO_AmpAmp ||
+ overloadedOperatorType == OO_PipePipe)
+ return true;
+
+ if (isCtorOfRefCounted(Callee))
+ return true;
+
+ auto name = safeGetName(Callee);
+ if (name == "adoptRef" || name == "getPtr" || name == "WeakPtr" ||
+ name == "makeWeakPtr" || name == "downcast" || name == "bitwise_cast" ||
+ name == "is" || name == "equal" || name == "hash" ||
+ name == "isType"
+ // FIXME: Most/all of these should be implemented via attributes.
+ || name == "equalIgnoringASCIICase" ||
+ name == "equalIgnoringASCIICaseCommon" ||
+ name == "equalIgnoringNullity")
+ return true;
+
+ return false;
+ }
+
+ void reportBug(const Expr *CallArg, const ParmVarDecl *Param) const {
+ assert(CallArg);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ const std::string paramName = safeGetName(Param);
+ Os << "Call argument";
+ if (!paramName.empty()) {
+ Os << " for parameter ";
+ printQuotedQualifiedName(Os, Param);
+ }
+ Os << " is uncounted and unsafe.";
+
+ const SourceLocation SrcLocToReport =
+ isa<CXXDefaultArgExpr>(CallArg) ? Param->getDefaultArg()->getExprLoc()
+ : CallArg->getSourceRange().getBegin();
+
+ PathDiagnosticLocation BSLoc(SrcLocToReport, BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(CallArg->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerUncountedCallArgsChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<UncountedCallArgsChecker>();
+}
+
+bool ento::shouldRegisterUncountedCallArgsChecker(const CheckerManager &) {
+ return true;
+}
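For illustration, a hypothetical call site contrasting a safe and an unsafe argument under the rules above (Obj, Cache and use are invented names):

struct Obj { void ref(); void deref(); };   // ref-countable, held uncounted here
struct Cache { Obj *cached(); };            // returns an uncounted pointer

void use(Obj *);                            // raw-pointer parameter

void caller(Obj *param, Cache &c) {
  use(param);       // not flagged: the origin is a function parameter
  use(c.cached());  // expected warning: call argument is uncounted and unsafe
}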
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index 968c50e33f6d..ec612dde3b8b 100755
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKER_YAML_H
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
new file mode 100644
index 000000000000..1c67bbd77ec8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
@@ -0,0 +1,66 @@
+//== PutenvWithAutoChecker.cpp --------------------------------- -*- C++ -*--=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PutenvWithAutoChecker which finds calls to the ``putenv``
+// function with an automatic variable as the argument.
+// https://wiki.sei.cmu.edu/confluence/x/6NYxBQ
+//
+//===----------------------------------------------------------------------===//
+
+#include "../AllocationState.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PutenvWithAutoChecker : public Checker<check::PostCall> {
+private:
+ BugType BT{this, "'putenv' function should not be called with auto variables",
+ categories::SecurityError};
+ const CallDescription Putenv{"putenv", 1};
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+} // namespace
+
+void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.isCalled(Putenv))
+ return;
+
+ SVal ArgV = Call.getArgSVal(0);
+ const Expr *ArgExpr = Call.getArgExpr(0);
+ const MemSpaceRegion *MSR = ArgV.getAsRegion()->getMemorySpace();
+
+ if (!isa<StackSpaceRegion>(MSR))
+ return;
+
+ StringRef ErrorMsg = "The 'putenv' function should not be called with "
+ "arguments that have automatic storage";
+ ExplodedNode *N = C.generateErrorNode();
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, ErrorMsg, N);
+
+ // Track the argument.
+ bugreporter::trackExpressionValue(Report->getErrorNode(), ArgExpr, *Report);
+
+ C.emitReport(std::move(Report));
+}
+
+void ento::registerPutenvWithAuto(CheckerManager &Mgr) {
+ Mgr.registerChecker<PutenvWithAutoChecker>();
+}
+
+bool ento::shouldRegisterPutenvWithAuto(const CheckerManager &) { return true; }
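A minimal sketch of the pattern this checker reports, following the CERT rule linked above (the function name is illustrative):

#include <stdlib.h>

int set_flag(void) {
  char env[] = "FLAG=1";   // automatic storage
  return putenv(env);      // expected warning: argument has automatic storage
}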
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index fdd03c75920d..ecfc7106560e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -13,7 +13,7 @@ using namespace ento;
void AnalysisManager::anchor() { }
-AnalysisManager::AnalysisManager(ASTContext &ASTCtx,
+AnalysisManager::AnalysisManager(ASTContext &ASTCtx, Preprocessor &PP,
const PathDiagnosticConsumers &PDC,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
@@ -38,7 +38,7 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx,
Options.ShouldElideConstructors,
/*addVirtualBaseBranches=*/true,
injector),
- Ctx(ASTCtx), LangOpts(ASTCtx.getLangOpts()),
+ Ctx(ASTCtx), PP(PP), LangOpts(ASTCtx.getLangOpts()),
PathConsumers(PDC), CreateStoreMgr(storemgr),
CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr),
options(Options) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 7cd48bf44374..73f057f09550 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -239,7 +239,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (Amt >= V1.getBitWidth())
return nullptr;
- if (!Ctx.getLangOpts().CPlusPlus2a) {
+ if (!Ctx.getLangOpts().CPlusPlus20) {
if (V1.isSigned() && V1.isNegative())
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 1864bcef9b87..72be4e81c83d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -34,10 +34,12 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ArrayRef.h"
@@ -51,6 +53,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
@@ -2105,6 +2108,53 @@ void BuiltinBug::anchor() {}
// Methods for BugReport and subclasses.
//===----------------------------------------------------------------------===//
+LLVM_ATTRIBUTE_USED static bool
+isDependency(const CheckerRegistryData &Registry, StringRef CheckerName) {
+ for (const std::pair<StringRef, StringRef> &Pair : Registry.Dependencies) {
+ if (Pair.second == CheckerName)
+ return true;
+ }
+ return false;
+}
+
+LLVM_ATTRIBUTE_USED static bool isHidden(const CheckerRegistryData &Registry,
+ StringRef CheckerName) {
+ for (const CheckerInfo &Checker : Registry.Checkers) {
+ if (Checker.FullName == CheckerName)
+ return Checker.IsHidden;
+ }
+ llvm_unreachable(
+ "Checker name not found in CheckerRegistry -- did you retrieve it "
+ "correctly from CheckerManager::getCurrentCheckerName?");
+}
+
+PathSensitiveBugReport::PathSensitiveBugReport(
+ const BugType &bt, StringRef shortDesc, StringRef desc,
+ const ExplodedNode *errorNode, PathDiagnosticLocation LocationToUnique,
+ const Decl *DeclToUnique)
+ : BugReport(Kind::PathSensitive, bt, shortDesc, desc), ErrorNode(errorNode),
+ ErrorNodeRange(getStmt() ? getStmt()->getSourceRange() : SourceRange()),
+ UniqueingLocation(LocationToUnique), UniqueingDecl(DeclToUnique) {
+ assert(!isDependency(ErrorNode->getState()
+ ->getAnalysisManager()
+ .getCheckerManager()
+ ->getCheckerRegistryData(),
+ bt.getCheckerName()) &&
+ "Some checkers depend on this one! We don't allow dependency "
+ "checkers to emit warnings, because checkers should depend on "
+ "*modeling*, not *diagnostics*.");
+
+ assert(
+ (bt.getCheckerName().startswith("debug") ||
+ !isHidden(ErrorNode->getState()
+ ->getAnalysisManager()
+ .getCheckerManager()
+ ->getCheckerRegistryData(),
+ bt.getCheckerName())) &&
+ "Hidden checkers musn't emit diagnostics as they are by definition "
+ "non-user facing!");
+}
+
void PathSensitiveBugReport::addVisitor(
std::unique_ptr<BugReporterVisitor> visitor) {
if (!visitor)
@@ -2193,12 +2243,12 @@ static void insertToInterestingnessMap(
return;
case bugreporter::TrackingKind::Condition:
return;
- }
+ }
- llvm_unreachable(
- "BugReport::markInteresting currently can only handle 2 different "
- "tracking kinds! Please define what tracking kind should this entitiy"
- "have, if it was already marked as interesting with a different kind!");
+ llvm_unreachable(
+ "BugReport::markInteresting currently can only handle 2 different "
+ "tracking kinds! Please define what tracking kind should this entitiy"
+ "have, if it was already marked as interesting with a different kind!");
}
void PathSensitiveBugReport::markInteresting(SymbolRef sym,
@@ -2389,6 +2439,7 @@ ProgramStateManager &PathSensitiveBugReporter::getStateManager() const {
return Eng.getStateManager();
}
+BugReporter::BugReporter(BugReporterData &d) : D(d) {}
BugReporter::~BugReporter() {
// Make sure reports are flushed.
assert(StrBugTypes.empty() &&
@@ -2409,7 +2460,7 @@ void BugReporter::FlushReports() {
// EmitBasicReport.
// FIXME: There are leaks from checkers that assume that the BugTypes they
// create will be destroyed by the BugReporter.
- llvm::DeleteContainerSeconds(StrBugTypes);
+ StrBugTypes.clear();
}
//===----------------------------------------------------------------------===//
@@ -2781,7 +2832,7 @@ Optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
R->clearVisitors();
R->addVisitor(std::make_unique<FalsePositiveRefutationBRVisitor>());
- // We don't overrite the notes inserted by other visitors because the
+ // We don't overwrite the notes inserted by other visitors because the
// refutation manager does not add any new note to the path
generateVisitorsDiagnostics(R, BugPath->ErrorNode, BRC);
}
@@ -3262,8 +3313,8 @@ BugType *BugReporter::getBugTypeForName(CheckerNameRef CheckName,
SmallString<136> fullDesc;
llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name
<< ":" << category;
- BugType *&BT = StrBugTypes[fullDesc];
+ std::unique_ptr<BugType> &BT = StrBugTypes[fullDesc];
if (!BT)
- BT = new BugType(CheckName, name, category);
- return BT;
+ BT = std::make_unique<BugType>(CheckName, name, category);
+ return BT.get();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 0525b5c41e34..ef4d38ff498f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -45,7 +45,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -358,7 +357,7 @@ class NoStoreFuncVisitor final : public BugReporterVisitor {
public:
NoStoreFuncVisitor(const SubRegion *R, bugreporter::TrackingKind TKind)
- : RegionOfInterest(R), MmrMgr(*R->getMemRegionManager()),
+ : RegionOfInterest(R), MmrMgr(R->getMemRegionManager()),
SM(MmrMgr.getContext().getSourceManager()),
PP(MmrMgr.getContext().getPrintingPolicy()), TKind(TKind) {}
@@ -813,7 +812,7 @@ public:
const SourceManager &SMgr = BRC.getSourceManager();
if (auto Loc = matchAssignment(N)) {
if (isFunctionMacroExpansion(*Loc, SMgr)) {
- std::string MacroName = getMacroName(*Loc, BRC);
+ std::string MacroName = std::string(getMacroName(*Loc, BRC));
SourceLocation BugLoc = BugPoint->getStmt()->getBeginLoc();
if (!BugLoc.isMacroID() || getMacroName(BugLoc, BRC) != MacroName)
BR.markInvalid(getTag(), MacroName.c_str());
@@ -1735,10 +1734,9 @@ constructDebugPieceForTrackedCondition(const Expr *Cond,
!BRC.getAnalyzerOptions().ShouldTrackConditionsDebug)
return nullptr;
- std::string ConditionText = Lexer::getSourceText(
+ std::string ConditionText = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(Cond->getSourceRange()),
- BRC.getSourceManager(),
- BRC.getASTContext().getLangOpts());
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts()));
return std::make_shared<PathDiagnosticEventPiece>(
PathDiagnosticLocation::createBegin(
@@ -2494,7 +2492,7 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest(
Out << WillBeUsedForACondition;
// Convert 'field ...' to 'Field ...' if it is a MemberExpr.
- std::string Message = Out.str();
+ std::string Message = std::string(Out.str());
Message[0] = toupper(Message[0]);
// If we know the value create a pop-up note to the value part of 'BExpr'.
@@ -2821,7 +2819,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
BugReporterContext &BRC, const ExplodedNode *EndPathNode,
PathSensitiveBugReport &BR) {
// Collect new constraints
- VisitNode(EndPathNode, BRC, BR);
+ addConstraints(EndPathNode, /*OverwriteConstraintsOnExistingSyms=*/true);
// Create a refutation manager
llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
@@ -2832,30 +2830,30 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
const SymbolRef Sym = I.first;
auto RangeIt = I.second.begin();
- llvm::SMTExprRef Constraints = SMTConv::getRangeExpr(
+ llvm::SMTExprRef SMTConstraints = SMTConv::getRangeExpr(
RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
/*InRange=*/true);
while ((++RangeIt) != I.second.end()) {
- Constraints = RefutationSolver->mkOr(
- Constraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
- RangeIt->From(), RangeIt->To(),
- /*InRange=*/true));
+ SMTConstraints = RefutationSolver->mkOr(
+ SMTConstraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
+ RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true));
}
- RefutationSolver->addConstraint(Constraints);
+ RefutationSolver->addConstraint(SMTConstraints);
}
// And check for satisfiability
- Optional<bool> isSat = RefutationSolver->check();
- if (!isSat.hasValue())
+ Optional<bool> IsSAT = RefutationSolver->check();
+ if (!IsSAT.hasValue())
return;
- if (!isSat.getValue())
+ if (!IsSAT.getValue())
BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
}
-PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
- const ExplodedNode *N, BugReporterContext &, PathSensitiveBugReport &) {
+void FalsePositiveRefutationBRVisitor::addConstraints(
+ const ExplodedNode *N, bool OverwriteConstraintsOnExistingSyms) {
// Collect new constraints
const ConstraintRangeTy &NewCs = N->getState()->get<ConstraintRange>();
ConstraintRangeTy::Factory &CF =
@@ -2865,10 +2863,19 @@ PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
for (auto const &C : NewCs) {
const SymbolRef &Sym = C.first;
if (!Constraints.contains(Sym)) {
+ // This symbol is new, just add the constraint.
+ Constraints = CF.add(Constraints, Sym, C.second);
+ } else if (OverwriteConstraintsOnExistingSyms) {
+ // Overwrite the associated constraint of the Symbol.
+ Constraints = CF.remove(Constraints, Sym);
Constraints = CF.add(Constraints, Sym, C.second);
}
}
+}
+PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
+ const ExplodedNode *N, BugReporterContext &, PathSensitiveBugReport &) {
+ addConstraints(N, /*OverwriteConstraintsOnExistingSyms=*/false);
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 168d6fe6ec48..78d13ddfb773 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -172,23 +172,9 @@ AnalysisDeclContext *CallEvent::getCalleeAnalysisDeclContext() const {
if (!D)
return nullptr;
- // TODO: For now we skip functions without definitions, even if we have
- // our own getDecl(), because it's hard to find out which re-declaration
- // is going to be used, and usually clients don't really care about this
- // situation because there's a loss of precision anyway because we cannot
- // inline the call.
- RuntimeDefinition RD = getRuntimeDefinition();
- if (!RD.getDecl())
- return nullptr;
-
AnalysisDeclContext *ADC =
LCtx->getAnalysisDeclContext()->getManager()->getContext(D);
- // TODO: For now we skip virtual functions, because this also rises
- // the problem of which decl to use, but now it's across different classes.
- if (RD.mayHaveOtherDefinitions() || RD.getDecl() != ADC->getDecl())
- return nullptr;
-
return ADC;
}
@@ -222,39 +208,17 @@ CallEvent::getCalleeStackFrame(unsigned BlockCount) const {
return ADC->getManager()->getStackFrame(ADC, LCtx, E, B, BlockCount, Idx);
}
-const VarRegion *CallEvent::getParameterLocation(unsigned Index,
- unsigned BlockCount) const {
+const ParamVarRegion
+*CallEvent::getParameterLocation(unsigned Index, unsigned BlockCount) const {
const StackFrameContext *SFC = getCalleeStackFrame(BlockCount);
// We cannot construct a VarRegion without a stack frame.
if (!SFC)
return nullptr;
- // Retrieve parameters of the definition, which are different from
- // CallEvent's parameters() because getDecl() isn't necessarily
- // the definition. SFC contains the definition that would be used
- // during analysis.
- const Decl *D = SFC->getDecl();
-
- // TODO: Refactor into a virtual method of CallEvent, like parameters().
- const ParmVarDecl *PVD = nullptr;
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- PVD = FD->parameters()[Index];
- else if (const auto *BD = dyn_cast<BlockDecl>(D))
- PVD = BD->parameters()[Index];
- else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
- PVD = MD->parameters()[Index];
- else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
- PVD = CD->parameters()[Index];
- assert(PVD && "Unexpected Decl kind!");
-
- const VarRegion *VR =
- State->getStateManager().getRegionManager().getVarRegion(PVD, SFC);
-
- // This sanity check would fail if our parameter declaration doesn't
- // correspond to the stack frame's function declaration.
- assert(VR->getStackFrame() == SFC);
-
- return VR;
+ const ParamVarRegion *PVR =
+ State->getStateManager().getRegionManager().getParamVarRegion(
+ getOriginExpr(), Index, SFC);
+ return PVR;
}
/// Returns true if a type is a pointer-to-const or reference-to-const
@@ -325,8 +289,9 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
if (getKind() != CE_CXXAllocator)
if (isArgumentConstructedDirectly(Idx))
if (auto AdjIdx = getAdjustedParameterIndex(Idx))
- if (const VarRegion *VR = getParameterLocation(*AdjIdx, BlockCount))
- ValuesToInvalidate.push_back(loc::MemRegionVal(VR));
+ if (const TypedValueRegion *TVR =
+ getParameterLocation(*AdjIdx, BlockCount))
+ ValuesToInvalidate.push_back(loc::MemRegionVal(TVR));
}
// Invalidate designated regions using the batch invalidation API.
@@ -450,8 +415,7 @@ void CallEvent::dump(raw_ostream &Out) const {
return;
}
- // FIXME: a string representation of the kind would be nice.
- Out << "Unknown call (type " << getKind() << ")";
+ Out << "Unknown call (type " << getKindAsString() << ")";
}
bool CallEvent::isCallStmt(const Stmt *S) {
@@ -515,8 +479,7 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
unsigned Idx = 0;
ArrayRef<ParmVarDecl*>::iterator I = parameters.begin(), E = parameters.end();
for (; I != E && Idx < NumArgs; ++I, ++Idx) {
- const ParmVarDecl *ParamDecl = *I;
- assert(ParamDecl && "Formal parameter has no decl?");
+ assert(*I && "Formal parameter has no decl?");
// TODO: Support allocator calls.
if (Call.getKind() != CE_CXXAllocator)
@@ -528,7 +491,8 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
if (!ArgVal.isUnknown()) {
- Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
+ Loc ParamLoc = SVB.makeLoc(
+ MRMgr.getParamVarRegion(Call.getOriginExpr(), Idx, CalleeCtx));
Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
}
}
@@ -536,6 +500,37 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// FIXME: Variadic arguments are not handled at all right now.
}
+const ConstructionContext *CallEvent::getConstructionContext() const {
+ const StackFrameContext *StackFrame = getCalleeStackFrame(0);
+ if (!StackFrame)
+ return nullptr;
+
+ const CFGElement Element = StackFrame->getCallSiteCFGElement();
+ if (const auto Ctor = Element.getAs<CFGConstructor>()) {
+ return Ctor->getConstructionContext();
+ }
+
+ if (const auto RecCall = Element.getAs<CFGCXXRecordTypedCall>()) {
+ return RecCall->getConstructionContext();
+ }
+
+ return nullptr;
+}
+
+Optional<SVal>
+CallEvent::getReturnValueUnderConstruction() const {
+ const auto *CC = getConstructionContext();
+ if (!CC)
+ return None;
+
+ EvalCallOptions CallOpts;
+ ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
+ SVal RetVal =
+ Engine.computeObjectUnderConstruction(getOriginExpr(), getState(),
+ getLocationContext(), CC, CallOpts);
+ return RetVal;
+}
+
ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
const FunctionDecl *D = getDecl();
if (!D)
@@ -565,7 +560,7 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
return RuntimeDefinition(Decl);
}
- SubEngine &Engine = getState()->getStateManager().getOwningEngine();
+ ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
AnalyzerOptions &Opts = Engine.getAnalysisManager().options;
// Try to get CTU definition only if CTUDir is provided.
@@ -889,24 +884,22 @@ void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
Params);
}
-SVal CXXConstructorCall::getCXXThisVal() const {
+SVal AnyCXXConstructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
return UnknownVal();
}
-void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
+void AnyCXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {
- if (Data) {
- loc::MemRegionVal MV(static_cast<const MemRegion *>(Data));
- if (SymbolRef Sym = MV.getAsSymbol(true))
- ETraits->setTrait(Sym,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
- Values.push_back(MV);
- }
+ SVal V = getCXXThisVal();
+ if (SymbolRef Sym = V.getAsSymbol(true))
+ ETraits->setTrait(Sym,
+ RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ Values.push_back(V);
}
-void CXXConstructorCall::getInitialStackFrameContents(
+void AnyCXXConstructorCall::getInitialStackFrameContents(
const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const {
AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
@@ -920,6 +913,14 @@ void CXXConstructorCall::getInitialStackFrameContents(
}
}
+const StackFrameContext *
+CXXInheritedConstructorCall::getInheritingStackFrame() const {
+ const StackFrameContext *SFC = getLocationContext()->getStackFrame();
+ while (isa<CXXInheritedCtorInitExpr>(SFC->getCallSite()))
+ SFC = SFC->getParent()->getStackFrame();
+ return SFC;
+}
+
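// Illustrative note (not from this patch): the loop above climbs past frames
// that were entered through CXXInheritedCtorInitExpr. For example, assuming
//
//   struct A { A(int); };
//   struct B : A { using A::A; }; // B(int) inherited from A(int)
//   struct C : B { using B::B; }; // C(int) inherited from B(int)
//
// while evaluating the inherited-constructor chain started by `C c(1);`,
// getInheritingStackFrame() returns the frame of the most derived inheriting
// constructor, whose call site (the originating CXXConstructExpr) still
// carries the actual argument expressions.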
SVal CXXDestructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
@@ -967,14 +968,6 @@ void ObjCMethodCall::getExtraInvalidatedValues(
Values.push_back(getReceiverSVal());
}
-SVal ObjCMethodCall::getSelfSVal() const {
- const LocationContext *LCtx = getLocationContext();
- const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
- if (!SelfDecl)
- return SVal();
- return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
-}
-
SVal ObjCMethodCall::getReceiverSVal() const {
// FIXME: Is this the best way to handle class receivers?
if (!isInstanceMessage())
@@ -986,7 +979,7 @@ SVal ObjCMethodCall::getReceiverSVal() const {
// An instance message with no expression means we are sending to super.
// In this case the object reference is the same as 'self'.
assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance);
- SVal SelfVal = getSelfSVal();
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
assert(SelfVal.isValid() && "Calling super but not in ObjC method");
return SelfVal;
}
@@ -1000,8 +993,9 @@ bool ObjCMethodCall::isReceiverSelfOrSuper() const {
return false;
SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver());
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
- return (RecVal == getSelfSVal());
+ return (RecVal == SelfVal);
}
SourceRange ObjCMethodCall::getSourceRange() const {
@@ -1168,23 +1162,77 @@ static const ObjCMethodDecl *findDefiningRedecl(const ObjCMethodDecl *MD) {
return MD;
}
-static bool isCallToSelfClass(const ObjCMessageExpr *ME) {
- const Expr* InstRec = ME->getInstanceReceiver();
- if (!InstRec)
- return false;
- const auto *InstRecIg = dyn_cast<DeclRefExpr>(InstRec->IgnoreParenImpCasts());
+struct PrivateMethodKey {
+ const ObjCInterfaceDecl *Interface;
+ Selector LookupSelector;
+ bool IsClassMethod;
+};
- // Check that receiver is called 'self'.
- if (!InstRecIg || !InstRecIg->getFoundDecl() ||
- !InstRecIg->getFoundDecl()->getName().equals("self"))
- return false;
+namespace llvm {
+template <> struct DenseMapInfo<PrivateMethodKey> {
+ using InterfaceInfo = DenseMapInfo<const ObjCInterfaceDecl *>;
+ using SelectorInfo = DenseMapInfo<Selector>;
- // Check that the method name is 'class'.
- if (ME->getSelector().getNumArgs() != 0 ||
- !ME->getSelector().getNameForSlot(0).equals("class"))
- return false;
+ static inline PrivateMethodKey getEmptyKey() {
+ return {InterfaceInfo::getEmptyKey(), SelectorInfo::getEmptyKey(), false};
+ }
- return true;
+ static inline PrivateMethodKey getTombstoneKey() {
+ return {InterfaceInfo::getTombstoneKey(), SelectorInfo::getTombstoneKey(),
+ true};
+ }
+
+ static unsigned getHashValue(const PrivateMethodKey &Key) {
+ return llvm::hash_combine(
+ llvm::hash_code(InterfaceInfo::getHashValue(Key.Interface)),
+ llvm::hash_code(SelectorInfo::getHashValue(Key.LookupSelector)),
+ Key.IsClassMethod);
+ }
+
+ static bool isEqual(const PrivateMethodKey &LHS,
+ const PrivateMethodKey &RHS) {
+ return InterfaceInfo::isEqual(LHS.Interface, RHS.Interface) &&
+ SelectorInfo::isEqual(LHS.LookupSelector, RHS.LookupSelector) &&
+ LHS.IsClassMethod == RHS.IsClassMethod;
+ }
+};
+} // end namespace llvm
+
+static const ObjCMethodDecl *
+lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
+ Selector LookupSelector, bool InstanceMethod) {
+ // Repeatedly calling lookupPrivateMethod() is expensive, especially
+ // when in many cases it returns null. We cache the results so
+ // that repeated queries on the same ObjCInterfaceDecl and Selector
+ // don't incur the same cost. On some test cases, we can see the
+ // same query being issued thousands of times.
+ //
+ // NOTE: This cache is essentially a "global" variable, but it
+ // only gets lazily created when we get here. The value of the
+ // cache probably comes from it being global across ExprEngines,
+ // where the same queries may get issued. If we are worried about
+ // concurrency, or possibly loading/unloading ASTs, etc., we may
+ // need to revisit this someday. In terms of memory, this table
+ // stays around until clang quits, which also may be bad if we
+ // need to release memory.
+ using PrivateMethodCache =
+ llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
+
+ static PrivateMethodCache PMC;
+ Optional<const ObjCMethodDecl *> &Val =
+ PMC[{Interface, LookupSelector, InstanceMethod}];
+
+ // Query lookupPrivateMethod() if the cache does not hit.
+ if (!Val.hasValue()) {
+ Val = Interface->lookupPrivateMethod(LookupSelector, InstanceMethod);
+
+ if (!*Val) {
+ // Query 'lookupMethod' as a backup.
+ Val = Interface->lookupMethod(LookupSelector, InstanceMethod);
+ }
+ }
+
+ return Val.getValue();
}
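// Illustrative note (not from this patch): the cache key now also records
// whether an instance or a class method is being looked up, so e.g.
//   lookupRuntimeDefinition(IDecl, Sel, /*InstanceMethod=*/true)
//   lookupRuntimeDefinition(IDecl, Sel, /*InstanceMethod=*/false)
// populate and hit two distinct PrivateMethodKey entries.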
RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
@@ -1194,8 +1242,9 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
if (E->isInstanceMessage()) {
// Find the receiver type.
- const ObjCObjectPointerType *ReceiverT = nullptr;
+ const ObjCObjectType *ReceiverT = nullptr;
bool CanBeSubClassed = false;
+ bool LookingForInstanceMethod = true;
QualType SupersType = E->getSuperType();
const MemRegion *Receiver = nullptr;
@@ -1203,7 +1252,7 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
// The receiver is guaranteed to be 'super' in this case.
// Super always means the type of immediate predecessor to the method
// where the call occurs.
- ReceiverT = cast<ObjCObjectPointerType>(SupersType);
+ ReceiverT = cast<ObjCObjectPointerType>(SupersType)->getObjectType();
} else {
Receiver = getReceiverSVal().getAsRegion();
if (!Receiver)
@@ -1218,100 +1267,59 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
QualType DynType = DTI.getType();
CanBeSubClassed = DTI.canBeASubClass();
- ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType.getCanonicalType());
- if (ReceiverT && CanBeSubClassed)
- if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
- if (!canBeOverridenInSubclass(IDecl, Sel))
- CanBeSubClassed = false;
- }
+ const auto *ReceiverDynT =
+ dyn_cast<ObjCObjectPointerType>(DynType.getCanonicalType());
+
+ if (ReceiverDynT) {
+ ReceiverT = ReceiverDynT->getObjectType();
- // Handle special cases of '[self classMethod]' and
- // '[[self class] classMethod]', which are treated by the compiler as
- // instance (not class) messages. We will statically dispatch to those.
- if (auto *PT = dyn_cast_or_null<ObjCObjectPointerType>(ReceiverT)) {
- // For [self classMethod], return the compiler visible declaration.
- if (PT->getObjectType()->isObjCClass() &&
- Receiver == getSelfSVal().getAsRegion())
- return RuntimeDefinition(findDefiningRedecl(E->getMethodDecl()));
-
- // Similarly, handle [[self class] classMethod].
- // TODO: We are currently doing a syntactic match for this pattern with is
- // limiting as the test cases in Analysis/inlining/InlineObjCClassMethod.m
- // shows. A better way would be to associate the meta type with the symbol
- // using the dynamic type info tracking and use it here. We can add a new
- // SVal for ObjC 'Class' values that know what interface declaration they
- // come from. Then 'self' in a class method would be filled in with
- // something meaningful in ObjCMethodCall::getReceiverSVal() and we could
- // do proper dynamic dispatch for class methods just like we do for
- // instance methods now.
- if (E->getInstanceReceiver())
- if (const auto *M = dyn_cast<ObjCMessageExpr>(E->getInstanceReceiver()))
- if (isCallToSelfClass(M))
+ // The receiver may actually be a Class object, i.e. this may really be a
+ // class method call, even though the compiler treats this kind of message
+ // as an instance (not class) message.
+ if (ReceiverT->isObjCClass()) {
+
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
+ // For [self classMethod], return compiler visible declaration.
+ if (Receiver == SelfVal.getAsRegion()) {
return RuntimeDefinition(findDefiningRedecl(E->getMethodDecl()));
+ }
+
+ // Otherwise, let's check if we know something about the type
+ // inside of this class object.
+ if (SymbolRef ReceiverSym = getReceiverSVal().getAsSymbol()) {
+ DynamicTypeInfo DTI =
+ getClassObjectDynamicTypeInfo(getState(), ReceiverSym);
+ if (DTI.isValid()) {
+ // Let's use this type for lookup.
+ ReceiverT =
+ cast<ObjCObjectType>(DTI.getType().getCanonicalType());
+
+ CanBeSubClassed = DTI.canBeASubClass();
+ // And it should be a class method instead.
+ LookingForInstanceMethod = false;
+ }
+ }
+ }
+
+ if (CanBeSubClassed)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterface())
+ // Even if `DynamicTypeInfo` told us that the runtime type may not be
+ // this exact type but one of its descendants, we still want to check
+ // whether this selector can actually be overridden.
+ CanBeSubClassed = canBeOverridenInSubclass(IDecl, Sel);
+ }
}
// Lookup the instance method implementation.
if (ReceiverT)
- if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
- // Repeatedly calling lookupPrivateMethod() is expensive, especially
- // when in many cases it returns null. We cache the results so
- // that repeated queries on the same ObjCIntefaceDecl and Selector
- // don't incur the same cost. On some test cases, we can see the
- // same query being issued thousands of times.
- //
- // NOTE: This cache is essentially a "global" variable, but it
- // only gets lazily created when we get here. The value of the
- // cache probably comes from it being global across ExprEngines,
- // where the same queries may get issued. If we are worried about
- // concurrency, or possibly loading/unloading ASTs, etc., we may
- // need to revisit this someday. In terms of memory, this table
- // stays around until clang quits, which also may be bad if we
- // need to release memory.
- using PrivateMethodKey = std::pair<const ObjCInterfaceDecl *, Selector>;
- using PrivateMethodCache =
- llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
-
- static PrivateMethodCache PMC;
- Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
-
- // Query lookupPrivateMethod() if the cache does not hit.
- if (!Val.hasValue()) {
- Val = IDecl->lookupPrivateMethod(Sel);
-
- // If the method is a property accessor, we should try to "inline" it
- // even if we don't actually have an implementation.
- if (!*Val)
- if (const ObjCMethodDecl *CompileTimeMD = E->getMethodDecl())
- if (CompileTimeMD->isPropertyAccessor()) {
- if (!CompileTimeMD->getSelfDecl() &&
- isa<ObjCCategoryDecl>(CompileTimeMD->getDeclContext())) {
- // If the method is an accessor in a category, and it doesn't
- // have a self declaration, first
- // try to find the method in a class extension. This
- // works around a bug in Sema where multiple accessors
- // are synthesized for properties in class
- // extensions that are redeclared in a category and the
- // the implicit parameters are not filled in for
- // the method on the category.
- // This ensures we find the accessor in the extension, which
- // has the implicit parameters filled in.
- auto *ID = CompileTimeMD->getClassInterface();
- for (auto *CatDecl : ID->visible_extensions()) {
- Val = CatDecl->getMethod(Sel,
- CompileTimeMD->isInstanceMethod());
- if (*Val)
- break;
- }
- }
- if (!*Val)
- Val = IDecl->lookupInstanceMethod(Sel);
- }
- }
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterface()) {
+ const ObjCMethodDecl *MD =
+ lookupRuntimeDefinition(IDecl, Sel, LookingForInstanceMethod);
- const ObjCMethodDecl *MD = Val.getValue();
if (MD && !MD->hasBody())
MD = MD->getCanonicalDecl();
+
if (CanBeSubClassed)
return RuntimeDefinition(MD, Receiver);
else
@@ -1392,17 +1400,20 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx))
return Out;
- // All other cases are handled by getCall.
- assert(isa<CXXConstructExpr>(CallSite) &&
- "This is not an inlineable statement");
-
SValBuilder &SVB = State->getStateManager().getSValBuilder();
const auto *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
SVal ThisVal = State->getSVal(ThisPtr);
- return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
- ThisVal.getAsRegion(), State, CallerCtx);
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite))
+ return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx);
+ else if (const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(CallSite))
+ return getCXXInheritedConstructorCall(CIE, ThisVal.getAsRegion(), State,
+ CallerCtx);
+ else {
+ // All other cases are handled by getCall.
+ llvm_unreachable("This is not an inlineable statement");
+ }
}
// Fall back to the CFG. The only thing we haven't handled yet is
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 11693132de68..cae728815b41 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -13,6 +13,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
+#include "clang/Lex/Preprocessor.h"
namespace clang {
@@ -109,6 +110,43 @@ Nullability getNullabilityAnnotation(QualType Type) {
return Nullability::Unspecified;
}
+llvm::Optional<int> tryExpandAsInteger(StringRef Macro,
+ const Preprocessor &PP) {
+ const auto *MacroII = PP.getIdentifierInfo(Macro);
+ if (!MacroII)
+ return llvm::None;
+ const MacroInfo *MI = PP.getMacroInfo(MacroII);
+ if (!MI)
+ return llvm::None;
+
+ // Filter out parens.
+ std::vector<Token> FilteredTokens;
+ FilteredTokens.reserve(MI->tokens().size());
+ for (auto &T : MI->tokens())
+ if (!T.isOneOf(tok::l_paren, tok::r_paren))
+ FilteredTokens.push_back(T);
+
+ // Parse an integer at the end of the macro definition.
+ const Token &T = FilteredTokens.back();
+ // FIXME: An EOF macro token coming from a PCH file on macOS, while marked
+ // as a literal, doesn't contain any literal data.
+ if (!T.isLiteral() || !T.getLiteralData())
+ return llvm::None;
+ StringRef ValueStr = StringRef(T.getLiteralData(), T.getLength());
+ llvm::APInt IntValue;
+ constexpr unsigned AutoSenseRadix = 0;
+ if (ValueStr.getAsInteger(AutoSenseRadix, IntValue))
+ return llvm::None;
+
+ // Parse an optional minus sign.
+ size_t Size = FilteredTokens.size();
+ if (Size >= 2) {
+ if (FilteredTokens[Size - 2].is(tok::minus))
+ IntValue = -IntValue;
+ }
+
+ return IntValue.getSExtValue();
+}
-} // end namespace ento
-} // end namespace clang
+} // namespace ento
+} // namespace clang
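A rough sketch (not part of this patch) of how the new tryExpandAsInteger() helper behaves; the macro names and values below are hypothetical, and PP stands for the Preprocessor of the translation unit being analyzed:

  // Suppose the TU contains:
  //   #define MY_LIMIT 4096
  //   #define MY_ERROR (-42)
  llvm::Optional<int> Limit = clang::ento::tryExpandAsInteger("MY_LIMIT", PP);
  // -> 4096
  llvm::Optional<int> Error = clang::ento::tryExpandAsInteger("MY_ERROR", PP);
  // -> -42: the parentheses are filtered out and the trailing minus is applied
  llvm::Optional<int> Missing = clang::ento::tryExpandAsInteger("NO_SUCH_MACRO", PP);
  // -> llvm::None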
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index a9361837cf68..86cecf6524f0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -61,12 +61,12 @@ void CheckerManager::finishedCheckerRegistration() {
}
void CheckerManager::reportInvalidCheckerOptionValue(
- const CheckerBase *C, StringRef OptionName, StringRef ExpectedValueDesc) {
+ const CheckerBase *C, StringRef OptionName,
+ StringRef ExpectedValueDesc) const {
- Context.getDiagnostics()
- .Report(diag::err_analyzer_checker_option_invalid_input)
- << (llvm::Twine() + C->getTagDescription() + ":" + OptionName).str()
- << ExpectedValueDesc;
+ getDiagnostics().Report(diag::err_analyzer_checker_option_invalid_input)
+ << (llvm::Twine() + C->getTagDescription() + ":" + OptionName).str()
+ << ExpectedValueDesc;
}
//===----------------------------------------------------------------------===//
@@ -243,13 +243,13 @@ void CheckerManager::runCheckersForObjCMessage(ObjCMessageVisitKind visitKind,
const ObjCMethodCall &msg,
ExprEngine &Eng,
bool WasInlined) {
- auto &checkers = getObjCMessageCheckers(visitKind);
+ const auto &checkers = getObjCMessageCheckers(visitKind);
CheckObjCMessageContext C(visitKind, checkers, msg, Eng, WasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
const std::vector<CheckerManager::CheckObjCMessageFunc> &
-CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) {
+CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) const {
switch (Kind) {
case ObjCMessageVisitKind::Pre:
return PreObjCMessageCheckers;
@@ -507,35 +507,38 @@ namespace {
using CheckersTy = std::vector<CheckerManager::CheckNewAllocatorFunc>;
const CheckersTy &Checkers;
- const CXXNewExpr *NE;
- SVal Target;
+ const CXXAllocatorCall &Call;
bool WasInlined;
ExprEngine &Eng;
- CheckNewAllocatorContext(const CheckersTy &Checkers, const CXXNewExpr *NE,
- SVal Target, bool WasInlined, ExprEngine &Eng)
- : Checkers(Checkers), NE(NE), Target(Target), WasInlined(WasInlined),
- Eng(Eng) {}
+ CheckNewAllocatorContext(const CheckersTy &Checkers,
+ const CXXAllocatorCall &Call, bool WasInlined,
+ ExprEngine &Eng)
+ : Checkers(Checkers), Call(Call), WasInlined(WasInlined), Eng(Eng) {}
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckNewAllocatorFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
- ProgramPoint L = PostAllocatorCall(NE, Pred->getLocationContext());
+ ProgramPoint L =
+ PostAllocatorCall(Call.getOriginExpr(), Pred->getLocationContext());
CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
- checkFn(NE, Target, C);
+ checkFn(cast<CXXAllocatorCall>(*Call.cloneWithState(Pred->getState())),
+ C);
}
};
} // namespace
-void CheckerManager::runCheckersForNewAllocator(
- const CXXNewExpr *NE, SVal Target, ExplodedNodeSet &Dst, ExplodedNode *Pred,
- ExprEngine &Eng, bool WasInlined) {
+void CheckerManager::runCheckersForNewAllocator(const CXXAllocatorCall &Call,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ ExprEngine &Eng,
+ bool WasInlined) {
ExplodedNodeSet Src;
Src.insert(Pred);
- CheckNewAllocatorContext C(NewAllocatorCheckers, NE, Target, WasInlined, Eng);
+ CheckNewAllocatorContext C(NewAllocatorCheckers, Call, WasInlined, Eng);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -650,8 +653,9 @@ CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const CallEvent &Call,
- ExprEngine &Eng) {
- for (const auto Pred : Src) {
+ ExprEngine &Eng,
+ const EvalCallOptions &CallOpts) {
+ for (auto *const Pred : Src) {
bool anyEvaluated = false;
ExplodedNodeSet checkDst;
@@ -662,10 +666,8 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- cast<CallExpr>(Call.getOriginExpr()),
- ProgramPoint::PostStmtKind,
- Pred->getLocationContext(),
- EvalCallChecker.Checker);
+ Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
{ // CheckerContext generates transitions(populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
@@ -687,7 +689,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!anyEvaluated) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call);
+ Eng.defaultEvalCall(B, Pred, Call, CallOpts);
}
}
}
@@ -902,8 +904,3 @@ CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
Checkers.push_back(Info.CheckFn);
return Checkers;
}
-
-CheckerManager::~CheckerManager() {
- for (const auto &CheckerDtor : CheckerDtors)
- CheckerDtor();
-}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
new file mode 100644
index 000000000000..1b3e8b11549d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
@@ -0,0 +1,241 @@
+//===- CheckerRegistryData.cpp - Maintains all available checkers -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/Twine.h"
+#include <map>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Methods of CmdLineOption, PackageInfo and CheckerInfo.
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void CmdLineOption::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+LLVM_DUMP_METHOD void
+CmdLineOption::dumpToStream(llvm::raw_ostream &Out) const {
+ // The description can simply be checked in Checkers.inc; the point here is
+ // to debug whether we succeeded in parsing it.
+ Out << OptionName << " (" << OptionType << ", "
+ << (IsHidden ? "hidden, " : "") << DevelopmentStatus << ") default: \""
+ << DefaultValStr;
+}
+
+static StringRef toString(StateFromCmdLine Kind) {
+ switch (Kind) {
+ case StateFromCmdLine::State_Disabled:
+ return "Disabled";
+ case StateFromCmdLine::State_Enabled:
+ return "Enabled";
+ case StateFromCmdLine::State_Unspecified:
+ return "Unspecified";
+ }
+ llvm_unreachable("Unhandled StateFromCmdLine enum");
+}
+
+LLVM_DUMP_METHOD void CheckerInfo::dump() const { dumpToStream(llvm::errs()); }
+
+LLVM_DUMP_METHOD void CheckerInfo::dumpToStream(llvm::raw_ostream &Out) const {
+ // The description can simply be checked in Checkers.inc; the point here is
+ // to debug whether we succeeded in parsing it. Same with the documentation URI.
+ Out << FullName << " (" << toString(State) << (IsHidden ? ", hidden" : "")
+ << ")\n";
+ Out << " Options:\n";
+ for (const CmdLineOption &Option : CmdLineOptions) {
+ Out << " ";
+ Option.dumpToStream(Out);
+ Out << '\n';
+ }
+ Out << " Dependencies:\n";
+ for (const CheckerInfo *Dependency : Dependencies) {
+ Out << " " << Dependency->FullName << '\n';
+ }
+ Out << " Weak dependencies:\n";
+ for (const CheckerInfo *Dependency : WeakDependencies) {
+ Out << " " << Dependency->FullName << '\n';
+ }
+}
+
+LLVM_DUMP_METHOD void PackageInfo::dump() const { dumpToStream(llvm::errs()); }
+
+LLVM_DUMP_METHOD void PackageInfo::dumpToStream(llvm::raw_ostream &Out) const {
+ Out << FullName << "\n";
+ Out << " Options:\n";
+ for (const CmdLineOption &Option : CmdLineOptions) {
+ Out << " ";
+ Option.dumpToStream(Out);
+ Out << '\n';
+ }
+}
+
+static constexpr char PackageSeparator = '.';
+
+static bool isInPackage(const CheckerInfo &Checker, StringRef PackageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!Checker.FullName.startswith(PackageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (Checker.FullName.size() == PackageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (Checker.FullName[PackageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
+}
+
+CheckerInfoListRange
+CheckerRegistryData::getMutableCheckersForCmdLineArg(StringRef CmdLineArg) {
+ auto It = checker_registry::binaryFind(Checkers, CmdLineArg);
+
+ if (!isInPackage(*It, CmdLineArg))
+ return {Checkers.end(), Checkers.end()};
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single
+ // checker.
+ size_t Size = 1;
+ llvm::StringMap<size_t>::const_iterator PackageSize =
+ PackageSizes.find(CmdLineArg);
+
+ if (PackageSize != PackageSizes.end())
+ Size = PackageSize->getValue();
+
+ return {It, It + Size};
+}
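// Illustrative note (not from this patch): how the prefix check above behaves
// for a checker whose FullName is "core.DivideZero":
//   PackageName "core"            -> in package (next char is the '.' separator)
//   PackageName "core.DivideZero" -> in package (the "package" is the checker itself)
//   PackageName "core.Divide"     -> not in package (prefix, but no '.' boundary)
// getMutableCheckersForCmdLineArg() then widens a matching package name to the
// full range of checkers it contains, falling back to a single-checker range
// when the argument is not a registered package.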
+//===----------------------------------------------------------------------===//
+// Printing functions.
+//===----------------------------------------------------------------------===//
+
+void CheckerRegistryData::printCheckerWithDescList(
+ const AnalyzerOptions &AnOpts, raw_ostream &Out,
+ size_t MaxNameChars) const {
+ // FIXME: Print available packages.
+
+ Out << "CHECKERS:\n";
+
+ // Find the maximum option length.
+ size_t OptionFieldWidth = 0;
+ for (const auto &Checker : Checkers) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ // Package.Name Description [Hidden]
+ size_t NameLength = Checker.FullName.size();
+ if (NameLength <= MaxNameChars)
+ OptionFieldWidth = std::max(OptionFieldWidth, NameLength);
+ }
+
+ const size_t InitialPad = 2;
+
+ auto Print = [=](llvm::raw_ostream &Out, const CheckerInfo &Checker,
+ StringRef Description) {
+ AnalyzerOptions::printFormattedEntry(Out, {Checker.FullName, Description},
+ InitialPad, OptionFieldWidth);
+ Out << '\n';
+ };
+
+ for (const auto &Checker : Checkers) {
+ // The order of these if branches is significant: we don't want to display
+ // developer checkers even in the alpha output. For example,
+ // alpha.cplusplus.IteratorModeling is a modeling checker, hence it's hidden
+ // by default, and users (even developers of an alpha checker) shouldn't
+ // normally tinker with whether it is enabled.
+
+ if (Checker.IsHidden) {
+ if (AnOpts.ShowCheckerHelpDeveloper)
+ Print(Out, Checker, Checker.Desc);
+ continue;
+ }
+
+ if (Checker.FullName.startswith("alpha")) {
+ if (AnOpts.ShowCheckerHelpAlpha)
+ Print(Out, Checker,
+ ("(Enable only for development!) " + Checker.Desc).str());
+ continue;
+ }
+
+ if (AnOpts.ShowCheckerHelp)
+ Print(Out, Checker, Checker.Desc);
+ }
+}
+
+void CheckerRegistryData::printEnabledCheckerList(raw_ostream &Out) const {
+ for (const auto *i : EnabledCheckers)
+ Out << i->FullName << '\n';
+}
+
+void CheckerRegistryData::printCheckerOptionList(const AnalyzerOptions &AnOpts,
+ raw_ostream &Out) const {
+ Out << "OVERVIEW: Clang Static Analyzer Checker and Package Option List\n\n";
+ Out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ Out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
+ "OPTION2=VALUE, ...\n\n";
+ Out << "OPTIONS:\n\n";
+
+ // It's usually ill-advised to use multimap, but clang will terminate after
+ // this function.
+ std::multimap<StringRef, const CmdLineOption &> OptionMap;
+
+ for (const CheckerInfo &Checker : Checkers) {
+ for (const CmdLineOption &Option : Checker.CmdLineOptions) {
+ OptionMap.insert({Checker.FullName, Option});
+ }
+ }
+
+ for (const PackageInfo &Package : Packages) {
+ for (const CmdLineOption &Option : Package.CmdLineOptions) {
+ OptionMap.insert({Package.FullName, Option});
+ }
+ }
+
+ auto Print = [](llvm::raw_ostream &Out, StringRef FullOption,
+ StringRef Desc) {
+ AnalyzerOptions::printFormattedEntry(Out, {FullOption, Desc},
+ /*InitialPad*/ 2,
+ /*EntryWidth*/ 50,
+ /*MinLineWidth*/ 90);
+ Out << "\n\n";
+ };
+ for (const std::pair<const StringRef, const CmdLineOption &> &Entry :
+ OptionMap) {
+ const CmdLineOption &Option = Entry.second;
+ std::string FullOption = (Entry.first + ":" + Option.OptionName).str();
+
+ std::string Desc =
+ ("(" + Option.OptionType + ") " + Option.Description + " (default: " +
+ (Option.DefaultValStr.empty() ? "\"\"" : Option.DefaultValStr) + ")")
+ .str();
+
+ // The order of these if branches is significant: we don't want to
+ // display hidden alpha checker options for
+ // -analyzer-checker-option-help-alpha.
+
+ if (Option.IsHidden) {
+ if (AnOpts.ShowCheckerOptionDeveloperList)
+ Print(Out, FullOption, Desc);
+ continue;
+ }
+
+ if (Option.DevelopmentStatus == "alpha" ||
+ Entry.first.startswith("alpha")) {
+ if (AnOpts.ShowCheckerOptionAlphaList)
+ Print(Out, FullOption,
+ llvm::Twine("(Enable only for development!) " + Desc).str());
+ continue;
+ }
+
+ if (AnOpts.ShowCheckerOptionList)
+ Print(Out, FullOption, Desc);
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index bdae3e605eff..a601370775b4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -9,13 +9,18 @@
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
// Common strings used for the "category" of many static analyzer issues.
-namespace clang { namespace ento { namespace categories {
+namespace clang {
+namespace ento {
+namespace categories {
-const char * const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
-const char * const LogicError = "Logic error";
-const char * const MemoryRefCount =
- "Memory (Core Foundation/Objective-C/OSObject)";
-const char * const MemoryError = "Memory error";
-const char * const UnixAPI = "Unix API";
-const char * const CXXObjectLifecycle = "C++ object lifecycle";
-}}}
+const char *const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char *const LogicError = "Logic error";
+const char *const MemoryRefCount =
+ "Memory (Core Foundation/Objective-C/OSObject)";
+const char *const MemoryError = "Memory error";
+const char *const UnixAPI = "Unix API";
+const char *const CXXObjectLifecycle = "C++ object lifecycle";
+const char *const SecurityError = "Security error";
+} // namespace categories
+} // namespace ento
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 94cf74de8293..70deb13a8e1a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -23,8 +23,8 @@
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
@@ -52,8 +52,7 @@ STATISTIC(NumPathsExplored,
// Core analysis engine.
//===----------------------------------------------------------------------===//
-static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
- SubEngine &subengine) {
+static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
switch (Opts.getExplorationStrategy()) {
case ExplorationStrategyKind::DFS:
return WorkList::makeDFS();
@@ -71,9 +70,9 @@ static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
llvm_unreachable("Unknown AnalyzerOptions::ExplorationStrategyKind");
}
-CoreEngine::CoreEngine(SubEngine &subengine, FunctionSummariesTy *FS,
+CoreEngine::CoreEngine(ExprEngine &exprengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
- : SubEng(subengine), WList(generateWorkList(Opts, subengine)),
+ : ExprEng(exprengine), WList(generateWorkList(Opts)),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
@@ -104,7 +103,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
if (!InitState)
- InitState = SubEng.getInitialState(L);
+ InitState = ExprEng.getInitialState(L);
bool IsNew;
ExplodedNode *Node = G.getNode(StartLoc, InitState, false, &IsNew);
@@ -113,7 +112,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
NodeBuilderContext BuilderCtx(*this, StartLoc.getDst(), Node);
ExplodedNodeSet DstBegin;
- SubEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);
+ ExprEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);
enqueue(DstBegin);
}
@@ -147,7 +146,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
dispatchWorkItem(Node, Node->getLocation(), WU);
}
- SubEng.processEndWorklist();
+ ExprEng.processEndWorklist();
return WList->hasWork();
}
@@ -172,7 +171,7 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
break;
case ProgramPoint::CallExitBeginKind:
- SubEng.processCallExit(Pred);
+ ExprEng.processCallExit(Pred);
break;
case ProgramPoint::EpsilonKind: {
@@ -221,7 +220,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
if (L.getSrc()->getTerminator().isVirtualBaseBranch() &&
L.getDst() == *L.getSrc()->succ_begin()) {
ProgramPoint P = L.withTag(getNoteTags().makeNoteTag(
- [](BugReporterContext &, BugReport &) -> std::string {
+ [](BugReporterContext &, PathSensitiveBugReport &) -> std::string {
// TODO: Just call out the name of the most derived class
// when we know it.
return "Virtual base initialization skipped because "
@@ -253,17 +252,17 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
}
// Process the final state transition.
- SubEng.processEndOfFunction(BuilderCtx, Pred, RS);
+ ExprEng.processEndOfFunction(BuilderCtx, Pred, RS);
// This path is done. Don't enqueue any more nodes.
return;
}
- // Call into the SubEngine to process entering the CFGBlock.
+ // Call into the ExprEngine to process entering the CFGBlock.
ExplodedNodeSet dstNodes;
BlockEntrance BE(Blk, Pred->getLocationContext());
NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
- SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
+ ExprEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
// Auto-generate a node.
if (!nodeBuilder.hasGeneratedNodes()) {
@@ -287,7 +286,7 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
// Process the entrance of the block.
if (Optional<CFGElement> E = L.getFirstElement()) {
NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
- SubEng.processCFGElement(*E, Pred, 0, &Ctx);
+ ExprEng.processCFGElement(*E, Pred, 0, &Ctx);
}
else
HandleBlockExit(L.getBlock(), Pred);
@@ -367,7 +366,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
*(B->succ_begin()), this);
- SubEng.processIndirectGoto(builder);
+ ExprEng.processIndirectGoto(builder);
return;
}
@@ -378,7 +377,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
// 'element' variable to a value.
// (2) in a terminator, which represents the branch.
//
- // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+ // For (1), ExprEngine will bind a value (i.e., 0 or 1) indicating
// whether or not collection contains any more elements. We cannot
// just test to see if the element is nil because a container can
// contain nil elements.
@@ -389,7 +388,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
this);
- SubEng.processSwitch(builder);
+ ExprEng.processSwitch(builder);
return;
}
@@ -418,7 +417,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
void CoreEngine::HandleCallEnter(const CallEnter &CE, ExplodedNode *Pred) {
NodeBuilderContext BuilderCtx(*this, CE.getEntry(), Pred);
- SubEng.processCallEnter(BuilderCtx, CE, Pred);
+ ExprEng.processCallEnter(BuilderCtx, CE, Pred);
}
void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
@@ -426,7 +425,7 @@ void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processBranch(Cond, Ctx, Pred, Dst, *(B->succ_begin()),
+ ExprEng.processBranch(Cond, Ctx, Pred, Dst, *(B->succ_begin()),
*(B->succ_begin() + 1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -438,7 +437,7 @@ void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
+ ExprEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
*(B->succ_begin() + 1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -449,7 +448,7 @@ void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
+ ExprEng.processStaticInitializer(DS, Ctx, Pred, Dst,
*(B->succ_begin()), *(B->succ_begin()+1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -464,7 +463,7 @@ void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
HandleBlockExit(B, Pred);
else {
NodeBuilderContext Ctx(*this, B, Pred);
- SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
+ ExprEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp
new file mode 100644
index 000000000000..8b2172db445c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp
@@ -0,0 +1,71 @@
+//===- DynamicSize.cpp - Dynamic size related APIs --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic size information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+
+namespace clang {
+namespace ento {
+
+DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
+ SValBuilder &SVB) {
+ return MR->getMemRegionManager().getStaticSize(MR, SVB);
+}
+
+DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
+ const MemRegion *MR,
+ SValBuilder &SVB,
+ QualType ElementTy) {
+ MemRegionManager &MemMgr = MR->getMemRegionManager();
+ ASTContext &Ctx = MemMgr.getContext();
+
+ DefinedOrUnknownSVal Size = getDynamicSize(State, MR, SVB);
+ SVal ElementSizeV = SVB.makeIntVal(
+ Ctx.getTypeSizeInChars(ElementTy).getQuantity(), SVB.getArrayIndexType());
+
+ SVal DivisionV =
+ SVB.evalBinOp(State, BO_Div, Size, ElementSizeV, SVB.getArrayIndexType());
+
+ return DivisionV.castAs<DefinedOrUnknownSVal>();
+}
+
+SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV) {
+ SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
+ const MemRegion *MRegion = BufV.getAsRegion();
+ if (!MRegion)
+ return UnknownVal();
+ RegionOffset Offset = MRegion->getAsOffset();
+ if (Offset.hasSymbolicOffset())
+ return UnknownVal();
+ const MemRegion *BaseRegion = MRegion->getBaseRegion();
+ if (!BaseRegion)
+ return UnknownVal();
+
+ NonLoc OffsetInBytes = SvalBuilder.makeArrayIndex(
+ Offset.getOffset() /
+ MRegion->getMemRegionManager().getContext().getCharWidth());
+ DefinedOrUnknownSVal ExtentInBytes =
+ getDynamicSize(State, BaseRegion, SvalBuilder);
+
+ return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
+ ExtentInBytes, OffsetInBytes,
+ SvalBuilder.getArrayIndexType());
+}
+
+} // namespace ento
+} // namespace clang
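A sketch (not part of this patch) of what the new helpers are meant to compute, assuming the analyzer is looking at `int buf[10]` with a 4-byte int; State, MR (the region of buf), PtrToBuf3 (an SVal pointing at &buf[3]) and SVB are assumed to come from a checker context, and the concrete numbers are illustrative:

  // Extent of the region in bytes: 10 * sizeof(int) == 40.
  DefinedOrUnknownSVal Bytes = getDynamicSize(State, MR, SVB);
  // Number of int elements: 40 / 4 == 10.
  DefinedOrUnknownSVal Count =
      getDynamicElementCount(State, MR, SVB, SVB.getContext().IntTy);
  // Extent remaining past the pointee of PtrToBuf3: 40 - 12 == 28 bytes.
  SVal Remaining = getDynamicSizeWithOffset(State, PtrToBuf3);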
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
index a78e0e05e903..e9b64fd79614 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
@@ -34,6 +34,10 @@ REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(CastSet, clang::ento::DynamicCastInfo)
REGISTER_MAP_WITH_PROGRAMSTATE(DynamicCastMap, const clang::ento::MemRegion *,
CastSet)
+// A map from Class object symbols to the most likely pointed-to type.
+REGISTER_MAP_WITH_PROGRAMSTATE(DynamicClassObjectMap, clang::ento::SymbolRef,
+ clang::ento::DynamicTypeInfo)
+
namespace clang {
namespace ento {
@@ -76,6 +80,12 @@ const DynamicCastInfo *getDynamicCastInfo(ProgramStateRef State,
return nullptr;
}
+DynamicTypeInfo getClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym) {
+ const DynamicTypeInfo *DTI = State->get<DynamicClassObjectMap>(Sym);
+ return DTI ? *DTI : DynamicTypeInfo{};
+}
+
ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR,
DynamicTypeInfo NewTy) {
State = State->set<DynamicTypeMap>(MR->StripCasts(), NewTy);
@@ -118,111 +128,165 @@ ProgramStateRef setDynamicTypeAndCastInfo(ProgramStateRef State,
return State;
}
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym,
+ DynamicTypeInfo NewTy) {
+ State = State->set<DynamicClassObjectMap>(Sym, NewTy);
+ return State;
+}
+
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym, QualType NewTy,
+ bool CanBeSubClassed) {
+ return setClassObjectDynamicTypeInfo(State, Sym,
+ DynamicTypeInfo(NewTy, CanBeSubClassed));
+}
+
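// Illustrative sketch (not from this patch) of the intended round trip,
// assuming ClassSym is the symbol bound to a Class object (e.g. the value of
// [MyView class]) and InterfaceTy is the corresponding Objective-C object type:
//
//   State = setClassObjectDynamicTypeInfo(State, ClassSym, InterfaceTy,
//                                         /*CanBeSubClassed=*/false);
//   ...
//   DynamicTypeInfo DTI = getClassObjectDynamicTypeInfo(State, ClassSym);
//   if (DTI.isValid())
//     ... use DTI.getType() for more precise (class) method dispatch ...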
+static bool isLive(SymbolReaper &SR, const MemRegion *MR) {
+ return SR.isLiveRegion(MR);
+}
+
+static bool isLive(SymbolReaper &SR, SymbolRef Sym) { return SR.isLive(Sym); }
+
template <typename MapTy>
-ProgramStateRef removeDead(ProgramStateRef State, const MapTy &Map,
- SymbolReaper &SR) {
+static ProgramStateRef removeDeadImpl(ProgramStateRef State, SymbolReaper &SR) {
+ const auto &Map = State->get<MapTy>();
+
for (const auto &Elem : Map)
- if (!SR.isLiveRegion(Elem.first))
- State = State->remove<DynamicCastMap>(Elem.first);
+ if (!isLive(SR, Elem.first))
+ State = State->remove<MapTy>(Elem.first);
return State;
}
ProgramStateRef removeDeadTypes(ProgramStateRef State, SymbolReaper &SR) {
- return removeDead(State, State->get<DynamicTypeMap>(), SR);
+ return removeDeadImpl<DynamicTypeMap>(State, SR);
}
ProgramStateRef removeDeadCasts(ProgramStateRef State, SymbolReaper &SR) {
- return removeDead(State, State->get<DynamicCastMap>(), SR);
+ return removeDeadImpl<DynamicCastMap>(State, SR);
}
-static void printDynamicTypesJson(raw_ostream &Out, ProgramStateRef State,
- const char *NL, unsigned int Space,
- bool IsDot) {
- Indent(Out, Space, IsDot) << "\"dynamic_types\": ";
+ProgramStateRef removeDeadClassObjectTypes(ProgramStateRef State,
+ SymbolReaper &SR) {
+ return removeDeadImpl<DynamicClassObjectMap>(State, SR);
+}
- const DynamicTypeMapTy &Map = State->get<DynamicTypeMap>();
- if (Map.isEmpty()) {
- Out << "null," << NL;
- return;
- }
+//===----------------------------------------------------------------------===//
+// Implementation of the 'printer-to-JSON' function
+//===----------------------------------------------------------------------===//
- ++Space;
- Out << '[' << NL;
- for (DynamicTypeMapTy::iterator I = Map.begin(); I != Map.end(); ++I) {
- const MemRegion *MR = I->first;
- const DynamicTypeInfo &DTI = I->second;
- Indent(Out, Space, IsDot)
- << "{ \"region\": \"" << MR << "\", \"dyn_type\": ";
- if (!DTI.isValid()) {
- Out << "null";
- } else {
- Out << '\"' << DTI.getType()->getPointeeType().getAsString()
- << "\", \"sub_classable\": "
- << (DTI.canBeASubClass() ? "true" : "false");
- }
- Out << " }";
-
- if (std::next(I) != Map.end())
- Out << ',';
- Out << NL;
+static raw_ostream &printJson(const MemRegion *Region, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"region\": \"" << Region << "\"";
+}
+
+static raw_ostream &printJson(const SymExpr *Symbol, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"symbol\": \"" << Symbol << "\"";
+}
+
+static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ Out << "\"dyn_type\": ";
+ if (!DTI.isValid()) {
+ Out << "null";
+ } else {
+ QualType ToPrint = DTI.getType();
+ if (ToPrint->isAnyPointerType())
+ ToPrint = ToPrint->getPointeeType();
+
+ Out << '\"' << ToPrint.getAsString() << "\", \"sub_classable\": "
+ << (DTI.canBeASubClass() ? "true" : "false");
}
+ return Out;
+}
- --Space;
- Indent(Out, Space, IsDot) << "]," << NL;
+static raw_ostream &printJson(const DynamicCastInfo &DCI, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"from\": \"" << DCI.from().getAsString() << "\", \"to\": \""
+ << DCI.to().getAsString() << "\", \"kind\": \""
+ << (DCI.succeeds() ? "success" : "fail") << "\"";
}
-static void printDynamicCastsJson(raw_ostream &Out, ProgramStateRef State,
- const char *NL, unsigned int Space,
- bool IsDot) {
- Indent(Out, Space, IsDot) << "\"dynamic_casts\": ";
+template <class T, class U>
+static raw_ostream &printJson(const std::pair<T, U> &Pair, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ printJson(Pair.first, Out, NL, Space, IsDot) << ", ";
+ return printJson(Pair.second, Out, NL, Space, IsDot);
+}
- const DynamicCastMapTy &Map = State->get<DynamicCastMap>();
- if (Map.isEmpty()) {
- Out << "null," << NL;
- return;
+template <class ContainerTy>
+static raw_ostream &printJsonContainer(const ContainerTy &Container,
+ raw_ostream &Out, const char *NL,
+ unsigned int Space, bool IsDot) {
+ if (Container.isEmpty()) {
+ return Out << "null";
}
++Space;
Out << '[' << NL;
- for (DynamicCastMapTy::iterator I = Map.begin(); I != Map.end(); ++I) {
- const MemRegion *MR = I->first;
- const CastSet &Set = I->second;
-
- Indent(Out, Space, IsDot) << "{ \"region\": \"" << MR << "\", \"casts\": ";
- if (Set.isEmpty()) {
- Out << "null ";
- } else {
- ++Space;
- Out << '[' << NL;
- for (CastSet::iterator SI = Set.begin(); SI != Set.end(); ++SI) {
- Indent(Out, Space, IsDot)
- << "{ \"from\": \"" << SI->from().getAsString() << "\", \"to\": \""
- << SI->to().getAsString() << "\", \"kind\": \""
- << (SI->succeeds() ? "success" : "fail") << "\" }";
-
- if (std::next(SI) != Set.end())
- Out << ',';
- Out << NL;
- }
- --Space;
- Indent(Out, Space, IsDot) << ']';
- }
- Out << '}';
-
- if (std::next(I) != Map.end())
+ for (auto I = Container.begin(); I != Container.end(); ++I) {
+ const auto &Element = *I;
+
+ Indent(Out, Space, IsDot) << "{ ";
+ printJson(Element, Out, NL, Space, IsDot) << " }";
+
+ if (std::next(I) != Container.end())
Out << ',';
Out << NL;
}
--Space;
- Indent(Out, Space, IsDot) << "]," << NL;
+ return Indent(Out, Space, IsDot) << "]";
+}
+
+static raw_ostream &printJson(const CastSet &Set, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ Out << "\"casts\": ";
+ return printJsonContainer(Set, Out, NL, Space, IsDot);
+}
+
+template <class MapTy>
+static void printJsonImpl(raw_ostream &Out, ProgramStateRef State,
+ const char *Name, const char *NL, unsigned int Space,
+ bool IsDot, bool PrintEvenIfEmpty = true) {
+ const auto &Map = State->get<MapTy>();
+ if (Map.isEmpty() && !PrintEvenIfEmpty)
+ return;
+
+ Indent(Out, Space, IsDot) << "\"" << Name << "\": ";
+ printJsonContainer(Map, Out, NL, Space, IsDot) << "," << NL;
+}
+
+static void printDynamicTypesJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ printJsonImpl<DynamicTypeMap>(Out, State, "dynamic_types", NL, Space, IsDot);
+}
+
+static void printDynamicCastsJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ printJsonImpl<DynamicCastMap>(Out, State, "dynamic_casts", NL, Space, IsDot);
+}
+
+static void printClassObjectDynamicTypesJson(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ // Let's print Class object type information only if we have something
+ // meaningful to print.
+ printJsonImpl<DynamicClassObjectMap>(Out, State, "class_object_types", NL,
+ Space, IsDot,
+ /*PrintEvenIfEmpty=*/false);
}
void printDynamicTypeInfoJson(raw_ostream &Out, ProgramStateRef State,
const char *NL, unsigned int Space, bool IsDot) {
printDynamicTypesJson(Out, State, NL, Space, IsDot);
printDynamicCastsJson(Out, State, NL, Space, IsDot);
+ printClassObjectDynamicTypesJson(Out, State, NL, Space, IsDot);
}
} // namespace ento
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 1ccf4c6104a6..9e6d79bb7dcc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -183,12 +183,18 @@ EnvironmentManager::removeDeadBindings(Environment Env,
F.getTreeFactory());
// Iterate over the block-expr bindings.
- for (Environment::iterator I = Env.begin(), E = Env.end();
- I != E; ++I) {
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
const EnvironmentEntry &BlkExpr = I.getKey();
const SVal &X = I.getData();
- if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
+ const bool IsBlkExprLive =
+ SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext());
+
+ assert((isa<Expr>(BlkExpr.getStmt()) || !IsBlkExprLive) &&
+ "Only Exprs can be live, LivenessAnalysis argues about the liveness "
+ "of *values*!");
+
+ if (IsBlkExprLive) {
// Copy the binding to the new map.
EBMapRef = EBMapRef.add(BlkExpr, X);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index c4838492271c..635495e9bf60 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -50,9 +50,8 @@ ExplodedGraph::~ExplodedGraph() = default;
bool ExplodedGraph::isInterestingLValueExpr(const Expr *Ex) {
if (!Ex->isLValue())
return false;
- return isa<DeclRefExpr>(Ex) ||
- isa<MemberExpr>(Ex) ||
- isa<ObjCIvarRefExpr>(Ex);
+ return isa<DeclRefExpr>(Ex) || isa<MemberExpr>(Ex) ||
+ isa<ObjCIvarRefExpr>(Ex) || isa<ArraySubscriptExpr>(Ex);
}
bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index b542cf2c0303..265dcd134213 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1210,9 +1210,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
switch (S->getStmtClass()) {
// C++, OpenMP and ARC stuff we don't support yet.
- case Expr::ObjCIndirectCopyRestoreExprClass:
case Stmt::CXXDependentScopeMemberExprClass:
- case Stmt::CXXInheritedCtorInitExprClass:
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
@@ -1226,6 +1224,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnresolvedLookupExprClass:
case Stmt::UnresolvedMemberExprClass:
case Stmt::TypoExprClass:
+ case Stmt::RecoveryExprClass:
case Stmt::CXXNoexceptExprClass:
case Stmt::PackExpansionExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
@@ -1258,6 +1257,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPTaskgroupDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPDepobjDirectiveClass:
+ case Stmt::OMPScanDirectiveClass:
case Stmt::OMPOrderedDirectiveClass:
case Stmt::OMPAtomicDirectiveClass:
case Stmt::OMPTargetDirectiveClass:
@@ -1411,6 +1412,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass:
case Stmt::OMPArraySectionExprClass:
+ case Stmt::OMPArrayShapingExprClass:
+ case Stmt::OMPIteratorExprClass:
case Stmt::TypeTraitExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
@@ -1511,6 +1514,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::MatrixSubscriptExprClass:
+ llvm_unreachable("Support for MatrixSubscriptExpr is not implemented.");
+ break;
+
case Stmt::GCCAsmStmtClass:
Bldr.takeNodes(Pred);
VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
@@ -1618,6 +1625,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::CXXInheritedCtorInitExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCXXInheritedCtorInitExpr(cast<CXXInheritedCtorInitExpr>(S), Pred,
+ Dst);
+ Bldr.addNodes(Dst);
+ break;
+
case Stmt::CXXNewExprClass: {
Bldr.takeNodes(Pred);
@@ -1638,8 +1652,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet PreVisit;
const auto *CDE = cast<CXXDeleteExpr>(S);
getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+ ExplodedNodeSet PostVisit;
+ getCheckerManager().runCheckersForPostStmt(PostVisit, PreVisit, S, *this);
- for (const auto i : PreVisit)
+ for (const auto i : PostVisit)
VisitCXXDeleteExpr(CDE, i, Dst);
Bldr.addNodes(Dst);
@@ -1705,7 +1721,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXConstCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
case Stmt::BuiltinBitCastExprClass:
- case Stmt::ObjCBridgedCastExprClass: {
+ case Stmt::ObjCBridgedCastExprClass:
+ case Stmt::CXXAddrspaceCastExprClass: {
Bldr.takeNodes(Pred);
const auto *C = cast<CastExpr>(S);
ExplodedNodeSet dstExpr;
@@ -1852,6 +1869,21 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
}
+
+ case Expr::ObjCIndirectCopyRestoreExprClass: {
+ // ObjCIndirectCopyRestoreExpr implies passing a temporary for
+ // correctness of lifetime management. Due to limited analysis
+ // of ARC, this is implemented as direct arg passing.
+ Bldr.takeNodes(Pred);
+ ProgramStateRef state = Pred->getState();
+ const auto *OIE = cast<ObjCIndirectCopyRestoreExpr>(S);
+ const Expr *E = OIE->getSubExpr();
+ SVal V = state->getSVal(E, Pred->getLocationContext());
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(), V));
+ Bldr.addNodes(Dst);
+ break;
+ }
}
}
@@ -3161,11 +3193,13 @@ std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
return DumpGraph(Src, Filename);
} else {
return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
- /*Title=*/"Exploded Graph", /*Filename=*/Filename);
+ /*Title=*/"Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
-#endif
+#else
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
return "";
+#endif
}
std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
@@ -3179,7 +3213,7 @@ std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
/*ShortNames=*/false,
/*Title=*/"Trimmed Exploded Graph",
- /*Filename=*/Filename);
+ /*Filename=*/std::string(Filename));
}
#endif
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
@@ -3190,3 +3224,5 @@ void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
static int index = 0;
return &index;
}
+
+void ExprEngine::anchor() { }
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index b17f26aa9c53..c5e38cc7423d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -218,7 +218,7 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
auto CE = BD->capture_end();
for (; I != E; ++I) {
const VarRegion *capturedR = I.getCapturedRegion();
- const VarRegion *originalR = I.getOriginalRegion();
+ const TypedValueRegion *originalR = I.getOriginalRegion();
// If the capture had a copy expression, use the result of evaluating
// that expression, otherwise use the original value.
@@ -573,6 +573,18 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ if (isa<TypedefNameDecl>(*DS->decl_begin())) {
+ // C99 6.7.7 "Any array size expressions associated with variable length
+ // array declarators are evaluated each time the declaration of the typedef
+ // name is reached in the order of execution."
+    // Checkers need to be notified of the typedef declaration so that they
+    // can handle VLA size expressions.
+ ExplodedNodeSet DstPre;
+ getCheckerManager().runCheckersForPreStmt(DstPre, Pred, DS, *this);
+ getCheckerManager().runCheckersForPostStmt(Dst, DstPre, DS, *this);
+ return;
+ }
+
// Assumption: The CFG has one DeclStmt per Decl.
const VarDecl *VD = dyn_cast_or_null<VarDecl>(*DS->decl_begin());
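Illustration only, not part of the patch: a C99-style snippet (getBound() is a hypothetical helper) showing why the DeclStmt of a typedef matters here; the VLA size expression is evaluated each time the declaration is reached, and checkers now receive pre/post callbacks for it.

    int getBound(void);               /* hypothetical size provider */

    void useVlaTypedef(void) {
      typedef int Buffer[getBound()]; /* VLA typedef: the size expression is
                                         evaluated here, so checkers can now
                                         observe this DeclStmt */
      Buffer b;
      (void)b;
    }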
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index b816aab7c18f..38a680eb04c0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -109,15 +109,14 @@ SVal ExprEngine::makeZeroElementRegion(ProgramStateRef State, SVal LValue,
return LValue;
}
-std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
+SVal ExprEngine::computeObjectUnderConstruction(
const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
const ConstructionContext *CC, EvalCallOptions &CallOpts) {
SValBuilder &SVB = getSValBuilder();
MemRegionManager &MRMgr = SVB.getRegionManager();
ASTContext &ACtx = SVB.getContext();
- // See if we're constructing an existing region by looking at the
- // current construction context.
+ // Compute the target region by exploring the construction context.
if (CC) {
switch (CC->getKind()) {
case ConstructionContext::CXX17ElidedCopyVariableKind:
@@ -125,13 +124,9 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *DSCC = cast<VariableConstructionContext>(CC);
const auto *DS = DSCC->getDeclStmt();
const auto *Var = cast<VarDecl>(DS->getSingleDecl());
- SVal LValue = State->getLValue(Var, LCtx);
QualType Ty = Var->getType();
- LValue =
- makeZeroElementRegion(State, LValue, Ty, CallOpts.IsArrayCtorOrDtor);
- State =
- addObjectUnderConstruction(State, DSCC->getDeclStmt(), LCtx, LValue);
- return std::make_pair(State, LValue);
+ return makeZeroElementRegion(State, State->getLValue(Var, LCtx), Ty,
+ CallOpts.IsArrayCtorOrDtor);
}
case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
case ConstructionContext::SimpleConstructorInitializerKind: {
@@ -139,8 +134,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *Init = ICC->getCXXCtorInitializer();
assert(Init->isAnyMemberInitializer());
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
- Loc ThisPtr =
- SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
+ Loc ThisPtr = SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
const ValueDecl *Field;
@@ -154,10 +148,8 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
}
QualType Ty = Field->getType();
- FieldVal = makeZeroElementRegion(State, FieldVal, Ty,
- CallOpts.IsArrayCtorOrDtor);
- State = addObjectUnderConstruction(State, Init, LCtx, FieldVal);
- return std::make_pair(State, FieldVal);
+ return makeZeroElementRegion(State, FieldVal, Ty,
+ CallOpts.IsArrayCtorOrDtor);
}
case ConstructionContext::NewAllocatedObjectKind: {
if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
@@ -170,11 +162,10 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
// TODO: In fact, we need to call the constructor for every
// allocated element, not just the first one!
CallOpts.IsArrayCtorOrDtor = true;
- return std::make_pair(
- State, loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
- MR, NE->getType()->getPointeeType())));
+ return loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
+ MR, NE->getType()->getPointeeType()));
}
- return std::make_pair(State, V);
+ return V;
}
// TODO: Detect when the allocator returns a null pointer.
// Constructor shall not be called in this case.
@@ -202,7 +193,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
CallerLCtx = CallerLCtx->getParent();
assert(!isa<BlockInvocationContext>(CallerLCtx));
}
- return prepareForObjectConstruction(
+ return computeObjectUnderConstruction(
cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
RTC->getConstructionContext(), CallOpts);
} else {
@@ -223,64 +214,46 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
assert(RetE && "Void returns should not have a construction context");
QualType ReturnTy = RetE->getType();
QualType RegionTy = ACtx.getPointerType(ReturnTy);
- SVal V = SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC,
- RegionTy, currBldrCtx->blockCount());
- return std::make_pair(State, V);
+ return SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC, RegionTy,
+ currBldrCtx->blockCount());
}
llvm_unreachable("Unhandled return value construction context!");
}
case ConstructionContext::ElidedTemporaryObjectKind: {
assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
- const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
- const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
- const CXXConstructExpr *CE = TCC->getConstructorAfterElision();
// Support pre-C++17 copy elision. We'll have the elidable copy
// constructor in the AST and in the CFG, but we'll skip it
// and construct directly into the final object. This call
// also sets the CallOpts flags for us.
- SVal V;
// If the elided copy/move constructor is not supported, there's still
// benefit in trying to model the non-elided constructor.
// Stash our state before trying to elide, as it'll get overwritten.
ProgramStateRef PreElideState = State;
EvalCallOptions PreElideCallOpts = CallOpts;
- std::tie(State, V) = prepareForObjectConstruction(
- CE, State, LCtx, TCC->getConstructionContextAfterElision(), CallOpts);
+ SVal V = computeObjectUnderConstruction(
+ TCC->getConstructorAfterElision(), State, LCtx,
+ TCC->getConstructionContextAfterElision(), CallOpts);
// FIXME: This definition of "copy elision has not failed" is unreliable.
// It doesn't indicate that the constructor will actually be inlined
- // later; it is still up to evalCall() to decide.
- if (!CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) {
- // Remember that we've elided the constructor.
- State = addObjectUnderConstruction(State, CE, LCtx, V);
-
- // Remember that we've elided the destructor.
- if (BTE)
- State = elideDestructor(State, BTE, LCtx);
-
- // Instead of materialization, shamelessly return
- // the final object destination.
- if (MTE)
- State = addObjectUnderConstruction(State, MTE, LCtx, V);
-
- return std::make_pair(State, V);
- } else {
- // Copy elision failed. Revert the changes and proceed as if we have
- // a simple temporary.
- State = PreElideState;
- CallOpts = PreElideCallOpts;
- }
+ // later; this is still up to evalCall() to decide.
+ if (!CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
+ return V;
+
+ // Copy elision failed. Revert the changes and proceed as if we have
+ // a simple temporary.
+ CallOpts = PreElideCallOpts;
+ CallOpts.IsElidableCtorThatHasNotBeenElided = true;
LLVM_FALLTHROUGH;
}
case ConstructionContext::SimpleTemporaryObjectKind: {
const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
- const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
- SVal V = UnknownVal();
+ CallOpts.IsTemporaryCtorOrDtor = true;
if (MTE) {
if (const ValueDecl *VD = MTE->getExtendingDecl()) {
assert(MTE->getStorageDuration() != SD_FullExpression);
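Illustration only, not part of the patch: the elided-temporary path handled above corresponds to pre-C++17 code like the sketch below, where the elidable copy constructor is still present in the AST and CFG but the analyzer constructs directly into the final object. All names are made up.

    struct Widget {
      Widget() {}
      Widget(const Widget &) {}   // elidable copy: visible in the AST/CFG,
                                  // but skipped by the analyzer
    };

    Widget makeWidget() { return Widget(); }

    void client() {
      Widget w = makeWidget();    // constructed directly into 'w' when
                                  // constructor elision is enabled
    }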
@@ -296,20 +269,10 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
if (MTE->getStorageDuration() == SD_Static ||
MTE->getStorageDuration() == SD_Thread)
- V = loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
+ return loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
}
- if (V.isUnknown())
- V = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
-
- if (BTE)
- State = addObjectUnderConstruction(State, BTE, LCtx, V);
-
- if (MTE)
- State = addObjectUnderConstruction(State, MTE, LCtx, V);
-
- CallOpts.IsTemporaryCtorOrDtor = true;
- return std::make_pair(State, V);
+ return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
}
case ConstructionContext::ArgumentKind: {
// Arguments are technically temporaries.
@@ -318,10 +281,8 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *ACC = cast<ArgumentConstructionContext>(CC);
const Expr *E = ACC->getCallLikeExpr();
unsigned Idx = ACC->getIndex();
- const CXXBindTemporaryExpr *BTE = ACC->getCXXBindTemporaryExpr();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- SVal V = UnknownVal();
auto getArgLoc = [&](CallEventRef<> Caller) -> Optional<SVal> {
const LocationContext *FutureSFC =
Caller->getCalleeStackFrame(currBldrCtx->blockCount());
@@ -342,76 +303,171 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
// Operator arguments do not correspond to operator parameters
// because this-argument is implemented as a normal argument in
// operator call expressions but not in operator declarations.
- const VarRegion *VR = Caller->getParameterLocation(
+ const TypedValueRegion *TVR = Caller->getParameterLocation(
*Caller->getAdjustedParameterIndex(Idx), currBldrCtx->blockCount());
- if (!VR)
+ if (!TVR)
return None;
- return loc::MemRegionVal(VR);
+ return loc::MemRegionVal(TVR);
};
if (const auto *CE = dyn_cast<CallExpr>(E)) {
CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {CE, Idx}, LCtx, V);
} else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
// Don't bother figuring out the target region for the future
// constructor because we won't need it.
CallEventRef<> Caller =
CEMgr.getCXXConstructorCall(CCE, /*Target=*/nullptr, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {CCE, Idx}, LCtx, V);
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
CallEventRef<> Caller = CEMgr.getObjCMethodCall(ME, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {ME, Idx}, LCtx, V);
+ }
+ }
+ } // switch (CC->getKind())
+ }
+
+ // If we couldn't find an existing region to construct into, assume we're
+ // constructing a temporary. Notify the caller of our failure.
+ CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
+ return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
+}
+
+ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
+ SVal V, const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, const EvalCallOptions &CallOpts) {
+ if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) {
+    // It looks like we failed to find the target region, which means that
+    // copy elision failed. There's nothing we can do about it here.
+ return State;
+ }
+
+ // See if we're constructing an existing region by looking at the
+ // current construction context.
+ assert(CC && "Computed target region without construction context?");
+ switch (CC->getKind()) {
+ case ConstructionContext::CXX17ElidedCopyVariableKind:
+ case ConstructionContext::SimpleVariableKind: {
+ const auto *DSCC = cast<VariableConstructionContext>(CC);
+ return addObjectUnderConstruction(State, DSCC->getDeclStmt(), LCtx, V);
+ }
+ case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
+ case ConstructionContext::SimpleConstructorInitializerKind: {
+ const auto *ICC = cast<ConstructorInitializerConstructionContext>(CC);
+ return addObjectUnderConstruction(State, ICC->getCXXCtorInitializer(),
+ LCtx, V);
+ }
+ case ConstructionContext::NewAllocatedObjectKind: {
+ return State;
+ }
+ case ConstructionContext::SimpleReturnedValueKind:
+ case ConstructionContext::CXX17ElidedCopyReturnedValueKind: {
+ const StackFrameContext *SFC = LCtx->getStackFrame();
+ const LocationContext *CallerLCtx = SFC->getParent();
+ if (!CallerLCtx) {
+ // No extra work is necessary in top frame.
+ return State;
}
- assert(!V.isUnknown());
+ auto RTC = (*SFC->getCallSiteBlock())[SFC->getIndex()]
+ .getAs<CFGCXXRecordTypedCall>();
+ assert(RTC && "Could not have had a target region without it");
+ if (isa<BlockInvocationContext>(CallerLCtx)) {
+ // Unwrap block invocation contexts. They're mostly part of
+ // the current stack frame.
+ CallerLCtx = CallerLCtx->getParent();
+ assert(!isa<BlockInvocationContext>(CallerLCtx));
+ }
- if (BTE)
+ return updateObjectsUnderConstruction(V,
+ cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
+ RTC->getConstructionContext(), CallOpts);
+ }
+ case ConstructionContext::ElidedTemporaryObjectKind: {
+ assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
+ if (!CallOpts.IsElidableCtorThatHasNotBeenElided) {
+ const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
+ State = updateObjectsUnderConstruction(
+ V, TCC->getConstructorAfterElision(), State, LCtx,
+ TCC->getConstructionContextAfterElision(), CallOpts);
+
+ // Remember that we've elided the constructor.
+ State = addObjectUnderConstruction(
+ State, TCC->getConstructorAfterElision(), LCtx, V);
+
+ // Remember that we've elided the destructor.
+ if (const auto *BTE = TCC->getCXXBindTemporaryExpr())
+ State = elideDestructor(State, BTE, LCtx);
+
+ // Instead of materialization, shamelessly return
+ // the final object destination.
+ if (const auto *MTE = TCC->getMaterializedTemporaryExpr())
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
+
+ return State;
+ }
+ // If we decided not to elide the constructor, proceed as if
+ // it's a simple temporary.
+ LLVM_FALLTHROUGH;
+ }
+ case ConstructionContext::SimpleTemporaryObjectKind: {
+ const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
+ if (const auto *BTE = TCC->getCXXBindTemporaryExpr())
State = addObjectUnderConstruction(State, BTE, LCtx, V);
- return std::make_pair(State, V);
+ if (const auto *MTE = TCC->getMaterializedTemporaryExpr())
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
+
+ return State;
}
+ case ConstructionContext::ArgumentKind: {
+ const auto *ACC = cast<ArgumentConstructionContext>(CC);
+ if (const auto *BTE = ACC->getCXXBindTemporaryExpr())
+ State = addObjectUnderConstruction(State, BTE, LCtx, V);
+
+ return addObjectUnderConstruction(
+ State, {ACC->getCallLikeExpr(), ACC->getIndex()}, LCtx, V);
}
}
- // If we couldn't find an existing region to construct into, assume we're
- // constructing a temporary. Notify the caller of our failure.
- CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
- return std::make_pair(
- State, loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx)));
+ llvm_unreachable("Unhandled construction context!");
}
-void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
- ExplodedNode *Pred,
- ExplodedNodeSet &destNodes) {
+void ExprEngine::handleConstructor(const Expr *E,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &destNodes) {
+ const auto *CE = dyn_cast<CXXConstructExpr>(E);
+ const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(E);
+ assert(CE || CIE);
+
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
SVal Target = UnknownVal();
- if (Optional<SVal> ElidedTarget =
- getObjectUnderConstruction(State, CE, LCtx)) {
- // We've previously modeled an elidable constructor by pretending that it in
- // fact constructs into the correct target. This constructor can therefore
- // be skipped.
- Target = *ElidedTarget;
- StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
- State = finishObjectConstruction(State, CE, LCtx);
- if (auto L = Target.getAs<Loc>())
- State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
- Bldr.generateNode(CE, Pred, State);
- return;
+ if (CE) {
+ if (Optional<SVal> ElidedTarget =
+ getObjectUnderConstruction(State, CE, LCtx)) {
+ // We've previously modeled an elidable constructor by pretending that it
+ // in fact constructs into the correct target. This constructor can
+ // therefore be skipped.
+ Target = *ElidedTarget;
+ StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
+ State = finishObjectConstruction(State, CE, LCtx);
+ if (auto L = Target.getAs<Loc>())
+ State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
+ Bldr.generateNode(CE, Pred, State);
+ return;
+ }
}
// FIXME: Handle arrays, which run the same constructor for every element.
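Illustration only: the ArgumentKind branch above is the one that fires for temporaries constructed directly into a call argument, as in this made-up example.

    struct Payload {
      Payload() {}
      Payload(const Payload &) {}
    };

    void sink(Payload p) { (void)p; }

    void caller() {
      sink(Payload());            // the temporary is built straight into
                                  // sink()'s argument slot, which is what the
                                  // argument construction context models
    }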
@@ -423,10 +479,16 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
assert(C || getCurrentCFGElement().getAs<CFGStmt>());
const ConstructionContext *CC = C ? C->getConstructionContext() : nullptr;
- switch (CE->getConstructionKind()) {
+ const CXXConstructExpr::ConstructionKind CK =
+ CE ? CE->getConstructionKind() : CIE->getConstructionKind();
+ switch (CK) {
case CXXConstructExpr::CK_Complete: {
+ // Inherited constructors are always base class constructors.
+ assert(CE && !CIE && "A complete constructor is inherited?!");
+
+    // The target region is determined from the construction context.
std::tie(State, Target) =
- prepareForObjectConstruction(CE, State, LCtx, CC, CallOpts);
+ handleConstructionContext(CE, State, LCtx, CC, CallOpts);
break;
}
case CXXConstructExpr::CK_VirtualBase: {
@@ -455,9 +517,9 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// FIXME: Instead of relying on the ParentMap, we should have the
// trigger-statement (InitListExpr in this case) passed down from CFG or
// otherwise always available during construction.
- if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(CE))) {
+ if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(E))) {
MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
- Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(CE, LCtx));
+ Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
@@ -468,14 +530,13 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
- if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
+ if (CK == CXXConstructExpr::CK_Delegating) {
Target = ThisVal;
} else {
// Cast to the base type.
- bool IsVirtual =
- (CE->getConstructionKind() == CXXConstructExpr::CK_VirtualBase);
- SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, CE->getType(),
- IsVirtual);
+ bool IsVirtual = (CK == CXXConstructExpr::CK_VirtualBase);
+ SVal BaseVal =
+ getStoreManager().evalDerivedToBase(ThisVal, E->getType(), IsVirtual);
Target = BaseVal;
}
break;
@@ -487,23 +548,27 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
"Prepare for object construction");
ExplodedNodeSet DstPrepare;
StmtNodeBuilder BldrPrepare(Pred, DstPrepare, *currBldrCtx);
- BldrPrepare.generateNode(CE, Pred, State, &T, ProgramPoint::PreStmtKind);
+ BldrPrepare.generateNode(E, Pred, State, &T, ProgramPoint::PreStmtKind);
assert(DstPrepare.size() <= 1);
if (DstPrepare.size() == 0)
return;
Pred = *BldrPrepare.begin();
}
+ const MemRegion *TargetRegion = Target.getAsRegion();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<CXXConstructorCall> Call =
- CEMgr.getCXXConstructorCall(CE, Target.getAsRegion(), State, LCtx);
+ CallEventRef<> Call =
+ CIE ? (CallEventRef<>)CEMgr.getCXXInheritedConstructorCall(
+ CIE, TargetRegion, State, LCtx)
+ : (CallEventRef<>)CEMgr.getCXXConstructorCall(
+ CE, TargetRegion, State, LCtx);
ExplodedNodeSet DstPreVisit;
- getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+ getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, E, *this);
- // FIXME: Is it possible and/or useful to do this before PreStmt?
ExplodedNodeSet PreInitialized;
- {
+ if (CE) {
+ // FIXME: Is it possible and/or useful to do this before PreStmt?
StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
E = DstPreVisit.end();
@@ -528,6 +593,8 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
ProgramPoint::PreStmtKind);
}
+ } else {
+ PreInitialized = DstPreVisit;
}
ExplodedNodeSet DstPreCall;
@@ -537,7 +604,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNodeSet DstEvaluated;
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
- if (CE->getConstructor()->isTrivial() &&
+ if (CE && CE->getConstructor()->isTrivial() &&
CE->getConstructor()->isCopyOrMoveConstructor() &&
!CallOpts.IsArrayCtorOrDtor) {
// FIXME: Handle other kinds of trivial constructors as well.
@@ -548,7 +615,8 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
} else {
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call, CallOpts);
+ getCheckerManager().runCheckersForEvalCall(DstEvaluated, *I, *Call, *this,
+ CallOpts);
}
// If the CFG was constructed without elements for temporary destructors
@@ -560,9 +628,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// paths when no-return temporary destructors are used for assertions.
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
- const MemRegion *Target = Call->getCXXThisVal().getAsRegion();
- if (Target && isa<CXXTempObjectRegion>(Target) &&
- Call->getDecl()->getParent()->isAnyDestructorNoReturn()) {
+ if (llvm::isa_and_nonnull<CXXTempObjectRegion>(TargetRegion) &&
+ cast<CXXConstructorDecl>(Call->getDecl())
+ ->getParent()
+ ->isAnyDestructorNoReturn()) {
// If we've inlined the constructor, then DstEvaluated would be empty.
// In this case we still want a sink, which could be implemented
@@ -575,7 +644,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
"We should not have inlined this constructor!");
for (ExplodedNode *N : DstEvaluated) {
- Bldr.generateSink(CE, N, N->getState());
+ Bldr.generateSink(E, N, N->getState());
}
// There is no need to run the PostCall and PostStmt checker
@@ -586,7 +655,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
ExplodedNodeSet DstPostArgumentCleanup;
- for (auto I : DstEvaluated)
+ for (ExplodedNode *I : DstEvaluated)
finishArgumentConstruction(DstPostArgumentCleanup, I, *Call);
// If there were other constructors called for object-type arguments
@@ -595,7 +664,19 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
getCheckerManager().runCheckersForPostCall(DstPostCall,
DstPostArgumentCleanup,
*Call, *this);
- getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
+ getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, E, *this);
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ handleConstructor(CE, Pred, Dst);
+}
+
+void ExprEngine::VisitCXXInheritedCtorInitExpr(
+ const CXXInheritedCtorInitExpr *CE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ handleConstructor(CE, Pred, Dst);
}
void ExprEngine::VisitCXXDestructor(QualType ObjectType,
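Illustration only, not part of the patch: the C++11 feature this block adds modeling for is an inheriting constructor, which the AST represents as a CXXInheritedCtorInitExpr.

    struct Base {
      int value;
      explicit Base(int v) : value(v) {}
    };

    struct Derived : Base {
      using Base::Base;           // inheriting constructor: Derived(42) goes
                                  // through a CXXInheritedCtorInitExpr that
                                  // forwards to Base(int)
    };

    int test() {
      Derived d(42);
      return d.value;             // the analyzer can now model this as 42
    }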
@@ -683,7 +764,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
ExplodedNodeSet DstPostCall;
StmtNodeBuilder CallBldr(DstPreCall, DstPostCall, *currBldrCtx);
- for (auto I : DstPreCall) {
+ for (ExplodedNode *I : DstPreCall) {
// FIXME: Provide evalCall for checkers?
defaultEvalCall(CallBldr, I, *Call);
}
@@ -693,7 +774,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
// CXXNewExpr gets processed.
ExplodedNodeSet DstPostValue;
StmtNodeBuilder ValueBldr(DstPostCall, DstPostValue, *currBldrCtx);
- for (auto I : DstPostCall) {
+ for (ExplodedNode *I : DstPostCall) {
// FIXME: Because CNE serves as the "call site" for the allocator (due to
// lack of a better expression in the AST), the conjured return value symbol
// is going to be of the same type (C++ object pointer type). Technically
@@ -727,10 +808,8 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
ExplodedNodeSet DstPostPostCallCallback;
getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
DstPostValue, *Call, *this);
- for (auto I : DstPostPostCallCallback) {
- getCheckerManager().runCheckersForNewAllocator(
- CNE, *getObjectUnderConstruction(I->getState(), CNE, LCtx), Dst, I,
- *this);
+ for (ExplodedNode *I : DstPostPostCallCallback) {
+ getCheckerManager().runCheckersForNewAllocator(*Call, Dst, I, *this);
}
}
@@ -846,13 +925,18 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
- ProgramStateRef state = Pred->getState();
- Bldr.generateNode(CDE, Pred, state);
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXDeallocatorCall> Call = CEMgr.getCXXDeallocatorCall(
+ CDE, Pred->getState(), Pred->getLocationContext());
+
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, Pred, *Call, *this);
+
+ getCheckerManager().runCheckersForPostCall(Dst, DstPreCall, *Call, *this);
}
-void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
- ExplodedNode *Pred,
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
const VarDecl *VD = CS->getExceptionDecl();
if (!VD) {
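Illustration only: with the rewritten VisitCXXDeleteExpr above, a plain delete-expression is now presented to checkers as a deallocator call, so they observe it through the preCall/postCall callbacks rather than only as a statement.

    void dispose(int *p) {
      delete p;                   // now modeled as a CXXDeallocatorCall
    }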
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 01a371e664b2..52ba17d59ae0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -10,17 +10,19 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Decl.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -324,17 +326,14 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
ExplodedNodeSet DstPostCall;
- if (const CXXNewExpr *CNE = dyn_cast_or_null<CXXNewExpr>(CE)) {
+ if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
ExplodedNodeSet DstPostPostCallCallback;
getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
CEENode, *UpdatedCall, *this,
/*wasInlined=*/true);
- for (auto I : DstPostPostCallCallback) {
+ for (ExplodedNode *I : DstPostPostCallCallback) {
getCheckerManager().runCheckersForNewAllocator(
- CNE,
- *getObjectUnderConstruction(I->getState(), CNE,
- calleeCtx->getParent()),
- DstPostCall, I, *this,
+ cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
/*wasInlined=*/true);
}
} else {
@@ -585,12 +584,12 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this);
+ Call, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
- for (auto I : dstCallEvaluated)
+ for (ExplodedNode *I : dstCallEvaluated)
finishArgumentConstruction(dstArgumentCleanup, I, Call);
ExplodedNodeSet dstPostCall;
@@ -604,7 +603,7 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
- for (auto I : dstPostCall) {
+ for (ExplodedNode *I : dstPostCall) {
NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
Escaped.clear();
@@ -668,8 +667,8 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
assert(RTC->getStmt() == Call.getOriginExpr());
EvalCallOptions CallOpts; // FIXME: We won't really need those.
std::tie(State, Target) =
- prepareForObjectConstruction(Call.getOriginExpr(), State, LCtx,
- RTC->getConstructionContext(), CallOpts);
+ handleConstructionContext(Call.getOriginExpr(), State, LCtx,
+ RTC->getConstructionContext(), CallOpts);
const MemRegion *TargetR = Target.getAsRegion();
assert(TargetR);
// Invalidate the region so that it didn't look uninitialized. If this is
@@ -718,7 +717,7 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
AnalyzerOptions &Opts,
- const ExprEngine::EvalCallOptions &CallOpts) {
+ const EvalCallOptions &CallOpts) {
const LocationContext *CurLC = Pred->getLocationContext();
const StackFrameContext *CallerSFC = CurLC->getStackFrame();
switch (Call.getKind()) {
@@ -742,7 +741,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
: nullptr;
- if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
+ if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
!Opts.MayInlineCXXAllocator)
return CIP_DisallowedOnce;
@@ -789,6 +788,11 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
break;
}
+ case CE_CXXInheritedConstructor: {
+ // This doesn't really increase the cost of inlining ever, because
+ // the stack frame of the inherited constructor is trivial.
+ return CIP_Allowed;
+ }
case CE_CXXDestructor: {
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
return CIP_DisallowedAlways;
@@ -814,6 +818,8 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
return CIP_DisallowedOnce;
break;
}
+ case CE_CXXDeallocator:
+ LLVM_FALLTHROUGH;
case CE_CXXAllocator:
if (Opts.MayInlineCXXAllocator)
break;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 002b6070ddcd..bc7c41d039c4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -66,11 +66,9 @@ class HTMLDiagnostics : public PathDiagnosticConsumer {
const bool SupportsCrossFileDiagnostics;
public:
- HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts,
- const std::string& prefix,
- const Preprocessor &pp,
- bool supportsMultipleFiles)
- : Directory(prefix), PP(pp), AnalyzerOpts(AnalyzerOpts),
+ HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string &OutputDir,
+ const Preprocessor &pp, bool supportsMultipleFiles)
+ : Directory(OutputDir), PP(pp), AnalyzerOpts(AnalyzerOpts),
SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
~HTMLDiagnostics() override { FlushDiagnostics(nullptr); }
@@ -136,16 +134,45 @@ private:
void ento::createHTMLDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, prefix, PP, true));
+ const std::string &OutputDir, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+  // FIXME: HTML is currently our default output type, but if the output
+  // directory isn't specified, it behaves as if it were in the minimal text
+  // output mode. This doesn't make much sense; minimal text output should be
+  // our default. If backward compatibility is a concern, the old behavior
+  // could be kept behind -analyzer-config-compatibility-mode=true.
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+
+ // TODO: Emit an error here.
+ if (OutputDir.empty())
+ return;
+
+ C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, true));
}
void ento::createHTMLSingleFileDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &OutputDir, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (OutputDir.empty())
+ return;
+
+ C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, false));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+}
+
+void ento::createPlistHTMLDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, prefix, PP, false));
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ createHTMLDiagnosticConsumer(
+ AnalyzerOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP,
+ CTU);
+ createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
}
//===----------------------------------------------------------------------===//
@@ -1043,8 +1070,13 @@ StringRef HTMLDiagnostics::generateKeyboardNavigationJavascript() {
<script type='text/javascript'>
var digitMatcher = new RegExp("[0-9]+");
+var querySelectorAllArray = function(selector) {
+ return Array.prototype.slice.call(
+ document.querySelectorAll(selector));
+}
+
document.addEventListener("DOMContentLoaded", function() {
- document.querySelectorAll(".PathNav > a").forEach(
+ querySelectorAllArray(".PathNav > a").forEach(
function(currentValue, currentIndex) {
var hrefValue = currentValue.getAttribute("href");
currentValue.onclick = function() {
@@ -1064,7 +1096,7 @@ var findNum = function() {
};
var scrollTo = function(el) {
- document.querySelectorAll(".selected").forEach(function(s) {
+ querySelectorAllArray(".selected").forEach(function(s) {
s.classList.remove("selected");
});
el.classList.add("selected");
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index 1a09a521f116..dc268e562237 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -130,10 +130,10 @@ static internal::Matcher<Stmt> hasSuspiciousStmt(StringRef NodeName) {
// Escaping and not known mutation of the loop counter is handled
// by exclusion of assigning and address-of operators and
// pass-by-ref function calls on the loop counter from the body.
- changeIntBoundNode(equalsBoundNode(NodeName)),
- callByRef(equalsBoundNode(NodeName)),
- getAddrTo(equalsBoundNode(NodeName)),
- assignedToRef(equalsBoundNode(NodeName)))));
+ changeIntBoundNode(equalsBoundNode(std::string(NodeName))),
+ callByRef(equalsBoundNode(std::string(NodeName))),
+ getAddrTo(equalsBoundNode(std::string(NodeName))),
+ assignedToRef(equalsBoundNode(std::string(NodeName))))));
}
static internal::Matcher<Stmt> forLoopMatcher() {
@@ -164,6 +164,11 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
if (VD->hasGlobalStorage())
return true;
+ const bool isParm = isa<ParmVarDecl>(VD);
+  // Reference parameters are assumed to escape.
+ if (isParm && VD->getType()->isReferenceType())
+ return true;
+
while (!N->pred_empty()) {
// FIXME: getStmtForDiagnostics() does nasty things in order to provide
// a valid statement for body farms, do we need this behavior here?
@@ -193,6 +198,11 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
N = N->getFirstPred();
}
+
+ // Parameter declaration will not be found.
+  // A parameter's declaration will not be found; treat it as not escaped.
+ return false;
+
llvm_unreachable("Reached root without finding the declaration of VD");
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 9a7b1a24b819..47e34dd84b9a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -67,8 +67,10 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
}
// References should not be invalidated.
- auto Matches = match(findAll(stmt(hasDescendant(varDecl(hasType(referenceType())).bind(MatchRef)))),
- *LCtx->getDecl()->getBody(), ASTCtx);
+ auto Matches = match(
+ findAll(stmt(hasDescendant(
+ varDecl(hasType(hasCanonicalType(referenceType()))).bind(MatchRef)))),
+ *LCtx->getDecl()->getBody(), ASTCtx);
for (BoundNodes Match : Matches) {
const VarDecl *VD = Match.getNodeAs<VarDecl>(MatchRef);
assert(VD);
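Illustration only: the hasCanonicalType() change matters for references declared through a type alias, whose declared type is not syntactically a ReferenceType.

    using IntRef = int &;

    void widened(int x) {
      IntRef r = x;               // the declared type is the alias 'IntRef';
                                  // only its canonical type is 'int &', so the
                                  // matcher needs hasCanonicalType() to keep
                                  // 'r' from being invalidated on widening
      while (x < 100)
        ++x;
      (void)r;
    }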
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index a10d7e69ad7e..455adf53ac99 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -142,7 +142,7 @@ bool SubRegion::isSubRegionOf(const MemRegion* R) const {
return false;
}
-MemRegionManager* SubRegion::getMemRegionManager() const {
+MemRegionManager &SubRegion::getMemRegionManager() const {
const SubRegion* r = this;
do {
const MemRegion *superRegion = r->getSuperRegion();
@@ -159,62 +159,10 @@ const StackFrameContext *VarRegion::getStackFrame() const {
return SSR ? SSR->getStackFrame() : nullptr;
}
-//===----------------------------------------------------------------------===//
-// Region extents.
-//===----------------------------------------------------------------------===//
-
-DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
- ASTContext &Ctx = svalBuilder.getContext();
- QualType T = getDesugaredValueType(Ctx);
-
- if (isa<VariableArrayType>(T))
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
- if (T->isIncompleteType())
- return UnknownVal();
-
- CharUnits size = Ctx.getTypeSizeInChars(T);
- QualType sizeTy = svalBuilder.getArrayIndexType();
- return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
-}
-
-DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
- // Force callers to deal with bitfields explicitly.
- if (getDecl()->isBitField())
- return UnknownVal();
-
- DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
-
- // A zero-length array at the end of a struct often stands for dynamically-
- // allocated extra memory.
- if (Extent.isZeroConstant()) {
- QualType T = getDesugaredValueType(svalBuilder.getContext());
-
- if (isa<ConstantArrayType>(T))
- return UnknownVal();
- }
-
- return Extent;
-}
-
-DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
-}
-
-DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
-}
-
-DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
- svalBuilder.getArrayIndexType());
-}
-
ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg)
- : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+ : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {}
-const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
- return cast<ObjCIvarDecl>(D);
-}
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const { return IVD; }
QualType ObjCIvarRegion::getValueType() const {
return getDecl()->getType();
@@ -228,6 +176,33 @@ QualType CXXDerivedObjectRegion::getValueType() const {
return QualType(getDecl()->getTypeForDecl(), 0);
}
+QualType ParamVarRegion::getValueType() const {
+ assert(getDecl() &&
+         "`ParamVarRegion` support for functions without a `Decl` is not"
+         " implemented yet.");
+ return getDecl()->getType();
+}
+
+const ParmVarDecl *ParamVarRegion::getDecl() const {
+ const Decl *D = getStackFrame()->getDecl();
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ assert(Index < FD->param_size());
+ return FD->parameters()[Index];
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ assert(Index < BD->param_size());
+ return BD->parameters()[Index];
+ } else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ assert(Index < MD->param_size());
+ return MD->parameters()[Index];
+ } else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ assert(Index < CD->param_size());
+ return CD->parameters()[Index];
+ } else {
+ llvm_unreachable("Unexpected Decl kind!");
+ }
+}
+
//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
@@ -299,25 +274,44 @@ void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
}
+void FieldRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
+}
+
void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const ObjCIvarDecl *ivd,
const MemRegion* superRegion) {
- DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+ ID.AddInteger(static_cast<unsigned>(ObjCIvarRegionKind));
+ ID.AddPointer(ivd);
+ ID.AddPointer(superRegion);
}
-void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
- const MemRegion* superRegion, Kind k) {
- ID.AddInteger(static_cast<unsigned>(k));
- ID.AddPointer(D);
+void ObjCIvarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void NonParamVarRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const VarDecl *VD,
+ const MemRegion *superRegion) {
+ ID.AddInteger(static_cast<unsigned>(NonParamVarRegionKind));
+ ID.AddPointer(VD);
ID.AddPointer(superRegion);
}
-void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
- DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+void NonParamVarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
}
-void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
- VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+void ParamVarRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, const Expr *OE,
+ unsigned Idx, const MemRegion *SReg) {
+ ID.AddInteger(static_cast<unsigned>(ParamVarRegionKind));
+ ID.AddPointer(OE);
+ ID.AddInteger(Idx);
+ ID.AddPointer(SReg);
+}
+
+void ParamVarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getOriginExpr(), getIndex(), superRegion);
}
void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
@@ -529,12 +523,11 @@ void SymbolicRegion::dumpToStream(raw_ostream &os) const {
os << "SymRegion{" << sym << '}';
}
-void VarRegion::dumpToStream(raw_ostream &os) const {
- const auto *VD = cast<VarDecl>(D);
+void NonParamVarRegion::dumpToStream(raw_ostream &os) const {
if (const IdentifierInfo *ID = VD->getIdentifier())
os << ID->getName();
else
- os << "VarRegion{D" << VD->getID() << '}';
+ os << "NonParamVarRegion{D" << VD->getID() << '}';
}
LLVM_DUMP_METHOD void RegionRawOffset::dump() const {
@@ -581,6 +574,18 @@ void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "StackLocalsSpaceRegion";
}
+void ParamVarRegion::dumpToStream(raw_ostream &os) const {
+ const ParmVarDecl *PVD = getDecl();
+ assert(PVD &&
+         "`ParamVarRegion` support for functions without a `Decl` is not"
+         " implemented yet.");
+ if (const IdentifierInfo *ID = PVD->getIdentifier()) {
+ os << ID->getName();
+ } else {
+ os << "ParamVarRegion{P" << PVD->getID() << '}';
+ }
+}
+
bool MemRegion::canPrintPretty() const {
return canPrintPrettyAsExpr();
}
@@ -600,11 +605,18 @@ void MemRegion::printPrettyAsExpr(raw_ostream &) const {
llvm_unreachable("This region cannot be printed pretty.");
}
-bool VarRegion::canPrintPrettyAsExpr() const {
- return true;
+bool NonParamVarRegion::canPrintPrettyAsExpr() const { return true; }
+
+void NonParamVarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ os << getDecl()->getName();
}
-void VarRegion::printPrettyAsExpr(raw_ostream &os) const {
+bool ParamVarRegion::canPrintPrettyAsExpr() const { return true; }
+
+void ParamVarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ assert(getDecl() &&
+         "`ParamVarRegion` support for functions without a `Decl` is not"
+         " implemented yet.");
os << getDecl()->getName();
}
@@ -717,11 +729,79 @@ SourceRange MemRegion::sourceRange() const {
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
+static DefinedOrUnknownSVal getTypeSize(QualType Ty, ASTContext &Ctx,
+ SValBuilder &SVB) {
+ CharUnits Size = Ctx.getTypeSizeInChars(Ty);
+ QualType SizeTy = SVB.getArrayIndexType();
+ return SVB.makeIntVal(Size.getQuantity(), SizeTy);
+}
+
+DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
+ SValBuilder &SVB) const {
+ const auto *SR = cast<SubRegion>(MR);
+ SymbolManager &SymMgr = SVB.getSymbolManager();
+
+ switch (SR->getKind()) {
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::SymbolicRegionKind:
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+ case MemRegion::StringRegionKind:
+ return SVB.makeIntVal(
+ cast<StringRegion>(SR)->getStringLiteral()->getByteLength() + 1,
+ SVB.getArrayIndexType());
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ case MemRegion::CXXDerivedObjectRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
+ case MemRegion::ElementRegionKind:
+ case MemRegion::ObjCStringRegionKind: {
+ QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
+ if (isa<VariableArrayType>(Ty))
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+
+ if (Ty->isIncompleteType())
+ return UnknownVal();
+
+ return getTypeSize(Ty, Ctx, SVB);
+ }
+ case MemRegion::FieldRegionKind: {
+ // Force callers to deal with bitfields explicitly.
+ if (cast<FieldRegion>(SR)->getDecl()->isBitField())
+ return UnknownVal();
+
+ QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
+ DefinedOrUnknownSVal Size = getTypeSize(Ty, Ctx, SVB);
+
+ // A zero-length array at the end of a struct often stands for dynamically
+ // allocated extra memory.
+ if (Size.isZeroConstant()) {
+ if (isa<ConstantArrayType>(Ty))
+ return UnknownVal();
+ }
+
+ return Size;
+ }
+ // FIXME: The following are being used in 'SimpleSValBuilder' and in
+ // 'ArrayBoundChecker::checkLocation' because there is no symbol to
+ // represent the regions more appropriately.
+ case MemRegion::BlockDataRegionKind:
+ case MemRegion::BlockCodeRegionKind:
+ case MemRegion::FunctionCodeRegionKind:
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+ default:
+ llvm_unreachable("Unhandled region");
+ }
+}
+
template <typename REG>
const REG *MemRegionManager::LazyAllocate(REG*& region) {
if (!region) {
region = A.Allocate<REG>();
- new (region) REG(this);
+ new (region) REG(*this);
}
return region;
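Illustration only of the kinds of answers the consolidated getStaticSize() above produces; the zero-length trailing array is a GNU/C99-style idiom that Clang accepts.

    struct Packet {
      int length;
      char payload[0];            // zero-length trailing array: the field's
                                  // extent is UnknownVal, since it usually
                                  // stands for extra heap memory
    };

    void extents() {
      const char *s = "hi";       // StringRegion: byte length + 1 == 3
      int fixed[4];               // complete type: 4 * sizeof(int) bytes
      (void)s;
      (void)fixed;
    }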
@@ -746,7 +826,7 @@ MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
return R;
R = A.Allocate<StackLocalsSpaceRegion>();
- new (R) StackLocalsSpaceRegion(this, STC);
+ new (R) StackLocalsSpaceRegion(*this, STC);
return R;
}
@@ -759,7 +839,7 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
return R;
R = A.Allocate<StackArgumentsSpaceRegion>();
- new (R) StackArgumentsSpaceRegion(this, STC);
+ new (R) StackArgumentsSpaceRegion(*this, STC);
return R;
}
@@ -781,7 +861,7 @@ const GlobalsSpaceRegion
return R;
R = A.Allocate<StaticGlobalSpaceRegion>();
- new (R) StaticGlobalSpaceRegion(this, CR);
+ new (R) StaticGlobalSpaceRegion(*this, CR);
return R;
}
@@ -825,15 +905,16 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
return SFC;
}
if (const auto *BC = dyn_cast<BlockInvocationContext>(LC)) {
- const auto *BR =
- static_cast<const BlockDataRegion *>(BC->getContextData());
+ const auto *BR = static_cast<const BlockDataRegion *>(BC->getData());
// FIXME: This can be made more efficient.
for (BlockDataRegion::referenced_vars_iterator
I = BR->referenced_vars_begin(),
E = BR->referenced_vars_end(); I != E; ++I) {
- const VarRegion *VR = I.getOriginalRegion();
- if (VR->getDecl() == VD)
- return cast<VarRegion>(I.getCapturedRegion());
+ const TypedValueRegion *OrigR = I.getOriginalRegion();
+ if (const auto *VR = dyn_cast<VarRegion>(OrigR)) {
+ if (VR->getDecl() == VD)
+ return cast<VarRegion>(I.getCapturedRegion());
+ }
}
}
@@ -842,15 +923,37 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
return (const StackFrameContext *)nullptr;
}
-const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
+ const auto *PVD = dyn_cast<ParmVarDecl>(D);
+ if (PVD) {
+ unsigned Index = PVD->getFunctionScopeIndex();
+ const StackFrameContext *SFC = LC->getStackFrame();
+ const Stmt *CallSite = SFC->getCallSite();
+ if (CallSite) {
+ const Decl *D = SFC->getDecl();
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (Index < FD->param_size() && FD->parameters()[Index] == PVD)
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ if (Index < BD->param_size() && BD->parameters()[Index] == PVD)
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ } else {
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ }
+ }
+ }
+
D = D->getCanonicalDecl();
const MemRegion *sReg = nullptr;
if (D->hasGlobalStorage() && !D->isStaticLocal()) {
// First handle the globals defined in system headers.
- if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+ if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
// Whitelist the system globals which often DO GET modified, assume the
// rest are immutable.
if (D->getName().find("errno") != StringRef::npos)
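Illustration only, with made-up functions: after the ParmVarDecl branch above, a parameter's region is keyed by the call-site expression and parameter index, so each call site gets its own ParamVarRegion for the same ParmVarDecl.

    int twice(int n) { return n + n; }

    int callers() {
      int a = twice(1);           // 'n' is modeled in a ParamVarRegion keyed
                                  // by this call expression and index 0
      int b = twice(2);           // a different call site, hence a distinct
                                  // ParamVarRegion for the same ParmVarDecl
      return a + b;
    }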
@@ -914,7 +1017,7 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
T = getContext().getBlockPointerType(T);
const BlockCodeRegion *BTR =
- getBlockCodeRegion(BD, C.getCanonicalType(T),
+ getBlockCodeRegion(BD, Ctx.getCanonicalType(T),
STC->getAnalysisDeclContext());
sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
BTR);
@@ -926,13 +1029,23 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
}
}
- return getSubRegion<VarRegion>(D, sReg);
+ return getSubRegion<NonParamVarRegion>(D, sReg);
}
-const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
- const MemRegion *superR) {
+const NonParamVarRegion *
+MemRegionManager::getNonParamVarRegion(const VarDecl *D,
+ const MemRegion *superR) {
D = D->getCanonicalDecl();
- return getSubRegion<VarRegion>(D, superR);
+ return getSubRegion<NonParamVarRegion>(D, superR);
+}
+
+const ParamVarRegion *
+MemRegionManager::getParamVarRegion(const Expr *OriginExpr, unsigned Index,
+ const LocationContext *LC) {
+ const StackFrameContext *SFC = LC->getStackFrame();
+ assert(SFC);
+ return getSubRegion<ParamVarRegion>(OriginExpr, Index,
+ getStackArgumentsRegion(SFC));
}
const BlockDataRegion *
@@ -1325,7 +1438,8 @@ static RegionOffset calculateOffset(const MemRegion *R) {
case MemRegion::CXXThisRegionKind:
case MemRegion::StringRegionKind:
case MemRegion::ObjCStringRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
// Usual base regions.
goto Finish;
@@ -1476,12 +1590,12 @@ RegionOffset MemRegion::getAsOffset() const {
std::pair<const VarRegion *, const VarRegion *>
BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
- MemRegionManager &MemMgr = *getMemRegionManager();
+ MemRegionManager &MemMgr = getMemRegionManager();
const VarRegion *VR = nullptr;
const VarRegion *OriginalVR = nullptr;
if (!VD->hasAttr<BlocksAttr>() && VD->hasLocalStorage()) {
- VR = MemMgr.getVarRegion(VD, this);
+ VR = MemMgr.getNonParamVarRegion(VD, this);
OriginalVR = MemMgr.getVarRegion(VD, LC);
}
else {
@@ -1490,7 +1604,7 @@ BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
OriginalVR = VR;
}
else {
- VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ VR = MemMgr.getNonParamVarRegion(VD, MemMgr.getUnknownRegion());
OriginalVR = MemMgr.getVarRegion(VD, LC);
}
}
@@ -1511,7 +1625,7 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
return;
}
- MemRegionManager &MemMgr = *getMemRegionManager();
+ MemRegionManager &MemMgr = getMemRegionManager();
llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
BumpVectorContext BC(A);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 3a3942a8301b..ed62778623a8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -45,8 +45,8 @@ namespace {
AnalyzerOptions &AnOpts;
const bool SupportsCrossFileDiagnostics;
public:
- PlistDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string &prefix,
- const Preprocessor &PP,
+ PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU,
bool supportsMultipleFiles);
@@ -582,19 +582,32 @@ PlistDiagnostics::PlistDiagnostics(
void ento::createPlistDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &s, const Preprocessor &PP,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP, CTU,
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
/*supportsMultipleFiles*/ false));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
}
void ento::createPlistMultiFileDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &s, const Preprocessor &PP,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP, CTU,
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
/*supportsMultipleFiles*/ true));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
}
+
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
@@ -939,7 +952,7 @@ getExpandedMacro(SourceLocation MacroLoc, const Preprocessor &PP,
std::string MacroName = getMacroNameAndPrintExpansion(
Printer, MacroLoc, *PPToUse, MacroArgMap{}, AlreadyProcessedTokens);
- return { MacroName, OS.str() };
+ return {MacroName, std::string(OS.str())};
}
static std::string getMacroNameAndPrintExpansion(
@@ -960,9 +973,8 @@ static std::string getMacroNameAndPrintExpansion(
// in this case we don't get the full expansion text in the Plist file. See
// the test file where "value" is expanded to "garbage_" instead of
// "garbage_value".
- if (AlreadyProcessedTokens.find(IDInfo) != AlreadyProcessedTokens.end())
+ if (!AlreadyProcessedTokens.insert(IDInfo).second)
return Info.Name;
- AlreadyProcessedTokens.insert(IDInfo);
if (!Info.MI)
return Info.Name;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 14006f79fd0f..006a4006b7fc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -16,8 +16,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -76,12 +76,12 @@ ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
StoreManagerCreator CreateSMgr,
ConstraintManagerCreator CreateCMgr,
llvm::BumpPtrAllocator &alloc,
- SubEngine *SubEng)
- : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
+ ExprEngine *ExprEng)
+ : Eng(ExprEng), EnvMgr(alloc), GDMFactory(alloc),
svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
StoreMgr = (*CreateSMgr)(*this);
- ConstraintMgr = (*CreateCMgr)(*this, SubEng);
+ ConstraintMgr = (*CreateCMgr)(*this, ExprEng);
}
@@ -189,7 +189,7 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
RegionAndSymbolInvalidationTraits *ITraits,
const CallEvent *Call) const {
ProgramStateManager &Mgr = getStateManager();
- SubEngine &Eng = Mgr.getOwningEngine();
+ ExprEngine &Eng = Mgr.getOwningEngine();
InvalidatedSymbols InvalidatedSyms;
if (!IS)
@@ -240,6 +240,13 @@ ProgramState::enterStackFrame(const CallEvent &Call,
return makeWithStore(NewStore);
}
+SVal ProgramState::getSelfSVal(const LocationContext *LCtx) const {
+ const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+ if (!SelfDecl)
+ return SVal();
+ return getSVal(getRegion(SelfDecl, LCtx));
+}
+
SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
// We only want to do fetches from regions that we can actually bind
// values. For example, SymbolicRegions of type 'id<...>' cannot
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 9752a0e22832..cb6f61e86ae3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/raw_ostream.h"
@@ -23,10 +24,89 @@
using namespace clang;
using namespace ento;
+// This class can be extended with other tables which will help to reason
+// about ranges more precisely.
+class OperatorRelationsTable {
+ static_assert(BO_LT < BO_GT && BO_GT < BO_LE && BO_LE < BO_GE &&
+ BO_GE < BO_EQ && BO_EQ < BO_NE,
+ "This class relies on operators order. Rework it otherwise.");
+
+public:
+ enum TriStateKind {
+ False = 0,
+ True,
+ Unknown,
+ };
+
+private:
+ // CmpOpTable holds states which represent the corresponding range for
+ // branching an exploded graph. We can reason about the branch if there is
+ // a previously known fact of the existence of a comparison expression with
+ // operands used in the current expression.
+ // E.g. if (x < y) is known to be true, then (x != y) is surely true.
+ // if (x previous_operation y) // < | != | >
+ // if (x operation y) // != | > | <
+ // tristate // True | Unknown | False
+ //
+ // CmpOpTable represents the following:
+ // __|< |> |<=|>=|==|!=|UnknownX2|
+ // < |1 |0 |* |0 |0 |* |1 |
+ // > |0 |1 |0 |* |0 |* |1 |
+ // <=|1 |0 |1 |* |1 |* |0 |
+ // >=|0 |1 |* |1 |1 |* |0 |
+ // ==|0 |0 |* |* |1 |0 |1 |
+ // !=|1 |1 |* |* |0 |1 |0 |
+ //
+ // Columns stand for the previous operator.
+ // Rows stand for the current operator.
+ // Each row has exactly two `Unknown` cases.
+ // UnknownX2 means that both `Unknown` previous operators have been met in
+ // the code, and there is a special column for that case, for example:
+ // if (x >= y) // assume true
+ // if (x != y) // assume true
+ // if (x <= y) // can only be False
+ static constexpr size_t CmpOpCount = BO_NE - BO_LT + 1;
+ const TriStateKind CmpOpTable[CmpOpCount][CmpOpCount + 1] = {
+ // < > <= >= == != UnknownX2
+ {True, False, Unknown, False, False, Unknown, True}, // <
+ {False, True, False, Unknown, False, Unknown, True}, // >
+ {True, False, True, Unknown, True, Unknown, False}, // <=
+ {False, True, Unknown, True, True, Unknown, False}, // >=
+ {False, False, Unknown, Unknown, True, False, True}, // ==
+ {True, True, Unknown, Unknown, False, True, False}, // !=
+ };
+
+ static size_t getIndexFromOp(BinaryOperatorKind OP) {
+ return static_cast<size_t>(OP - BO_LT);
+ }
+
+public:
+ constexpr size_t getCmpOpCount() const { return CmpOpCount; }
+
+ static BinaryOperatorKind getOpFromIndex(size_t Index) {
+ return static_cast<BinaryOperatorKind>(Index + BO_LT);
+ }
+
+ TriStateKind getCmpOpState(BinaryOperatorKind CurrentOP,
+ BinaryOperatorKind QueriedOP) const {
+ return CmpOpTable[getIndexFromOp(CurrentOP)][getIndexFromOp(QueriedOP)];
+ }
+
+ TriStateKind getCmpOpStateForUnknownX2(BinaryOperatorKind CurrentOP) const {
+ return CmpOpTable[getIndexFromOp(CurrentOP)][CmpOpCount];
+ }
+};
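A brief usage sketch (illustration only, not part of the patch) of how the table above is queried, the same way getRangeForComparisonSymbol() below does it. It assumes the snippet sits in this same file so that OperatorRelationsTable and clang's BinaryOperatorKind enumerators are in scope; exampleTableLookup is a made-up name and <cassert> is the only extra dependency.

#include <cassert>

static void exampleTableLookup() {
  static const OperatorRelationsTable Table{};

  // Row = current operator, column = previously known operator.
  // Known fact: (x < y) is true. Current condition: (x != y).
  OperatorRelationsTable::TriStateKind State =
      Table.getCmpOpState(/*CurrentOP=*/BO_NE, /*QueriedOP=*/BO_LT);
  assert(State == OperatorRelationsTable::True && "(x < y) implies (x != y)");

  // A contradictory pair: (x < y) is known true, current condition is (x > y).
  State = Table.getCmpOpState(/*CurrentOP=*/BO_GT, /*QueriedOP=*/BO_LT);
  assert(State == OperatorRelationsTable::False && "(x < y) refutes (x > y)");
}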
+//===----------------------------------------------------------------------===//
+// RangeSet implementation
+//===----------------------------------------------------------------------===//
+
void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower, const llvm::APSInt &Upper,
- PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
- PrimRangeSet::iterator &e) const {
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
// There are six cases for each range R in the set:
// 1. R is entirely before the intersection range.
// 2. R is entirely after the intersection range.
@@ -62,10 +142,27 @@ void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
const llvm::APSInt &RangeSet::getMinValue() const {
assert(!isEmpty());
- return ranges.begin()->From();
+ return begin()->From();
+}
+
+const llvm::APSInt &RangeSet::getMaxValue() const {
+ assert(!isEmpty());
+ // NOTE: It's a shame that we can't implement 'getMaxValue' without scanning
+ // the whole tree to get to the last element.
+ // llvm::ImmutableSet should support decrement for 'end' iterators
+ // or reverse order iteration.
+ auto It = begin();
+ for (auto End = end(); std::next(It) != End; ++It) {
+ }
+ return It->To();
}
bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+ if (isEmpty()) {
+ // This range is already infeasible.
+ return false;
+ }
+
// This function has nine cases, the cartesian product of range-testing
// both the upper and lower bounds against the symbol's type.
// Each case requires a different pinning operation.
@@ -155,11 +252,11 @@ bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
// or, alternatively, /removing/ all integers between Upper and Lower.
RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
llvm::APSInt Lower, llvm::APSInt Upper) const {
- if (!pin(Lower, Upper))
- return F.getEmptySet();
-
PrimRangeSet newRanges = F.getEmptySet();
+ if (isEmpty() || !pin(Lower, Upper))
+ return newRanges;
+
PrimRangeSet::iterator i = begin(), e = end();
if (Lower <= Upper)
IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
@@ -190,33 +287,78 @@ RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
return newRanges;
}
-// Turn all [A, B] ranges to [-B, -A]. Ranges [MIN, B] are turned to range set
-// [MIN, MIN] U [-B, MAX], when MIN and MAX are the minimal and the maximal
-// signed values of the type.
+// Turn all [A, B] ranges to [-B, -A], when "-" is a C-like unary minus
+// operation under the values of the type.
+//
+// We also handle MIN because applying unary minus to MIN does not change it.
+// Example 1:
+// char x = -128; // -128 is a MIN value in a range of 'char'
+// char y = -x; // y: -128
+// Example 2:
+// unsigned char x = 0; // 0 is a MIN value in a range of 'unsigned char'
+// unsigned char y = -x; // y: 0
+//
+// And it makes us separate a range
+// like [MIN, N] into [MIN, MIN] U [-N, MAX].
+// For instance, the whole range is {-128..127} and the subrange is [-128,-126];
+// thus {-128,-127,-126} negates to {-128} U {126,127}.
+//
+// Negate restores disrupted ranges on bounds,
+// e.g. [MIN, B] => [MIN, MIN] U [-B, MAX] => [MIN, B].
RangeSet RangeSet::Negate(BasicValueFactory &BV, Factory &F) const {
PrimRangeSet newRanges = F.getEmptySet();
- for (iterator i = begin(), e = end(); i != e; ++i) {
- const llvm::APSInt &from = i->From(), &to = i->To();
- const llvm::APSInt &newTo = (from.isMinSignedValue() ?
- BV.getMaxValue(from) :
- BV.getValue(- from));
- if (to.isMaxSignedValue() && !newRanges.isEmpty() &&
- newRanges.begin()->From().isMinSignedValue()) {
- assert(newRanges.begin()->To().isMinSignedValue() &&
- "Ranges should not overlap");
- assert(!from.isMinSignedValue() && "Ranges should not overlap");
- const llvm::APSInt &newFrom = newRanges.begin()->From();
- newRanges =
- F.add(F.remove(newRanges, *newRanges.begin()), Range(newFrom, newTo));
- } else if (!to.isMinSignedValue()) {
- const llvm::APSInt &newFrom = BV.getValue(- to);
- newRanges = F.add(newRanges, Range(newFrom, newTo));
- }
- if (from.isMinSignedValue()) {
- newRanges = F.add(newRanges, Range(BV.getMinValue(from),
- BV.getMinValue(from)));
+ if (isEmpty())
+ return newRanges;
+
+ const llvm::APSInt sampleValue = getMinValue();
+ const llvm::APSInt &MIN = BV.getMinValue(sampleValue);
+ const llvm::APSInt &MAX = BV.getMaxValue(sampleValue);
+
+ // Handle a special case for MIN value.
+ iterator i = begin();
+ const llvm::APSInt &from = i->From();
+ const llvm::APSInt &to = i->To();
+ if (from == MIN) {
+ // If [from, to] are [MIN, MAX], then just return the same [MIN, MAX].
+ if (to == MAX) {
+ newRanges = ranges;
+ } else {
+ // Add separate range for the lowest value.
+ newRanges = F.add(newRanges, Range(MIN, MIN));
+ // Skip adding the second range in case when [from, to] are [MIN, MIN].
+ if (to != MIN) {
+ newRanges = F.add(newRanges, Range(BV.getValue(-to), MAX));
+ }
}
+ // Skip the first range in the loop.
+ ++i;
+ }
+
+ // Negate all other ranges.
+ for (iterator e = end(); i != e; ++i) {
+ // Negate int values.
+ const llvm::APSInt &newFrom = BV.getValue(-i->To());
+ const llvm::APSInt &newTo = BV.getValue(-i->From());
+ // Add a negated range.
+ newRanges = F.add(newRanges, Range(newFrom, newTo));
+ }
+
+ if (newRanges.isSingleton())
+ return newRanges;
+
+ // Try to find and unite next ranges:
+ // [MIN, MIN] & [MIN + 1, N] => [MIN, N].
+ iterator iter1 = newRanges.begin();
+ iterator iter2 = std::next(iter1);
+
+ if (iter1->To() == MIN && (iter2->From() - 1) == MIN) {
+ const llvm::APSInt &to = iter2->To();
+ // remove adjacent ranges
+ newRanges = F.remove(newRanges, *iter1);
+ newRanges = F.remove(newRanges, *newRanges.begin());
+ // add united range
+ newRanges = F.add(newRanges, Range(MIN, to));
}
return newRanges;
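A minimal standalone model (illustration only, not part of the patch) of the negation rule described above, using plain int8_t intervals. It mirrors the [A, B] -> [-B, -A] mapping and the special [MIN, MIN] split, but deliberately skips the final re-merging of adjacent ranges that Negate() performs; Range8 and negate8 are made-up names for the sketch.

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using Range8 = std::pair<int8_t, int8_t>; // closed interval [first, second]

// Negate a set of closed int8_t ranges: [A, B] -> [-B, -A], keeping INT8_MIN
// in its own [MIN, MIN] range because -INT8_MIN is not representable.
static std::vector<Range8> negate8(const std::vector<Range8> &In) {
  std::vector<Range8> Out;
  for (const Range8 &R : In) {
    if (R.first == INT8_MIN) {
      Out.push_back({INT8_MIN, INT8_MIN});
      if (R.second != INT8_MIN)
        Out.push_back({static_cast<int8_t>(-R.second), INT8_MAX});
    } else {
      Out.push_back({static_cast<int8_t>(-R.second),
                     static_cast<int8_t>(-R.first)});
    }
  }
  return Out;
}

int main() {
  // [-128, -126] negates to [-128, -128] U [126, 127], as in the comment above.
  auto R = negate8({{INT8_MIN, -126}});
  assert((R == std::vector<Range8>{{INT8_MIN, INT8_MIN}, {126, INT8_MAX}}));
  // Negating again gives the original values back, still split at MIN; the
  // real Negate() re-merges the two adjacent pieces into [-128, -126].
  assert((negate8(R) == std::vector<Range8>{{INT8_MIN, INT8_MIN}, {-127, -126}}));
}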
@@ -238,10 +380,534 @@ void RangeSet::print(raw_ostream &os) const {
}
namespace {
+
+/// A little component aggregating all of the reasoning we have about
+/// the ranges of symbolic expressions.
+///
+/// Even when we don't know the exact values of the operands, we still
+/// can get a pretty good estimate of the result's range.
+class SymbolicRangeInferrer
+ : public SymExprVisitor<SymbolicRangeInferrer, RangeSet> {
+public:
+ static RangeSet inferRange(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, SymbolRef Sym) {
+ SymbolicRangeInferrer Inferrer(BV, F, State);
+ return Inferrer.infer(Sym);
+ }
+
+ RangeSet VisitSymExpr(SymbolRef Sym) {
+ // If we got to this function, the actual type of the symbolic
+ // expression is not supported for advanced inference.
+ // In this case, we simply back off to the default behavior of
+ // inferring the range from the expression's type.
+ return infer(Sym->getType());
+ }
+
+ RangeSet VisitSymIntExpr(const SymIntExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+ RangeSet VisitIntSymExpr(const IntSymExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+ RangeSet VisitSymSymExpr(const SymSymExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+private:
+ SymbolicRangeInferrer(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef S)
+ : ValueFactory(BV), RangeFactory(F), State(S) {}
+
+ /// Infer range information from the given integer constant.
+ ///
+ /// It's not a real "inference", but it is here to operate on
+ /// sub-expressions in a more polymorphic manner.
+ RangeSet inferAs(const llvm::APSInt &Val, QualType) {
+ return {RangeFactory, Val};
+ }
+
+ /// Infer range information from symbol in the context of the given type.
+ RangeSet inferAs(SymbolRef Sym, QualType DestType) {
+ QualType ActualType = Sym->getType();
+ // Check that we can reason about the symbol at all.
+ if (ActualType->isIntegralOrEnumerationType() ||
+ Loc::isLocType(ActualType)) {
+ return infer(Sym);
+ }
+ // Otherwise, let's simply infer from the destination type.
+ // We couldn't figure out anything else about that expression.
+ return infer(DestType);
+ }
+
+ RangeSet infer(SymbolRef Sym) {
+ const RangeSet *AssociatedRange = State->get<ConstraintRange>(Sym);
+
+ // If Sym is a difference of symbols A - B, then maybe we have range set
+ // stored for B - A.
+ const RangeSet *RangeAssociatedWithNegatedSym =
+ getRangeForMinusSymbol(State, Sym);
+
+ // If we have range set stored for both A - B and B - A then calculate the
+ // effective range set by intersecting the range set for A - B and the
+ // negated range set of B - A.
+ if (AssociatedRange && RangeAssociatedWithNegatedSym)
+ return AssociatedRange->Intersect(
+ ValueFactory, RangeFactory,
+ RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory));
+
+ if (AssociatedRange)
+ return *AssociatedRange;
+
+ if (RangeAssociatedWithNegatedSym)
+ return RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory);
+
+ // If Sym is a comparison expression (except <=>),
+ // find any other comparisons with the same operands.
+ // See function description.
+ const RangeSet CmpRangeSet = getRangeForComparisonSymbol(State, Sym);
+ if (!CmpRangeSet.isEmpty())
+ return CmpRangeSet;
+
+ return Visit(Sym);
+ }
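Worked example (illustration only, not part of the patch): suppose the constraint map stores a range only for B - A, say [3, 7], and the query is about A - B. Negating the stored range gives A - B in [-7, -3]. If a range for A - B itself, say [-10, -5], is stored as well, the two facts are intersected, so the effective range for A - B becomes [-7, -5].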
+
+ /// Infer range information solely from the type.
+ RangeSet infer(QualType T) {
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ RangeSet Result(RangeFactory, ValueFactory.getMinValue(T),
+ ValueFactory.getMaxValue(T));
+
+ // References are known to be non-zero.
+ if (T->isReferenceType())
+ return assumeNonZero(Result, T);
+
+ return Result;
+ }
+
+ template <class BinarySymExprTy>
+ RangeSet VisitBinaryOperator(const BinarySymExprTy *Sym) {
+ // TODO #1: The VisitBinaryOperator implementation might not make good
+ // use of the inferred ranges. In this case, we might be calculating
+ // everything for nothing. This being said, we should introduce some
+ // sort of laziness mechanism here.
+ //
+ // TODO #2: We didn't go into the nested expressions before, so it
+ // might cause us to spend much more time doing the inference.
+ // This can be a problem for deeply nested expressions that are
+ // involved in conditions and get tested continuously. We definitely
+ // need to address this issue and introduce some sort of caching
+ // in here.
+ QualType ResultType = Sym->getType();
+ return VisitBinaryOperator(inferAs(Sym->getLHS(), ResultType),
+ Sym->getOpcode(),
+ inferAs(Sym->getRHS(), ResultType), ResultType);
+ }
+
+ RangeSet VisitBinaryOperator(RangeSet LHS, BinaryOperator::Opcode Op,
+ RangeSet RHS, QualType T) {
+ switch (Op) {
+ case BO_Or:
+ return VisitBinaryOperator<BO_Or>(LHS, RHS, T);
+ case BO_And:
+ return VisitBinaryOperator<BO_And>(LHS, RHS, T);
+ case BO_Rem:
+ return VisitBinaryOperator<BO_Rem>(LHS, RHS, T);
+ default:
+ return infer(T);
+ }
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Ranges and operators
+ //===----------------------------------------------------------------------===//
+
+ /// Return a rough approximation of the given range set.
+ ///
+ /// For the range set:
+ /// { [x_0, y_0], [x_1, y_1], ... , [x_N, y_N] }
+ /// it will return the range [x_0, y_N].
+ static Range fillGaps(RangeSet Origin) {
+ assert(!Origin.isEmpty());
+ return {Origin.getMinValue(), Origin.getMaxValue()};
+ }
+
+ /// Try to convert given range into the given type.
+ ///
+ /// It will return llvm::None only when a trivial conversion is not possible.
+ llvm::Optional<Range> convert(const Range &Origin, APSIntType To) {
+ if (To.testInRange(Origin.From(), false) != APSIntType::RTR_Within ||
+ To.testInRange(Origin.To(), false) != APSIntType::RTR_Within) {
+ return llvm::None;
+ }
+ return Range(ValueFactory.Convert(To, Origin.From()),
+ ValueFactory.Convert(To, Origin.To()));
+ }
+
+ template <BinaryOperator::Opcode Op>
+ RangeSet VisitBinaryOperator(RangeSet LHS, RangeSet RHS, QualType T) {
+ // We should propagate information about the infeasibility of one of the
+ // operands to the resulting range.
+ if (LHS.isEmpty() || RHS.isEmpty()) {
+ return RangeFactory.getEmptySet();
+ }
+
+ Range CoarseLHS = fillGaps(LHS);
+ Range CoarseRHS = fillGaps(RHS);
+
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+
+ // We need to convert ranges to the resulting type, so we can compare values
+ // and combine them in a meaningful (in terms of the given operation) way.
+ auto ConvertedCoarseLHS = convert(CoarseLHS, ResultType);
+ auto ConvertedCoarseRHS = convert(CoarseRHS, ResultType);
+
+ // It is hard to reason about ranges when conversion changes
+ // borders of the ranges.
+ if (!ConvertedCoarseLHS || !ConvertedCoarseRHS) {
+ return infer(T);
+ }
+
+ return VisitBinaryOperator<Op>(*ConvertedCoarseLHS, *ConvertedCoarseRHS, T);
+ }
+
+ template <BinaryOperator::Opcode Op>
+ RangeSet VisitBinaryOperator(Range LHS, Range RHS, QualType T) {
+ return infer(T);
+ }
+
+ /// Return a symmetrical range for the given range and type.
+ ///
+ /// If T is signed, return the smallest range [-x..x] that covers the original
+ /// range, or [min(T), max(T)] if the aforementioned symmetric range doesn't
+ /// exist due to the original range covering min(T).
+ ///
+ /// If T is unsigned, return the smallest range [0..x] that covers the
+ /// original range.
+ Range getSymmetricalRange(Range Origin, QualType T) {
+ APSIntType RangeType = ValueFactory.getAPSIntType(T);
+
+ if (RangeType.isUnsigned()) {
+ return Range(ValueFactory.getMinValue(RangeType), Origin.To());
+ }
+
+ if (Origin.From().isMinSignedValue()) {
+ // If the minimum is the minimal signed value, its absolute value is
+ // greater than the maximal signed value. In order to avoid these
+ // complications, we simply return the whole range.
+ return {ValueFactory.getMinValue(RangeType),
+ ValueFactory.getMaxValue(RangeType)};
+ }
+
+ // At this point, we are sure that the type is signed and we can safely
+ // use unary - operator.
+ //
+ // While calculating absolute maximum, we can use the following formula
+ // because of these reasons:
+ // * If From >= 0 then To >= From and To >= -From.
+ // AbsMax == To == max(To, -From)
+ // * If To <= 0 then -From >= -To and -From >= From.
+ // AbsMax == -From == max(-From, To)
+ // * Otherwise, From <= 0, To >= 0, and
+ // AbsMax == max(abs(From), abs(To))
+ llvm::APSInt AbsMax = std::max(-Origin.From(), Origin.To());
+
+ // Intersection is guaranteed to be non-empty.
+ return {ValueFactory.getValue(-AbsMax), ValueFactory.getValue(AbsMax)};
+ }
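Worked example (illustration only, not part of the patch): for a signed 32-bit type, getSymmetricalRange([-5, 3]) yields [-5, 5] (AbsMax = max(5, 3)) and getSymmetricalRange([2, 7]) yields [-7, 7]. For an unsigned type, getSymmetricalRange([3, 9]) yields [0, 9]. If the input range already contains the minimal signed value, the whole [min(T), max(T)] range is returned instead.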
+
+ /// Return a range set subtracting zero from \p Domain.
+ RangeSet assumeNonZero(RangeSet Domain, QualType T) {
+ APSIntType IntType = ValueFactory.getAPSIntType(T);
+ return Domain.Intersect(ValueFactory, RangeFactory,
+ ++IntType.getZeroValue(), --IntType.getZeroValue());
+ }
+
+ // FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
+ // obtain the negated symbolic expression instead of constructing the
+ // symbol manually. This will allow us to support finding ranges of not
+ // only negated SymSymExpr-type expressions, but also of other, simpler
+ // expressions which we currently do not know how to negate.
+ const RangeSet *getRangeForMinusSymbol(ProgramStateRef State, SymbolRef Sym) {
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
+ if (SSE->getOpcode() == BO_Sub) {
+ QualType T = Sym->getType();
+ SymbolManager &SymMgr = State->getSymbolManager();
+ SymbolRef negSym =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
+
+ if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
+ // Unsigned range set cannot be negated, unless it is [0, 0].
+ if (T->isUnsignedIntegerOrEnumerationType() ||
+ T->isSignedIntegerOrEnumerationType())
+ return negV;
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ // Returns ranges only for binary comparison operators (except <=>)
+ // when left and right operands are symbolic values.
+ // Finds any other comparisons with the same operands.
+ // Then it performs logical calculations and rejects impossible branches.
+ // E.g. (x < y) and (x > y) at the same time are impossible.
+ // E.g. (x >= y) and (x != y) at the same time makes (x > y) true only.
+ // E.g. (x == y) and (y == x) are just reversed but the same.
+ // It covers all possible combinations (see CmpOpTable description).
+ // Note that `x` and `y` can also stand for subexpressions,
+ // not only for actual symbols.
+ RangeSet getRangeForComparisonSymbol(ProgramStateRef State, SymbolRef Sym) {
+ const RangeSet EmptyRangeSet = RangeFactory.getEmptySet();
+
+ auto SSE = dyn_cast<SymSymExpr>(Sym);
+ if (!SSE)
+ return EmptyRangeSet;
+
+ BinaryOperatorKind CurrentOP = SSE->getOpcode();
+
+ // We currently do not support <=> (C++20).
+ if (!BinaryOperator::isComparisonOp(CurrentOP) || (CurrentOP == BO_Cmp))
+ return EmptyRangeSet;
+
+ static const OperatorRelationsTable CmpOpTable{};
+
+ const SymExpr *LHS = SSE->getLHS();
+ const SymExpr *RHS = SSE->getRHS();
+ QualType T = SSE->getType();
+
+ SymbolManager &SymMgr = State->getSymbolManager();
+ const llvm::APSInt &Zero = ValueFactory.getValue(0, T);
+ const llvm::APSInt &One = ValueFactory.getValue(1, T);
+ const RangeSet TrueRangeSet(RangeFactory, One, One);
+ const RangeSet FalseRangeSet(RangeFactory, Zero, Zero);
+
+ int UnknownStates = 0;
+
+ // The loop goes through all of the columns except the last one ('UnknownX2').
+ // We treat the `UnknownX2` column separately at the end of the loop body.
+ for (size_t i = 0; i < CmpOpTable.getCmpOpCount(); ++i) {
+
+ // Let's find an expression e.g. (x < y).
+ BinaryOperatorKind QueriedOP = OperatorRelationsTable::getOpFromIndex(i);
+ const SymSymExpr *SymSym = SymMgr.getSymSymExpr(LHS, QueriedOP, RHS, T);
+ const RangeSet *QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+
+ // If ranges were not previously found,
+ // try to find a reversed expression (y > x).
+ if (!QueriedRangeSet) {
+ const BinaryOperatorKind ROP =
+ BinaryOperator::reverseComparisonOp(QueriedOP);
+ SymSym = SymMgr.getSymSymExpr(RHS, ROP, LHS, T);
+ QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+ }
+
+ if (!QueriedRangeSet || QueriedRangeSet->isEmpty())
+ continue;
+
+ const llvm::APSInt *ConcreteValue = QueriedRangeSet->getConcreteValue();
+ const bool isInFalseBranch =
+ ConcreteValue ? (*ConcreteValue == 0) : false;
+
+ // If it is a false branch, we shall be guided by the opposite operator,
+ // because the table is made assuming we are in the true branch.
+ // E.g. when (x <= y) is false, then (x > y) is true.
+ if (isInFalseBranch)
+ QueriedOP = BinaryOperator::negateComparisonOp(QueriedOP);
+
+ OperatorRelationsTable::TriStateKind BranchState =
+ CmpOpTable.getCmpOpState(CurrentOP, QueriedOP);
+
+ if (BranchState == OperatorRelationsTable::Unknown) {
+ if (++UnknownStates == 2)
+ // If we have met both Unknown states, e.g.:
+ // if (x <= y) // assume true
+ // if (x != y) // assume true
+ // if (x < y) // would also be true
+ // then get a state from the `UnknownX2` column.
+ BranchState = CmpOpTable.getCmpOpStateForUnknownX2(CurrentOP);
+ else
+ continue;
+ }
+
+ return (BranchState == OperatorRelationsTable::True) ? TrueRangeSet
+ : FalseRangeSet;
+ }
+
+ return EmptyRangeSet;
+ }
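Worked example (illustration only, not part of the patch), tying the loop above to CmpOpTable: assume the state already records (x >= y) as true and (x != y) as true, and the current condition is (x <= y). Both queried columns ('>=' and '!=') are Unknown in the '<=' row, so UnknownStates reaches 2 and the 'UnknownX2' column is consulted, which holds False for '<='. The function therefore returns the [0, 0] range set, i.e. (x <= y) cannot be true on this path.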
+
+ BasicValueFactory &ValueFactory;
+ RangeSet::Factory &RangeFactory;
+ ProgramStateRef State;
+};
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Or>(Range LHS, Range RHS,
+ QualType T) {
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+ llvm::APSInt Zero = ResultType.getZeroValue();
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ bool IsLHSNegative = LHS.To() < Zero;
+ bool IsRHSNegative = RHS.To() < Zero;
+
+ // Check if both ranges have the same sign.
+ if ((IsLHSPositiveOrZero && IsRHSPositiveOrZero) ||
+ (IsLHSNegative && IsRHSNegative)) {
+ // The result is definitely greater than or equal to either of the operands.
+ const llvm::APSInt &Min = std::max(LHS.From(), RHS.From());
+
+ // We estimate the maximal value for positives as the maximal value of the
+ // given type. For negatives, we estimate it with -1 (i.e. the all-ones
+ // bit pattern, 0xFF...FF).
+ //
+ // TODO: We basically limit the resulting range from below, but don't do
+ // anything with the upper bound.
+ //
+ // For positive operands, it can be done as follows: for the upper
+ // bound of LHS and RHS we calculate the most significant bit set.
+ // Let's call it the N-th bit. Then we can estimate the maximal
+ // number to be 2^(N+1)-1, i.e. the number with all the bits up to
+ // the N-th bit set.
+ const llvm::APSInt &Max = IsLHSNegative
+ ? ValueFactory.getValue(--Zero)
+ : ValueFactory.getMaxValue(ResultType);
+
+ return {RangeFactory, ValueFactory.getValue(Min), Max};
+ }
+
+ // Otherwise, let's check if at least one of the operands is negative.
+ if (IsLHSNegative || IsRHSNegative) {
+ // This means that the result is definitely negative as well.
+ return {RangeFactory, ValueFactory.getMinValue(ResultType),
+ ValueFactory.getValue(--Zero)};
+ }
+
+ RangeSet DefaultRange = infer(T);
+
+ // It is pretty hard to reason about operands with different signs
+ // (and especially with possibly different signs). We simply check if it
+ // can be zero. In order to conclude that the result could not be zero,
+ // at least one of the operands should be definitely not zero itself.
+ if (!LHS.Includes(Zero) || !RHS.Includes(Zero)) {
+ return assumeNonZero(DefaultRange, T);
+ }
+
+ // Nothing much else to do here.
+ return DefaultRange;
+}
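Worked example (illustration only, not part of the patch): with 32-bit ints, if LHS is in [1, 6] and RHS is in [8, 24], both ranges are non-negative, so LHS | RHS is inferred to lie in [8, INT_MAX]; the lower bound is max(1, 8). If instead LHS is in [-20, -5] (definitely negative), the result must have the sign bit set, so it is inferred to be in [INT_MIN, -1] regardless of RHS.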
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_And>(Range LHS,
+ Range RHS,
+ QualType T) {
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+ llvm::APSInt Zero = ResultType.getZeroValue();
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ bool IsLHSNegative = LHS.To() < Zero;
+ bool IsRHSNegative = RHS.To() < Zero;
+
+ // Check if both ranges have the same sign.
+ if ((IsLHSPositiveOrZero && IsRHSPositiveOrZero) ||
+ (IsLHSNegative && IsRHSNegative)) {
+ // The result is definitely less than or equal to either of the operands.
+ const llvm::APSInt &Max = std::min(LHS.To(), RHS.To());
+
+ // We conservatively estimate lower bound to be the smallest positive
+ // or negative value corresponding to the sign of the operands.
+ const llvm::APSInt &Min = IsLHSNegative
+ ? ValueFactory.getMinValue(ResultType)
+ : ValueFactory.getValue(Zero);
+
+ return {RangeFactory, Min, Max};
+ }
+
+ // Otherwise, let's check if at least one of the operands is positive.
+ if (IsLHSPositiveOrZero || IsRHSPositiveOrZero) {
+ // This makes result definitely positive.
+ //
+ // We can also reason about a maximal value by finding the maximal
+ // value of the positive operand.
+ const llvm::APSInt &Max = IsLHSPositiveOrZero ? LHS.To() : RHS.To();
+
+ // The minimal value on the other hand is much harder to reason about.
+ // The only thing we know for sure is that the result is positive.
+ return {RangeFactory, ValueFactory.getValue(Zero),
+ ValueFactory.getValue(Max)};
+ }
+
+ // Nothing much else to do here.
+ return infer(T);
+}
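Worked example (illustration only, not part of the patch): with 32-bit ints, if LHS is in [1, 6] and RHS is in [8, 24], both ranges are non-negative, so LHS & RHS is inferred to lie in [0, 6]; the upper bound is min(6, 24). If only RHS is known to be non-negative, say RHS in [0, 15] with LHS unconstrained, the result is still inferred to be in [0, 15].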
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Rem>(Range LHS,
+ Range RHS,
+ QualType T) {
+ llvm::APSInt Zero = ValueFactory.getAPSIntType(T).getZeroValue();
+
+ Range ConservativeRange = getSymmetricalRange(RHS, T);
+
+ llvm::APSInt Max = ConservativeRange.To();
+ llvm::APSInt Min = ConservativeRange.From();
+
+ if (Max == Zero) {
+ // It's undefined behaviour to divide by 0, and it seems like we know
+ // for sure that RHS is 0. Let's say that the resulting range is
+ // simply infeasible in that case.
+ return RangeFactory.getEmptySet();
+ }
+
+ // At this point, our conservative range is closed. The result, however,
+ // cannot reach the RHS' maximal absolute value. For this reason, we turn
+ // the range into an open one (or a half-open one in the case of unsigned
+ // integers).
+ //
+ // While we operate on integer values, an open interval (a, b) can be easily
+ // represented by the closed interval [a + 1, b - 1]. And this is exactly
+ // what we do next.
+ //
+ // If we are dealing with unsigned case, we shouldn't move the lower bound.
+ if (Min.isSigned()) {
+ ++Min;
+ }
+ --Max;
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ // The result of the remainder operator with negative operands is
+ // implementation-defined. Positive cases are much easier to reason
+ // about though.
+ if (IsLHSPositiveOrZero && IsRHSPositiveOrZero) {
+ // If maximal value of LHS is less than maximal value of RHS,
+ // the result won't get greater than LHS.To().
+ Max = std::min(LHS.To(), Max);
+ // We want to check if it is a situation similar to the following:
+ //
+ // <------------|---[ LHS ]--------[ RHS ]----->
+ // -INF 0 +INF
+ //
+ // In this situation, we can conclude that (LHS / RHS) == 0 and
+ // (LHS % RHS) == LHS.
+ Min = LHS.To() < RHS.From() ? LHS.From() : Zero;
+ }
+
+ // Nevertheless, the symmetrical range for RHS is a conservative estimate
+ // for any sign of either LHS, or RHS.
+ return {RangeFactory, ValueFactory.getValue(Min), ValueFactory.getValue(Max)};
+}
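Worked example (illustration only, not part of the patch): with 32-bit ints, if LHS is in [0, 10] and RHS is in [1, 20], the symmetric bound derived from RHS is [-20, 20], which is opened to [-19, 19]; both operands are non-negative, so the upper bound tightens to min(10, 19) = 10 and the lower bound to 0, giving LHS % RHS in [0, 10]. If LHS is in [2, 4] and RHS is in [10, 20], then LHS.To() < RHS.From(), so LHS % RHS is inferred to stay exactly in [2, 4].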
+
class RangeConstraintManager : public RangedConstraintManager {
public:
- RangeConstraintManager(SubEngine *SE, SValBuilder &SVB)
- : RangedConstraintManager(SE, SVB) {}
+ RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
+ : RangedConstraintManager(EE, SVB) {}
//===------------------------------------------------------------------===//
// Implementation for interface from ConstraintManager.
@@ -305,8 +971,6 @@ private:
RangeSet::Factory F;
RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
- const RangeSet* getRangeForMinusSymbol(ProgramStateRef State,
- SymbolRef Sym);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
@@ -323,13 +987,13 @@ private:
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment);
-
};
} // end anonymous namespace
std::unique_ptr<ConstraintManager>
-ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr,
+ ExprEngine *Eng) {
return std::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
}
@@ -429,113 +1093,9 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
return Changed ? State->set<ConstraintRange>(CR) : State;
}
-/// Return a range set subtracting zero from \p Domain.
-static RangeSet assumeNonZero(
- BasicValueFactory &BV,
- RangeSet::Factory &F,
- SymbolRef Sym,
- RangeSet Domain) {
- APSIntType IntType = BV.getAPSIntType(Sym->getType());
- return Domain.Intersect(BV, F, ++IntType.getZeroValue(),
- --IntType.getZeroValue());
-}
-
-/// Apply implicit constraints for bitwise OR- and AND-.
-/// For unsigned types, bitwise OR with a constant always returns
-/// a value greater-or-equal than the constant, and bitwise AND
-/// returns a value less-or-equal then the constant.
-///
-/// Pattern matches the expression \p Sym against those rule,
-/// and applies the required constraints.
-/// \p Input Previously established expression range set
-static RangeSet applyBitwiseConstraints(
- BasicValueFactory &BV,
- RangeSet::Factory &F,
- RangeSet Input,
- const SymIntExpr* SIE) {
- QualType T = SIE->getType();
- bool IsUnsigned = T->isUnsignedIntegerType();
- const llvm::APSInt &RHS = SIE->getRHS();
- const llvm::APSInt &Zero = BV.getAPSIntType(T).getZeroValue();
- BinaryOperator::Opcode Operator = SIE->getOpcode();
-
- // For unsigned types, the output of bitwise-or is bigger-or-equal than RHS.
- if (Operator == BO_Or && IsUnsigned)
- return Input.Intersect(BV, F, RHS, BV.getMaxValue(T));
-
- // Bitwise-or with a non-zero constant is always non-zero.
- if (Operator == BO_Or && RHS != Zero)
- return assumeNonZero(BV, F, SIE, Input);
-
- // For unsigned types, or positive RHS,
- // bitwise-and output is always smaller-or-equal than RHS (assuming two's
- // complement representation of signed types).
- if (Operator == BO_And && (IsUnsigned || RHS >= Zero))
- return Input.Intersect(BV, F, BV.getMinValue(T), RHS);
-
- return Input;
-}
-
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
SymbolRef Sym) {
- ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym);
-
- // If Sym is a difference of symbols A - B, then maybe we have range set
- // stored for B - A.
- BasicValueFactory &BV = getBasicVals();
- const RangeSet *R = getRangeForMinusSymbol(State, Sym);
-
- // If we have range set stored for both A - B and B - A then calculate the
- // effective range set by intersecting the range set for A - B and the
- // negated range set of B - A.
- if (V && R)
- return V->Intersect(BV, F, R->Negate(BV, F));
- if (V)
- return *V;
- if (R)
- return R->Negate(BV, F);
-
- // Lazily generate a new RangeSet representing all possible values for the
- // given symbol type.
- QualType T = Sym->getType();
-
- RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
-
- // References are known to be non-zero.
- if (T->isReferenceType())
- return assumeNonZero(BV, F, Sym, Result);
-
- // Known constraints on ranges of bitwise expressions.
- if (const SymIntExpr* SIE = dyn_cast<SymIntExpr>(Sym))
- return applyBitwiseConstraints(BV, F, Result, SIE);
-
- return Result;
-}
-
-// FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
-// obtain the negated symbolic expression instead of constructing the
-// symbol manually. This will allow us to support finding ranges of not
-// only negated SymSymExpr-type expressions, but also of other, simpler
-// expressions which we currently do not know how to negate.
-const RangeSet*
-RangeConstraintManager::getRangeForMinusSymbol(ProgramStateRef State,
- SymbolRef Sym) {
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
- if (SSE->getOpcode() == BO_Sub) {
- QualType T = Sym->getType();
- SymbolManager &SymMgr = State->getSymbolManager();
- SymbolRef negSym = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
- SSE->getLHS(), T);
- if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
- // Unsigned range set cannot be negated, unless it is [0, 0].
- if ((negV->getConcreteValue() &&
- (*negV->getConcreteValue() == 0)) ||
- T->isSignedIntegerOrEnumerationType())
- return negV;
- }
- }
- }
- return nullptr;
+ return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Sym);
}
//===------------------------------------------------------------------------===
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 4797f564a837..57fde32bc01d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -23,10 +23,11 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
@@ -381,7 +382,7 @@ public:
: StoreManager(mgr), Features(f),
RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
SmallStructLimit(0) {
- SubEngine &Eng = StateMgr.getOwningEngine();
+ ExprEngine &Eng = StateMgr.getOwningEngine();
AnalyzerOptions &Options = Eng.getAnalysisManager().options;
SmallStructLimit = Options.RegionStoreSmallStructLimit;
}
@@ -622,15 +623,6 @@ public: // Part of public interface to class.
SymbolReaper& SymReaper) override;
//===------------------------------------------------------------------===//
- // Region "extents".
- //===------------------------------------------------------------------===//
-
- // FIXME: This method will soon be eliminated; see the note in Store.h.
- DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
- const MemRegion* R,
- QualType EleTy) override;
-
- //===------------------------------------------------------------------===//
// Utility methods.
//===------------------------------------------------------------------===//
@@ -876,7 +868,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// Find the length (in bits) of the region being invalidated.
uint64_t Length = UINT64_MAX;
- SVal Extent = Top->getExtent(SVB);
+ SVal Extent = Top->getMemRegionManager().getStaticSize(Top, SVB);
if (Optional<nonloc::ConcreteInt> ExtentCI =
Extent.getAs<nonloc::ConcreteInt>()) {
const llvm::APSInt &ExtentInt = ExtentCI->getValue();
@@ -1387,37 +1379,6 @@ RegionStoreManager::invalidateRegions(Store store,
}
//===----------------------------------------------------------------------===//
-// Extents for regions.
-//===----------------------------------------------------------------------===//
-
-DefinedOrUnknownSVal
-RegionStoreManager::getSizeInElements(ProgramStateRef state,
- const MemRegion *R,
- QualType EleTy) {
- SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
- const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
- if (!SizeInt)
- return UnknownVal();
-
- CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
-
- if (Ctx.getAsVariableArrayType(EleTy)) {
- // FIXME: We need to track extra state to properly record the size
- // of VLAs. Returning UnknownVal here, however, is a stop-gap so that
- // we don't have a divide-by-zero below.
- return UnknownVal();
- }
-
- CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
-
- // If a variable is reinterpreted as a type that doesn't fit into a larger
- // type evenly, round it down.
- // This is a signed value, since it's used in arithmetic with signed indices.
- return svalBuilder.makeIntVal(RegionSize / EleSize,
- svalBuilder.getArrayIndexType());
-}
-
-//===----------------------------------------------------------------------===//
// Location and region casting.
//===----------------------------------------------------------------------===//
@@ -1667,10 +1628,6 @@ RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const ElementRegion* R) {
- // We do not currently model bindings of the CompoundLiteralregion.
- if (isa<CompoundLiteralRegion>(R->getBaseRegion()))
- return UnknownVal();
-
// Check if the region has a binding.
if (const Optional<SVal> &V = B.getDirectBinding(R))
return *V;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
index 6ad12ca0a688..7395622a659c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
@@ -13,6 +13,6 @@ using namespace clang;
using namespace ento;
std::unique_ptr<ConstraintManager>
-ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, ExprEngine *Eng) {
return std::make_unique<SMTConstraintManager>(Eng, StMgr.getSValBuilder());
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 3a5841137e1a..c00a2c8ba8a2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -24,12 +24,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/APSInt.h"
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index 12332aaf936f..8c2e85601576 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
@@ -49,8 +50,14 @@ public:
void ento::createSarifDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
const std::string &Output, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (Output.empty())
+ return;
+
C.push_back(new SarifDiagnostics(AnalyzerOpts, Output, PP.getLangOpts()));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, Output, PP, CTU);
}
static StringRef getFileName(const FileEntry &FE) {
@@ -106,7 +113,7 @@ static std::string fileNameToURI(StringRef Filename) {
}
});
- return Ret.str().str();
+ return std::string(Ret);
}
static json::Object createArtifactLocation(const FileEntry &FE) {
@@ -322,7 +329,7 @@ static json::Object createRule(const PathDiagnostic &Diag) {
{"name", CheckName},
{"id", CheckName}};
- std::string RuleURI = getRuleHelpURIStr(CheckName);
+ std::string RuleURI = std::string(getRuleHelpURIStr(CheckName));
if (!RuleURI.empty())
Ret["helpUri"] = RuleURI;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 85f60231a276..3709106ad44c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -44,8 +44,8 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
NonLoc Cond, bool Assumption) {
State = assumeAux(State, Cond, Assumption);
- if (NotifyAssumeClients && SU)
- return SU->processAssume(State, Cond, Assumption);
+ if (NotifyAssumeClients && EE)
+ return EE->processAssume(State, Cond, Assumption);
return State;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 84c52f53ca5e..2e269f6a596e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -13,8 +13,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
using namespace clang;
@@ -652,6 +652,11 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
if (LHSValue == 0)
return evalCastFromNonLoc(lhs, resultTy);
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
+ case BO_Rem:
+ // 0 % x == 0
+ if (LHSValue == 0)
+ return makeZeroVal(resultTy);
+ LLVM_FALLTHROUGH;
default:
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index b33129c88cea..ea617bbeeba1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -134,7 +134,8 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::ObjCStringRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
deleted file mode 100644
index d7ddd9cf4610..000000000000
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
-
-using namespace clang::ento;
-
-void SubEngine::anchor() { }
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 675209f6fd7e..6ca7aec9caec 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -34,45 +34,27 @@ using namespace ento;
void SymExpr::anchor() {}
-LLVM_DUMP_METHOD void SymExpr::dump() const {
- dumpToStream(llvm::errs());
-}
+LLVM_DUMP_METHOD void SymExpr::dump() const { dumpToStream(llvm::errs()); }
-void SymIntExpr::dumpToStream(raw_ostream &os) const {
- os << '(';
- getLHS()->dumpToStream(os);
- os << ") "
- << BinaryOperator::getOpcodeStr(getOpcode()) << ' ';
- if (getRHS().isUnsigned())
- os << getRHS().getZExtValue();
- else
- os << getRHS().getSExtValue();
- if (getRHS().isUnsigned())
- os << 'U';
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS, const SymExpr *Sym) {
+ OS << '(';
+ Sym->dumpToStream(OS);
+ OS << ')';
}
-void IntSymExpr::dumpToStream(raw_ostream &os) const {
- if (getLHS().isUnsigned())
- os << getLHS().getZExtValue();
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
+ const llvm::APSInt &Value) {
+ if (Value.isUnsigned())
+ OS << Value.getZExtValue();
else
- os << getLHS().getSExtValue();
- if (getLHS().isUnsigned())
- os << 'U';
- os << ' '
- << BinaryOperator::getOpcodeStr(getOpcode())
- << " (";
- getRHS()->dumpToStream(os);
- os << ')';
+ OS << Value.getSExtValue();
+ if (Value.isUnsigned())
+ OS << 'U';
}
-void SymSymExpr::dumpToStream(raw_ostream &os) const {
- os << '(';
- getLHS()->dumpToStream(os);
- os << ") "
- << BinaryOperator::getOpcodeStr(getOpcode())
- << " (";
- getRHS()->dumpToStream(os);
- os << ')';
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
+ BinaryOperator::Opcode Op) {
+ OS << ' ' << BinaryOperator::getOpcodeStr(Op) << ' ';
}
void SymbolCast::dumpToStream(raw_ostream &os) const {
@@ -329,7 +311,7 @@ QualType SymbolDerived::getType() const {
}
QualType SymbolExtent::getType() const {
- ASTContext &Ctx = R->getMemRegionManager()->getContext();
+ ASTContext &Ctx = R->getMemRegionManager().getContext();
return Ctx.getSizeType();
}
@@ -341,10 +323,6 @@ QualType SymbolRegionValue::getType() const {
return R->getValueType();
}
-SymbolManager::~SymbolManager() {
- llvm::DeleteContainerSeconds(SymbolDependencies);
-}
-
bool SymbolManager::canSymbolicate(QualType T) {
T = T.getCanonicalType();
@@ -362,13 +340,9 @@ bool SymbolManager::canSymbolicate(QualType T) {
void SymbolManager::addSymbolDependency(const SymbolRef Primary,
const SymbolRef Dependent) {
- SymbolDependTy::iterator I = SymbolDependencies.find(Primary);
- SymbolRefSmallVectorTy *dependencies = nullptr;
- if (I == SymbolDependencies.end()) {
- dependencies = new SymbolRefSmallVectorTy();
- SymbolDependencies[Primary] = dependencies;
- } else {
- dependencies = I->second;
+ auto &dependencies = SymbolDependencies[Primary];
+ if (!dependencies) {
+ dependencies = std::make_unique<SymbolRefSmallVectorTy>();
}
dependencies->push_back(Dependent);
}
@@ -378,7 +352,7 @@ const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
if (I == SymbolDependencies.end())
return nullptr;
- return I->second;
+ return I->second.get();
}
void SymbolReaper::markDependentsLive(SymbolRef sym) {
@@ -542,6 +516,11 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
if (!Loc)
return true;
+ // Anonymous parameters of an inheriting constructor are live for the entire
+ // duration of the constructor.
+ if (isa<CXXInheritedCtorInitExpr>(Loc))
+ return true;
+
if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
return true;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
new file mode 100644
index 000000000000..f4c7e5978e19
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -0,0 +1,156 @@
+//===--- TextDiagnostics.cpp - Text Diagnostics for Paths -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TextDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/CrossTU/CrossTranslationUnit.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/Tooling/Core/Replacement.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+using namespace tooling;
+
+namespace {
+/// Emits minimal diagnostics (report message + notes) for the 'none' output
+/// type to standard error, or to complement many others. Emits detailed
+/// diagnostics in textual format for the 'text' output type.
+class TextDiagnostics : public PathDiagnosticConsumer {
+ DiagnosticsEngine &DiagEng;
+ const LangOptions &LO;
+ const bool IncludePath = false;
+ const bool ShouldEmitAsError = false;
+ const bool ApplyFixIts = false;
+ const bool ShouldDisplayCheckerName = false;
+
+public:
+ TextDiagnostics(DiagnosticsEngine &DiagEng, const LangOptions &LO,
+ bool ShouldIncludePath, const AnalyzerOptions &AnOpts)
+ : DiagEng(DiagEng), LO(LO), IncludePath(ShouldIncludePath),
+ ShouldEmitAsError(AnOpts.AnalyzerWerror),
+ ApplyFixIts(AnOpts.ShouldApplyFixIts),
+ ShouldDisplayCheckerName(AnOpts.ShouldDisplayCheckerNameForText) {}
+ ~TextDiagnostics() override {}
+
+ StringRef getName() const override { return "TextDiagnostics"; }
+
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override { return true; }
+
+ PathGenerationScheme getGenerationScheme() const override {
+ return IncludePath ? Minimal : None;
+ }
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) override {
+ unsigned WarnID =
+ ShouldEmitAsError
+ ? DiagEng.getCustomDiagID(DiagnosticsEngine::Error, "%0")
+ : DiagEng.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
+ unsigned NoteID = DiagEng.getCustomDiagID(DiagnosticsEngine::Note, "%0");
+ SourceManager &SM = DiagEng.getSourceManager();
+
+ Replacements Repls;
+ auto reportPiece = [&](unsigned ID, FullSourceLoc Loc, StringRef String,
+ ArrayRef<SourceRange> Ranges,
+ ArrayRef<FixItHint> Fixits) {
+ if (!ApplyFixIts) {
+ DiagEng.Report(Loc, ID) << String << Ranges << Fixits;
+ return;
+ }
+
+ DiagEng.Report(Loc, ID) << String << Ranges;
+ for (const FixItHint &Hint : Fixits) {
+ Replacement Repl(SM, Hint.RemoveRange, Hint.CodeToInsert);
+
+ if (llvm::Error Err = Repls.add(Repl)) {
+ llvm::errs() << "Error applying replacement " << Repl.toString()
+ << ": " << Err << "\n";
+ }
+ }
+ };
+
+ for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
+ E = Diags.end();
+ I != E; ++I) {
+ const PathDiagnostic *PD = *I;
+ std::string WarningMsg =
+ (ShouldDisplayCheckerName ? " [" + PD->getCheckerName() + "]" : "")
+ .str();
+
+ reportPiece(WarnID, PD->getLocation().asLocation(),
+ (PD->getShortDescription() + WarningMsg).str(),
+ PD->path.back()->getRanges(), PD->path.back()->getFixits());
+
+ // First, add extra notes, even if paths should not be included.
+ for (const auto &Piece : PD->path) {
+ if (!isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ reportPiece(NoteID, Piece->getLocation().asLocation(),
+ Piece->getString(), Piece->getRanges(),
+ Piece->getFixits());
+ }
+
+ if (!IncludePath)
+ continue;
+
+ // Then, add the path notes if necessary.
+ PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
+ for (const auto &Piece : FlatPath) {
+ if (isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ reportPiece(NoteID, Piece->getLocation().asLocation(),
+ Piece->getString(), Piece->getRanges(),
+ Piece->getFixits());
+ }
+ }
+
+ if (!ApplyFixIts || Repls.empty())
+ return;
+
+ Rewriter Rewrite(SM, LO);
+ if (!applyAllReplacements(Repls, Rewrite)) {
+ llvm::errs() << "An error occured during applying fix-it.\n";
+ }
+
+ Rewrite.overwriteChangedFiles();
+ }
+};
+} // end anonymous namespace
+
+void ento::createTextPathDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &Prefix, const clang::Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
+ /*ShouldIncludePath*/ true, AnalyzerOpts));
+}
+
+void ento::createTextMinimalPathDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &Prefix, const clang::Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
+ /*ShouldIncludePath*/ false,
+ AnalyzerOpts));
+}
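
The consumer above batches every FixItHint into a single tooling::Replacements set and rewrites the affected files only once all diagnostics have been flushed. A minimal sketch of that pattern, using the same Replacement/Rewriter APIs the file pulls in; the helper name applyHint is illustrative and not part of the patch:

    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/LangOptions.h"
    #include "clang/Basic/SourceManager.h"
    #include "clang/Rewrite/Core/Rewriter.h"
    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical helper: turn one FixItHint into an on-disk edit.
    static void applyHint(clang::SourceManager &SM, const clang::LangOptions &LO,
                          const clang::FixItHint &Hint) {
      using namespace clang::tooling;
      Replacements Repls;
      // A Replacement records the file, offset, length and replacement text.
      Replacement Repl(SM, Hint.RemoveRange, Hint.CodeToInsert);
      if (llvm::Error Err = Repls.add(Repl)) {
        // add() rejects conflicting/overlapping edits.
        llvm::errs() << "Failed to record " << Repl.toString() << ": "
                     << llvm::toString(std::move(Err)) << "\n";
        return;
      }
      clang::Rewriter Rewrite(SM, LO);
      if (applyAllReplacements(Repls, Rewrite)) // returns true on success
        Rewrite.overwriteChangedFiles();        // write buffers back to disk
    }
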
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index fea8100c3b3b..392049e21c6e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -12,7 +12,6 @@
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "ModelInjector.h"
-#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -21,10 +20,12 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
+#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/CrossTU/CrossTranslationUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
@@ -32,7 +33,6 @@
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
@@ -61,114 +61,6 @@ STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
//===----------------------------------------------------------------------===//
-// Special PathDiagnosticConsumers.
-//===----------------------------------------------------------------------===//
-
-void ento::createPlistHTMLDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
- createHTMLDiagnosticConsumer(AnalyzerOpts, C,
- llvm::sys::path::parent_path(prefix), PP, CTU);
- createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
-}
-
-void ento::createTextPathDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &Prefix, const clang::Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
- llvm_unreachable("'text' consumer should be enabled on ClangDiags");
-}
-
-namespace {
-class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
- DiagnosticsEngine &Diag;
- bool IncludePath = false, ShouldEmitAsError = false, FixitsAsRemarks = false;
-
-public:
- ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
- : Diag(Diag) {}
- ~ClangDiagPathDiagConsumer() override {}
- StringRef getName() const override { return "ClangDiags"; }
-
- bool supportsLogicalOpControlFlow() const override { return true; }
- bool supportsCrossFileDiagnostics() const override { return true; }
-
- PathGenerationScheme getGenerationScheme() const override {
- return IncludePath ? Minimal : None;
- }
-
- void enablePaths() { IncludePath = true; }
- void enableWerror() { ShouldEmitAsError = true; }
- void enableFixitsAsRemarks() { FixitsAsRemarks = true; }
-
- void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- FilesMade *filesMade) override {
- unsigned WarnID =
- ShouldEmitAsError
- ? Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0")
- : Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
- unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
- unsigned RemarkID = Diag.getCustomDiagID(DiagnosticsEngine::Remark, "%0");
-
- auto reportPiece =
- [&](unsigned ID, SourceLocation Loc, StringRef String,
- ArrayRef<SourceRange> Ranges, ArrayRef<FixItHint> Fixits) {
- if (!FixitsAsRemarks) {
- Diag.Report(Loc, ID) << String << Ranges << Fixits;
- } else {
- Diag.Report(Loc, ID) << String << Ranges;
- for (const FixItHint &Hint : Fixits) {
- SourceManager &SM = Diag.getSourceManager();
- llvm::SmallString<128> Str;
- llvm::raw_svector_ostream OS(Str);
- // FIXME: Add support for InsertFromRange and
- // BeforePreviousInsertion.
- assert(!Hint.InsertFromRange.isValid() && "Not implemented yet!");
- assert(!Hint.BeforePreviousInsertions && "Not implemented yet!");
- OS << SM.getSpellingColumnNumber(Hint.RemoveRange.getBegin())
- << "-" << SM.getSpellingColumnNumber(Hint.RemoveRange.getEnd())
- << ": '" << Hint.CodeToInsert << "'";
- Diag.Report(Loc, RemarkID) << OS.str();
- }
- }
- };
-
- for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
- E = Diags.end();
- I != E; ++I) {
- const PathDiagnostic *PD = *I;
- reportPiece(WarnID, PD->getLocation().asLocation(),
- PD->getShortDescription(), PD->path.back()->getRanges(),
- PD->path.back()->getFixits());
-
- // First, add extra notes, even if paths should not be included.
- for (const auto &Piece : PD->path) {
- if (!isa<PathDiagnosticNotePiece>(Piece.get()))
- continue;
-
- reportPiece(NoteID, Piece->getLocation().asLocation(),
- Piece->getString(), Piece->getRanges(), Piece->getFixits());
- }
-
- if (!IncludePath)
- continue;
-
- // Then, add the path notes if necessary.
- PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
- for (const auto &Piece : FlatPath) {
- if (isa<PathDiagnosticNotePiece>(Piece.get()))
- continue;
-
- reportPiece(NoteID, Piece->getLocation().asLocation(),
- Piece->getString(), Piece->getRanges(), Piece->getFixits());
- }
- }
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
// AnalysisConsumer declaration.
//===----------------------------------------------------------------------===//
@@ -192,7 +84,7 @@ class AnalysisConsumer : public AnalysisASTConsumer,
public:
ASTContext *Ctx;
- const Preprocessor &PP;
+ Preprocessor &PP;
const std::string OutDir;
AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
@@ -253,31 +145,16 @@ public:
}
void DigestAnalyzerOptions() {
- if (Opts->AnalysisDiagOpt != PD_NONE) {
- // Create the PathDiagnosticConsumer.
- ClangDiagPathDiagConsumer *clangDiags =
- new ClangDiagPathDiagConsumer(PP.getDiagnostics());
- PathConsumers.push_back(clangDiags);
-
- if (Opts->AnalyzerWerror)
- clangDiags->enableWerror();
-
- if (Opts->ShouldEmitFixItHintsAsRemarks)
- clangDiags->enableFixitsAsRemarks();
-
- if (Opts->AnalysisDiagOpt == PD_TEXT) {
- clangDiags->enablePaths();
-
- } else if (!OutDir.empty()) {
- switch (Opts->AnalysisDiagOpt) {
- default:
+ switch (Opts->AnalysisDiagOpt) {
+ case PD_NONE:
+ break;
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
CREATEFN(*Opts.get(), PathConsumers, OutDir, PP, CTU); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
- }
- }
+ default:
+ llvm_unreachable("Unknown analyzer output type!");
}
// Create the analyzer component creators.
@@ -313,30 +190,29 @@ public:
else if (Mode == AM_Path) {
llvm::errs() << " (Path, ";
switch (IMode) {
- case ExprEngine::Inline_Minimal:
- llvm::errs() << " Inline_Minimal";
- break;
- case ExprEngine::Inline_Regular:
- llvm::errs() << " Inline_Regular";
- break;
+ case ExprEngine::Inline_Minimal:
+ llvm::errs() << " Inline_Minimal";
+ break;
+ case ExprEngine::Inline_Regular:
+ llvm::errs() << " Inline_Regular";
+ break;
}
llvm::errs() << ")";
- }
- else
+ } else
assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
- llvm::errs() << ": " << Loc.getFilename() << ' '
- << getFunctionName(D) << '\n';
+ llvm::errs() << ": " << Loc.getFilename() << ' ' << getFunctionName(D)
+ << '\n';
}
}
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr = createCheckerManager(
- *Ctx, *Opts, Plugins, CheckerRegistrationFns, PP.getDiagnostics());
+ checkerMgr = std::make_unique<CheckerManager>(*Ctx, *Opts, PP, Plugins,
+ CheckerRegistrationFns);
- Mgr = std::make_unique<AnalysisManager>(*Ctx, PathConsumers, CreateStoreMgr,
- CreateConstraintMgr,
+ Mgr = std::make_unique<AnalysisManager>(*Ctx, PP, PathConsumers,
+ CreateStoreMgr, CreateConstraintMgr,
checkerMgr.get(), *Opts, Injector);
}
@@ -469,7 +345,7 @@ private:
/// Print \p S to stderr if \c Opts->AnalyzerDisplayProgress is set.
void reportAnalyzerProgress(StringRef S);
-};
+}; // class AnalysisConsumer
} // end anonymous namespace
@@ -503,6 +379,13 @@ static bool shouldSkipFunction(const Decl *D,
if (VisitedAsTopLevel.count(D))
return true;
+ // Skip analysis of inheriting constructors as top-level functions. These
+ // constructors don't even have a body written down in the code, so even if
+ // we find a bug, we won't be able to display it.
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
+ if (CD->isInheritingConstructor())
+ return true;
+
// We want to re-analyse the functions as top level in the following cases:
// - The 'init' methods should be reanalyzed because
// ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
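
For reference, the construct this new check skips looks like the snippet below; an inheriting constructor is introduced by a using-declaration only, so there is no user-written body a path diagnostic could point into (the class names are illustrative):

    struct Base {
      explicit Base(int Fd);
    };

    struct Derived : Base {
      // The constructor Clang synthesizes from this using-declaration reports
      // CXXConstructorDecl::isInheritingConstructor() == true and has no body.
      using Base::Base;
    };
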
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
index f4f06e32cd1d..eb6014a0629d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
@@ -10,8 +10,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -24,53 +25,36 @@
using namespace clang;
using namespace ento;
-std::unique_ptr<CheckerManager> ento::createCheckerManager(
- ASTContext &context,
- AnalyzerOptions &opts,
- ArrayRef<std::string> plugins,
- ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns,
- DiagnosticsEngine &diags) {
- auto checkerMgr = std::make_unique<CheckerManager>(context, opts);
-
- CheckerRegistry allCheckers(plugins, diags, opts, context.getLangOpts(),
- checkerRegistrationFns);
-
- allCheckers.initializeManager(*checkerMgr);
- allCheckers.validateCheckerOptions();
- checkerMgr->finishedCheckerRegistration();
-
- return checkerMgr;
-}
-
-void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins,
- AnalyzerOptions &anopts,
- DiagnosticsEngine &diags,
- const LangOptions &langOpts) {
+void ento::printCheckerHelp(raw_ostream &out, CompilerInstance &CI) {
out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
- CheckerRegistry(plugins, diags, anopts, langOpts)
- .printCheckerWithDescList(out);
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printCheckerWithDescList(
+ *CI.getAnalyzerOpts(), out);
}
-void ento::printEnabledCheckerList(raw_ostream &out,
- ArrayRef<std::string> plugins,
- AnalyzerOptions &anopts,
- DiagnosticsEngine &diags,
- const LangOptions &langOpts) {
+void ento::printEnabledCheckerList(raw_ostream &out, CompilerInstance &CI) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
- CheckerRegistry(plugins, diags, anopts, langOpts)
- .printEnabledCheckerList(out);
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printEnabledCheckerList(out);
}
-void ento::printCheckerConfigList(raw_ostream &OS,
- ArrayRef<std::string> plugins,
- AnalyzerOptions &opts,
- DiagnosticsEngine &diags,
- const LangOptions &LangOpts) {
- CheckerRegistry(plugins, diags, opts, LangOpts)
- .printCheckerOptionList(OS);
+void ento::printCheckerConfigList(raw_ostream &out, CompilerInstance &CI) {
+
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printCheckerOptionList(
+ *CI.getAnalyzerOpts(), out);
}
void ento::printAnalyzerConfigList(raw_ostream &out) {
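
A sketch of how a frontend caller is expected to use the reworked help entry points, assuming an already-configured CompilerInstance; the wrapper function below is hypothetical:

    #include "clang/Frontend/CompilerInstance.h"
    #include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
    #include "llvm/Support/raw_ostream.h"

    // The plugin list, analyzer options and diagnostics engine are now all
    // taken from the CompilerInstance instead of being passed separately.
    void printAnalyzerHelp(clang::CompilerInstance &CI) {
      clang::ento::printCheckerHelp(llvm::outs(), CI);
      clang::ento::printEnabledCheckerList(llvm::outs(), CI);
      clang::ento::printCheckerConfigList(llvm::outs(), CI);
    }
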
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index f5c05281adab..528284ca8985 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -25,9 +25,12 @@
using namespace clang;
using namespace ento;
+using namespace checker_registry;
using llvm::sys::DynamicLibrary;
-using RegisterCheckersFn = void (*)(CheckerRegistry &);
+//===----------------------------------------------------------------------===//
+// Utilities.
+//===----------------------------------------------------------------------===//
static bool isCompatibleAPIVersion(const char *VersionString) {
  // If the version string is null, it's not an analyzer plugin.
@@ -39,80 +42,17 @@ static bool isCompatibleAPIVersion(const char *VersionString) {
return strcmp(VersionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
}
-namespace {
-template <class T> struct FullNameLT {
- bool operator()(const T &Lhs, const T &Rhs) {
- return Lhs.FullName < Rhs.FullName;
- }
-};
-
-using PackageNameLT = FullNameLT<CheckerRegistry::PackageInfo>;
-using CheckerNameLT = FullNameLT<CheckerRegistry::CheckerInfo>;
-} // end of anonymous namespace
-
-template <class CheckerOrPackageInfoList>
-static
- typename std::conditional<std::is_const<CheckerOrPackageInfoList>::value,
- typename CheckerOrPackageInfoList::const_iterator,
- typename CheckerOrPackageInfoList::iterator>::type
- binaryFind(CheckerOrPackageInfoList &Collection, StringRef FullName) {
-
- using CheckerOrPackage = typename CheckerOrPackageInfoList::value_type;
- using CheckerOrPackageFullNameLT = FullNameLT<CheckerOrPackage>;
-
- assert(std::is_sorted(Collection.begin(), Collection.end(),
- CheckerOrPackageFullNameLT{}) &&
- "In order to efficiently gather checkers/packages, this function "
- "expects them to be already sorted!");
-
- return llvm::lower_bound(Collection, CheckerOrPackage(FullName),
- CheckerOrPackageFullNameLT{});
-}
-
static constexpr char PackageSeparator = '.';
-static bool isInPackage(const CheckerRegistry::CheckerInfo &Checker,
- StringRef PackageName) {
- // Does the checker's full name have the package as a prefix?
- if (!Checker.FullName.startswith(PackageName))
- return false;
-
- // Is the package actually just the name of a specific checker?
- if (Checker.FullName.size() == PackageName.size())
- return true;
-
- // Is the checker in the package (or a subpackage)?
- if (Checker.FullName[PackageName.size()] == PackageSeparator)
- return true;
-
- return false;
-}
-
-CheckerRegistry::CheckerInfoListRange
-CheckerRegistry::getMutableCheckersForCmdLineArg(StringRef CmdLineArg) {
- auto It = binaryFind(Checkers, CmdLineArg);
-
- if (!isInPackage(*It, CmdLineArg))
- return {Checkers.end(), Checkers.end()};
-
- // See how large the package is.
- // If the package doesn't exist, assume the option refers to a single
- // checker.
- size_t Size = 1;
- llvm::StringMap<size_t>::const_iterator PackageSize =
- PackageSizes.find(CmdLineArg);
-
- if (PackageSize != PackageSizes.end())
- Size = PackageSize->getValue();
-
- return {It, It + Size};
-}
+//===----------------------------------------------------------------------===//
+// Methods of CheckerRegistry.
+//===----------------------------------------------------------------------===//
CheckerRegistry::CheckerRegistry(
- ArrayRef<std::string> Plugins, DiagnosticsEngine &Diags,
- AnalyzerOptions &AnOpts, const LangOptions &LangOpts,
+ CheckerRegistryData &Data, ArrayRef<std::string> Plugins,
+ DiagnosticsEngine &Diags, AnalyzerOptions &AnOpts,
ArrayRef<std::function<void(CheckerRegistry &)>> CheckerRegistrationFns)
- : Diags(Diags), AnOpts(AnOpts), LangOpts(LangOpts) {
+ : Data(Data), Diags(Diags), AnOpts(AnOpts) {
// Register builtin checkers.
#define GET_CHECKERS
@@ -152,9 +92,10 @@ CheckerRegistry::CheckerRegistry(
continue;
}
+ using RegisterPluginCheckerFn = void (*)(CheckerRegistry &);
// Register its checkers.
- RegisterCheckersFn RegisterPluginCheckers =
- reinterpret_cast<RegisterCheckersFn>(
+ RegisterPluginCheckerFn RegisterPluginCheckers =
+ reinterpret_cast<RegisterPluginCheckerFn>(
Lib.getAddressOfSymbol("clang_registerCheckers"));
if (RegisterPluginCheckers)
RegisterPluginCheckers(*this);
@@ -171,38 +112,67 @@ CheckerRegistry::CheckerRegistry(
// FIXME: Alphabetical sort puts 'experimental' in the middle.
// Would it be better to name it '~experimental' or something else
// that's ASCIIbetically last?
- llvm::sort(Packages, PackageNameLT{});
- llvm::sort(Checkers, CheckerNameLT{});
+ llvm::sort(Data.Packages, checker_registry::PackageNameLT{});
+ llvm::sort(Data.Checkers, checker_registry::CheckerNameLT{});
#define GET_CHECKER_DEPENDENCIES
#define CHECKER_DEPENDENCY(FULLNAME, DEPENDENCY) \
addDependency(FULLNAME, DEPENDENCY);
+#define GET_CHECKER_WEAK_DEPENDENCIES
+
+#define CHECKER_WEAK_DEPENDENCY(FULLNAME, DEPENDENCY) \
+ addWeakDependency(FULLNAME, DEPENDENCY);
+
#define GET_CHECKER_OPTIONS
-#define CHECKER_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
- addCheckerOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+#define CHECKER_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, \
+ DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addCheckerOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, \
+ DEVELOPMENT_STATUS, IS_HIDDEN);
#define GET_PACKAGE_OPTIONS
-#define PACKAGE_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
- addPackageOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+#define PACKAGE_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, \
+ DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addPackageOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, \
+ DEVELOPMENT_STATUS, IS_HIDDEN);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER_DEPENDENCY
#undef GET_CHECKER_DEPENDENCIES
+#undef CHECKER_WEAK_DEPENDENCY
+#undef GET_CHECKER_WEAK_DEPENDENCIES
#undef CHECKER_OPTION
#undef GET_CHECKER_OPTIONS
#undef PACKAGE_OPTION
#undef GET_PACKAGE_OPTIONS
- resolveDependencies();
+ resolveDependencies<true>();
+ resolveDependencies<false>();
+
+#ifndef NDEBUG
+ for (auto &DepPair : Data.Dependencies) {
+ for (auto &WeakDepPair : Data.WeakDependencies) {
+      // Some assertions to enforce that strong dependencies are relations
+      // between purely modeling checkers, and weak dependencies are about
+      // diagnostics.
+ assert(WeakDepPair != DepPair &&
+ "A checker cannot strong and weak depend on the same checker!");
+ assert(WeakDepPair.first != DepPair.second &&
+ "A strong dependency mustn't have weak dependencies!");
+ assert(WeakDepPair.second != DepPair.second &&
+ "A strong dependency mustn't be a weak dependency as well!");
+ }
+ }
+#endif
+
resolveCheckerAndPackageOptions();
// Parse '-analyzer-checker' and '-analyzer-disable-checker' options from the
// command line.
for (const std::pair<std::string, bool> &Opt : AnOpts.CheckersAndPackages) {
CheckerInfoListRange CheckerForCmdLineArg =
- getMutableCheckersForCmdLineArg(Opt.first);
+ Data.getMutableCheckersForCmdLineArg(Opt.first);
if (CheckerForCmdLineArg.begin() == CheckerForCmdLineArg.end()) {
Diags.Report(diag::err_unknown_analyzer_checker_or_package) << Opt.first;
@@ -214,109 +184,169 @@ CheckerRegistry::CheckerRegistry(
: StateFromCmdLine::State_Disabled;
}
}
+ validateCheckerOptions();
}
-/// Collects dependencies in \p ret, returns false on failure.
-static bool
-collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
- const LangOptions &LO,
- CheckerRegistry::CheckerInfoSet &Ret);
-
-/// Collects dependenies in \p enabledCheckers. Return None on failure.
-LLVM_NODISCARD
-static llvm::Optional<CheckerRegistry::CheckerInfoSet>
-collectDependencies(const CheckerRegistry::CheckerInfo &checker,
- const LangOptions &LO) {
-
- CheckerRegistry::CheckerInfoSet Ret;
- // Add dependencies to the enabled checkers only if all of them can be
- // enabled.
- if (!collectDependenciesImpl(checker.Dependencies, LO, Ret))
- return None;
-
- return Ret;
-}
-
-static bool
-collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
- const LangOptions &LO,
- CheckerRegistry::CheckerInfoSet &Ret) {
+//===----------------------------------------------------------------------===//
+// Dependency resolving.
+//===----------------------------------------------------------------------===//
- for (const CheckerRegistry::CheckerInfo *Dependency : Deps) {
+template <typename IsEnabledFn>
+static bool collectStrongDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled);
+
+/// Collects weak dependencies in \p Ret.
+template <typename IsEnabledFn>
+static void collectWeakDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret, IsEnabledFn IsEnabled);
+
+void CheckerRegistry::initializeRegistry(const CheckerManager &Mgr) {
+  // First, we calculate the list of enabled checkers as specified by the
+  // invocation. Weak dependencies will not enable their unspecified strong
+  // dependencies, but it's only after resolving strong dependencies for all
+  // checkers that we know whether they will be enabled.
+ CheckerInfoSet Tmp;
+ auto IsEnabledFromCmdLine = [&](const CheckerInfo *Checker) {
+ return !Checker->isDisabled(Mgr);
+ };
+ for (const CheckerInfo &Checker : Data.Checkers) {
+ if (!Checker.isEnabled(Mgr))
+ continue;
- if (Dependency->isDisabled(LO))
- return false;
+ CheckerInfoSet Deps;
+ if (!collectStrongDependencies(Checker.Dependencies, Mgr, Deps,
+ IsEnabledFromCmdLine)) {
+ // If we failed to enable any of the dependencies, don't enable this
+ // checker.
+ continue;
+ }
- // Collect dependencies recursively.
- if (!collectDependenciesImpl(Dependency->Dependencies, LO, Ret))
- return false;
+ Tmp.insert(Deps.begin(), Deps.end());
- Ret.insert(Dependency);
+ // Enable the checker.
+ Tmp.insert(&Checker);
}
- return true;
-}
-
-CheckerRegistry::CheckerInfoSet CheckerRegistry::getEnabledCheckers() const {
-
- CheckerInfoSet EnabledCheckers;
-
- for (const CheckerInfo &Checker : Checkers) {
- if (!Checker.isEnabled(LangOpts))
+  // Calculate enabled checkers with the correct registration order. As this is
+  // done recursively, it's arguably cheaper, and certainly less error prone,
+  // to recalculate from scratch.
+ auto IsEnabled = [&](const CheckerInfo *Checker) {
+ return llvm::is_contained(Tmp, Checker);
+ };
+ for (const CheckerInfo &Checker : Data.Checkers) {
+ if (!Checker.isEnabled(Mgr))
continue;
- // Recursively enable its dependencies.
- llvm::Optional<CheckerInfoSet> Deps =
- collectDependencies(Checker, LangOpts);
+ CheckerInfoSet Deps;
- if (!Deps) {
+ collectWeakDependencies(Checker.WeakDependencies, Mgr, Deps, IsEnabled);
+
+ if (!collectStrongDependencies(Checker.Dependencies, Mgr, Deps,
+ IsEnabledFromCmdLine)) {
// If we failed to enable any of the dependencies, don't enable this
// checker.
continue;
}
// Note that set_union also preserves the order of insertion.
- EnabledCheckers.set_union(*Deps);
+ Data.EnabledCheckers.set_union(Deps);
+ Data.EnabledCheckers.insert(&Checker);
+ }
+}
- // Enable the checker.
- EnabledCheckers.insert(&Checker);
+template <typename IsEnabledFn>
+static bool collectStrongDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled) {
+
+ for (const CheckerInfo *Dependency : Deps) {
+ if (!IsEnabled(Dependency))
+ return false;
+
+ // Collect dependencies recursively.
+ if (!collectStrongDependencies(Dependency->Dependencies, Mgr, Ret,
+ IsEnabled))
+ return false;
+ Ret.insert(Dependency);
}
- return EnabledCheckers;
+ return true;
+}
+
+template <typename IsEnabledFn>
+static void collectWeakDependencies(const ConstCheckerInfoList &WeakDeps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled) {
+
+ for (const CheckerInfo *Dependency : WeakDeps) {
+ // Don't enable this checker if strong dependencies are unsatisfied, but
+ // assume that weak dependencies are transitive.
+ collectWeakDependencies(Dependency->WeakDependencies, Mgr, Ret, IsEnabled);
+
+ if (IsEnabled(Dependency) &&
+ collectStrongDependencies(Dependency->Dependencies, Mgr, Ret,
+ IsEnabled))
+ Ret.insert(Dependency);
+ }
}
-void CheckerRegistry::resolveDependencies() {
- for (const std::pair<StringRef, StringRef> &Entry : Dependencies) {
- auto CheckerIt = binaryFind(Checkers, Entry.first);
- assert(CheckerIt != Checkers.end() && CheckerIt->FullName == Entry.first &&
+template <bool IsWeak> void CheckerRegistry::resolveDependencies() {
+ for (const std::pair<StringRef, StringRef> &Entry :
+ (IsWeak ? Data.WeakDependencies : Data.Dependencies)) {
+
+ auto CheckerIt = binaryFind(Data.Checkers, Entry.first);
+ assert(CheckerIt != Data.Checkers.end() &&
+ CheckerIt->FullName == Entry.first &&
"Failed to find the checker while attempting to set up its "
"dependencies!");
- auto DependencyIt = binaryFind(Checkers, Entry.second);
- assert(DependencyIt != Checkers.end() &&
+ auto DependencyIt = binaryFind(Data.Checkers, Entry.second);
+ assert(DependencyIt != Data.Checkers.end() &&
DependencyIt->FullName == Entry.second &&
"Failed to find the dependency of a checker!");
- CheckerIt->Dependencies.emplace_back(&*DependencyIt);
+ // We do allow diagnostics from unit test/example dependency checkers.
+ assert((DependencyIt->FullName.startswith("test") ||
+ DependencyIt->FullName.startswith("example") || IsWeak ||
+ DependencyIt->IsHidden) &&
+ "Strong dependencies are modeling checkers, and as such "
+ "non-user facing! Mark them hidden in Checkers.td!");
+
+ if (IsWeak)
+ CheckerIt->WeakDependencies.emplace_back(&*DependencyIt);
+ else
+ CheckerIt->Dependencies.emplace_back(&*DependencyIt);
}
-
- Dependencies.clear();
}
void CheckerRegistry::addDependency(StringRef FullName, StringRef Dependency) {
- Dependencies.emplace_back(FullName, Dependency);
+ Data.Dependencies.emplace_back(FullName, Dependency);
}
+void CheckerRegistry::addWeakDependency(StringRef FullName,
+ StringRef Dependency) {
+ Data.WeakDependencies.emplace_back(FullName, Dependency);
+}
+
+//===----------------------------------------------------------------------===//
+// Checker option resolving and validating.
+//===----------------------------------------------------------------------===//
+
/// Insert the checker/package option to AnalyzerOptions' config table, and
/// validate it, if the user supplied it on the command line.
-static void insertAndValidate(StringRef FullName,
- const CheckerRegistry::CmdLineOption &Option,
+static void insertAndValidate(StringRef FullName, const CmdLineOption &Option,
AnalyzerOptions &AnOpts,
DiagnosticsEngine &Diags) {
std::string FullOption = (FullName + ":" + Option.OptionName).str();
- auto It = AnOpts.Config.insert({FullOption, Option.DefaultValStr});
+ auto It =
+ AnOpts.Config.insert({FullOption, std::string(Option.DefaultValStr)});
  // Insertion was successful -- CmdLineOption's constructor will validate
// whether values received from plugins or TableGen files are correct.
@@ -337,7 +367,7 @@ static void insertAndValidate(StringRef FullName,
<< FullOption << "a boolean value";
}
- It.first->setValue(Option.DefaultValStr);
+ It.first->setValue(std::string(Option.DefaultValStr));
}
return;
}
@@ -351,17 +381,17 @@ static void insertAndValidate(StringRef FullName,
<< FullOption << "an integer value";
}
- It.first->setValue(Option.DefaultValStr);
+ It.first->setValue(std::string(Option.DefaultValStr));
}
return;
}
}
template <class T>
-static void
-insertOptionToCollection(StringRef FullName, T &Collection,
- const CheckerRegistry::CmdLineOption &Option,
- AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+static void insertOptionToCollection(StringRef FullName, T &Collection,
+ const CmdLineOption &Option,
+ AnalyzerOptions &AnOpts,
+ DiagnosticsEngine &Diags) {
auto It = binaryFind(Collection, FullName);
assert(It != Collection.end() &&
"Failed to find the checker while attempting to add a command line "
@@ -374,22 +404,20 @@ insertOptionToCollection(StringRef FullName, T &Collection,
void CheckerRegistry::resolveCheckerAndPackageOptions() {
for (const std::pair<StringRef, CmdLineOption> &CheckerOptEntry :
- CheckerOptions) {
- insertOptionToCollection(CheckerOptEntry.first, Checkers,
+ Data.CheckerOptions) {
+ insertOptionToCollection(CheckerOptEntry.first, Data.Checkers,
CheckerOptEntry.second, AnOpts, Diags);
}
- CheckerOptions.clear();
for (const std::pair<StringRef, CmdLineOption> &PackageOptEntry :
- PackageOptions) {
- insertOptionToCollection(PackageOptEntry.first, Packages,
+ Data.PackageOptions) {
+ insertOptionToCollection(PackageOptEntry.first, Data.Packages,
PackageOptEntry.second, AnOpts, Diags);
}
- PackageOptions.clear();
}
void CheckerRegistry::addPackage(StringRef FullName) {
- Packages.emplace_back(PackageInfo(FullName));
+ Data.Packages.emplace_back(PackageInfo(FullName));
}
void CheckerRegistry::addPackageOption(StringRef OptionType,
@@ -399,22 +427,22 @@ void CheckerRegistry::addPackageOption(StringRef OptionType,
StringRef Description,
StringRef DevelopmentStatus,
bool IsHidden) {
- PackageOptions.emplace_back(
+ Data.PackageOptions.emplace_back(
PackageFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
Description, DevelopmentStatus, IsHidden});
}
-void CheckerRegistry::addChecker(InitializationFunction Rfn,
+void CheckerRegistry::addChecker(RegisterCheckerFn Rfn,
ShouldRegisterFunction Sfn, StringRef Name,
StringRef Desc, StringRef DocsUri,
bool IsHidden) {
- Checkers.emplace_back(Rfn, Sfn, Name, Desc, DocsUri, IsHidden);
+ Data.Checkers.emplace_back(Rfn, Sfn, Name, Desc, DocsUri, IsHidden);
// Record the presence of the checker in its packages.
StringRef PackageName, LeafName;
std::tie(PackageName, LeafName) = Name.rsplit(PackageSeparator);
while (!LeafName.empty()) {
- PackageSizes[PackageName] += 1;
+ Data.PackageSizes[PackageName] += 1;
std::tie(PackageName, LeafName) = PackageName.rsplit(PackageSeparator);
}
}
@@ -426,37 +454,33 @@ void CheckerRegistry::addCheckerOption(StringRef OptionType,
StringRef Description,
StringRef DevelopmentStatus,
bool IsHidden) {
- CheckerOptions.emplace_back(
+ Data.CheckerOptions.emplace_back(
CheckerFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
Description, DevelopmentStatus, IsHidden});
}
void CheckerRegistry::initializeManager(CheckerManager &CheckerMgr) const {
- // Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers = getEnabledCheckers();
-
// Initialize the CheckerManager with all enabled checkers.
- for (const auto *Checker : enabledCheckers) {
+ for (const auto *Checker : Data.EnabledCheckers) {
CheckerMgr.setCurrentCheckerName(CheckerNameRef(Checker->FullName));
Checker->Initialize(CheckerMgr);
}
}
-static void
-isOptionContainedIn(const CheckerRegistry::CmdLineOptionList &OptionList,
- StringRef SuppliedChecker, StringRef SuppliedOption,
- const AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+static void isOptionContainedIn(const CmdLineOptionList &OptionList,
+ StringRef SuppliedChecker,
+ StringRef SuppliedOption,
+ const AnalyzerOptions &AnOpts,
+ DiagnosticsEngine &Diags) {
if (!AnOpts.ShouldEmitErrorsOnInvalidConfigValue)
return;
- using CmdLineOption = CheckerRegistry::CmdLineOption;
-
auto SameOptName = [SuppliedOption](const CmdLineOption &Opt) {
return Opt.OptionName == SuppliedOption;
};
- auto OptionIt = llvm::find_if(OptionList, SameOptName);
+ const auto *OptionIt = llvm::find_if(OptionList, SameOptName);
if (OptionIt == OptionList.end()) {
Diags.Report(diag::err_analyzer_checker_option_unknown)
@@ -485,16 +509,16 @@ void CheckerRegistry::validateCheckerOptions() const {
  // it would return with an iterator to the first checker in the core, so
  // we really have to use find here, which uses operator==.
auto CheckerIt =
- llvm::find(Checkers, CheckerInfo(SuppliedCheckerOrPackage));
- if (CheckerIt != Checkers.end()) {
+ llvm::find(Data.Checkers, CheckerInfo(SuppliedCheckerOrPackage));
+ if (CheckerIt != Data.Checkers.end()) {
isOptionContainedIn(CheckerIt->CmdLineOptions, SuppliedCheckerOrPackage,
SuppliedOption, AnOpts, Diags);
continue;
}
- auto PackageIt =
- llvm::find(Packages, PackageInfo(SuppliedCheckerOrPackage));
- if (PackageIt != Packages.end()) {
+ const auto *PackageIt =
+ llvm::find(Data.Packages, PackageInfo(SuppliedCheckerOrPackage));
+ if (PackageIt != Data.Packages.end()) {
isOptionContainedIn(PackageIt->CmdLineOptions, SuppliedCheckerOrPackage,
SuppliedOption, AnOpts, Diags);
continue;
@@ -505,121 +529,3 @@ void CheckerRegistry::validateCheckerOptions() const {
}
}
-void CheckerRegistry::printCheckerWithDescList(raw_ostream &Out,
- size_t MaxNameChars) const {
- // FIXME: Print available packages.
-
- Out << "CHECKERS:\n";
-
- // Find the maximum option length.
- size_t OptionFieldWidth = 0;
- for (const auto &Checker : Checkers) {
- // Limit the amount of padding we are willing to give up for alignment.
- // Package.Name Description [Hidden]
- size_t NameLength = Checker.FullName.size();
- if (NameLength <= MaxNameChars)
- OptionFieldWidth = std::max(OptionFieldWidth, NameLength);
- }
-
- const size_t InitialPad = 2;
-
- auto Print = [=](llvm::raw_ostream &Out, const CheckerInfo &Checker,
- StringRef Description) {
- AnalyzerOptions::printFormattedEntry(Out, {Checker.FullName, Description},
- InitialPad, OptionFieldWidth);
- Out << '\n';
- };
-
- for (const auto &Checker : Checkers) {
- // The order of this if branches is significant, we wouldn't like to display
- // developer checkers even in the alpha output. For example,
- // alpha.cplusplus.IteratorModeling is a modeling checker, hence it's hidden
- // by default, and users (even when the user is a developer of an alpha
- // checker) shouldn't normally tinker with whether they should be enabled.
-
- if (Checker.IsHidden) {
- if (AnOpts.ShowCheckerHelpDeveloper)
- Print(Out, Checker, Checker.Desc);
- continue;
- }
-
- if (Checker.FullName.startswith("alpha")) {
- if (AnOpts.ShowCheckerHelpAlpha)
- Print(Out, Checker,
- ("(Enable only for development!) " + Checker.Desc).str());
- continue;
- }
-
- if (AnOpts.ShowCheckerHelp)
- Print(Out, Checker, Checker.Desc);
- }
-}
-
-void CheckerRegistry::printEnabledCheckerList(raw_ostream &Out) const {
- // Collect checkers enabled by the options.
- CheckerInfoSet EnabledCheckers = getEnabledCheckers();
-
- for (const auto *i : EnabledCheckers)
- Out << i->FullName << '\n';
-}
-
-void CheckerRegistry::printCheckerOptionList(raw_ostream &Out) const {
- Out << "OVERVIEW: Clang Static Analyzer Checker and Package Option List\n\n";
- Out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
- Out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
- "OPTION2=VALUE, ...\n\n";
- Out << "OPTIONS:\n\n";
-
- std::multimap<StringRef, const CmdLineOption &> OptionMap;
-
- for (const CheckerInfo &Checker : Checkers) {
- for (const CmdLineOption &Option : Checker.CmdLineOptions) {
- OptionMap.insert({Checker.FullName, Option});
- }
- }
-
- for (const PackageInfo &Package : Packages) {
- for (const CmdLineOption &Option : Package.CmdLineOptions) {
- OptionMap.insert({Package.FullName, Option});
- }
- }
-
- auto Print = [] (llvm::raw_ostream &Out, StringRef FullOption, StringRef Desc) {
- AnalyzerOptions::printFormattedEntry(Out, {FullOption, Desc},
- /*InitialPad*/ 2,
- /*EntryWidth*/ 50,
- /*MinLineWidth*/ 90);
- Out << "\n\n";
- };
- for (const std::pair<const StringRef, const CmdLineOption &> &Entry :
- OptionMap) {
- const CmdLineOption &Option = Entry.second;
- std::string FullOption = (Entry.first + ":" + Option.OptionName).str();
-
- std::string Desc =
- ("(" + Option.OptionType + ") " + Option.Description + " (default: " +
- (Option.DefaultValStr.empty() ? "\"\"" : Option.DefaultValStr) + ")")
- .str();
-
- // The list of these if branches is significant, we wouldn't like to
- // display hidden alpha checker options for
- // -analyzer-checker-option-help-alpha.
-
- if (Option.IsHidden) {
- if (AnOpts.ShowCheckerOptionDeveloperList)
- Print(Out, FullOption, Desc);
- continue;
- }
-
- if (Option.DevelopmentStatus == "alpha" ||
- Entry.first.startswith("alpha")) {
- if (AnOpts.ShowCheckerOptionAlphaList)
- Print(Out, FullOption,
- llvm::Twine("(Enable only for development!) " + Desc).str());
- continue;
- }
-
- if (AnOpts.ShowCheckerOptionList)
- Print(Out, FullOption, Desc);
- }
-}
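
For orientation, the CheckerRegistry constructor in the hunks above consumes dependency entries of roughly the following shape from the TableGen-generated Checkers.inc; the checker names are made up, and per the new assertions a strong dependency should name a hidden modeling checker while a weak dependency names another diagnostic checker:

    // Illustrative only -- real entries are generated from Checkers.td.
    #ifdef GET_CHECKER_DEPENDENCIES
    CHECKER_DEPENDENCY("example.Diagnoser", "example.ModelingSupport")
    #endif

    #ifdef GET_CHECKER_WEAK_DEPENDENCIES
    CHECKER_WEAK_DEPENDENCY("example.Diagnoser", "example.OtherDiagnoser")
    #endif
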
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp
new file mode 100644
index 000000000000..21a60785eb52
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp
@@ -0,0 +1,50 @@
+//===- CreateCheckerManager.cpp - Static Analyzer Checker Manager ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+#include <memory>
+
+namespace clang {
+namespace ento {
+
+CheckerManager::CheckerManager(
+ ASTContext &Context, AnalyzerOptions &AOptions, const Preprocessor &PP,
+ ArrayRef<std::string> plugins,
+ ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns)
+ : Context(&Context), LangOpts(Context.getLangOpts()), AOptions(AOptions),
+ PP(&PP), Diags(Context.getDiagnostics()),
+ RegistryData(std::make_unique<CheckerRegistryData>()) {
+ CheckerRegistry Registry(*RegistryData, plugins, Context.getDiagnostics(),
+ AOptions, checkerRegistrationFns);
+ Registry.initializeRegistry(*this);
+ Registry.initializeManager(*this);
+ finishedCheckerRegistration();
+}
+
+CheckerManager::CheckerManager(AnalyzerOptions &AOptions,
+ const LangOptions &LangOpts,
+ DiagnosticsEngine &Diags,
+ ArrayRef<std::string> plugins)
+ : LangOpts(LangOpts), AOptions(AOptions), Diags(Diags),
+ RegistryData(std::make_unique<CheckerRegistryData>()) {
+ CheckerRegistry Registry(*RegistryData, plugins, Diags, AOptions, {});
+ Registry.initializeRegistry(*this);
+}
+
+CheckerManager::~CheckerManager() {
+ for (const auto &CheckerDtor : CheckerDtors)
+ CheckerDtor();
+}
+
+} // namespace ento
+} // namespace clang
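
The two constructors above intentionally stop at different points: the first registers and initializes every enabled checker for an analysis run, while the second only fills CheckerRegistryData so the help printers can query it. A hedged sketch of the second mode, with CI standing for a configured CompilerInstance and the wrapper name being illustrative:

    #include "clang/Frontend/CompilerInstance.h"
    #include "clang/StaticAnalyzer/Core/CheckerManager.h"
    #include <memory>

    // Registry-only mode: no checkers are constructed, mirroring the calls
    // made by the -analyzer-checker-help handlers.
    std::unique_ptr<clang::ento::CheckerManager>
    makeRegistryOnlyManager(clang::CompilerInstance &CI) {
      return std::make_unique<clang::ento::CheckerManager>(
          *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
          CI.getFrontendOpts().Plugins);
    }
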
diff --git a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
new file mode 100644
index 000000000000..cd4d8c188da9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
@@ -0,0 +1,70 @@
+//===--- CommandLineArgs.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+
+std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
+ std::vector<std::string> Args;
+ // Test with basic arguments.
+ switch (Lang) {
+ case Lang_C89:
+ Args = {"-x", "c", "-std=c89"};
+ break;
+ case Lang_C99:
+ Args = {"-x", "c", "-std=c99"};
+ break;
+ case Lang_CXX03:
+ Args = {"-std=c++03", "-frtti"};
+ break;
+ case Lang_CXX11:
+ Args = {"-std=c++11", "-frtti"};
+ break;
+ case Lang_CXX14:
+ Args = {"-std=c++14", "-frtti"};
+ break;
+ case Lang_CXX17:
+ Args = {"-std=c++17", "-frtti"};
+ break;
+ case Lang_CXX20:
+ Args = {"-std=c++20", "-frtti"};
+ break;
+ case Lang_OBJCXX:
+ Args = {"-x", "objective-c++", "-frtti"};
+ break;
+ case Lang_OpenCL:
+ llvm_unreachable("Not implemented yet!");
+ }
+ return Args;
+}
+
+StringRef getFilenameForTesting(TestLanguage Lang) {
+ switch (Lang) {
+ case Lang_C89:
+ case Lang_C99:
+ return "input.c";
+
+ case Lang_CXX03:
+ case Lang_CXX11:
+ case Lang_CXX14:
+ case Lang_CXX17:
+ case Lang_CXX20:
+ return "input.cc";
+
+ case Lang_OpenCL:
+ return "input.cl";
+
+ case Lang_OBJCXX:
+ return "input.mm";
+ }
+ llvm_unreachable("Unhandled TestLanguage enum");
+}
+
+} // end namespace clang
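
A sketch of how these helpers are typically combined in a unit test, assuming clang::tooling::buildASTFromCodeWithArgs from clang/Tooling/Tooling.h; the wrapper name is illustrative:

    #include "clang/Frontend/ASTUnit.h"
    #include "clang/Testing/CommandLineArgs.h"
    #include "clang/Tooling/Tooling.h"
    #include <memory>

    // Parse a code snippet with the canonical flags and file name for a
    // given TestLanguage.
    std::unique_ptr<clang::ASTUnit> parseForTest(llvm::StringRef Code,
                                                 clang::TestLanguage Lang) {
      return clang::tooling::buildASTFromCodeWithArgs(
          Code, clang::getCommandLineArgsForTesting(Lang),
          clang::getFilenameForTesting(Lang));
    }
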
diff --git a/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp b/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
index 4d495228cb51..0821863adcc6 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -11,8 +11,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/ASTDiff/ASTDiff.h"
-
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/PriorityQueue.h"
@@ -116,12 +117,12 @@ public:
Impl(SyntaxTree *Parent, Stmt *N, ASTContext &AST);
template <class T>
Impl(SyntaxTree *Parent,
- typename std::enable_if<std::is_base_of<Stmt, T>::value, T>::type *Node,
+ std::enable_if_t<std::is_base_of<Stmt, T>::value, T> *Node,
ASTContext &AST)
: Impl(Parent, dyn_cast<Stmt>(Node), AST) {}
template <class T>
Impl(SyntaxTree *Parent,
- typename std::enable_if<std::is_base_of<Decl, T>::value, T>::type *Node,
+ std::enable_if_t<std::is_base_of<Decl, T>::value, T> *Node,
ASTContext &AST)
: Impl(Parent, dyn_cast<Decl>(Node), AST) {}
@@ -397,7 +398,7 @@ static const DeclContext *getEnclosingDeclContext(ASTContext &AST,
static std::string getInitializerValue(const CXXCtorInitializer *Init,
const PrintingPolicy &TypePP) {
if (Init->isAnyMemberInitializer())
- return Init->getAnyMember()->getName();
+ return std::string(Init->getAnyMember()->getName());
if (Init->isBaseInitializer())
return QualType(Init->getBaseClass(), 0).getAsString(TypePP);
if (Init->isDelegatingInitializer())
@@ -434,36 +435,36 @@ std::string SyntaxTree::Impl::getDeclValue(const Decl *D) const {
T->getTypeForDecl()->getCanonicalTypeInternal().getAsString(TypePP) +
";";
if (auto *U = dyn_cast<UsingDirectiveDecl>(D))
- return U->getNominatedNamespace()->getName();
+ return std::string(U->getNominatedNamespace()->getName());
if (auto *A = dyn_cast<AccessSpecDecl>(D)) {
CharSourceRange Range(A->getSourceRange(), false);
- return Lexer::getSourceText(Range, AST.getSourceManager(),
- AST.getLangOpts());
+ return std::string(
+ Lexer::getSourceText(Range, AST.getSourceManager(), AST.getLangOpts()));
}
return Value;
}
std::string SyntaxTree::Impl::getStmtValue(const Stmt *S) const {
if (auto *U = dyn_cast<UnaryOperator>(S))
- return UnaryOperator::getOpcodeStr(U->getOpcode());
+ return std::string(UnaryOperator::getOpcodeStr(U->getOpcode()));
if (auto *B = dyn_cast<BinaryOperator>(S))
- return B->getOpcodeStr();
+ return std::string(B->getOpcodeStr());
if (auto *M = dyn_cast<MemberExpr>(S))
return getRelativeName(M->getMemberDecl());
if (auto *I = dyn_cast<IntegerLiteral>(S)) {
SmallString<256> Str;
I->getValue().toString(Str, /*Radix=*/10, /*Signed=*/false);
- return Str.str();
+ return std::string(Str.str());
}
if (auto *F = dyn_cast<FloatingLiteral>(S)) {
SmallString<256> Str;
F->getValue().toString(Str);
- return Str.str();
+ return std::string(Str.str());
}
if (auto *D = dyn_cast<DeclRefExpr>(S))
return getRelativeName(D->getDecl(), getEnclosingDeclContext(AST, S));
if (auto *String = dyn_cast<StringLiteral>(S))
- return String->getString();
+ return std::string(String->getString());
if (auto *B = dyn_cast<CXXBoolLiteralExpr>(S))
return B->getValue() ? "true" : "false";
return "";
@@ -683,9 +684,7 @@ private:
}
};
-ast_type_traits::ASTNodeKind Node::getType() const {
- return ASTNode.getNodeKind();
-}
+ASTNodeKind Node::getType() const { return ASTNode.getNodeKind(); }
StringRef Node::getTypeLabel() const { return getType().asStringRef(); }
diff --git a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
index d85075f59607..7707c99c21d0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
@@ -8,8 +8,9 @@
#include "clang/Tooling/AllTUsExecution.h"
#include "clang/Tooling/ToolExecutorPluginRegistry.h"
-#include "llvm/Support/Threading.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
namespace clang {
@@ -114,8 +115,7 @@ llvm::Error AllTUsToolExecutor::execute(
auto &Action = Actions.front();
{
- llvm::ThreadPool Pool(ThreadCount == 0 ? llvm::hardware_concurrency()
- : ThreadCount);
+ llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
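
The ThreadPool change above relies on llvm::hardware_concurrency() now accepting a requested thread count and returning a ThreadPoolStrategy, where 0 means "choose automatically". A minimal sketch:

    #include "llvm/Support/ThreadPool.h"
    #include "llvm/Support/Threading.h"

    // ThreadCount == 0 lets the strategy pick all available hardware threads,
    // which is what the removed ternary expression used to spell out.
    void runOnPool(unsigned ThreadCount) {
      llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
      Pool.async([] { /* work item */ });
      Pool.wait();
    }
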
diff --git a/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp b/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
index ec15311d4bac..a857b57fbf7b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -26,7 +26,7 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
bool HasSyntaxOnly = false;
- const std::vector<llvm::StringRef> OutputCommands = {
+ constexpr llvm::StringRef OutputCommands[] = {
// FIXME: Add other options that generate output.
"-save-temps",
"--save-temps",
@@ -98,7 +98,8 @@ ArgumentsAdjuster getClangStripDependencyFileAdjuster() {
StringRef Arg = Args[i];
// All dependency-file options begin with -M. These include -MM,
// -MF, -MG, -MP, -MT, -MQ, -MD, and -MMD.
- if (!Arg.startswith("-M")) {
+ if (!Arg.startswith("-M") && !Arg.startswith("/showIncludes") &&
+ !Arg.startswith("-showIncludes")) {
AdjustedArgs.push_back(Args[i]);
continue;
}
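
The adjuster change above extends the filter to MSVC-style include tracing flags; a hedged usage sketch, with the wrapper name being illustrative:

    #include "clang/Tooling/ArgumentsAdjusters.h"

    // After the change, /showIncludes and -showIncludes are dropped along
    // with the -M dependency-file family.
    clang::tooling::CommandLineArguments
    stripDepFlags(const clang::tooling::CommandLineArguments &Args) {
      return clang::tooling::getClangStripDependencyFileAdjuster()(
          Args, /*Filename=*/"");
    }
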
diff --git a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
index c453e8d7df19..2b4c26dab96f 100644
--- a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
@@ -64,16 +64,14 @@ std::unique_ptr<CompilationDatabase>
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage) {
llvm::raw_string_ostream ErrorStream(ErrorMessage);
- for (CompilationDatabasePluginRegistry::iterator
- It = CompilationDatabasePluginRegistry::begin(),
- Ie = CompilationDatabasePluginRegistry::end();
- It != Ie; ++It) {
+ for (const CompilationDatabasePluginRegistry::entry &Database :
+ CompilationDatabasePluginRegistry::entries()) {
std::string DatabaseErrorMessage;
- std::unique_ptr<CompilationDatabasePlugin> Plugin(It->instantiate());
+ std::unique_ptr<CompilationDatabasePlugin> Plugin(Database.instantiate());
if (std::unique_ptr<CompilationDatabase> DB =
Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
return DB;
- ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
+ ErrorStream << Database.getName() << ": " << DatabaseErrorMessage << "\n";
}
return nullptr;
}
@@ -164,7 +162,7 @@ private:
case driver::Action::InputClass:
if (Collect) {
const auto *IA = cast<driver::InputAction>(A);
- Inputs.push_back(IA->getInputArg().getSpelling());
+ Inputs.push_back(std::string(IA->getInputArg().getSpelling()));
}
break;
@@ -233,7 +231,7 @@ std::string GetClangToolCommand() {
SmallString<128> ClangToolPath;
ClangToolPath = llvm::sys::path::parent_path(ClangExecutable);
llvm::sys::path::append(ClangToolPath, "clang-tool");
- return ClangToolPath.str();
+ return std::string(ClangToolPath.str());
}
} // namespace
@@ -368,8 +366,14 @@ FixedCompilationDatabase::loadFromFile(StringRef Path, std::string &ErrorMsg) {
ErrorMsg = "Error while opening fixed database: " + Result.message();
return nullptr;
}
- std::vector<std::string> Args{llvm::line_iterator(**File),
- llvm::line_iterator()};
+ std::vector<std::string> Args;
+ for (llvm::StringRef Line :
+ llvm::make_range(llvm::line_iterator(**File), llvm::line_iterator())) {
+ // Stray whitespace is almost certainly unintended.
+ Line = Line.trim();
+ if (!Line.empty())
+ Args.push_back(Line.str());
+ }
return std::make_unique<FixedCompilationDatabase>(
llvm::sys::path::parent_path(Path), std::move(Args));
}
@@ -387,8 +391,8 @@ FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
std::vector<CompileCommand>
FixedCompilationDatabase::getCompileCommands(StringRef FilePath) const {
std::vector<CompileCommand> Result(CompileCommands);
- Result[0].CommandLine.push_back(FilePath);
- Result[0].Filename = FilePath;
+ Result[0].CommandLine.push_back(std::string(FilePath));
+ Result[0].Filename = std::string(FilePath);
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Core/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Tooling/Core/Diagnostic.cpp
index 235bd7fc1433..b0c4ea8c5608 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Core/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Core/Diagnostic.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/STLExtras.h"
@@ -25,7 +26,7 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
SourceLocation Loc)
: Message(Message), FileOffset(0) {
assert(Loc.isValid() && Loc.isFileID());
- FilePath = Sources.getFilename(Loc);
+ FilePath = std::string(Sources.getFilename(Loc));
// Don't store offset in the scratch space. It doesn't tell anything to the
// user. Moreover, it depends on the history of macro expansions and thus
@@ -34,6 +35,16 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
FileOffset = Sources.getFileOffset(Loc);
}
+FileByteRange::FileByteRange(
+ const SourceManager &Sources, CharSourceRange Range)
+ : FileOffset(0), Length(0) {
+ FilePath = std::string(Sources.getFilename(Range.getBegin()));
+ if (!FilePath.empty()) {
+ FileOffset = Sources.getFileOffset(Range.getBegin());
+ Length = Sources.getFileOffset(Range.getEnd()) - FileOffset;
+ }
+}
+
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Level DiagLevel, StringRef BuildDirectory)
: DiagnosticName(DiagnosticName), DiagLevel(DiagLevel),
@@ -42,9 +53,10 @@ Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
const DiagnosticMessage &Message,
const SmallVector<DiagnosticMessage, 1> &Notes,
- Level DiagLevel, llvm::StringRef BuildDirectory)
+ Level DiagLevel, llvm::StringRef BuildDirectory,
+ const SmallVector<FileByteRange, 1> &Ranges)
: DiagnosticName(DiagnosticName), Message(Message), Notes(Notes),
- DiagLevel(DiagLevel), BuildDirectory(BuildDirectory) {}
+ DiagLevel(DiagLevel), BuildDirectory(BuildDirectory), Ranges(Ranges) {}
const llvm::StringMap<Replacements> *selectFirstFix(const Diagnostic& D) {
if (!D.Message.Fix.empty())
diff --git a/contrib/llvm-project/clang/lib/Tooling/Core/Lookup.cpp b/contrib/llvm-project/clang/lib/Tooling/Core/Lookup.cpp
index 735a5df5ed21..712724a268fb 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Core/Lookup.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Core/Lookup.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Lookup.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclarationName.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace clang::tooling;
@@ -129,7 +131,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
assert(QName.startswith("::"));
assert(QName.endswith(Spelling));
if (Spelling.startswith("::"))
- return Spelling;
+ return std::string(Spelling);
auto UnspelledSpecifier = QName.drop_back(Spelling.size());
llvm::SmallVector<llvm::StringRef, 2> UnspelledScopes;
@@ -168,7 +170,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
};
// Add more qualifiers until the spelling is not ambiguous.
- std::string Disambiguated = Spelling;
+ std::string Disambiguated = std::string(Spelling);
while (IsAmbiguousSpelling(Disambiguated)) {
if (UnspelledScopes.empty()) {
Disambiguated = "::" + Disambiguated;
@@ -206,8 +208,9 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
!usingFromDifferentCanonicalNamespace(FromDecl->getDeclContext(),
UseContext)) {
auto Pos = ReplacementString.rfind("::");
- return Pos != StringRef::npos ? ReplacementString.substr(Pos + 2)
- : ReplacementString;
+ return std::string(Pos != StringRef::npos
+ ? ReplacementString.substr(Pos + 2)
+ : ReplacementString);
}
// We did not match this because of a using statement, so we will need to
// figure out how good a namespace match we have with our destination type.
diff --git a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
index 9ed03655bf2c..ab8e20539559 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
@@ -46,8 +46,8 @@ Replacement::Replacement() : FilePath(InvalidLocation) {}
Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
StringRef ReplacementText)
- : FilePath(FilePath), ReplacementRange(Offset, Length),
- ReplacementText(ReplacementText) {}
+ : FilePath(std::string(FilePath)), ReplacementRange(Offset, Length),
+ ReplacementText(std::string(ReplacementText)) {}
Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
unsigned Length, StringRef ReplacementText) {
@@ -123,9 +123,9 @@ void Replacement::setFromSourceLocation(const SourceManager &Sources,
const std::pair<FileID, unsigned> DecomposedLocation =
Sources.getDecomposedLoc(Start);
const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
- this->FilePath = Entry ? Entry->getName() : InvalidLocation;
+ this->FilePath = std::string(Entry ? Entry->getName() : InvalidLocation);
this->ReplacementRange = Range(DecomposedLocation.second, Length);
- this->ReplacementText = ReplacementText;
+ this->ReplacementText = std::string(ReplacementText);
}
// FIXME: This should go into the Lexer, but we need to figure out how
@@ -367,8 +367,8 @@ class MergedReplacement {
public:
MergedReplacement(const Replacement &R, bool MergeSecond, int D)
: MergeSecond(MergeSecond), Delta(D), FilePath(R.getFilePath()),
- Offset(R.getOffset() + (MergeSecond ? 0 : Delta)), Length(R.getLength()),
- Text(R.getReplacementText()) {
+ Offset(R.getOffset() + (MergeSecond ? 0 : Delta)),
+ Length(R.getLength()), Text(std::string(R.getReplacementText())) {
Delta += MergeSecond ? 0 : Text.size() - Length;
DeltaFirst = MergeSecond ? Text.size() - Length : 0;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index b4d5a29ca695..b1b87e7fa573 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -106,7 +106,8 @@ DependencyScanningFilesystemSharedCache::
// sharding gives a performance edge by reducing the lock contention.
// FIXME: A better heuristic might also consider the OS to account for
// the different cost of lock contention on different OSes.
- NumShards = std::max(2u, llvm::hardware_concurrency() / 4);
+ NumShards =
+ std::max(2u, llvm::hardware_concurrency().compute_thread_count() / 4);
CacheShards = std::make_unique<CacheShard[]>(NumShards);
}
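The hunk above tracks an LLVM threading API change: llvm::hardware_concurrency() now returns a ThreadPoolStrategy rather than a plain count, and the concrete number of threads comes from compute_thread_count(). A small standalone sketch of the shard heuristic (illustrative only):
#include "llvm/Support/Threading.h"
#include <algorithm>
unsigned pickShardCount() {
  unsigned HW = llvm::hardware_concurrency().compute_thread_count();
  // Same heuristic as the shared cache above: at least two shards, roughly
  // one shard per four hardware threads.
  return std::max(2u, HW / 4);
}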
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index f643c538f8f9..16040c2f4626 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -8,24 +8,25 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "clang/Frontend/Utils.h"
-#include "llvm/Support/JSON.h"
-
-static llvm::json::Array toJSONSorted(const llvm::StringSet<> &Set) {
- std::vector<llvm::StringRef> Strings;
- for (auto &&I : Set)
- Strings.push_back(I.getKey());
- std::sort(Strings.begin(), Strings.end());
- return llvm::json::Array(Strings);
-}
namespace clang{
namespace tooling{
namespace dependencies{
+std::vector<std::string> FullDependencies::getAdditionalCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
+ std::vector<std::string> Ret = AdditionalNonPathCommandLine;
+
+ dependencies::detail::appendCommonModuleArguments(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+
+ return Ret;
+}
+
DependencyScanningTool::DependencyScanningTool(
DependencyScanningService &Service)
- : Format(Service.getFormat()), Worker(Service) {
-}
+ : Worker(Service) {}
llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
const tooling::CompilationDatabase &Compilations, StringRef CWD) {
@@ -36,7 +37,7 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
StringRef File) override {
if (!this->Opts)
this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
- Dependencies.push_back(File);
+ Dependencies.push_back(std::string(File));
}
void handleModuleDependency(ModuleDeps MD) override {
@@ -75,11 +76,36 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
std::vector<std::string> Dependencies;
};
+ // We expect a single command here because if a source file occurs multiple
+ // times in the original CDB, then `computeDependencies` would run the
+ // `DependencyScanningAction` once for every time the input occurred in the
+ // CDB. Instead we split up the CDB into single command chunks to avoid this
+ // behavior.
+ assert(Compilations.getAllCompileCommands().size() == 1 &&
+ "Expected a compilation database with a single command!");
+ std::string Input = Compilations.getAllCompileCommands().front().Filename;
+
+ MakeDependencyPrinterConsumer Consumer;
+ auto Result = Worker.computeDependencies(Input, CWD, Compilations, Consumer);
+ if (Result)
+ return std::move(Result);
+ std::string Output;
+ Consumer.printDependencies(Output);
+ return Output;
+}
+
+llvm::Expected<FullDependenciesResult>
+DependencyScanningTool::getFullDependencies(
+ const tooling::CompilationDatabase &Compilations, StringRef CWD,
+ const llvm::StringSet<> &AlreadySeen) {
class FullDependencyPrinterConsumer : public DependencyConsumer {
public:
+ FullDependencyPrinterConsumer(const llvm::StringSet<> &AlreadySeen)
+ : AlreadySeen(AlreadySeen) {}
+
void handleFileDependency(const DependencyOutputOptions &Opts,
StringRef File) override {
- Dependencies.push_back(File);
+ Dependencies.push_back(std::string(File));
}
void handleModuleDependency(ModuleDeps MD) override {
@@ -90,55 +116,41 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
ContextHash = std::move(Hash);
}
- void printDependencies(std::string &S, StringRef MainFile) {
- // Sort the modules by name to get a deterministic order.
- std::vector<StringRef> Modules;
- for (auto &&Dep : ClangModuleDeps)
- Modules.push_back(Dep.first);
- std::sort(Modules.begin(), Modules.end());
+ FullDependenciesResult getFullDependencies() const {
+ FullDependencies FD;
- llvm::raw_string_ostream OS(S);
+ FD.ContextHash = std::move(ContextHash);
- using namespace llvm::json;
+ FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
- Array Imports;
- for (auto &&ModName : Modules) {
- auto &MD = ClangModuleDeps[ModName];
+ for (auto &&M : ClangModuleDeps) {
+ auto &MD = M.second;
if (MD.ImportedByMainFile)
- Imports.push_back(MD.ModuleName);
+ FD.ClangModuleDeps.push_back({MD.ModuleName, ContextHash});
}
- Array Mods;
- for (auto &&ModName : Modules) {
- auto &MD = ClangModuleDeps[ModName];
- Object Mod{
- {"name", MD.ModuleName},
- {"file-deps", toJSONSorted(MD.FileDeps)},
- {"clang-module-deps", toJSONSorted(MD.ClangModuleDeps)},
- {"clang-modulemap-file", MD.ClangModuleMapFile},
- };
- Mods.push_back(std::move(Mod));
- }
+ FullDependenciesResult FDR;
- Object O{
- {"input-file", MainFile},
- {"clang-context-hash", ContextHash},
- {"file-deps", Dependencies},
- {"clang-module-deps", std::move(Imports)},
- {"clang-modules", std::move(Mods)},
- };
+ for (auto &&M : ClangModuleDeps) {
+ // TODO: Avoid handleModuleDependency even being called for modules
+ // we've already seen.
+ if (AlreadySeen.count(M.first))
+ continue;
+ FDR.DiscoveredModules.push_back(std::move(M.second));
+ }
- S = llvm::formatv("{0:2},\n", Value(std::move(O))).str();
- return;
+ FDR.FullDeps = std::move(FD);
+ return FDR;
}
private:
std::vector<std::string> Dependencies;
std::unordered_map<std::string, ModuleDeps> ClangModuleDeps;
std::string ContextHash;
+ std::vector<std::string> OutputPaths;
+ const llvm::StringSet<> &AlreadySeen;
};
-
// We expect a single command here because if a source file occurs multiple
// times in the original CDB, then `computeDependencies` would run the
// `DependencyScanningAction` once for every time the input occurred in the
@@ -147,26 +159,13 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
assert(Compilations.getAllCompileCommands().size() == 1 &&
"Expected a compilation database with a single command!");
std::string Input = Compilations.getAllCompileCommands().front().Filename;
-
- if (Format == ScanningOutputFormat::Make) {
- MakeDependencyPrinterConsumer Consumer;
- auto Result =
- Worker.computeDependencies(Input, CWD, Compilations, Consumer);
- if (Result)
- return std::move(Result);
- std::string Output;
- Consumer.printDependencies(Output);
- return Output;
- } else {
- FullDependencyPrinterConsumer Consumer;
- auto Result =
- Worker.computeDependencies(Input, CWD, Compilations, Consumer);
- if (Result)
- return std::move(Result);
- std::string Output;
- Consumer.printDependencies(Output, Input);
- return Output;
- }
+
+ FullDependencyPrinterConsumer Consumer(AlreadySeen);
+ llvm::Error Result =
+ Worker.computeDependencies(Input, CWD, Compilations, Consumer);
+ if (Result)
+ return std::move(Result);
+ return Consumer.getFullDependencies();
}
} // end namespace dependencies
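The rewritten tool now separates Make-style output from the structured result: getFullDependencies() returns a FullDependenciesResult whose DiscoveredModules can seed the AlreadySeen set for later scans, and the compilation database is expected to hold exactly one command, matching the assertion above. A hedged usage sketch (the wrapper function and its name are hypothetical; the API calls are the ones added in this patch):
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Error.h"
using namespace clang::tooling;
using namespace clang::tooling::dependencies;
llvm::Error scanOne(DependencyScanningTool &Tool,
                    const CompilationDatabase &SingleCommandDB,
                    llvm::StringRef CWD, llvm::StringSet<> &Seen) {
  auto Result = Tool.getFullDependencies(SingleCommandDB, CWD, Seen);
  if (!Result)
    return Result.takeError();
  // Remember newly discovered modules so later scans skip re-reporting them.
  for (const ModuleDeps &MD : Result->DiscoveredModules)
    Seen.insert(MD.ModuleName);
  return llvm::Error::success();
}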
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index edf2cf8bd70f..32bbc578d2db 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -118,7 +118,7 @@ public:
.ExcludedConditionalDirectiveSkipMappings = PPSkipMappings;
}
- FileMgr->getFileSystemOpts().WorkingDir = WorkingDirectory;
+ FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
Compiler.setFileManager(FileMgr);
Compiler.createSourceManager(*FileMgr);
@@ -142,12 +142,17 @@ public:
Consumer));
break;
case ScanningOutputFormat::Full:
- Compiler.addDependencyCollector(
- std::make_shared<ModuleDepCollector>(Compiler, Consumer));
+ Compiler.addDependencyCollector(std::make_shared<ModuleDepCollector>(
+ std::move(Opts), Compiler, Consumer));
break;
}
- Consumer.handleContextHash(Compiler.getInvocation().getModuleHash());
+ // Consider different header search and diagnostic options to create
+ // different modules. This avoids the unsound aliasing of module PCMs.
+ //
+ // TODO: Implement diagnostic bucketing and header search pruning to reduce
+ // the impact of strict context hashing.
+ Compiler.getHeaderSearchOpts().ModulesStrictContextHash = true;
auto Action = std::make_unique<PreprocessOnlyAction>();
const bool Result = Compiler.ExecuteAction(*Action);
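Strict context hashing makes header search (and, per the comment above, eventually diagnostic) options part of the module hash, so two scans with different include paths cannot alias the same implicit PCM. A minimal sketch of the knob on a CompilerInvocation (the function name and extra path are hypothetical):
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "llvm/ADT/StringRef.h"
#include <string>
std::string moduleHashWithIncludePath(llvm::StringRef ExtraDir) {
  clang::CompilerInvocation CI;
  CI.getHeaderSearchOpts().ModulesStrictContextHash = true;
  // Under strict hashing this search path participates in the hash.
  CI.getHeaderSearchOpts().AddPath(ExtraDir, clang::frontend::Angled,
                                   /*IsFramework=*/false,
                                   /*IgnoreSysRoot=*/true);
  return CI.getModuleHash();
}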
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 422940047f2d..4f6eff799f22 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -17,12 +17,60 @@ using namespace clang;
using namespace tooling;
using namespace dependencies;
+std::vector<std::string> ModuleDeps::getFullCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
+ std::vector<std::string> Ret = NonPathCommandLine;
+
+ // TODO: Build full command line. That also means capturing the original
+ // command line into NonPathCommandLine.
+
+ dependencies::detail::appendCommonModuleArguments(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+
+ return Ret;
+}
+
+void dependencies::detail::appendCommonModuleArguments(
+ llvm::ArrayRef<ClangModuleDep> Modules,
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps,
+ std::vector<std::string> &Result) {
+ llvm::StringSet<> AlreadyAdded;
+
+ std::function<void(llvm::ArrayRef<ClangModuleDep>)> AddArgs =
+ [&](llvm::ArrayRef<ClangModuleDep> Modules) {
+ for (const ClangModuleDep &CMD : Modules) {
+ if (!AlreadyAdded.insert(CMD.ModuleName + CMD.ContextHash).second)
+ continue;
+ const ModuleDeps &M = LookupModuleDeps(CMD);
+ // Depth first traversal.
+ AddArgs(M.ClangModuleDeps);
+ Result.push_back(("-fmodule-file=" + LookupPCMPath(CMD)).str());
+ if (!M.ClangModuleMapFile.empty()) {
+ Result.push_back("-fmodule-map-file=" + M.ClangModuleMapFile);
+ }
+ }
+ };
+
+ Result.push_back("-fno-implicit-modules");
+ Result.push_back("-fno-implicit-module-maps");
+ AddArgs(Modules);
+}
+
void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) {
if (Reason != PPCallbacks::EnterFile)
return;
+
+ // This has to be delayed as the context hash can change at the start of
+ // `CompilerInstance::ExecuteAction`.
+ if (MDC.ContextHash.empty()) {
+ MDC.ContextHash = Instance.getInvocation().getModuleHash();
+ MDC.Consumer.handleContextHash(MDC.ContextHash);
+ }
SourceManager &SM = Instance.getSourceManager();
@@ -37,7 +85,7 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
StringRef FileName =
llvm::sys::path::remove_leading_dotslash(File->getName());
- MDC.MainDeps.push_back(FileName);
+ MDC.MainDeps.push_back(std::string(FileName));
}
void ModuleDepCollectorPP::InclusionDirective(
@@ -48,9 +96,18 @@ void ModuleDepCollectorPP::InclusionDirective(
if (!File && !Imported) {
// This is a non-modular include that HeaderSearch failed to find. Add it
// here as `FileChanged` will never see it.
- MDC.MainDeps.push_back(FileName);
+ MDC.MainDeps.push_back(std::string(FileName));
}
+ handleImport(Imported);
+}
+void ModuleDepCollectorPP::moduleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ const Module *Imported) {
+ handleImport(Imported);
+}
+
+void ModuleDepCollectorPP::handleImport(const Module *Imported) {
if (!Imported)
return;
@@ -61,8 +118,8 @@ void ModuleDepCollectorPP::InclusionDirective(
void ModuleDepCollectorPP::EndOfMainFile() {
FileID MainFileID = Instance.getSourceManager().getMainFileID();
- MDC.MainFile =
- Instance.getSourceManager().getFileEntryForID(MainFileID)->getName();
+ MDC.MainFile = std::string(
+ Instance.getSourceManager().getFileEntryForID(MainFileID)->getName());
for (const Module *M : DirectDeps) {
handleTopLevelModule(M);
@@ -71,9 +128,8 @@ void ModuleDepCollectorPP::EndOfMainFile() {
for (auto &&I : MDC.Deps)
MDC.Consumer.handleModuleDependency(I.second);
- DependencyOutputOptions Opts;
for (auto &&I : MDC.MainDeps)
- MDC.Consumer.handleFileDependency(Opts, I);
+ MDC.Consumer.handleFileDependency(*MDC.Opts, I);
}
void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
@@ -92,9 +148,9 @@ void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
.getModuleMap()
.getContainingModuleMapFile(M);
- MD.ClangModuleMapFile = ModuleMap ? ModuleMap->getName() : "";
+ MD.ClangModuleMapFile = std::string(ModuleMap ? ModuleMap->getName() : "");
MD.ModuleName = M->getFullModuleName();
- MD.ModulePCMPath = M->getASTFile()->getName();
+ MD.ImplicitModulePCMPath = std::string(M->getASTFile()->getName());
MD.ContextHash = MDC.ContextHash;
serialization::ModuleFile *MF =
MDC.Instance.getASTReader()->getModuleManager().lookup(M->getASTFile());
@@ -103,30 +159,37 @@ void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
MD.FileDeps.insert(IF.getFile()->getName());
});
- addAllSubmoduleDeps(M, MD);
+ llvm::DenseSet<const Module *> AddedModules;
+ addAllSubmoduleDeps(M, MD, AddedModules);
}
-void ModuleDepCollectorPP::addAllSubmoduleDeps(const Module *M,
- ModuleDeps &MD) {
- addModuleDep(M, MD);
+void ModuleDepCollectorPP::addAllSubmoduleDeps(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
+ addModuleDep(M, MD, AddedModules);
for (const Module *SubM : M->submodules())
- addAllSubmoduleDeps(SubM, MD);
+ addAllSubmoduleDeps(SubM, MD, AddedModules);
}
-void ModuleDepCollectorPP::addModuleDep(const Module *M, ModuleDeps &MD) {
+void ModuleDepCollectorPP::addModuleDep(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
for (const Module *Import : M->Imports) {
if (Import->getTopLevelModule() != M->getTopLevelModule()) {
- MD.ClangModuleDeps.insert(Import->getTopLevelModuleName());
+ if (AddedModules.insert(Import->getTopLevelModule()).second)
+ MD.ClangModuleDeps.push_back(
+ {std::string(Import->getTopLevelModuleName()),
+ Instance.getInvocation().getModuleHash()});
handleTopLevelModule(Import->getTopLevelModule());
}
}
}
-ModuleDepCollector::ModuleDepCollector(CompilerInstance &I,
- DependencyConsumer &C)
- : Instance(I), Consumer(C), ContextHash(I.getInvocation().getModuleHash()) {
-}
+ModuleDepCollector::ModuleDepCollector(
+ std::unique_ptr<DependencyOutputOptions> Opts, CompilerInstance &I,
+ DependencyConsumer &C)
+ : Instance(I), Consumer(C), Opts(std::move(Opts)) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(Instance, *this));
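appendCommonModuleArguments disables implicit modules and then walks the dependency graph depth-first, so each module's -fmodule-file= (and -fmodule-map-file=, when a map file is known) appears after the flags of its own dependencies, emitted once per module. A hedged sketch of driving it with invented lookups, assuming the helper is declared in ModuleDepCollector.h as the definitions above suggest; the graph and PCM path choice are illustrative:
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "llvm/ADT/ArrayRef.h"
#include <map>
#include <string>
#include <vector>
using namespace clang::tooling::dependencies;
std::vector<std::string>
flagsForMain(const std::map<std::string, ModuleDeps> &Graph,
             llvm::ArrayRef<ClangModuleDep> DirectDeps) {
  std::vector<std::string> Args;
  detail::appendCommonModuleArguments(
      DirectDeps,
      /*LookupPCMPath=*/
      [&](ClangModuleDep CMD) -> llvm::StringRef {
        // Reuse the PCM path recorded while scanning (hypothetical choice).
        return Graph.at(CMD.ModuleName).ImplicitModulePCMPath;
      },
      /*LookupModuleDeps=*/
      [&](ClangModuleDep CMD) -> const ModuleDeps & {
        return Graph.at(CMD.ModuleName);
      },
      Args);
  // Args now starts with -fno-implicit-modules / -fno-implicit-module-maps,
  // followed by the per-module flags in dependency-first order.
  return Args;
}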
diff --git a/contrib/llvm-project/clang/lib/Tooling/Execution.cpp b/contrib/llvm-project/clang/lib/Tooling/Execution.cpp
index c39a4fcdac82..247b260b97ed 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Execution.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Execution.cpp
@@ -63,18 +63,16 @@ createExecutorFromCommandLineArgsImpl(int &argc, const char **argv,
/*Overview=*/Overview);
if (!OptionsParser)
return OptionsParser.takeError();
- for (auto I = ToolExecutorPluginRegistry::begin(),
- E = ToolExecutorPluginRegistry::end();
- I != E; ++I) {
- if (I->getName() != ExecutorName) {
+ for (const auto &TEPlugin : ToolExecutorPluginRegistry::entries()) {
+ if (TEPlugin.getName() != ExecutorName) {
continue;
}
- std::unique_ptr<ToolExecutorPlugin> Plugin(I->instantiate());
+ std::unique_ptr<ToolExecutorPlugin> Plugin(TEPlugin.instantiate());
llvm::Expected<std::unique_ptr<ToolExecutor>> Executor =
Plugin->create(*OptionsParser);
if (!Executor) {
return llvm::make_error<llvm::StringError>(
- llvm::Twine("Failed to create '") + I->getName() +
+ llvm::Twine("Failed to create '") + TEPlugin.getName() +
"': " + llvm::toString(Executor.takeError()) + "\n",
llvm::inconvertibleErrorCode());
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index 99298316718b..f1ab2aed54c0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -12,6 +12,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
diff --git a/contrib/llvm-project/clang/lib/Tooling/FileMatchTrie.cpp b/contrib/llvm-project/clang/lib/Tooling/FileMatchTrie.cpp
index 7df5a16fd88f..88dea6bb6c9f 100644
--- a/contrib/llvm-project/clang/lib/Tooling/FileMatchTrie.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/FileMatchTrie.cpp
@@ -63,7 +63,7 @@ public:
return;
if (Path.empty()) {
// This is an empty leaf. Store NewPath and return.
- Path = NewPath;
+ Path = std::string(NewPath);
return;
}
if (Children.empty()) {
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 37a0816c803e..681fcc5c762a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/Optional.h"
@@ -319,7 +320,7 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
(!IsAngled && StringRef(Inc.Name).startswith("\"")))
return llvm::None;
std::string Quoted =
- llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName);
+ std::string(llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName));
StringRef QuotedName = Quoted;
int Priority = Categories.getIncludePriority(
QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
@@ -336,7 +337,8 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
}
}
assert(InsertOffset <= Code.size());
- std::string NewInclude = llvm::formatv("#include {0}\n", QuotedName);
+ std::string NewInclude =
+ std::string(llvm::formatv("#include {0}\n", QuotedName));
// When inserting headers at end of the code, also append '\n' to the code
// if it does not end with '\n'.
// FIXME: when inserting multiple #includes at the end of code, only one
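llvm::formatv returns a lazy formatv_object, and the patch now materializes it with an explicit std::string construction. A standalone equivalent of the quoting step above (the function name is illustrative; .str() would work as well):
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include <string>
std::string quoteHeader(llvm::StringRef Name, bool Angled) {
  // Builds "<Name>" or "\"Name\"" exactly as the insertion code above does.
  return std::string(llvm::formatv(Angled ? "<{0}>" : "\"{0}\"", Name));
}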
diff --git a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index 2cc819a498c6..fa61560e5123 100644
--- a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -114,6 +114,9 @@ static types::ID foldType(types::ID Lang) {
case types::TY_ObjCXX:
case types::TY_ObjCXXHeader:
return types::TY_ObjCXX;
+ case types::TY_CUDA:
+ case types::TY_CUDA_DEVICE:
+ return types::TY_CUDA;
default:
return types::TY_INVALID;
}
@@ -203,7 +206,7 @@ struct TransferableCommand {
// Produce a CompileCommand for \p filename, based on this one.
CompileCommand transferTo(StringRef Filename) const {
CompileCommand Result = Cmd;
- Result.Filename = Filename;
+ Result.Filename = std::string(Filename);
bool TypeCertain;
auto TargetType = guessType(Filename, &TypeCertain);
// If the filename doesn't determine the language (.h), transfer with -x.
@@ -217,7 +220,7 @@ struct TransferableCommand {
if (ClangCLMode) {
const StringRef Flag = toCLFlag(TargetType);
if (!Flag.empty())
- Result.CommandLine.push_back(Flag);
+ Result.CommandLine.push_back(std::string(Flag));
} else {
Result.CommandLine.push_back("-x");
Result.CommandLine.push_back(types::getTypeName(TargetType));
@@ -230,7 +233,7 @@ struct TransferableCommand {
llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
LangStandard::getLangStandardForKind(Std).getName()).str());
}
- Result.CommandLine.push_back(Filename);
+ Result.CommandLine.push_back(std::string(Filename));
Result.Heuristic = "inferred from " + Cmd.Filename;
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 04dd4dbf6248..4af361f538cb 100644
--- a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -305,7 +305,7 @@ nodeToCommandLine(JSONCommandLineSyntax Syntax,
Arguments = unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
else
for (const auto *Node : Nodes)
- Arguments.push_back(Node->getValue(Storage));
+ Arguments.push_back(std::string(Node->getValue(Storage)));
// There may be multiple wrappers: using distcc and ccache together is common.
while (unwrapCommand(Arguments))
;
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
index 64e57af59011..af1eb491a20a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -13,7 +13,6 @@
using namespace clang;
using namespace tooling;
-using ast_type_traits::DynTypedNode;
namespace {
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
index 4cf63306d262..069e9c1eb36e 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -200,10 +200,16 @@ AtomicChange::AtomicChange(const SourceManager &SM,
FullKeyPosition.getSpellingLoc().getDecomposedLoc();
const FileEntry *FE = SM.getFileEntryForID(FileIDAndOffset.first);
assert(FE && "Cannot create AtomicChange with invalid location.");
- FilePath = FE->getName();
+ FilePath = std::string(FE->getName());
Key = FilePath + ":" + std::to_string(FileIDAndOffset.second);
}
+AtomicChange::AtomicChange(const SourceManager &SM, SourceLocation KeyPosition,
+ llvm::Any M)
+ : AtomicChange(SM, KeyPosition) {
+ Metadata = std::move(M);
+}
+
AtomicChange::AtomicChange(std::string Key, std::string FilePath,
std::string Error,
std::vector<std::string> InsertedHeaders,
@@ -284,11 +290,11 @@ llvm::Error AtomicChange::insert(const SourceManager &SM, SourceLocation Loc,
}
void AtomicChange::addHeader(llvm::StringRef Header) {
- InsertedHeaders.push_back(Header);
+ InsertedHeaders.push_back(std::string(Header));
}
void AtomicChange::removeHeader(llvm::StringRef Header) {
- RemovedHeaders.push_back(Header);
+ RemovedHeaders.push_back(std::string(Header));
}
llvm::Expected<std::string>
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
index b0634912e3fc..72598601d47d 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
@@ -170,7 +170,8 @@ static void convertChangesToFileReplacements(
std::map<std::string, tooling::Replacements> *FileToReplaces) {
for (const auto &AtomicChange : AtomicChanges) {
for (const auto &Replace : AtomicChange.getReplacements()) {
- llvm::Error Err = (*FileToReplaces)[Replace.getFilePath()].add(Replace);
+ llvm::Error Err =
+ (*FileToReplaces)[std::string(Replace.getFilePath())].add(Replace);
if (Err) {
llvm::errs() << "Renaming failed in " << Replace.getFilePath() << "! "
<< llvm::toString(std::move(Err)) << "\n";
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 55111202ac88..23f567f1c9ec 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/AST.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Index/USRGeneration.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Refactoring/RecursiveSymbolVisitor.h"
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index d966a5ef23c2..43dc32e158d3 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -126,15 +126,24 @@ private:
addUSRsOfCtorDtors(TemplateDecl->getTemplatedDecl());
}
- void addUSRsOfCtorDtors(const CXXRecordDecl *RecordDecl) {
- RecordDecl = RecordDecl->getDefinition();
+ void addUSRsOfCtorDtors(const CXXRecordDecl *RD) {
+ const auto* RecordDecl = RD->getDefinition();
// Skip if the CXXRecordDecl doesn't have definition.
- if (!RecordDecl)
+ if (!RecordDecl) {
+ USRSet.insert(getUSRForDecl(RD));
return;
+ }
for (const auto *CtorDecl : RecordDecl->ctors())
USRSet.insert(getUSRForDecl(CtorDecl));
+ // Add template constructor decls, they are not in ctors() unfortunately.
+ if (RecordDecl->hasUserDeclaredConstructor())
+ for (const auto *D : RecordDecl->decls())
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ if (const auto *Ctor =
+ dyn_cast<CXXConstructorDecl>(FTD->getTemplatedDecl()))
+ USRSet.insert(getUSRForDecl(Ctor));
USRSet.insert(getUSRForDecl(RecordDecl->getDestructor()));
USRSet.insert(getUSRForDecl(RecordDecl));
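The new constructor handling matters for classes whose constructors are templates: those are FunctionTemplateDecls and do not show up in CXXRecordDecl::ctors(), yet renaming the class must still update them. An illustrative input (not from the patch):
// Renaming `Widget` must also rewrite the templated constructor below, which
// is only reachable through decls(), not ctors().
class Widget {
public:
  Widget() = default;
  template <typename T> Widget(T value);
};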
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 408e184f5bf5..dfc319dd0639 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -15,6 +15,7 @@
#include "clang/Tooling/Refactoring/Rename/USRLocFinder.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -426,8 +427,7 @@ public:
StartLoc,
EndLoc,
TemplateSpecType->getTemplateName().getAsTemplateDecl(),
- getClosestAncestorDecl(
- ast_type_traits::DynTypedNode::create(TargetLoc)),
+ getClosestAncestorDecl(DynTypedNode::create(TargetLoc)),
GetNestedNameForType(TargetLoc),
/*IgnorePrefixQualifers=*/false};
RenameInfos.push_back(Info);
@@ -466,8 +466,7 @@ private:
// FIXME: figure out how to handle it when there are multiple parents.
if (Parents.size() != 1)
return nullptr;
- if (ast_type_traits::ASTNodeKind::getFromNodeKind<Decl>().isBaseOf(
- Parents[0].getNodeKind()))
+ if (ASTNodeKind::getFromNodeKind<Decl>().isBaseOf(Parents[0].getNodeKind()))
return Parents[0].template get<Decl>();
return getClosestAncestorDecl(Parents[0]);
}
@@ -536,7 +535,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
// Get the name without prefix qualifiers from NewName.
size_t LastColonPos = NewName.find_last_of(':');
if (LastColonPos != std::string::npos)
- ReplacedName = NewName.substr(LastColonPos + 1);
+ ReplacedName = std::string(NewName.substr(LastColonPos + 1));
} else {
if (RenameInfo.FromDecl && RenameInfo.Context) {
if (!llvm::isa<clang::TranslationUnitDecl>(
diff --git a/contrib/llvm-project/clang/lib/Tooling/RefactoringCallbacks.cpp b/contrib/llvm-project/clang/lib/Tooling/RefactoringCallbacks.cpp
index 919b83beb357..e3fc91afeb59 100644
--- a/contrib/llvm-project/clang/lib/Tooling/RefactoringCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/RefactoringCallbacks.cpp
@@ -50,8 +50,8 @@ public:
for (const auto &Callback : Refactoring.Callbacks) {
for (const auto &Replacement : Callback->getReplacements()) {
llvm::Error Err =
- Refactoring.FileToReplaces[Replacement.getFilePath()].add(
- Replacement);
+ Refactoring.FileToReplaces[std::string(Replacement.getFilePath())]
+ .add(Replacement);
if (Err) {
llvm::errs() << "Skipping replacement " << Replacement.toString()
<< " due to this error:\n"
@@ -83,7 +83,7 @@ static Replacement replaceStmtWithStmt(SourceManager &Sources, const Stmt &From,
}
ReplaceStmtWithText::ReplaceStmtWithText(StringRef FromId, StringRef ToText)
- : FromId(FromId), ToText(ToText) {}
+ : FromId(std::string(FromId)), ToText(std::string(ToText)) {}
void ReplaceStmtWithText::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -101,7 +101,7 @@ void ReplaceStmtWithText::run(
}
ReplaceStmtWithStmt::ReplaceStmtWithStmt(StringRef FromId, StringRef ToId)
- : FromId(FromId), ToId(ToId) {}
+ : FromId(std::string(FromId)), ToId(std::string(ToId)) {}
void ReplaceStmtWithStmt::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -121,7 +121,7 @@ void ReplaceStmtWithStmt::run(
ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
bool PickTrueBranch)
- : Id(Id), PickTrueBranch(PickTrueBranch) {}
+ : Id(std::string(Id)), PickTrueBranch(PickTrueBranch) {}
void ReplaceIfStmtWithItsBody::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -153,7 +153,7 @@ void ReplaceIfStmtWithItsBody::run(
ReplaceNodeWithTemplate::ReplaceNodeWithTemplate(
llvm::StringRef FromId, std::vector<TemplateElement> Template)
- : FromId(FromId), Template(std::move(Template)) {}
+ : FromId(std::string(FromId)), Template(std::move(Template)) {}
llvm::Expected<std::unique_ptr<ReplaceNodeWithTemplate>>
ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
@@ -172,8 +172,8 @@ ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
ToTemplate.substr(Index),
llvm::inconvertibleErrorCode());
}
- std::string SourceNodeName =
- ToTemplate.substr(Index + 2, EndOfIdentifier - Index - 2);
+ std::string SourceNodeName = std::string(
+ ToTemplate.substr(Index + 2, EndOfIdentifier - Index - 2));
ParsedTemplate.push_back(
TemplateElement{TemplateElement::Identifier, SourceNodeName});
Index = EndOfIdentifier + 1;
@@ -185,9 +185,9 @@ ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
}
} else {
size_t NextIndex = ToTemplate.find('$', Index + 1);
- ParsedTemplate.push_back(
- TemplateElement{TemplateElement::Literal,
- ToTemplate.substr(Index, NextIndex - Index)});
+ ParsedTemplate.push_back(TemplateElement{
+ TemplateElement::Literal,
+ std::string(ToTemplate.substr(Index, NextIndex - Index))});
Index = NextIndex;
}
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
index aa8844771d37..1f192180ec45 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -6,20 +6,32 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Syntax/BuildTree.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
#include "clang/Tooling/Syntax/Nodes.h"
#include "clang/Tooling/Syntax/Tokens.h"
#include "clang/Tooling/Syntax/Tree.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
@@ -27,6 +39,7 @@
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
#include <map>
using namespace clang;
@@ -34,6 +47,207 @@ using namespace clang;
LLVM_ATTRIBUTE_UNUSED
static bool isImplicitExpr(clang::Expr *E) { return E->IgnoreImplicit() != E; }
+namespace {
+/// Get start location of the Declarator from the TypeLoc.
+/// E.g.:
+/// loc of `(` in `int (a)`
+/// loc of `*` in `int *(a)`
+/// loc of the first `(` in `int (*a)(int)`
+/// loc of the `*` in `int *(a)(int)`
+/// loc of the first `*` in `const int *const *volatile a;`
+///
+/// It is non-trivial to get the start location because TypeLocs are stored
+/// inside out. In the example above `*volatile` is the TypeLoc returned
+/// by `Decl.getTypeSourceInfo()`, and `*const` is what `.getPointeeLoc()`
+/// returns.
+struct GetStartLoc : TypeLocVisitor<GetStartLoc, SourceLocation> {
+ SourceLocation VisitParenTypeLoc(ParenTypeLoc T) {
+ auto L = Visit(T.getInnerLoc());
+ if (L.isValid())
+ return L;
+ return T.getLParenLoc();
+ }
+
+ // Types spelled in the prefix part of the declarator.
+ SourceLocation VisitPointerTypeLoc(PointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitMemberPointerTypeLoc(MemberPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitBlockPointerTypeLoc(BlockPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitReferenceTypeLoc(ReferenceTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ // All other cases are not important, as they are either part of declaration
+ // specifiers (e.g. inheritors of TypeSpecTypeLoc) or introduce modifiers on
+ // existing declarators (e.g. QualifiedTypeLoc). They cannot start the
+ // declarator themselves, but their underlying type can.
+ SourceLocation VisitTypeLoc(TypeLoc T) {
+ auto N = T.getNextTypeLoc();
+ if (!N)
+ return SourceLocation();
+ return Visit(N);
+ }
+
+ SourceLocation VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc T) {
+ if (T.getTypePtr()->hasTrailingReturn())
+ return SourceLocation(); // avoid recursing into the suffix of declarator.
+ return VisitTypeLoc(T);
+ }
+
+private:
+ template <class PtrLoc> SourceLocation HandlePointer(PtrLoc T) {
+ auto L = Visit(T.getPointeeLoc());
+ if (L.isValid())
+ return L;
+ return T.getLocalSourceRange().getBegin();
+ }
+};
+} // namespace
+
+static syntax::NodeKind getOperatorNodeKind(const CXXOperatorCallExpr &E) {
+ switch (E.getOperator()) {
+ // Comparison
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_Less:
+ case OO_LessEqual:
+ case OO_Spaceship:
+ // Assignment
+ case OO_Equal:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_AmpEqual:
+ // Binary computation
+ case OO_Slash:
+ case OO_Percent:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_ArrowStar:
+ case OO_Comma:
+ return syntax::NodeKind::BinaryOperatorExpression;
+ case OO_Tilde:
+ case OO_Exclaim:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ // Prefix/Postfix increment/decrement
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ switch (E.getNumArgs()) {
+ case 1:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ case 2:
+ return syntax::NodeKind::PostfixUnaryOperatorExpression;
+ default:
+ llvm_unreachable("Invalid number of arguments for operator");
+ }
+ // Operators that can be unary or binary
+ case OO_Plus:
+ case OO_Minus:
+ case OO_Star:
+ case OO_Amp:
+ switch (E.getNumArgs()) {
+ case 1:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ case 2:
+ return syntax::NodeKind::BinaryOperatorExpression;
+ default:
+ llvm_unreachable("Invalid number of arguments for operator");
+ }
+ return syntax::NodeKind::BinaryOperatorExpression;
+ // Not yet supported by SyntaxTree
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Coawait:
+ case OO_Call:
+ case OO_Subscript:
+ case OO_Arrow:
+ return syntax::NodeKind::UnknownExpression;
+ case OO_Conditional: // not overloadable
+ case NUM_OVERLOADED_OPERATORS:
+ case OO_None:
+ llvm_unreachable("Not an overloadable operator");
+ }
+ llvm_unreachable("Unknown OverloadedOperatorKind enum");
+}
+
+/// Gets the range of declarator as defined by the C++ grammar. E.g.
+/// `int a;` -> range of `a`,
+/// `int *a;` -> range of `*a`,
+/// `int a[10];` -> range of `a[10]`,
+/// `int a[1][2][3];` -> range of `a[1][2][3]`,
+/// `int *a = nullptr` -> range of `*a = nullptr`.
+/// FIXME: \p Name must be a source range, e.g. for `operator+`.
+static SourceRange getDeclaratorRange(const SourceManager &SM, TypeLoc T,
+ SourceLocation Name,
+ SourceRange Initializer) {
+ SourceLocation Start = GetStartLoc().Visit(T);
+ SourceLocation End = T.getSourceRange().getEnd();
+ assert(End.isValid());
+ if (Name.isValid()) {
+ if (Start.isInvalid())
+ Start = Name;
+ if (SM.isBeforeInTranslationUnit(End, Name))
+ End = Name;
+ }
+ if (Initializer.isValid()) {
+ auto InitializerEnd = Initializer.getEnd();
+ assert(SM.isBeforeInTranslationUnit(End, InitializerEnd) ||
+ End == InitializerEnd);
+ End = InitializerEnd;
+ }
+ return SourceRange(Start, End);
+}
+
+namespace {
+/// All AST hierarchy roots that can be represented as pointers.
+using ASTPtr = llvm::PointerUnion<Stmt *, Decl *>;
+/// Maintains a mapping from AST to syntax tree nodes. This class will get more
+/// complicated as we support more kinds of AST nodes, e.g. TypeLocs.
+/// FIXME: expose this as public API.
+class ASTToSyntaxMapping {
+public:
+ void add(ASTPtr From, syntax::Tree *To) {
+ assert(To != nullptr);
+ assert(!From.isNull());
+
+ bool Added = Nodes.insert({From, To}).second;
+ (void)Added;
+ assert(Added && "mapping added twice");
+ }
+
+ syntax::Tree *find(ASTPtr P) const { return Nodes.lookup(P); }
+
+private:
+ llvm::DenseMap<ASTPtr, syntax::Tree *> Nodes;
+};
+} // namespace
+
/// A helper class for constructing the syntax tree while traversing a clang
/// AST.
///
@@ -57,30 +271,44 @@ public:
}
llvm::BumpPtrAllocator &allocator() { return Arena.allocator(); }
+ const SourceManager &sourceManager() const { return Arena.sourceManager(); }
/// Populate children for \p New node, assuming it covers tokens from \p
/// Range.
- void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New);
-
- /// Must be called with the range of each `DeclaratorDecl`. Ensures the
- /// corresponding declarator nodes are covered by `SimpleDeclaration`.
- void noticeDeclaratorRange(llvm::ArrayRef<syntax::Token> Range);
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
+ ASTPtr From) {
+ assert(New);
+ Pending.foldChildren(Arena, Range, New);
+ if (From)
+ Mapping.add(From, New);
+ }
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
+ TypeLoc L) {
+ // FIXME: add mapping for TypeLocs
+ foldNode(Range, New, nullptr);
+ }
/// Notifies that we should not consume trailing semicolon when computing
/// token range of \p D.
- void noticeDeclaratorWithoutSemicolon(Decl *D);
+ void noticeDeclWithoutSemicolon(Decl *D);
/// Mark the \p Child node with a corresponding \p Role. All marked children
/// should be consumed by foldNode.
- /// (!) when called on expressions (clang::Expr is derived from clang::Stmt),
- /// wraps expressions into expression statement.
+ /// When called on expressions (clang::Expr is derived from clang::Stmt),
+ /// wraps expressions into expression statement.
void markStmtChild(Stmt *Child, NodeRole Role);
/// Should be called for expressions in non-statement position to avoid
/// wrapping into expression statement.
void markExprChild(Expr *Child, NodeRole Role);
-
/// Set role for a token starting at \p Loc.
void markChildToken(SourceLocation Loc, NodeRole R);
+ /// Set role for \p T.
+ void markChildToken(const syntax::Token *T, NodeRole R);
+
+ /// Set role for \p N.
+ void markChild(syntax::Node *N, NodeRole R);
+ /// Set role for the syntax node matching \p N.
+ void markChild(ASTPtr N, NodeRole R);
/// Finish building the tree and consume the root node.
syntax::TranslationUnit *finalize() && {
@@ -97,8 +325,16 @@ public:
return TU;
}
- /// getRange() finds the syntax tokens corresponding to the passed source
- /// locations.
+ /// Finds a token starting at \p L. The token must exist if \p L is valid.
+ const syntax::Token *findToken(SourceLocation L) const;
+
+ /// Finds the syntax tokens corresponding to the \p SourceRange.
+ llvm::ArrayRef<syntax::Token> getRange(SourceRange Range) const {
+ assert(Range.isValid());
+ return getRange(Range.getBegin(), Range.getEnd());
+ }
+
+ /// Finds the syntax tokens corresponding to the passed source locations.
/// \p First is the start position of the first token and \p Last is the start
/// position of the last token.
llvm::ArrayRef<syntax::Token> getRange(SourceLocation First,
@@ -109,23 +345,62 @@ public:
Arena.sourceManager().isBeforeInTranslationUnit(First, Last));
return llvm::makeArrayRef(findToken(First), std::next(findToken(Last)));
}
- llvm::ArrayRef<syntax::Token> getRange(const Decl *D) const {
- auto Tokens = getRange(D->getBeginLoc(), D->getEndLoc());
- if (llvm::isa<NamespaceDecl>(D))
- return Tokens;
- if (DeclsWithoutSemicolons.count(D))
- return Tokens;
- // FIXME: do not consume trailing semicolon on function definitions.
- // Most declarations own a semicolon in syntax trees, but not in clang AST.
- return withTrailingSemicolon(Tokens);
+
+ llvm::ArrayRef<syntax::Token>
+ getTemplateRange(const ClassTemplateSpecializationDecl *D) const {
+ auto Tokens = getRange(D->getSourceRange());
+ return maybeAppendSemicolon(Tokens, D);
}
+
+ /// Returns true if \p D is the last declarator in a chain and is thus
+ /// responsible for creating SimpleDeclaration for the whole chain.
+ template <class T>
+ bool isResponsibleForCreatingDeclaration(const T *D) const {
+ static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
+ std::is_base_of<TypedefNameDecl, T>::value),
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
+
+ const Decl *Next = D->getNextDeclInContext();
+
+ // There's no next sibling, this one is responsible.
+ if (Next == nullptr) {
+ return true;
+ }
+ const auto *NextT = llvm::dyn_cast<T>(Next);
+
+ // Next sibling is not the same type, this one is responsible.
+ if (NextT == nullptr) {
+ return true;
+ }
+ // Next sibling doesn't begin at the same loc, it must be a different
+ // declaration, so this declarator is responsible.
+ if (NextT->getBeginLoc() != D->getBeginLoc()) {
+ return true;
+ }
+
+ // NextT is a member of the same declaration, and we need the last member to
+ // create declaration. This one is not responsible.
+ return false;
+ }
+
+ llvm::ArrayRef<syntax::Token> getDeclarationRange(Decl *D) {
+ llvm::ArrayRef<clang::syntax::Token> Tokens;
+ // We want to drop the template parameters for specializations.
+ if (const auto *S = llvm::dyn_cast<TagDecl>(D))
+ Tokens = getRange(S->TypeDecl::getBeginLoc(), S->getEndLoc());
+ else
+ Tokens = getRange(D->getSourceRange());
+ return maybeAppendSemicolon(Tokens, D);
+ }
+
llvm::ArrayRef<syntax::Token> getExprRange(const Expr *E) const {
- return getRange(E->getBeginLoc(), E->getEndLoc());
+ return getRange(E->getSourceRange());
}
+
/// Find the adjusted range for the statement, consuming the trailing
/// semicolon when needed.
llvm::ArrayRef<syntax::Token> getStmtRange(const Stmt *S) const {
- auto Tokens = getRange(S->getBeginLoc(), S->getEndLoc());
+ auto Tokens = getRange(S->getSourceRange());
if (isa<CompoundStmt>(S))
return Tokens;
@@ -138,17 +413,31 @@ public:
private:
llvm::ArrayRef<syntax::Token>
+ maybeAppendSemicolon(llvm::ArrayRef<syntax::Token> Tokens,
+ const Decl *D) const {
+ if (llvm::isa<NamespaceDecl>(D))
+ return Tokens;
+ if (DeclsWithoutSemicolons.count(D))
+ return Tokens;
+ // FIXME: do not consume trailing semicolon on function definitions.
+ // Most declarations own a semicolon in syntax trees, but not in clang AST.
+ return withTrailingSemicolon(Tokens);
+ }
+
+ llvm::ArrayRef<syntax::Token>
withTrailingSemicolon(llvm::ArrayRef<syntax::Token> Tokens) const {
assert(!Tokens.empty());
assert(Tokens.back().kind() != tok::eof);
- // (!) we never consume 'eof', so looking at the next token is ok.
+ // We never consume 'eof', so looking at the next token is ok.
if (Tokens.back().kind() != tok::semi && Tokens.end()->kind() == tok::semi)
return llvm::makeArrayRef(Tokens.begin(), Tokens.end() + 1);
return Tokens;
}
- /// Finds a token starting at \p L. The token must exist.
- const syntax::Token *findToken(SourceLocation L) const;
+ void setRole(syntax::Node *N, NodeRole R) {
+ assert(N->role() == NodeRole::Detached);
+ N->setRole(R);
+ }
/// A collection of trees covering the input tokens.
/// When created, each tree corresponds to a single token in the file.
@@ -166,12 +455,10 @@ private:
auto *L = new (A.allocator()) syntax::Leaf(&T);
L->Original = true;
L->CanModify = A.tokenBuffer().spelledForExpanded(T).hasValue();
- Trees.insert(Trees.end(), {&T, NodeAndRole{L}});
+ Trees.insert(Trees.end(), {&T, L});
}
}
- ~Forest() { assert(DelayedFolds.empty()); }
-
void assignRole(llvm::ArrayRef<syntax::Token> Range,
syntax::NodeRole Role) {
assert(!Range.empty());
@@ -181,56 +468,49 @@ private:
assert((std::next(It) == Trees.end() ||
std::next(It)->first == Range.end()) &&
"no child with the specified range");
- It->second.Role = Role;
+ assert(It->second->role() == NodeRole::Detached &&
+ "re-assigning role for a child");
+ It->second->setRole(Role);
}
/// Add \p Node to the forest and attach child nodes based on \p Tokens.
void foldChildren(const syntax::Arena &A,
llvm::ArrayRef<syntax::Token> Tokens,
syntax::Tree *Node) {
- // Execute delayed folds inside `Tokens`.
- auto BeginExecuted = DelayedFolds.lower_bound(Tokens.begin());
- auto It = BeginExecuted;
- for (; It != DelayedFolds.end() && It->second.End <= Tokens.end(); ++It)
- foldChildrenEager(A, llvm::makeArrayRef(It->first, It->second.End),
- It->second.Node);
- DelayedFolds.erase(BeginExecuted, It);
-
// Attach children to `Node`.
- foldChildrenEager(A, Tokens, Node);
- }
+ assert(Node->firstChild() == nullptr && "node already has children");
- /// Schedule a call to `foldChildren` that will only be executed when
- /// containing node is folded. The range of delayed nodes can be extended by
- /// calling `extendDelayedFold`. Only one delayed node for each starting
- /// token is allowed.
- void foldChildrenDelayed(llvm::ArrayRef<syntax::Token> Tokens,
- syntax::Tree *Node) {
- assert(!Tokens.empty());
- bool Inserted =
- DelayedFolds.insert({Tokens.begin(), DelayedFold{Tokens.end(), Node}})
- .second;
- (void)Inserted;
- assert(Inserted && "Multiple delayed folds start at the same token");
- }
+ auto *FirstToken = Tokens.begin();
+ auto BeginChildren = Trees.lower_bound(FirstToken);
- /// If there a delayed fold, starting at `ExtendedRange.begin()`, extends
- /// its endpoint to `ExtendedRange.end()` and returns true.
- /// Otherwise, returns false.
- bool extendDelayedFold(llvm::ArrayRef<syntax::Token> ExtendedRange) {
- assert(!ExtendedRange.empty());
- auto It = DelayedFolds.find(ExtendedRange.data());
- if (It == DelayedFolds.end())
- return false;
- assert(It->second.End <= ExtendedRange.end());
- It->second.End = ExtendedRange.end();
- return true;
+ assert((BeginChildren == Trees.end() ||
+ BeginChildren->first == FirstToken) &&
+ "fold crosses boundaries of existing subtrees");
+ auto EndChildren = Trees.lower_bound(Tokens.end());
+ assert(
+ (EndChildren == Trees.end() || EndChildren->first == Tokens.end()) &&
+ "fold crosses boundaries of existing subtrees");
+
+ // We need to go in reverse order, because we can only prepend.
+ for (auto It = EndChildren; It != BeginChildren; --It) {
+ auto *C = std::prev(It)->second;
+ if (C->role() == NodeRole::Detached)
+ C->setRole(NodeRole::Unknown);
+ Node->prependChildLowLevel(C);
+ }
+
+ // Mark that this node came from the AST and is backed by the source code.
+ Node->Original = true;
+ Node->CanModify = A.tokenBuffer().spelledForExpanded(Tokens).hasValue();
+
+ Trees.erase(BeginChildren, EndChildren);
+ Trees.insert({FirstToken, Node});
}
// EXPECTS: all tokens were consumed and are owned by a single root node.
syntax::Node *finalize() && {
assert(Trees.size() == 1);
- auto *Root = Trees.begin()->second.Node;
+ auto *Root = Trees.begin()->second;
Trees = {};
return Root;
}
@@ -243,66 +523,19 @@ private:
? (std::next(It)->first - It->first)
: A.tokenBuffer().expandedTokens().end() - It->first;
- R += llvm::formatv("- '{0}' covers '{1}'+{2} tokens\n",
- It->second.Node->kind(),
- It->first->text(A.sourceManager()), CoveredTokens);
- R += It->second.Node->dump(A);
+ R += std::string(llvm::formatv(
+ "- '{0}' covers '{1}'+{2} tokens\n", It->second->kind(),
+ It->first->text(A.sourceManager()), CoveredTokens));
+ R += It->second->dump(A);
}
return R;
}
private:
- /// Implementation detail of `foldChildren`, does acutal folding ignoring
- /// delayed folds.
- void foldChildrenEager(const syntax::Arena &A,
- llvm::ArrayRef<syntax::Token> Tokens,
- syntax::Tree *Node) {
- assert(Node->firstChild() == nullptr && "node already has children");
-
- auto *FirstToken = Tokens.begin();
- auto BeginChildren = Trees.lower_bound(FirstToken);
- assert((BeginChildren == Trees.end() ||
- BeginChildren->first == FirstToken) &&
- "fold crosses boundaries of existing subtrees");
- auto EndChildren = Trees.lower_bound(Tokens.end());
- assert(
- (EndChildren == Trees.end() || EndChildren->first == Tokens.end()) &&
- "fold crosses boundaries of existing subtrees");
-
- // (!) we need to go in reverse order, because we can only prepend.
- for (auto It = EndChildren; It != BeginChildren; --It)
- Node->prependChildLowLevel(std::prev(It)->second.Node,
- std::prev(It)->second.Role);
-
- // Mark that this node came from the AST and is backed by the source code.
- Node->Original = true;
- Node->CanModify = A.tokenBuffer().spelledForExpanded(Tokens).hasValue();
-
- Trees.erase(BeginChildren, EndChildren);
- Trees.insert({FirstToken, NodeAndRole(Node)});
- }
- /// A with a role that should be assigned to it when adding to a parent.
- struct NodeAndRole {
- explicit NodeAndRole(syntax::Node *Node)
- : Node(Node), Role(NodeRole::Unknown) {}
-
- syntax::Node *Node;
- NodeRole Role;
- };
-
/// Maps from the start token to a subtree starting at that token.
/// Keys in the map are pointers into the array of expanded tokens, so
/// pointer order corresponds to the order of preprocessor tokens.
- /// FIXME: storing the end tokens is redundant.
- /// FIXME: the key of a map is redundant, it is also stored in NodeForRange.
- std::map<const syntax::Token *, NodeAndRole> Trees;
-
- /// See documentation of `foldChildrenDelayed` for details.
- struct DelayedFold {
- const syntax::Token *End = nullptr;
- syntax::Tree *Node = nullptr;
- };
- std::map<const syntax::Token *, DelayedFold> DelayedFolds;
+ std::map<const syntax::Token *, syntax::Node *> Trees;
};
/// For debugging purposes.
@@ -314,49 +547,91 @@ private:
LocationToToken;
Forest Pending;
llvm::DenseSet<Decl *> DeclsWithoutSemicolons;
+ ASTToSyntaxMapping Mapping;
};
namespace {
class BuildTreeVisitor : public RecursiveASTVisitor<BuildTreeVisitor> {
public:
- explicit BuildTreeVisitor(ASTContext &Ctx, syntax::TreeBuilder &Builder)
- : Builder(Builder), LangOpts(Ctx.getLangOpts()) {}
+ explicit BuildTreeVisitor(ASTContext &Context, syntax::TreeBuilder &Builder)
+ : Builder(Builder), Context(Context) {}
bool shouldTraversePostOrder() const { return true; }
- bool WalkUpFromDeclaratorDecl(DeclaratorDecl *D) {
- // Ensure declarators are covered by SimpleDeclaration.
- Builder.noticeDeclaratorRange(Builder.getRange(D));
- // FIXME: build nodes for the declarator too.
- return true;
+ bool WalkUpFromDeclaratorDecl(DeclaratorDecl *DD) {
+ return processDeclaratorAndDeclaration(DD);
}
- bool WalkUpFromTypedefNameDecl(TypedefNameDecl *D) {
- // Also a declarator.
- Builder.noticeDeclaratorRange(Builder.getRange(D));
- // FIXME: build nodes for the declarator too.
- return true;
+
+ bool WalkUpFromTypedefNameDecl(TypedefNameDecl *TD) {
+ return processDeclaratorAndDeclaration(TD);
}
bool VisitDecl(Decl *D) {
assert(!D->isImplicit());
- Builder.foldNode(Builder.getRange(D),
- new (allocator()) syntax::UnknownDeclaration());
+ Builder.foldNode(Builder.getDeclarationRange(D),
+ new (allocator()) syntax::UnknownDeclaration(), D);
+ return true;
+ }
+
+ // RAV does not call WalkUpFrom* on explicit instantiations, so we have to
+ // override Traverse.
+ // FIXME: make RAV call WalkUpFrom* instead.
+ bool
+ TraverseClassTemplateSpecializationDecl(ClassTemplateSpecializationDecl *C) {
+ if (!RecursiveASTVisitor::TraverseClassTemplateSpecializationDecl(C))
+ return false;
+ if (C->isExplicitSpecialization())
+ return true; // we are only interested in explicit instantiations.
+ auto *Declaration =
+ cast<syntax::SimpleDeclaration>(handleFreeStandingTagDecl(C));
+ foldExplicitTemplateInstantiation(
+ Builder.getTemplateRange(C), Builder.findToken(C->getExternLoc()),
+ Builder.findToken(C->getTemplateKeywordLoc()), Declaration, C);
+ return true;
+ }
+
+ bool WalkUpFromTemplateDecl(TemplateDecl *S) {
+ foldTemplateDeclaration(
+ Builder.getDeclarationRange(S),
+ Builder.findToken(S->getTemplateParameters()->getTemplateLoc()),
+ Builder.getDeclarationRange(S->getTemplatedDecl()), S);
return true;
}
bool WalkUpFromTagDecl(TagDecl *C) {
// FIXME: build the ClassSpecifier node.
- if (C->isFreeStanding()) {
- // Class is a declaration specifier and needs a spanning declaration node.
- Builder.foldNode(Builder.getRange(C),
- new (allocator()) syntax::SimpleDeclaration);
+ if (!C->isFreeStanding()) {
+ assert(C->getNumTemplateParameterLists() == 0);
return true;
}
+ handleFreeStandingTagDecl(C);
return true;
}
+ syntax::Declaration *handleFreeStandingTagDecl(TagDecl *C) {
+ assert(C->isFreeStanding());
+ // Class is a declaration specifier and needs a spanning declaration node.
+ auto DeclarationRange = Builder.getDeclarationRange(C);
+ syntax::Declaration *Result = new (allocator()) syntax::SimpleDeclaration;
+ Builder.foldNode(DeclarationRange, Result, nullptr);
+
+ // Build TemplateDeclaration nodes if we had template parameters.
+ auto ConsumeTemplateParameters = [&](const TemplateParameterList &L) {
+ const auto *TemplateKW = Builder.findToken(L.getTemplateLoc());
+ auto R = llvm::makeArrayRef(TemplateKW, DeclarationRange.end());
+ Result =
+ foldTemplateDeclaration(R, TemplateKW, DeclarationRange, nullptr);
+ DeclarationRange = R;
+ };
+ if (auto *S = llvm::dyn_cast<ClassTemplatePartialSpecializationDecl>(C))
+ ConsumeTemplateParameters(*S->getTemplateParameters());
+ for (unsigned I = C->getNumTemplateParameterLists(); 0 < I; --I)
+ ConsumeTemplateParameters(*C->getTemplateParameterList(I - 1));
+ return Result;
+ }
+
bool WalkUpFromTranslationUnitDecl(TranslationUnitDecl *TU) {
- // (!) we do not want to call VisitDecl(), the declaration for translation
+ // We do not want to call VisitDecl(), the declaration for translation
// unit is built by finalize().
return true;
}
@@ -370,14 +645,14 @@ public:
Builder.markChildToken(S->getRBracLoc(), NodeRole::CloseParen);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::CompoundStatement);
+ new (allocator()) syntax::CompoundStatement, S);
return true;
}
// Some statements are not yet handled by syntax trees.
bool WalkUpFromStmt(Stmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::UnknownStatement);
+ new (allocator()) syntax::UnknownStatement, S);
return true;
}
@@ -386,27 +661,28 @@ public:
// RAV traverses it as a statement, we produce invalid node kinds in that
// case.
// FIXME: should do this in RAV instead?
- if (S->getInit() && !TraverseStmt(S->getInit()))
- return false;
- if (S->getLoopVariable() && !TraverseDecl(S->getLoopVariable()))
- return false;
- if (S->getRangeInit() && !TraverseStmt(S->getRangeInit()))
- return false;
- if (S->getBody() && !TraverseStmt(S->getBody()))
- return false;
- return true;
+ bool Result = [&, this]() {
+ if (S->getInit() && !TraverseStmt(S->getInit()))
+ return false;
+ if (S->getLoopVariable() && !TraverseDecl(S->getLoopVariable()))
+ return false;
+ if (S->getRangeInit() && !TraverseStmt(S->getRangeInit()))
+ return false;
+ if (S->getBody() && !TraverseStmt(S->getBody()))
+ return false;
+ return true;
+ }();
+ WalkUpFromCXXForRangeStmt(S);
+ return Result;
}
bool TraverseStmt(Stmt *S) {
if (auto *DS = llvm::dyn_cast_or_null<DeclStmt>(S)) {
// We want to consume the semicolon, make sure SimpleDeclaration does not.
for (auto *D : DS->decls())
- Builder.noticeDeclaratorWithoutSemicolon(D);
+ Builder.noticeDeclWithoutSemicolon(D);
} else if (auto *E = llvm::dyn_cast_or_null<Expr>(S)) {
- // (!) do not recurse into subexpressions.
- // we do not have syntax trees for expressions yet, so we only want to see
- // the first top-level expression.
- return WalkUpFromExpr(E->IgnoreImplicit());
+ return RecursiveASTVisitor::TraverseStmt(E->IgnoreImplicit());
}
return RecursiveASTVisitor::TraverseStmt(S);
}
@@ -415,19 +691,306 @@ public:
bool WalkUpFromExpr(Expr *E) {
assert(!isImplicitExpr(E) && "should be handled by TraverseStmt");
Builder.foldNode(Builder.getExprRange(E),
- new (allocator()) syntax::UnknownExpression);
+ new (allocator()) syntax::UnknownExpression, E);
+ return true;
+ }
+
+ syntax::NestedNameSpecifier *
+ BuildNestedNameSpecifier(NestedNameSpecifierLoc QualifierLoc) {
+ if (!QualifierLoc)
+ return nullptr;
+ for (auto it = QualifierLoc; it; it = it.getPrefix()) {
+ auto *NS = new (allocator()) syntax::NameSpecifier;
+ Builder.foldNode(Builder.getRange(it.getLocalSourceRange()), NS, nullptr);
+ Builder.markChild(NS, syntax::NodeRole::NestedNameSpecifier_specifier);
+ }
+ auto *NNS = new (allocator()) syntax::NestedNameSpecifier;
+ Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()), NNS,
+ nullptr);
+ return NNS;
+ }
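
For context, a hedged sketch of the kind of input BuildNestedNameSpecifier handles (`ns`, `S`, and `member` are made-up names): each component of the written qualifier becomes a NameSpecifier child, and the whole `ns::S::` sequence is folded into a single NestedNameSpecifier marked as the IdExpression qualifier.

namespace ns { struct S { static int member; }; }
int ns::S::member = 1;
int use = ns::S::member; // DeclRefExpr with qualifier `ns::S::`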
+
+ bool TraverseUserDefinedLiteral(UserDefinedLiteral *S) {
+ // The semantic AST node `UserDefinedLiteral` (UDL) may have one child node
+ // referencing the location of the UDL suffix (`_w` in `1.2_w`). The
+ // UDL suffix location does not point to the beginning of a token, so we
+ // can't represent the UDL suffix as a separate syntax tree node.
+
+ return WalkUpFromUserDefinedLiteral(S);
+ }
+
+ syntax::UserDefinedLiteralExpression *
+ buildUserDefinedLiteral(UserDefinedLiteral *S) {
+ switch (S->getLiteralOperatorKind()) {
+ case clang::UserDefinedLiteral::LOK_Integer:
+ return new (allocator()) syntax::IntegerUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Floating:
+ return new (allocator()) syntax::FloatUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Character:
+ return new (allocator()) syntax::CharUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_String:
+ return new (allocator()) syntax::StringUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Raw:
+ case clang::UserDefinedLiteral::LOK_Template:
+      // For the raw literal operator and the numeric literal operator template
+      // we cannot get the type of the operand from the semantic AST, so we get
+      // this information from the token itself. As integer and floating-point
+      // literals share the same token kind, we run `NumericLiteralParser` again
+      // to distinguish them.
+ auto TokLoc = S->getBeginLoc();
+ auto TokSpelling =
+ Builder.findToken(TokLoc)->text(Context.getSourceManager());
+ auto Literal =
+ NumericLiteralParser(TokSpelling, TokLoc, Context.getSourceManager(),
+ Context.getLangOpts(), Context.getTargetInfo(),
+ Context.getDiagnostics());
+ if (Literal.isIntegerLiteral())
+ return new (allocator()) syntax::IntegerUserDefinedLiteralExpression;
+ else {
+ assert(Literal.isFloatingLiteral());
+ return new (allocator()) syntax::FloatUserDefinedLiteralExpression;
+ }
+ }
+ llvm_unreachable("Unknown literal operator kind.");
+ }
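
A minimal, compilable sketch of why the raw and template operator kinds need re-lexing (the suffixes below are invented): with a cooked operator the operand type is fixed by the signature, but a raw operator receives the literal as characters, so only the token spelling says whether it was an integer or a floating-point literal.

#include <cstring>
unsigned long long operator""_i(unsigned long long V) { return V; } // LOK_Integer
long double operator""_f(long double V) { return V; }               // LOK_Floating
std::size_t operator""_raw(const char *Digits) { return std::strlen(Digits); } // LOK_Raw
int main() {
  auto A = 10_i;    // operand type known from the cooked operator
  auto B = 2.5_f;
  auto C = 10_raw;  // "10" arrives as a string; integer vs. float must be re-lexed
  auto D = 2.5_raw; // same raw operator, floating-point spelling
  return static_cast<int>(A + B + C + D);
}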
+
+ bool WalkUpFromUserDefinedLiteral(UserDefinedLiteral *S) {
+ Builder.markChildToken(S->getBeginLoc(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S), buildUserDefinedLiteral(S), S);
+ return true;
+ }
+
+ bool WalkUpFromDeclRefExpr(DeclRefExpr *S) {
+ if (auto *NNS = BuildNestedNameSpecifier(S->getQualifierLoc()))
+ Builder.markChild(NNS, syntax::NodeRole::IdExpression_qualifier);
+
+ auto *unqualifiedId = new (allocator()) syntax::UnqualifiedId;
+ // Get `UnqualifiedId` from `DeclRefExpr`.
+    // FIXME: Extract this logic so that it can be used by `MemberExpr` and
+    // other semantic constructs; right now it is tied to `DeclRefExpr`.
+ if (!S->hasExplicitTemplateArgs()) {
+ Builder.foldNode(Builder.getRange(S->getNameInfo().getSourceRange()),
+ unqualifiedId, nullptr);
+ } else {
+ auto templateIdSourceRange =
+ SourceRange(S->getNameInfo().getBeginLoc(), S->getRAngleLoc());
+ Builder.foldNode(Builder.getRange(templateIdSourceRange), unqualifiedId,
+ nullptr);
+ }
+ Builder.markChild(unqualifiedId, syntax::NodeRole::IdExpression_id);
+
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::IdExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromParenExpr(ParenExpr *S) {
+ Builder.markChildToken(S->getLParen(), syntax::NodeRole::OpenParen);
+ Builder.markExprChild(S->getSubExpr(),
+ syntax::NodeRole::ParenExpression_subExpression);
+ Builder.markChildToken(S->getRParen(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::ParenExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromIntegerLiteral(IntegerLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::IntegerLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCharacterLiteral(CharacterLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::CharacterLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromFloatingLiteral(FloatingLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::FloatingLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromStringLiteral(StringLiteral *S) {
+ Builder.markChildToken(S->getBeginLoc(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::StringLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCXXBoolLiteralExpr(CXXBoolLiteralExpr *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BoolLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::CxxNullPtrExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromUnaryOperator(UnaryOperator *S) {
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getSubExpr(),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+
+ if (S->isPostfix())
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PostfixUnaryOperatorExpression,
+ S);
+ else
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PrefixUnaryOperatorExpression,
+ S);
+
+ return true;
+ }
+
+ bool WalkUpFromBinaryOperator(BinaryOperator *S) {
+ Builder.markExprChild(
+ S->getLHS(), syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(
+ S->getRHS(), syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BinaryOperatorExpression, S);
return true;
}
+ bool TraverseCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+ if (getOperatorNodeKind(*S) ==
+ syntax::NodeKind::PostfixUnaryOperatorExpression) {
+      // A postfix unary operator is declared as taking two operands. The
+      // second operand is used to distinguish it from its prefix counterpart.
+      // In the semantic AST this "phantom" operand is represented as an
+      // `IntegerLiteral` with an invalid `SourceLocation`. We skip visiting
+      // this operand because it does not correspond to anything written in
+      // the source code.
+ for (auto *child : S->children()) {
+ if (child->getSourceRange().isInvalid())
+ continue;
+ if (!TraverseStmt(child))
+ return false;
+ }
+ return WalkUpFromCXXOperatorCallExpr(S);
+ } else
+ return RecursiveASTVisitor::TraverseCXXOperatorCallExpr(S);
+ }
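
For reference, a small sketch of the "phantom" operand skipped above (the `Counter` type is invented): the postfix form of an overloaded increment is declared with an extra int parameter that never appears in the source, and the call site materializes it as an IntegerLiteral with an invalid SourceLocation.

struct Counter {
  Counter &operator++();    // prefix form: ++C
  Counter operator++(int);  // postfix form: C++; the int is the phantom operand
};
void touch(Counter &C) {
  ++C; // CXXOperatorCallExpr with one written argument
  C++; // CXXOperatorCallExpr whose second argument has an invalid location
}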
+
+ bool WalkUpFromCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+ switch (getOperatorNodeKind(*S)) {
+ case syntax::NodeKind::BinaryOperatorExpression:
+ Builder.markExprChild(
+ S->getArg(0),
+ syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(
+ S->getArg(1),
+ syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BinaryOperatorExpression, S);
+ return true;
+ case syntax::NodeKind::PrefixUnaryOperatorExpression:
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getArg(0),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PrefixUnaryOperatorExpression,
+ S);
+ return true;
+ case syntax::NodeKind::PostfixUnaryOperatorExpression:
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getArg(0),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PostfixUnaryOperatorExpression,
+ S);
+ return true;
+ case syntax::NodeKind::UnknownExpression:
+ return RecursiveASTVisitor::WalkUpFromCXXOperatorCallExpr(S);
+ default:
+ llvm_unreachable("getOperatorNodeKind() does not return this value");
+ }
+ }
+
bool WalkUpFromNamespaceDecl(NamespaceDecl *S) {
- auto Tokens = Builder.getRange(S);
+ auto Tokens = Builder.getDeclarationRange(S);
if (Tokens.front().kind() == tok::coloncolon) {
// Handle nested namespace definitions. Those start at '::' token, e.g.
// namespace a^::b {}
// FIXME: build corresponding nodes for the name of this namespace.
return true;
}
- Builder.foldNode(Tokens, new (allocator()) syntax::NamespaceDefinition);
+ Builder.foldNode(Tokens, new (allocator()) syntax::NamespaceDefinition, S);
+ return true;
+ }
+
+ bool TraverseParenTypeLoc(ParenTypeLoc L) {
+ // We reverse order of traversal to get the proper syntax structure.
+ if (!WalkUpFromParenTypeLoc(L))
+ return false;
+ return TraverseTypeLoc(L.getInnerLoc());
+ }
+
+ bool WalkUpFromParenTypeLoc(ParenTypeLoc L) {
+ Builder.markChildToken(L.getLParenLoc(), syntax::NodeRole::OpenParen);
+ Builder.markChildToken(L.getRParenLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLParenLoc(), L.getRParenLoc()),
+ new (allocator()) syntax::ParenDeclarator, L);
+ return true;
+ }
+
+  // Declarator chunks; they are produced by type locs and some clang::Decls.
+ bool WalkUpFromArrayTypeLoc(ArrayTypeLoc L) {
+ Builder.markChildToken(L.getLBracketLoc(), syntax::NodeRole::OpenParen);
+ Builder.markExprChild(L.getSizeExpr(),
+ syntax::NodeRole::ArraySubscript_sizeExpression);
+ Builder.markChildToken(L.getRBracketLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLBracketLoc(), L.getRBracketLoc()),
+ new (allocator()) syntax::ArraySubscript, L);
+ return true;
+ }
+
+ bool WalkUpFromFunctionTypeLoc(FunctionTypeLoc L) {
+ Builder.markChildToken(L.getLParenLoc(), syntax::NodeRole::OpenParen);
+ for (auto *P : L.getParams()) {
+ Builder.markChild(P, syntax::NodeRole::ParametersAndQualifiers_parameter);
+ }
+ Builder.markChildToken(L.getRParenLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLParenLoc(), L.getEndLoc()),
+ new (allocator()) syntax::ParametersAndQualifiers, L);
+ return true;
+ }
+
+ bool WalkUpFromFunctionProtoTypeLoc(FunctionProtoTypeLoc L) {
+ if (!L.getTypePtr()->hasTrailingReturn())
+ return WalkUpFromFunctionTypeLoc(L);
+
+ auto *TrailingReturnTokens = BuildTrailingReturn(L);
+ // Finish building the node for parameters.
+ Builder.markChild(TrailingReturnTokens,
+ syntax::NodeRole::ParametersAndQualifiers_trailingReturn);
+ return WalkUpFromFunctionTypeLoc(L);
+ }
+
+ bool WalkUpFromMemberPointerTypeLoc(MemberPointerTypeLoc L) {
+ auto SR = L.getLocalSourceRange();
+ Builder.foldNode(Builder.getRange(SR),
+ new (allocator()) syntax::MemberPointer, L);
return true;
}
@@ -436,13 +999,13 @@ public:
// and fold resulting nodes.
bool WalkUpFromDeclStmt(DeclStmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::DeclarationStatement);
+ new (allocator()) syntax::DeclarationStatement, S);
return true;
}
bool WalkUpFromNullStmt(NullStmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::EmptyStatement);
+ new (allocator()) syntax::EmptyStatement, S);
return true;
}
@@ -451,7 +1014,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::SwitchStatement);
+ new (allocator()) syntax::SwitchStatement, S);
return true;
}
@@ -461,7 +1024,7 @@ public:
Builder.markExprChild(S->getLHS(), syntax::NodeRole::CaseStatement_value);
Builder.markStmtChild(S->getSubStmt(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::CaseStatement);
+ new (allocator()) syntax::CaseStatement, S);
return true;
}
@@ -470,7 +1033,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getSubStmt(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::DefaultStatement);
+ new (allocator()) syntax::DefaultStatement, S);
return true;
}
@@ -483,7 +1046,7 @@ public:
Builder.markStmtChild(S->getElse(),
syntax::NodeRole::IfStatement_elseStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::IfStatement);
+ new (allocator()) syntax::IfStatement, S);
return true;
}
@@ -491,7 +1054,7 @@ public:
Builder.markChildToken(S->getForLoc(), syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ForStatement);
+ new (allocator()) syntax::ForStatement, S);
return true;
}
@@ -500,7 +1063,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::WhileStatement);
+ new (allocator()) syntax::WhileStatement, S);
return true;
}
@@ -508,7 +1071,7 @@ public:
Builder.markChildToken(S->getContinueLoc(),
syntax::NodeRole::IntroducerKeyword);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ContinueStatement);
+ new (allocator()) syntax::ContinueStatement, S);
return true;
}
@@ -516,7 +1079,7 @@ public:
Builder.markChildToken(S->getBreakLoc(),
syntax::NodeRole::IntroducerKeyword);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::BreakStatement);
+ new (allocator()) syntax::BreakStatement, S);
return true;
}
@@ -526,7 +1089,7 @@ public:
Builder.markExprChild(S->getRetValue(),
syntax::NodeRole::ReturnStatement_value);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ReturnStatement);
+ new (allocator()) syntax::ReturnStatement, S);
return true;
}
@@ -534,13 +1097,13 @@ public:
Builder.markChildToken(S->getForLoc(), syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::RangeBasedForStatement);
+ new (allocator()) syntax::RangeBasedForStatement, S);
return true;
}
bool WalkUpFromEmptyDecl(EmptyDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::EmptyDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::EmptyDeclaration, S);
return true;
}
@@ -549,76 +1112,175 @@ public:
syntax::NodeRole::StaticAssertDeclaration_condition);
Builder.markExprChild(S->getMessage(),
syntax::NodeRole::StaticAssertDeclaration_message);
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::StaticAssertDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::StaticAssertDeclaration, S);
return true;
}
bool WalkUpFromLinkageSpecDecl(LinkageSpecDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::LinkageSpecificationDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::LinkageSpecificationDeclaration,
+ S);
return true;
}
bool WalkUpFromNamespaceAliasDecl(NamespaceAliasDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::NamespaceAliasDefinition);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::NamespaceAliasDefinition, S);
return true;
}
bool WalkUpFromUsingDirectiveDecl(UsingDirectiveDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingNamespaceDirective);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingNamespaceDirective, S);
return true;
}
bool WalkUpFromUsingDecl(UsingDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromTypeAliasDecl(TypeAliasDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::TypeAliasDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::TypeAliasDeclaration, S);
return true;
}
private:
+ template <class T> SourceLocation getQualifiedNameStart(T *D) {
+ static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
+ std::is_base_of<TypedefNameDecl, T>::value),
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
+
+ auto DN = D->getDeclName();
+ bool IsAnonymous = DN.isIdentifier() && !DN.getAsIdentifierInfo();
+ if (IsAnonymous)
+ return SourceLocation();
+
+ if (const auto *DD = llvm::dyn_cast<DeclaratorDecl>(D)) {
+ if (DD->getQualifierLoc()) {
+ return DD->getQualifierLoc().getBeginLoc();
+ }
+ }
+
+ return D->getLocation();
+ }
+
+ SourceRange getInitializerRange(Decl *D) {
+ if (auto *V = llvm::dyn_cast<VarDecl>(D)) {
+ auto *I = V->getInit();
+      // Initializers in range-based for loops are not part of the declarator.
+ if (I && !V->isCXXForRangeDecl())
+ return I->getSourceRange();
+ }
+
+ return SourceRange();
+ }
+
+  /// Folds the SimpleDeclarator node (if present); if this is the last
+  /// declarator in the chain, it also folds the SimpleDeclaration node.
+ template <class T> bool processDeclaratorAndDeclaration(T *D) {
+ SourceRange Initializer = getInitializerRange(D);
+ auto Range = getDeclaratorRange(Builder.sourceManager(),
+ D->getTypeSourceInfo()->getTypeLoc(),
+ getQualifiedNameStart(D), Initializer);
+
+    // There doesn't have to be a declarator (e.g. `void foo(int)` only has a
+    // declaration, but no declarator).
+ if (Range.getBegin().isValid()) {
+ auto *N = new (allocator()) syntax::SimpleDeclarator;
+ Builder.foldNode(Builder.getRange(Range), N, nullptr);
+ Builder.markChild(N, syntax::NodeRole::SimpleDeclaration_declarator);
+ }
+
+ if (Builder.isResponsibleForCreatingDeclaration(D)) {
+ Builder.foldNode(Builder.getDeclarationRange(D),
+ new (allocator()) syntax::SimpleDeclaration, D);
+ }
+ return true;
+ }
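
A brief pair of hypothetical inputs for the helper above: the first declaration has a declarator range (roughly `*Ptr = nullptr`, including the initializer), while the unnamed parameter in the second yields a declaration with no declarator at all, matching the comment above.

unsigned *Ptr = nullptr; // SimpleDeclaration with one SimpleDeclarator child
void foo(int);           // parameter declaration without a declarator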
+
+  /// Builds the trailing-return-type node and returns it.
+ syntax::TrailingReturnType *BuildTrailingReturn(FunctionProtoTypeLoc L) {
+ assert(L.getTypePtr()->hasTrailingReturn());
+
+ auto ReturnedType = L.getReturnLoc();
+ // Build node for the declarator, if any.
+ auto ReturnDeclaratorRange =
+ getDeclaratorRange(this->Builder.sourceManager(), ReturnedType,
+ /*Name=*/SourceLocation(),
+ /*Initializer=*/SourceLocation());
+ syntax::SimpleDeclarator *ReturnDeclarator = nullptr;
+ if (ReturnDeclaratorRange.isValid()) {
+ ReturnDeclarator = new (allocator()) syntax::SimpleDeclarator;
+ Builder.foldNode(Builder.getRange(ReturnDeclaratorRange),
+ ReturnDeclarator, nullptr);
+ }
+
+ // Build node for trailing return type.
+ auto Return = Builder.getRange(ReturnedType.getSourceRange());
+ const auto *Arrow = Return.begin() - 1;
+ assert(Arrow->kind() == tok::arrow);
+ auto Tokens = llvm::makeArrayRef(Arrow, Return.end());
+ Builder.markChildToken(Arrow, syntax::NodeRole::ArrowToken);
+ if (ReturnDeclarator)
+ Builder.markChild(ReturnDeclarator,
+ syntax::NodeRole::TrailingReturnType_declarator);
+ auto *R = new (allocator()) syntax::TrailingReturnType;
+ Builder.foldNode(Tokens, R, L);
+ return R;
+ }
+
+ void foldExplicitTemplateInstantiation(
+ ArrayRef<syntax::Token> Range, const syntax::Token *ExternKW,
+ const syntax::Token *TemplateKW,
+ syntax::SimpleDeclaration *InnerDeclaration, Decl *From) {
+ assert(!ExternKW || ExternKW->kind() == tok::kw_extern);
+ assert(TemplateKW && TemplateKW->kind() == tok::kw_template);
+ Builder.markChildToken(ExternKW, syntax::NodeRole::ExternKeyword);
+ Builder.markChildToken(TemplateKW, syntax::NodeRole::IntroducerKeyword);
+ Builder.markChild(
+ InnerDeclaration,
+ syntax::NodeRole::ExplicitTemplateInstantiation_declaration);
+ Builder.foldNode(
+ Range, new (allocator()) syntax::ExplicitTemplateInstantiation, From);
+ }
+
+ syntax::TemplateDeclaration *foldTemplateDeclaration(
+ ArrayRef<syntax::Token> Range, const syntax::Token *TemplateKW,
+ ArrayRef<syntax::Token> TemplatedDeclaration, Decl *From) {
+ assert(TemplateKW && TemplateKW->kind() == tok::kw_template);
+ Builder.markChildToken(TemplateKW, syntax::NodeRole::IntroducerKeyword);
+
+ auto *N = new (allocator()) syntax::TemplateDeclaration;
+ Builder.foldNode(Range, N, From);
+ Builder.markChild(N, syntax::NodeRole::TemplateDeclaration_declaration);
+ return N;
+ }
+
/// A small helper to save some typing.
llvm::BumpPtrAllocator &allocator() { return Builder.allocator(); }
syntax::TreeBuilder &Builder;
- const LangOptions &LangOpts;
+ const ASTContext &Context;
};
} // namespace
-void syntax::TreeBuilder::foldNode(llvm::ArrayRef<syntax::Token> Range,
- syntax::Tree *New) {
- Pending.foldChildren(Arena, Range, New);
-}
-
-void syntax::TreeBuilder::noticeDeclaratorRange(
- llvm::ArrayRef<syntax::Token> Range) {
- if (Pending.extendDelayedFold(Range))
- return;
- Pending.foldChildrenDelayed(Range,
- new (allocator()) syntax::SimpleDeclaration);
-}
-
-void syntax::TreeBuilder::noticeDeclaratorWithoutSemicolon(Decl *D) {
+void syntax::TreeBuilder::noticeDeclWithoutSemicolon(Decl *D) {
DeclsWithoutSemicolons.insert(D);
}
@@ -628,31 +1290,55 @@ void syntax::TreeBuilder::markChildToken(SourceLocation Loc, NodeRole Role) {
Pending.assignRole(*findToken(Loc), Role);
}
+void syntax::TreeBuilder::markChildToken(const syntax::Token *T, NodeRole R) {
+ if (!T)
+ return;
+ Pending.assignRole(*T, R);
+}
+
+void syntax::TreeBuilder::markChild(syntax::Node *N, NodeRole R) {
+ assert(N);
+ setRole(N, R);
+}
+
+void syntax::TreeBuilder::markChild(ASTPtr N, NodeRole R) {
+ auto *SN = Mapping.find(N);
+ assert(SN != nullptr);
+ setRole(SN, R);
+}
+
void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
if (!Child)
return;
- auto Range = getStmtRange(Child);
- // This is an expression in a statement position, consume the trailing
- // semicolon and form an 'ExpressionStatement' node.
- if (auto *E = dyn_cast<Expr>(Child)) {
- Pending.assignRole(getExprRange(E),
- NodeRole::ExpressionStatement_expression);
- // (!) 'getRange(Stmt)' ensures this already covers a trailing semicolon.
- Pending.foldChildren(Arena, Range,
- new (allocator()) syntax::ExpressionStatement);
- }
- Pending.assignRole(Range, Role);
+ syntax::Tree *ChildNode;
+ if (Expr *ChildExpr = dyn_cast<Expr>(Child)) {
+    // This is an expression in a statement position; consume the trailing
+    // semicolon and form an 'ExpressionStatement' node.
+ markExprChild(ChildExpr, NodeRole::ExpressionStatement_expression);
+ ChildNode = new (allocator()) syntax::ExpressionStatement;
+ // (!) 'getStmtRange()' ensures this covers a trailing semicolon.
+ Pending.foldChildren(Arena, getStmtRange(Child), ChildNode);
+ } else {
+ ChildNode = Mapping.find(Child);
+ }
+ assert(ChildNode != nullptr);
+ setRole(ChildNode, Role);
}
void syntax::TreeBuilder::markExprChild(Expr *Child, NodeRole Role) {
if (!Child)
return;
+ Child = Child->IgnoreImplicit();
- Pending.assignRole(getExprRange(Child), Role);
+ syntax::Tree *ChildNode = Mapping.find(Child);
+ assert(ChildNode != nullptr);
+ setRole(ChildNode, Role);
}
const syntax::Token *syntax::TreeBuilder::findToken(SourceLocation L) const {
+ if (L.isInvalid())
+ return nullptr;
auto It = LocationToToken.find(L.getRawEncoding());
assert(It != LocationToToken.end());
return It->second;
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
index 72458528202e..24048b297a11 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
@@ -35,7 +35,7 @@ public:
assert(!New->isDetached());
assert(Role != NodeRole::Detached);
- New->Role = static_cast<unsigned>(Role);
+ New->setRole(Role);
auto *P = Anchor->parent();
P->replaceChildRangeLowLevel(Anchor, Anchor, New);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
index 5b0c5107c134..2435ae0a91dd 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
@@ -18,6 +18,38 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "TranslationUnit";
case NodeKind::UnknownExpression:
return OS << "UnknownExpression";
+ case NodeKind::ParenExpression:
+ return OS << "ParenExpression";
+ case NodeKind::IntegerLiteralExpression:
+ return OS << "IntegerLiteralExpression";
+ case NodeKind::CharacterLiteralExpression:
+ return OS << "CharacterLiteralExpression";
+ case NodeKind::FloatingLiteralExpression:
+ return OS << "FloatingLiteralExpression";
+ case NodeKind::StringLiteralExpression:
+ return OS << "StringLiteralExpression";
+ case NodeKind::BoolLiteralExpression:
+ return OS << "BoolLiteralExpression";
+ case NodeKind::CxxNullPtrExpression:
+ return OS << "CxxNullPtrExpression";
+ case NodeKind::IntegerUserDefinedLiteralExpression:
+ return OS << "IntegerUserDefinedLiteralExpression";
+ case NodeKind::FloatUserDefinedLiteralExpression:
+ return OS << "FloatUserDefinedLiteralExpression";
+ case NodeKind::CharUserDefinedLiteralExpression:
+ return OS << "CharUserDefinedLiteralExpression";
+ case NodeKind::StringUserDefinedLiteralExpression:
+ return OS << "StringUserDefinedLiteralExpression";
+ case NodeKind::PrefixUnaryOperatorExpression:
+ return OS << "PrefixUnaryOperatorExpression";
+ case NodeKind::PostfixUnaryOperatorExpression:
+ return OS << "PostfixUnaryOperatorExpression";
+ case NodeKind::BinaryOperatorExpression:
+ return OS << "BinaryOperatorExpression";
+ case NodeKind::UnqualifiedId:
+ return OS << "UnqualifiedId";
+ case NodeKind::IdExpression:
+ return OS << "IdExpression";
case NodeKind::UnknownStatement:
return OS << "UnknownStatement";
case NodeKind::DeclarationStatement:
@@ -58,6 +90,10 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "LinkageSpecificationDeclaration";
case NodeKind::SimpleDeclaration:
return OS << "SimpleDeclaration";
+ case NodeKind::TemplateDeclaration:
+ return OS << "TemplateDeclaration";
+ case NodeKind::ExplicitTemplateInstantiation:
+ return OS << "ExplicitTemplateInstantiation";
case NodeKind::NamespaceDefinition:
return OS << "NamespaceDefinition";
case NodeKind::NamespaceAliasDefinition:
@@ -68,6 +104,22 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "UsingDeclaration";
case NodeKind::TypeAliasDeclaration:
return OS << "TypeAliasDeclaration";
+ case NodeKind::SimpleDeclarator:
+ return OS << "SimpleDeclarator";
+ case NodeKind::ParenDeclarator:
+ return OS << "ParenDeclarator";
+ case NodeKind::ArraySubscript:
+ return OS << "ArraySubscript";
+ case NodeKind::TrailingReturnType:
+ return OS << "TrailingReturnType";
+ case NodeKind::ParametersAndQualifiers:
+ return OS << "ParametersAndQualifiers";
+ case NodeKind::MemberPointer:
+ return OS << "MemberPointer";
+ case NodeKind::NameSpecifier:
+ return OS << "NameSpecifier";
+ case NodeKind::NestedNameSpecifier:
+ return OS << "NestedNameSpecifier";
}
llvm_unreachable("unknown node kind");
}
@@ -84,6 +136,12 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "CloseParen";
case syntax::NodeRole::IntroducerKeyword:
return OS << "IntroducerKeyword";
+ case syntax::NodeRole::LiteralToken:
+ return OS << "LiteralToken";
+ case syntax::NodeRole::ArrowToken:
+ return OS << "ArrowToken";
+ case syntax::NodeRole::ExternKeyword:
+ return OS << "ExternKeyword";
case syntax::NodeRole::BodyStatement:
return OS << "BodyStatement";
case syntax::NodeRole::CaseStatement_value:
@@ -94,6 +152,14 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "IfStatement_elseKeyword";
case syntax::NodeRole::IfStatement_elseStatement:
return OS << "IfStatement_elseStatement";
+ case syntax::NodeRole::OperatorExpression_operatorToken:
+ return OS << "OperatorExpression_operatorToken";
+ case syntax::NodeRole::UnaryOperatorExpression_operand:
+ return OS << "UnaryOperatorExpression_operand";
+ case syntax::NodeRole::BinaryOperatorExpression_leftHandSide:
+ return OS << "BinaryOperatorExpression_leftHandSide";
+ case syntax::NodeRole::BinaryOperatorExpression_rightHandSide:
+ return OS << "BinaryOperatorExpression_rightHandSide";
case syntax::NodeRole::ReturnStatement_value:
return OS << "ReturnStatement_value";
case syntax::NodeRole::ExpressionStatement_expression:
@@ -104,10 +170,126 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "StaticAssertDeclaration_condition";
case syntax::NodeRole::StaticAssertDeclaration_message:
return OS << "StaticAssertDeclaration_message";
+ case syntax::NodeRole::SimpleDeclaration_declarator:
+ return OS << "SimpleDeclaration_declarator";
+ case syntax::NodeRole::TemplateDeclaration_declaration:
+ return OS << "TemplateDeclaration_declaration";
+ case syntax::NodeRole::ExplicitTemplateInstantiation_declaration:
+ return OS << "ExplicitTemplateInstantiation_declaration";
+ case syntax::NodeRole::ArraySubscript_sizeExpression:
+ return OS << "ArraySubscript_sizeExpression";
+ case syntax::NodeRole::TrailingReturnType_declarator:
+ return OS << "TrailingReturnType_declarator";
+ case syntax::NodeRole::ParametersAndQualifiers_parameter:
+ return OS << "ParametersAndQualifiers_parameter";
+ case syntax::NodeRole::ParametersAndQualifiers_trailingReturn:
+ return OS << "ParametersAndQualifiers_trailingReturn";
+ case syntax::NodeRole::IdExpression_id:
+ return OS << "IdExpression_id";
+ case syntax::NodeRole::IdExpression_qualifier:
+ return OS << "IdExpression_qualifier";
+ case syntax::NodeRole::NestedNameSpecifier_specifier:
+ return OS << "NestedNameSpecifier_specifier";
+ case syntax::NodeRole::ParenExpression_subExpression:
+ return OS << "ParenExpression_subExpression";
}
llvm_unreachable("invalid role");
}
+std::vector<syntax::NameSpecifier *> syntax::NestedNameSpecifier::specifiers() {
+ std::vector<syntax::NameSpecifier *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ assert(C->role() == syntax::NodeRole::NestedNameSpecifier_specifier);
+ Children.push_back(llvm::cast<syntax::NameSpecifier>(C));
+ }
+ return Children;
+}
+
+syntax::NestedNameSpecifier *syntax::IdExpression::qualifier() {
+ return llvm::cast_or_null<syntax::NestedNameSpecifier>(
+ findChild(syntax::NodeRole::IdExpression_qualifier));
+}
+
+syntax::UnqualifiedId *syntax::IdExpression::unqualifiedId() {
+ return llvm::cast_or_null<syntax::UnqualifiedId>(
+ findChild(syntax::NodeRole::IdExpression_id));
+}
+
+syntax::Leaf *syntax::ParenExpression::openParen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Expression *syntax::ParenExpression::subExpression() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::ParenExpression_subExpression));
+}
+
+syntax::Leaf *syntax::ParenExpression::closeParen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::IntegerLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::CharacterLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::FloatingLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::StringLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::BoolLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::CxxNullPtrExpression::nullPtrKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::UserDefinedLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Expression *syntax::BinaryOperatorExpression::lhs() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::BinaryOperatorExpression_leftHandSide));
+}
+
+syntax::Leaf *syntax::UnaryOperatorExpression::operatorToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+}
+
+syntax::Expression *syntax::UnaryOperatorExpression::operand() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::UnaryOperatorExpression_operand));
+}
+
+syntax::Leaf *syntax::BinaryOperatorExpression::operatorToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+}
+
+syntax::Expression *syntax::BinaryOperatorExpression::rhs() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::BinaryOperatorExpression_rightHandSide));
+}
+
syntax::Leaf *syntax::SwitchStatement::switchKeyword() {
return llvm::cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
@@ -226,8 +408,8 @@ syntax::Leaf *syntax::CompoundStatement::lbrace() {
std::vector<syntax::Statement *> syntax::CompoundStatement::statements() {
std::vector<syntax::Statement *> Children;
for (auto *C = firstChild(); C; C = C->nextSibling()) {
- if (C->role() == syntax::NodeRole::CompoundStatement_statement)
- Children.push_back(llvm::cast<syntax::Statement>(C));
+ assert(C->role() == syntax::NodeRole::CompoundStatement_statement);
+ Children.push_back(llvm::cast<syntax::Statement>(C));
}
return Children;
}
@@ -246,3 +428,98 @@ syntax::Expression *syntax::StaticAssertDeclaration::message() {
return llvm::cast_or_null<syntax::Expression>(
findChild(syntax::NodeRole::StaticAssertDeclaration_message));
}
+
+std::vector<syntax::SimpleDeclarator *>
+syntax::SimpleDeclaration::declarators() {
+ std::vector<syntax::SimpleDeclarator *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ if (C->role() == syntax::NodeRole::SimpleDeclaration_declarator)
+ Children.push_back(llvm::cast<syntax::SimpleDeclarator>(C));
+ }
+ return Children;
+}
+
+syntax::Leaf *syntax::TemplateDeclaration::templateKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::IntroducerKeyword));
+}
+
+syntax::Declaration *syntax::TemplateDeclaration::declaration() {
+ return llvm::cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::TemplateDeclaration_declaration));
+}
+
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::templateKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::IntroducerKeyword));
+}
+
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::externKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::ExternKeyword));
+}
+
+syntax::Declaration *syntax::ExplicitTemplateInstantiation::declaration() {
+ return llvm::cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::ExplicitTemplateInstantiation_declaration));
+}
+
+syntax::Leaf *syntax::ParenDeclarator::lparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Leaf *syntax::ParenDeclarator::rparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::ArraySubscript::lbracket() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Expression *syntax::ArraySubscript::sizeExpression() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::ArraySubscript_sizeExpression));
+}
+
+syntax::Leaf *syntax::ArraySubscript::rbracket() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::TrailingReturnType::arrowToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::ArrowToken));
+}
+
+syntax::SimpleDeclarator *syntax::TrailingReturnType::declarator() {
+ return llvm::cast_or_null<syntax::SimpleDeclarator>(
+ findChild(syntax::NodeRole::TrailingReturnType_declarator));
+}
+
+syntax::Leaf *syntax::ParametersAndQualifiers::lparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+std::vector<syntax::SimpleDeclaration *>
+syntax::ParametersAndQualifiers::parameters() {
+ std::vector<syntax::SimpleDeclaration *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ if (C->role() == syntax::NodeRole::ParametersAndQualifiers_parameter)
+ Children.push_back(llvm::cast<syntax::SimpleDeclaration>(C));
+ }
+ return Children;
+}
+
+syntax::Leaf *syntax::ParametersAndQualifiers::rparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::TrailingReturnType *syntax::ParametersAndQualifiers::trailingReturn() {
+ return llvm::cast_or_null<syntax::TrailingReturnType>(
+ findChild(syntax::NodeRole::ParametersAndQualifiers_trailingReturn));
+}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
index 35a35f904069..c6b904822b8b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -35,6 +35,69 @@
using namespace clang;
using namespace clang::syntax;
+namespace {
+// Finds the smallest consecutive subsequence of Toks that covers R.
+llvm::ArrayRef<syntax::Token>
+getTokensCovering(llvm::ArrayRef<syntax::Token> Toks, SourceRange R,
+ const SourceManager &SM) {
+ if (R.isInvalid())
+ return {};
+ const syntax::Token *Begin =
+ llvm::partition_point(Toks, [&](const syntax::Token &T) {
+ return SM.isBeforeInTranslationUnit(T.location(), R.getBegin());
+ });
+ const syntax::Token *End =
+ llvm::partition_point(Toks, [&](const syntax::Token &T) {
+ return !SM.isBeforeInTranslationUnit(R.getEnd(), T.location());
+ });
+ if (Begin > End)
+ return {};
+ return {Begin, End};
+}
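
A standalone sketch of the same binary-search idea with std::partition_point (the Tok struct and tokensCovering name are invented; the real function orders tokens with SourceManager::isBeforeInTranslationUnit rather than plain offsets):

#include <algorithm>
#include <utility>
#include <vector>

struct Tok { unsigned Offset; }; // stand-in for syntax::Token

// Smallest consecutive subrange of offset-sorted Toks covering [Begin, End].
std::pair<const Tok *, const Tok *>
tokensCovering(const std::vector<Tok> &Toks, unsigned Begin, unsigned End) {
  const Tok *First = std::partition_point(
      Toks.data(), Toks.data() + Toks.size(),
      [&](const Tok &T) { return T.Offset < Begin; });
  const Tok *Last = std::partition_point(
      Toks.data(), Toks.data() + Toks.size(),
      [&](const Tok &T) { return !(End < T.Offset); });
  if (First > Last) // nothing covers the range
    return {First, First};
  return {First, Last};
}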
+
+// Finds the smallest expansion range that contains expanded tokens First and
+// Last, e.g.:
+// #define ID(x) x
+// ID(ID(ID(a1) a2))
+// ~~ -> a1
+// ~~ -> a2
+// ~~~~~~~~~ -> a1 a2
+SourceRange findCommonRangeForMacroArgs(const syntax::Token &First,
+ const syntax::Token &Last,
+ const SourceManager &SM) {
+ SourceRange Res;
+ auto FirstLoc = First.location(), LastLoc = Last.location();
+  // Keep traversing up the spelling chain as long as tokens are part of the
+ // same expansion.
+ while (!FirstLoc.isFileID() && !LastLoc.isFileID()) {
+ auto ExpInfoFirst = SM.getSLocEntry(SM.getFileID(FirstLoc)).getExpansion();
+ auto ExpInfoLast = SM.getSLocEntry(SM.getFileID(LastLoc)).getExpansion();
+ // Stop if expansions have diverged.
+ if (ExpInfoFirst.getExpansionLocStart() !=
+ ExpInfoLast.getExpansionLocStart())
+ break;
+ // Do not continue into macro bodies.
+ if (!ExpInfoFirst.isMacroArgExpansion() ||
+ !ExpInfoLast.isMacroArgExpansion())
+ break;
+ FirstLoc = SM.getImmediateSpellingLoc(FirstLoc);
+ LastLoc = SM.getImmediateSpellingLoc(LastLoc);
+ // Update the result afterwards, as we want the tokens that triggered the
+ // expansion.
+ Res = {FirstLoc, LastLoc};
+ }
+ // Normally mapping back to expansion location here only changes FileID, as
+ // we've already found some tokens expanded from the same macro argument, and
+ // they should map to a consecutive subset of spelled tokens. Unfortunately
+  // SourceManager::isBeforeInTranslationUnit discriminates source locations
+  // based on their FileID in addition to offsets. So even though we are
+  // referring to the same tokens, SourceManager might tell us that one is before
+ // the other if they've got different FileIDs.
+ return SM.getExpansionRange(CharSourceRange(Res, true)).getAsRange();
+}
+
+} // namespace
+
syntax::Token::Token(SourceLocation Location, unsigned Length,
tok::TokenKind Kind)
: Location(Location), Length(Length), Kind(Kind) {
@@ -67,7 +130,8 @@ FileRange syntax::Token::range(const SourceManager &SM,
auto F = First.range(SM);
auto L = Last.range(SM);
assert(F.file() == L.file() && "tokens from different files");
- assert((F == L || F.endOffset() <= L.beginOffset()) && "wrong order of tokens");
+ assert((F == L || F.endOffset() <= L.beginOffset()) &&
+ "wrong order of tokens");
return FileRange(F.file(), F.beginOffset(), L.endOffset());
}
@@ -120,19 +184,7 @@ llvm::StringRef FileRange::text(const SourceManager &SM) const {
}
llvm::ArrayRef<syntax::Token> TokenBuffer::expandedTokens(SourceRange R) const {
- if (R.isInvalid())
- return {};
- const Token *Begin =
- llvm::partition_point(expandedTokens(), [&](const syntax::Token &T) {
- return SourceMgr->isBeforeInTranslationUnit(T.location(), R.getBegin());
- });
- const Token *End =
- llvm::partition_point(expandedTokens(), [&](const syntax::Token &T) {
- return !SourceMgr->isBeforeInTranslationUnit(R.getEnd(), T.location());
- });
- if (Begin > End)
- return {};
- return {Begin, End};
+ return getTokensCovering(expandedTokens(), R, *SourceMgr);
}
CharSourceRange FileRange::toCharRange(const SourceManager &SM) const {
@@ -161,19 +213,109 @@ TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const {
// Our token could only be produced by the previous mapping.
if (It == File.Mappings.begin()) {
// No previous mapping, no need to modify offsets.
- return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded], nullptr};
+ return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded],
+ /*Mapping=*/nullptr};
}
--It; // 'It' now points to last mapping that started before our token.
// Check if the token is part of the mapping.
if (ExpandedIndex < It->EndExpanded)
- return {&File.SpelledTokens[It->BeginSpelled], /*Mapping*/ &*It};
+ return {&File.SpelledTokens[It->BeginSpelled], /*Mapping=*/&*It};
// Not part of the mapping, use the index from previous mapping to compute the
// corresponding spelled token.
return {
&File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)],
- /*Mapping*/ nullptr};
+ /*Mapping=*/nullptr};
+}
+
+const TokenBuffer::Mapping *
+TokenBuffer::mappingStartingBeforeSpelled(const MarkedFile &F,
+ const syntax::Token *Spelled) {
+ assert(F.SpelledTokens.data() <= Spelled);
+ unsigned SpelledI = Spelled - F.SpelledTokens.data();
+ assert(SpelledI < F.SpelledTokens.size());
+
+ auto It = llvm::partition_point(F.Mappings, [SpelledI](const Mapping &M) {
+ return M.BeginSpelled <= SpelledI;
+ });
+ if (It == F.Mappings.begin())
+ return nullptr;
+ --It;
+ return &*It;
+}
+
+llvm::SmallVector<llvm::ArrayRef<syntax::Token>, 1>
+TokenBuffer::expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
+ if (Spelled.empty())
+ return {};
+ assert(Spelled.front().location().isFileID());
+
+ auto FID = sourceManager().getFileID(Spelled.front().location());
+ auto It = Files.find(FID);
+ assert(It != Files.end());
+
+ const MarkedFile &File = It->second;
+ // `Spelled` must be a subrange of `File.SpelledTokens`.
+ assert(File.SpelledTokens.data() <= Spelled.data());
+ assert(&Spelled.back() <=
+ File.SpelledTokens.data() + File.SpelledTokens.size());
+#ifndef NDEBUG
+ auto T1 = Spelled.back().location();
+ auto T2 = File.SpelledTokens.back().location();
+ assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2));
+#endif
+
+ auto *FrontMapping = mappingStartingBeforeSpelled(File, &Spelled.front());
+ unsigned SpelledFrontI = &Spelled.front() - File.SpelledTokens.data();
+ assert(SpelledFrontI < File.SpelledTokens.size());
+ unsigned ExpandedBegin;
+ if (!FrontMapping) {
+    // No mapping that starts before the first token of Spelled; we don't have
+    // to modify offsets.
+ ExpandedBegin = File.BeginExpanded + SpelledFrontI;
+ } else if (SpelledFrontI < FrontMapping->EndSpelled) {
+ // This mapping applies to Spelled tokens.
+ if (SpelledFrontI != FrontMapping->BeginSpelled) {
+      // Spelled tokens don't cover the entire mapping; return an empty result.
+ return {}; // FIXME: support macro arguments.
+ }
+ // Spelled tokens start at the beginning of this mapping.
+ ExpandedBegin = FrontMapping->BeginExpanded;
+ } else {
+    // Spelled tokens start after the mapping ends (they start in the hole
+    // between two mappings, or between a mapping and the end of the file).
+ ExpandedBegin =
+ FrontMapping->EndExpanded + (SpelledFrontI - FrontMapping->EndSpelled);
+ }
+
+ auto *BackMapping = mappingStartingBeforeSpelled(File, &Spelled.back());
+ unsigned SpelledBackI = &Spelled.back() - File.SpelledTokens.data();
+ unsigned ExpandedEnd;
+ if (!BackMapping) {
+    // No mapping that starts before the last token of Spelled; we don't have
+    // to modify offsets.
+ ExpandedEnd = File.BeginExpanded + SpelledBackI + 1;
+ } else if (SpelledBackI < BackMapping->EndSpelled) {
+ // This mapping applies to Spelled tokens.
+ if (SpelledBackI + 1 != BackMapping->EndSpelled) {
+      // Spelled tokens don't cover the entire mapping; return an empty result.
+ return {}; // FIXME: support macro arguments.
+ }
+ ExpandedEnd = BackMapping->EndExpanded;
+ } else {
+ // Spelled tokens end after the mapping ends.
+ ExpandedEnd =
+ BackMapping->EndExpanded + (SpelledBackI - BackMapping->EndSpelled) + 1;
+ }
+
+ assert(ExpandedBegin < ExpandedTokens.size());
+ assert(ExpandedEnd < ExpandedTokens.size());
+ // Avoid returning empty ranges.
+ if (ExpandedBegin == ExpandedEnd)
+ return {};
+ return {llvm::makeArrayRef(ExpandedTokens.data() + ExpandedBegin,
+ ExpandedTokens.data() + ExpandedEnd)};
}
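
A worked illustration of the spelled-to-expanded direction handled above (the SQUARE macro is invented): mapping the spelled tokens `SQUARE ( 2 )` yields the full expansion `( ( 2 ) * ( 2 ) )` in the expanded token stream, while a subset of the spelled macro call currently maps to nothing.

#define SQUARE(X) ((X) * (X))
// Spelled tokens in the file:          int Four = SQUARE ( 2 ) ;
// Expanded tokens after preprocessing: int Four = ( ( 2 ) * ( 2 ) ) ;
int Four = SQUARE(2);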
llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
@@ -182,9 +324,20 @@ llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
return It->second.SpelledTokens;
}
+const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const {
+ assert(Loc.isFileID());
+ const auto *Tok = llvm::partition_point(
+ spelledTokens(SourceMgr->getFileID(Loc)),
+ [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
+ if (!Tok || Tok->location() != Loc)
+ return nullptr;
+ return Tok;
+}
+
std::string TokenBuffer::Mapping::str() const {
- return llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
- BeginSpelled, EndSpelled, BeginExpanded, EndExpanded);
+ return std::string(
+ llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
+ BeginSpelled, EndSpelled, BeginExpanded, EndExpanded));
}
llvm::Optional<llvm::ArrayRef<syntax::Token>>
@@ -194,8 +347,6 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
if (Expanded.empty())
return llvm::None;
- // FIXME: also allow changes uniquely mapping to macro arguments.
-
const syntax::Token *BeginSpelled;
const Mapping *BeginMapping;
std::tie(BeginSpelled, BeginMapping) =
@@ -213,12 +364,28 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
const MarkedFile &File = Files.find(FID)->second;
- // Do not allow changes that cross macro expansion boundaries.
+  // If both tokens are coming from a macro argument expansion, try to map to
+  // the smallest part of the macro argument. The BeginMapping && LastMapping
+  // check is only for performance; the mappings are a prerequisite for
+  // Expanded.front() and Expanded.back() being part of a macro arg expansion.
+ if (BeginMapping && LastMapping &&
+ SourceMgr->isMacroArgExpansion(Expanded.front().location()) &&
+ SourceMgr->isMacroArgExpansion(Expanded.back().location())) {
+ auto CommonRange = findCommonRangeForMacroArgs(Expanded.front(),
+ Expanded.back(), *SourceMgr);
+    // It might be the case that the tokens are arguments of different macro
+    // calls; in that case we should continue with the logic below instead of
+    // returning an empty range.
+ if (CommonRange.isValid())
+ return getTokensCovering(File.SpelledTokens, CommonRange, *SourceMgr);
+ }
+
+  // Do not allow changes that don't cover the full expansion.
unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data();
unsigned EndExpanded = Expanded.end() - ExpandedTokens.data();
- if (BeginMapping && BeginMapping->BeginExpanded < BeginExpanded)
+ if (BeginMapping && BeginExpanded != BeginMapping->BeginExpanded)
return llvm::None;
- if (LastMapping && EndExpanded < LastMapping->EndExpanded)
+ if (LastMapping && LastMapping->EndExpanded != EndExpanded)
return llvm::None;
// All is good, return the result.
return llvm::makeArrayRef(
@@ -253,24 +420,30 @@ TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
ExpandedTokens.data() + M->EndExpanded);
return E;
}
-
llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
- const syntax::TokenBuffer &Tokens) {
+ llvm::ArrayRef<syntax::Token> Tokens) {
assert(Loc.isFileID());
- llvm::ArrayRef<syntax::Token> All =
- Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc));
+
auto *Right = llvm::partition_point(
- All, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
- bool AcceptRight = Right != All.end() && Right->location() <= Loc;
- bool AcceptLeft = Right != All.begin() && (Right - 1)->endLocation() >= Loc;
+ Tokens, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
+ bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc;
+ bool AcceptLeft =
+ Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc;
return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0),
Right + (AcceptRight ? 1 : 0));
}
+llvm::ArrayRef<syntax::Token>
+syntax::spelledTokensTouching(SourceLocation Loc,
+ const syntax::TokenBuffer &Tokens) {
+ return spelledTokensTouching(
+ Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
+}
+
const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
- const syntax::TokenBuffer &Tokens) {
+ llvm::ArrayRef<syntax::Token> Tokens) {
for (const syntax::Token &Tok : spelledTokensTouching(Loc, Tokens)) {
if (Tok.kind() == tok::identifier)
return &Tok;
@@ -278,6 +451,13 @@ syntax::spelledIdentifierTouching(SourceLocation Loc,
return nullptr;
}
+const syntax::Token *
+syntax::spelledIdentifierTouching(SourceLocation Loc,
+ const syntax::TokenBuffer &Tokens) {
+ return spelledIdentifierTouching(
+ Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
+}
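
A hedged usage sketch of the convenience overload above (the function and variable names are assumptions): given a populated TokenBuffer and a file location, print the identifier the location touches, if any.

#include "clang/Tooling/Syntax/Tokens.h"
#include "llvm/Support/raw_ostream.h"

void printTouchedIdentifier(const clang::syntax::TokenBuffer &Tokens,
                            clang::SourceLocation Loc) {
  // Loc must be a file location; nullptr means no identifier starts at or
  // ends immediately before Loc.
  if (const clang::syntax::Token *Id =
          clang::syntax::spelledIdentifierTouching(Loc, Tokens))
    llvm::errs() << Id->text(Tokens.sourceManager()) << "\n";
}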
+
std::vector<const syntax::Token *>
TokenBuffer::macroExpansions(FileID FID) const {
auto FileIt = Files.find(FID);
@@ -293,7 +473,8 @@ TokenBuffer::macroExpansions(FileID FID) const {
return Expansions;
}
-std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
+std::vector<syntax::Token> syntax::tokenize(const FileRange &FR,
+ const SourceManager &SM,
const LangOptions &LO) {
std::vector<syntax::Token> Tokens;
IdentifierTable Identifiers(LO);
@@ -308,18 +489,28 @@ std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
Tokens.push_back(syntax::Token(T));
};
- Lexer L(FID, SM.getBuffer(FID), SM, LO);
+ auto SrcBuffer = SM.getBufferData(FR.file());
+ Lexer L(SM.getLocForStartOfFile(FR.file()), LO, SrcBuffer.data(),
+ SrcBuffer.data() + FR.beginOffset(),
+ // We can't make BufEnd point to FR.endOffset, as Lexer requires a
+ // null terminated buffer.
+ SrcBuffer.data() + SrcBuffer.size());
clang::Token T;
- while (!L.LexFromRawLexer(T))
+ while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < FR.endOffset())
AddToken(T);
- // 'eof' is only the last token if the input is null-terminated. Never store
- // it, for consistency.
- if (T.getKind() != tok::eof)
+  // LexFromRawLexer returns true when it parses the last token of the file;
+  // add it iff it starts within the range we are interested in.
+ if (SM.getFileOffset(T.getLocation()) < FR.endOffset())
AddToken(T);
return Tokens;
}
+std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
+ const LangOptions &LO) {
+ return tokenize(syntax::FileRange(FID, 0, SM.getFileIDSize(FID)), SM, LO);
+}
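
A small usage sketch of the range-based tokenize overload above (lexPrefix is an invented wrapper; SM, LO, and FID come from the caller, and the file is assumed to be at least 16 bytes long): raw-lex only the tokens that start within the first 16 bytes of the file.

#include "clang/Tooling/Syntax/Tokens.h"

std::vector<clang::syntax::Token>
lexPrefix(clang::FileID FID, const clang::SourceManager &SM,
          const clang::LangOptions &LO) {
  return clang::syntax::tokenize(
      clang::syntax::FileRange(FID, /*BeginOffset=*/0, /*EndOffset=*/16), SM, LO);
}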
+
/// Records information required to construct mappings for the token buffer that
/// we are collecting.
class TokenCollector::CollectPPExpansions : public PPCallbacks {
@@ -449,6 +640,19 @@ public:
for (const auto &File : Result.Files)
discard(File.first);
+#ifndef NDEBUG
+ for (auto &pair : Result.Files) {
+ auto &mappings = pair.second.Mappings;
+ assert(llvm::is_sorted(mappings, [](const TokenBuffer::Mapping &M1,
+ const TokenBuffer::Mapping &M2) {
+ return M1.BeginSpelled < M2.BeginSpelled &&
+ M1.EndSpelled < M2.EndSpelled &&
+ M1.BeginExpanded < M2.BeginExpanded &&
+ M1.EndExpanded < M2.EndExpanded;
+ }));
+ }
+#endif
+
return std::move(Result);
}
@@ -598,19 +802,20 @@ TokenBuffer TokenCollector::consume() && {
}
std::string syntax::Token::str() const {
- return llvm::formatv("Token({0}, length = {1})", tok::getTokenName(kind()),
- length());
+ return std::string(llvm::formatv("Token({0}, length = {1})",
+ tok::getTokenName(kind()), length()));
}
std::string syntax::Token::dumpForTests(const SourceManager &SM) const {
- return llvm::formatv("{0} {1}", tok::getTokenName(kind()), text(SM));
+ return std::string(llvm::formatv("Token(`{0}`, {1}, length = {2})", text(SM),
+ tok::getTokenName(kind()), length()));
}
std::string TokenBuffer::dumpForTests() const {
auto PrintToken = [this](const syntax::Token &T) -> std::string {
if (T.kind() == tok::eof)
return "<eof>";
- return T.text(*SourceMgr);
+ return std::string(T.text(*SourceMgr));
};
auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS,
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
index 9a6270ec4cce..37579e6145b6 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
@@ -58,22 +58,33 @@ bool syntax::Leaf::classof(const Node *N) {
syntax::Node::Node(NodeKind Kind)
: Parent(nullptr), NextSibling(nullptr), Kind(static_cast<unsigned>(Kind)),
- Role(static_cast<unsigned>(NodeRole::Detached)), Original(false),
- CanModify(false) {}
+ Role(0), Original(false), CanModify(false) {
+ this->setRole(NodeRole::Detached);
+}
bool syntax::Node::isDetached() const { return role() == NodeRole::Detached; }
+void syntax::Node::setRole(NodeRole NR) {
+ this->Role = static_cast<unsigned>(NR);
+}
+
bool syntax::Tree::classof(const Node *N) { return N->kind() > NodeKind::Leaf; }
void syntax::Tree::prependChildLowLevel(Node *Child, NodeRole Role) {
- assert(Child->Parent == nullptr);
- assert(Child->NextSibling == nullptr);
assert(Child->role() == NodeRole::Detached);
assert(Role != NodeRole::Detached);
+ Child->setRole(Role);
+ prependChildLowLevel(Child);
+}
+
+void syntax::Tree::prependChildLowLevel(Node *Child) {
+ assert(Child->Parent == nullptr);
+ assert(Child->NextSibling == nullptr);
+ assert(Child->role() != NodeRole::Detached);
+
Child->Parent = this;
Child->NextSibling = this->FirstChild;
- Child->Role = static_cast<unsigned>(Role);
this->FirstChild = Child;
}
@@ -94,7 +105,7 @@ void syntax::Tree::replaceChildRangeLowLevel(Node *BeforeBegin, Node *End,
N != End;) {
auto *Next = N->NextSibling;
- N->Role = static_cast<unsigned>(NodeRole::Detached);
+ N->setRole(NodeRole::Detached);
N->Parent = nullptr;
N->NextSibling = nullptr;
if (N->Original)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
index 4a0618c50e42..40b6cff0d627 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
@@ -141,11 +141,13 @@ namespace clang {
namespace tooling {
/// Returns a clang build invocation initialized from the CC1 flags.
-CompilerInvocation *newInvocation(
- DiagnosticsEngine *Diagnostics, const llvm::opt::ArgStringList &CC1Args) {
+CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
+ const llvm::opt::ArgStringList &CC1Args,
+ const char *const BinaryName) {
assert(!CC1Args.empty() && "Must at least contain the program name!");
CompilerInvocation *Invocation = new CompilerInvocation;
- CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, *Diagnostics);
+ CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, *Diagnostics,
+ BinaryName);
Invocation->getFrontendOpts().DisableFree = false;
Invocation->getCodeGenOpts().DisableFree = false;
return Invocation;
@@ -234,7 +236,7 @@ llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
if (auto EC = FS.makeAbsolute(AbsolutePath))
return llvm::errorCodeToError(EC);
llvm::sys::path::native(AbsolutePath);
- return AbsolutePath.str();
+ return std::string(AbsolutePath.str());
}
std::string getAbsolutePath(StringRef File) {
@@ -345,7 +347,7 @@ bool ToolInvocation::run() {
if (!CC1Args)
return false;
std::unique_ptr<CompilerInvocation> Invocation(
- newInvocation(&Diagnostics, *CC1Args));
+ newInvocation(&Diagnostics, *CC1Args, BinaryName));
// FIXME: remove this when all users have migrated!
for (const auto &It : MappedFileContents) {
// Inject the code as the given file name into the preprocessor options.
@@ -619,7 +621,8 @@ buildASTFromCode(StringRef Code, StringRef FileName,
std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
StringRef Code, const std::vector<std::string> &Args, StringRef FileName,
StringRef ToolName, std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- ArgumentsAdjuster Adjuster, const FileContentMappings &VirtualMappedFiles) {
+ ArgumentsAdjuster Adjuster, const FileContentMappings &VirtualMappedFiles,
+ DiagnosticConsumer *DiagConsumer) {
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
@@ -633,6 +636,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
ToolInvocation Invocation(
getSyntaxOnlyToolArgs(ToolName, Adjuster(Args, FileName), FileName),
&Action, Files.get(), std::move(PCHContainerOps));
+ Invocation.setDiagnosticConsumer(DiagConsumer);
InMemoryFileSystem->addFile(FileName, 0,
llvm::MemoryBuffer::getMemBufferCopy(Code));
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
new file mode 100644
index 000000000000..1579115b9313
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
@@ -0,0 +1,279 @@
+//===--- Parsing.cpp - Parsing function implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Transformer/Parsing.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Transformer/RangeSelector.h"
+#include "clang/Tooling/Transformer/SourceCode.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace transformer;
+
+// FIXME: This implementation is entirely separate from that of the AST
+// matchers. Given the similarity of the languages and uses of the two parsers,
+// the two should share a common parsing infrastructure, as should other
+// Transformer types. We intend to unify this implementation soon to share as
+// much as possible with the AST Matchers parsing.
+
+namespace {
+using llvm::Error;
+using llvm::Expected;
+
+template <typename... Ts> using RangeSelectorOp = RangeSelector (*)(Ts...);
+
+struct ParseState {
+ // The remaining input to be processed.
+ StringRef Input;
+ // The original input. Not modified during parsing; only for reference in
+ // error reporting.
+ StringRef OriginalInput;
+};
+
+// Represents an intermediate result returned by a parsing function. Functions
+// that don't generate values should use `llvm::None`
+template <typename ResultType> struct ParseProgress {
+ ParseState State;
+ // Intermediate result generated by the Parser.
+ ResultType Value;
+};
+
+template <typename T> using ExpectedProgress = llvm::Expected<ParseProgress<T>>;
+template <typename T> using ParseFunction = ExpectedProgress<T> (*)(ParseState);
+
+class ParseError : public llvm::ErrorInfo<ParseError> {
+public:
+ // Required field for all ErrorInfo derivatives.
+ static char ID;
+
+ ParseError(size_t Pos, std::string ErrorMsg, std::string InputExcerpt)
+ : Pos(Pos), ErrorMsg(std::move(ErrorMsg)),
+ Excerpt(std::move(InputExcerpt)) {}
+
+ void log(llvm::raw_ostream &OS) const override {
+ OS << "parse error at position (" << Pos << "): " << ErrorMsg
+ << ": " + Excerpt;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+
+ // Position of the error in the input string.
+ size_t Pos;
+ std::string ErrorMsg;
+ // Excerpt of the input starting at the error position.
+ std::string Excerpt;
+};
+
+char ParseError::ID;
+} // namespace
+
+static const llvm::StringMap<RangeSelectorOp<std::string>> &
+getUnaryStringSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<std::string>> M = {
+ {"name", name},
+ {"node", node},
+ {"statement", statement},
+ {"statements", statements},
+ {"member", member},
+ {"callArgs", callArgs},
+ {"elseBranch", elseBranch},
+ {"initListElements", initListElements}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<RangeSelector>> &
+getUnaryRangeSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<RangeSelector>> M = {
+ {"before", before}, {"after", after}, {"expansion", expansion}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> &
+getBinaryStringSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> M = {
+ {"encloseNodes", range}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>> &
+getBinaryRangeSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>>
+ M = {{"enclose", range}};
+ return M;
+}
+
+template <typename Element>
+llvm::Optional<Element> findOptional(const llvm::StringMap<Element> &Map,
+ llvm::StringRef Key) {
+ auto it = Map.find(Key);
+ if (it == Map.end())
+ return llvm::None;
+ return it->second;
+}
+
+template <typename ResultType>
+ParseProgress<ResultType> makeParseProgress(ParseState State,
+ ResultType Result) {
+ return ParseProgress<ResultType>{State, std::move(Result)};
+}
+
+static llvm::Error makeParseError(const ParseState &S, std::string ErrorMsg) {
+ size_t Pos = S.OriginalInput.size() - S.Input.size();
+ return llvm::make_error<ParseError>(Pos, std::move(ErrorMsg),
+ S.OriginalInput.substr(Pos, 20).str());
+}
+
+// Returns a new ParseState that advances \c S by \c N characters.
+static ParseState advance(ParseState S, size_t N) {
+ S.Input = S.Input.drop_front(N);
+ return S;
+}
+
+static StringRef consumeWhitespace(StringRef S) {
+ return S.drop_while([](char c) { return c >= 0 && isWhitespace(c); });
+}
+
+// Parses a single expected character \c c from \c State, skipping preceding
+// whitespace. Error if the expected character isn't found.
+static ExpectedProgress<llvm::NoneType> parseChar(char c, ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty() || State.Input.front() != c)
+ return makeParseError(State,
+ ("expected char not found: " + llvm::Twine(c)).str());
+ return makeParseProgress(advance(State, 1), llvm::None);
+}
+
+// Parses an identifier "token" -- handles preceding whitespace.
+static ExpectedProgress<std::string> parseId(ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ auto Id = State.Input.take_while(
+ [](char c) { return c >= 0 && isIdentifierBody(c); });
+ if (Id.empty())
+ return makeParseError(State, "failed to parse name");
+ return makeParseProgress(advance(State, Id.size()), Id.str());
+}
+
+// For consistency with the AST matcher parser and C++ code, node ids are
+// written as strings. However, we do not support escaping in the string.
+static ExpectedProgress<std::string> parseStringId(ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty())
+ return makeParseError(State, "unexpected end of input");
+ if (!State.Input.consume_front("\""))
+ return makeParseError(
+ State,
+ "expecting string, but encountered other character or end of input");
+
+ StringRef Id = State.Input.take_until([](char c) { return c == '"'; });
+ if (State.Input.size() == Id.size())
+ return makeParseError(State, "unterminated string");
+ // Advance past the trailing quote as well.
+ return makeParseProgress(advance(State, Id.size() + 1), Id.str());
+}
+
+// Parses a single element surrounded by parens. `Op` is applied to the parsed
+// result to create the result of this function call.
+template <typename T>
+ExpectedProgress<RangeSelector> parseSingle(ParseFunction<T> ParseElement,
+ RangeSelectorOp<T> Op,
+ ParseState State) {
+ auto P = parseChar('(', State);
+ if (!P)
+ return P.takeError();
+
+ auto E = ParseElement(P->State);
+ if (!E)
+ return E.takeError();
+
+ P = parseChar(')', E->State);
+ if (!P)
+ return P.takeError();
+
+ return makeParseProgress(P->State, Op(std::move(E->Value)));
+}
+
+// Parses a pair of elements surrounded by parens and separated by comma. `Op`
+// is applied to the parsed results to create the result of this function call.
+template <typename T>
+ExpectedProgress<RangeSelector> parsePair(ParseFunction<T> ParseElement,
+ RangeSelectorOp<T, T> Op,
+ ParseState State) {
+ auto P = parseChar('(', State);
+ if (!P)
+ return P.takeError();
+
+ auto Left = ParseElement(P->State);
+ if (!Left)
+ return Left.takeError();
+
+ P = parseChar(',', Left->State);
+ if (!P)
+ return P.takeError();
+
+ auto Right = ParseElement(P->State);
+ if (!Right)
+ return Right.takeError();
+
+ P = parseChar(')', Right->State);
+ if (!P)
+ return P.takeError();
+
+ return makeParseProgress(P->State,
+ Op(std::move(Left->Value), std::move(Right->Value)));
+}
+
+// Parses input for a range selector operator (unary ops like name or member,
+// binary ops like encloseNodes). Returns the corresponding RangeSelector on
+// success and an error if it fails to parse input for an operator.
+static ExpectedProgress<RangeSelector>
+parseRangeSelectorImpl(ParseState State) {
+ auto Id = parseId(State);
+ if (!Id)
+ return Id.takeError();
+
+ std::string OpName = std::move(Id->Value);
+ if (auto Op = findOptional(getUnaryStringSelectors(), OpName))
+ return parseSingle(parseStringId, *Op, Id->State);
+
+ if (auto Op = findOptional(getUnaryRangeSelectors(), OpName))
+ return parseSingle(parseRangeSelectorImpl, *Op, Id->State);
+
+ if (auto Op = findOptional(getBinaryStringSelectors(), OpName))
+ return parsePair(parseStringId, *Op, Id->State);
+
+ if (auto Op = findOptional(getBinaryRangeSelectors(), OpName))
+ return parsePair(parseRangeSelectorImpl, *Op, Id->State);
+
+ return makeParseError(State, "unknown selector name: " + OpName);
+}
+
+Expected<RangeSelector> transformer::parseRangeSelector(llvm::StringRef Input) {
+ ParseState State = {Input, Input};
+ ExpectedProgress<RangeSelector> Result = parseRangeSelectorImpl(State);
+ if (!Result)
+ return Result.takeError();
+ State = Result->State;
+ // Discard any potentially trailing whitespace.
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty())
+ return Result->Value;
+ return makeParseError(State, "unexpected input after selector");
+}
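
A hedged usage sketch (not part of the patch) of the new parseRangeSelector entry point; the selector text and the wrapper demoSelector are illustrative assumptions.

#include "clang/Tooling/Transformer/Parsing.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "llvm/Support/Error.h"

// Illustrative only: the textual form below is equivalent to calling
// transformer::name("d") directly; an unknown selector name or trailing input
// yields a ParseError.
llvm::Expected<clang::transformer::RangeSelector> demoSelector() {
  return clang::transformer::parseRangeSelector("name(\"d\")");
}
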
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 9f81423c9022..29b1a5b0372e 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -23,8 +23,6 @@ using namespace clang;
using namespace transformer;
using ast_matchers::MatchFinder;
-using ast_type_traits::ASTNodeKind;
-using ast_type_traits::DynTypedNode;
using llvm::Error;
using llvm::StringError;
@@ -148,7 +146,7 @@ RangeSelector transformer::statement(std::string ID) {
};
}
-RangeSelector transformer::range(RangeSelector Begin, RangeSelector End) {
+RangeSelector transformer::enclose(RangeSelector Begin, RangeSelector End) {
return [Begin, End](const MatchResult &Result) -> Expected<CharSourceRange> {
Expected<CharSourceRange> BeginRange = Begin(Result);
if (!BeginRange)
@@ -167,8 +165,9 @@ RangeSelector transformer::range(RangeSelector Begin, RangeSelector End) {
};
}
-RangeSelector transformer::range(std::string BeginID, std::string EndID) {
- return transformer::range(node(std::move(BeginID)), node(std::move(EndID)));
+RangeSelector transformer::encloseNodes(std::string BeginID,
+ std::string EndID) {
+ return transformer::enclose(node(std::move(BeginID)), node(std::move(EndID)));
}
RangeSelector transformer::member(std::string ID) {
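
A brief sketch (not part of the patch) of the renamed combinators; the bound names "begin" and "end" are illustrative assumptions.

#include "clang/Tooling/Transformer/RangeSelector.h"

using clang::transformer::enclose;
using clang::transformer::encloseNodes;
using clang::transformer::node;
using clang::transformer::RangeSelector;

// Illustrative only: both selectors cover the source text spanning from the
// node bound as "begin" through the node bound as "end" in a match result.
RangeSelector SpanViaNodes = encloseNodes("begin", "end");
RangeSelector SpanViaSelectors = enclose(node("begin"), node("end"));
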
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
index 20d3a371950a..995bec03cd66 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
@@ -25,16 +25,14 @@ using namespace transformer;
using ast_matchers::MatchFinder;
using ast_matchers::internal::DynTypedMatcher;
-using ast_type_traits::ASTNodeKind;
using MatchResult = MatchFinder::MatchResult;
-Expected<SmallVector<transformer::detail::Transformation, 1>>
-transformer::detail::translateEdits(const MatchResult &Result,
- llvm::ArrayRef<ASTEdit> Edits) {
- SmallVector<transformer::detail::Transformation, 1> Transformations;
- for (const auto &Edit : Edits) {
- Expected<CharSourceRange> Range = Edit.TargetRange(Result);
+static Expected<SmallVector<transformer::Edit, 1>>
+translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
+ SmallVector<transformer::Edit, 1> Edits;
+ for (const auto &E : ASTEdits) {
+ Expected<CharSourceRange> Range = E.TargetRange(Result);
if (!Range)
return Range.takeError();
llvm::Optional<CharSourceRange> EditRange =
@@ -42,21 +40,34 @@ transformer::detail::translateEdits(const MatchResult &Result,
// FIXME: let user specify whether to treat this case as an error or ignore
// it as is currently done.
if (!EditRange)
- return SmallVector<Transformation, 0>();
- auto Replacement = Edit.Replacement->eval(Result);
+ return SmallVector<Edit, 0>();
+ auto Replacement = E.Replacement->eval(Result);
if (!Replacement)
return Replacement.takeError();
- transformer::detail::Transformation T;
+ transformer::Edit T;
T.Range = *EditRange;
T.Replacement = std::move(*Replacement);
- Transformations.push_back(std::move(T));
+ T.Metadata = E.Metadata;
+ Edits.push_back(std::move(T));
}
- return Transformations;
+ return Edits;
}
-ASTEdit transformer::changeTo(RangeSelector S, TextGenerator Replacement) {
+EditGenerator transformer::editList(SmallVector<ASTEdit, 1> Edits) {
+ return [Edits = std::move(Edits)](const MatchResult &Result) {
+ return translateEdits(Result, Edits);
+ };
+}
+
+EditGenerator transformer::edit(ASTEdit Edit) {
+ return [Edit = std::move(Edit)](const MatchResult &Result) {
+ return translateEdits(Result, {Edit});
+ };
+}
+
+ASTEdit transformer::changeTo(RangeSelector Target, TextGenerator Replacement) {
ASTEdit E;
- E.TargetRange = std::move(S);
+ E.TargetRange = std::move(Target);
E.Replacement = std::move(Replacement);
return E;
}
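
A hedged sketch (not part of the patch) of building a rule against the new EditGenerator-based makeRule via editList; the matcher, the bound names, and the use of cat() as the TextGenerator are assumptions taken from the in-tree Transformer examples, not from this commit.

#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "clang/Tooling/Transformer/RewriteRule.h"
#include "clang/Tooling/Transformer/Stencil.h"

// Illustrative only: one rule producing two edits per match through the new
// editList() adapter; edit() would wrap a single ASTEdit the same way.
clang::transformer::RewriteRule demoRule() {
  using namespace clang::ast_matchers;
  using namespace clang::transformer;
  return makeRule(
      callExpr(callee(functionDecl(hasName("f"))),
               hasArgument(0, expr().bind("arg")))
          .bind("call"),
      editList({changeTo(node("arg"), cat("0")),
                insertBefore(node("call"), cat("/*rewritten*/ "))}));
}
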
@@ -83,8 +94,9 @@ ASTEdit transformer::remove(RangeSelector S) {
return change(std::move(S), std::make_shared<SimpleTextGenerator>(""));
}
-RewriteRule transformer::makeRule(DynTypedMatcher M, SmallVector<ASTEdit, 1> Edits,
- TextGenerator Explanation) {
+RewriteRule transformer::makeRule(ast_matchers::internal::DynTypedMatcher M,
+ EditGenerator Edits,
+ TextGenerator Explanation) {
return RewriteRule{{RewriteRule::Case{
std::move(M), std::move(Edits), std::move(Explanation), {}}}};
}
@@ -105,10 +117,13 @@ static bool hasValidKind(const DynTypedMatcher &M) {
#endif
// Binds each rule's matcher to a unique (and deterministic) tag based on
-// `TagBase` and the id paired with the case.
+// `TagBase` and the id paired with the case. All of the returned matchers have
+// their traversal kind explicitly set, either based on a pre-set kind or to the
+// provided `DefaultTraversalKind`.
static std::vector<DynTypedMatcher> taggedMatchers(
StringRef TagBase,
- const SmallVectorImpl<std::pair<size_t, RewriteRule::Case>> &Cases) {
+ const SmallVectorImpl<std::pair<size_t, RewriteRule::Case>> &Cases,
+ ast_type_traits::TraversalKind DefaultTraversalKind) {
std::vector<DynTypedMatcher> Matchers;
Matchers.reserve(Cases.size());
for (const auto &Case : Cases) {
@@ -116,8 +131,10 @@ static std::vector<DynTypedMatcher> taggedMatchers(
// HACK: Many matchers are not bindable, so ensure that tryBind will work.
DynTypedMatcher BoundMatcher(Case.second.Matcher);
BoundMatcher.setAllowBind(true);
- auto M = BoundMatcher.tryBind(Tag);
- Matchers.push_back(*std::move(M));
+ auto M = *BoundMatcher.tryBind(Tag);
+ Matchers.push_back(!M.getTraversalKind()
+ ? M.withTraversalKind(DefaultTraversalKind)
+ : std::move(M));
}
return Matchers;
}
@@ -147,14 +164,21 @@ transformer::detail::buildMatchers(const RewriteRule &Rule) {
Buckets[Cases[I].Matcher.getSupportedKind()].emplace_back(I, Cases[I]);
}
+ // Each anyOf explicitly controls the traversal kind. The anyOf itself is set
+ // to `TK_AsIs` to ensure no nodes are skipped, thereby deferring to the kind
+ // of the branches. Then, each branch is either left as is, if the kind is
+ // already set, or explicitly set to `TK_IgnoreUnlessSpelledInSource`. We
+ // choose this setting, because we think it is the one most friendly to
+ // beginners, who are (largely) the target audience of Transformer.
std::vector<DynTypedMatcher> Matchers;
for (const auto &Bucket : Buckets) {
DynTypedMatcher M = DynTypedMatcher::constructVariadic(
DynTypedMatcher::VO_AnyOf, Bucket.first,
- taggedMatchers("Tag", Bucket.second));
+ taggedMatchers("Tag", Bucket.second, TK_IgnoreUnlessSpelledInSource));
M.setAllowBind(true);
// `tryBind` is guaranteed to succeed, because `AllowBind` was set to true.
- Matchers.push_back(*M.tryBind(RewriteRule::RootID));
+ Matchers.push_back(
+ M.tryBind(RewriteRule::RootID)->withTraversalKind(TK_AsIs));
}
return Matchers;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
index 836401d1e605..26b204851f05 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
@@ -10,10 +10,24 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/SourceCode.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <set>
using namespace clang;
+using llvm::errc;
+using llvm::StringError;
+
StringRef clang::tooling::getText(CharSourceRange Range,
const ASTContext &Context) {
return Lexer::getSourceText(Range, Context.getSourceManager(),
@@ -23,11 +37,45 @@ StringRef clang::tooling::getText(CharSourceRange Range,
CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
tok::TokenKind Next,
ASTContext &Context) {
- Optional<Token> Tok = Lexer::findNextToken(
- Range.getEnd(), Context.getSourceManager(), Context.getLangOpts());
- if (!Tok || !Tok->is(Next))
+ CharSourceRange R = Lexer::getAsCharRange(Range, Context.getSourceManager(),
+ Context.getLangOpts());
+ if (R.isInvalid())
+ return Range;
+ Token Tok;
+ bool Err =
+ Lexer::getRawToken(R.getEnd(), Tok, Context.getSourceManager(),
+ Context.getLangOpts(), /*IgnoreWhiteSpace=*/true);
+ if (Err || !Tok.is(Next))
return Range;
- return CharSourceRange::getTokenRange(Range.getBegin(), Tok->getLocation());
+ return CharSourceRange::getTokenRange(Range.getBegin(), Tok.getLocation());
+}
+
+llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
+ const SourceManager &SM) {
+ if (Range.isInvalid())
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Invalid range");
+
+ if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range starts or ends in a macro expansion");
+
+ if (SM.isInSystemHeader(Range.getBegin()) ||
+ SM.isInSystemHeader(Range.getEnd()))
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Range is in system header");
+
+ std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
+ std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
+ if (BeginInfo.first != EndInfo.first)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range begins and ends in different files");
+
+ if (BeginInfo.second > EndInfo.second)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range's begin is past its end");
+
+ return llvm::Error::success();
}
llvm::Optional<CharSourceRange>
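
A hedged usage sketch (not part of the patch) of the validateEditRange helper added above; the wrapper isEditable and the logging banner are illustrative assumptions.

#include "clang/Basic/SourceManager.h"
#include "clang/Tooling/Transformer/SourceCode.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative only: gate an edit on the checks above and print the specific
// reason (invalid range, macro expansion, system header, ...) when it fails.
bool isEditable(const clang::CharSourceRange &Range,
                const clang::SourceManager &SM) {
  if (llvm::Error Err = clang::tooling::validateEditRange(Range, SM)) {
    llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(),
                                "skipping edit: ");
    return false;
  }
  return true;
}
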
@@ -46,20 +94,308 @@ clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
// foo(DO_NOTHING(6))
// Decide whether the current behavior is desirable and modify if not.
CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
- if (Range.isInvalid())
- return None;
+ bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
+ if (IsInvalid)
+ return llvm::None;
+ return Range;
- if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
- return None;
- if (SM.isInSystemHeader(Range.getBegin()) ||
- SM.isInSystemHeader(Range.getEnd()))
- return None;
+}
- std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
- std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
- if (BeginInfo.first != EndInfo.first ||
- BeginInfo.second > EndInfo.second)
- return None;
+static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
+ return isVerticalWhitespace(SM.getCharacterData(Tok.getLocation())[0]);
+}
- return Range;
+static bool contains(const std::set<tok::TokenKind> &Terminators,
+ const Token &Tok) {
+ return Terminators.count(Tok.getKind()) > 0;
+}
+
+// Returns the exclusive, *file* end location of the entity whose last token is
+// at location 'EntityLast'. That is, it returns the location one past the last
+// relevant character.
+//
+// Associated tokens include comments, horizontal whitespace and 'Terminators'
+// -- optional tokens, which, if any are found, will be included; if
+// 'Terminators' is empty, we will not include any extra tokens beyond comments
+// and horizontal whitespace.
+static SourceLocation
+getEntityEndLoc(const SourceManager &SM, SourceLocation EntityLast,
+ const std::set<tok::TokenKind> &Terminators,
+ const LangOptions &LangOpts) {
+ assert(EntityLast.isValid() && "Invalid end location found.");
+
+ // We remember the last location of a non-horizontal-whitespace token we have
+ // lexed; this is the location up to which we will want to delete.
+ // FIXME: Support using the spelling loc here for cases where we want to
+ // analyze the macro text.
+
+ CharSourceRange ExpansionRange = SM.getExpansionRange(EntityLast);
+ // FIXME: Should check isTokenRange(), for the (rare) case that
+ // `ExpansionRange` is a character range.
+ std::unique_ptr<Lexer> Lexer = [&]() {
+ bool Invalid = false;
+ auto FileOffset = SM.getDecomposedLoc(ExpansionRange.getEnd());
+ llvm::StringRef File = SM.getBufferData(FileOffset.first, &Invalid);
+ assert(!Invalid && "Cannot get file/offset");
+ return std::make_unique<clang::Lexer>(
+ SM.getLocForStartOfFile(FileOffset.first), LangOpts, File.begin(),
+ File.data() + FileOffset.second, File.end());
+ }();
+
+ // Tell Lexer to return whitespace as pseudo-tokens (kind is tok::unknown).
+ Lexer->SetKeepWhitespaceMode(true);
+
+ // Generally, the code we want to include looks like this ([] are optional).
+ // If Terminators is empty:
+ // [ <comment> ] [ <newline> ]
+ // Otherwise:
+ // ... <terminator> [ <comment> ] [ <newline> ]
+
+ Token Tok;
+ bool Terminated = false;
+
+ // First, lex to the current token (which is the last token of the range that
+ // is definitely associated with the decl). Then, we process the first token
+ // separately from the rest based on conditions that hold specifically for
+ // that first token.
+ //
+ // We do not search for a terminator if none is required or we've already
+ // encountered it. Otherwise, if the original `EntityLast` location was in a
+ // macro expansion, we don't have visibility into the text, so we assume we've
+ // already terminated. However, we note this assumption with
+ // `TerminatedByMacro`, because we'll want to handle it somewhat differently
+ // for the terminators semicolon and comma. These terminators can be safely
+ // associated with the entity when they appear after the macro -- extra
+ // semicolons have no effect on the program and a well-formed program won't
+ // have multiple commas in a row, so we're guaranteed that there is only one.
+ //
+ // FIXME: This handling of macros is more conservative than necessary. When
+ // the end of the expansion coincides with the end of the node, we can still
+ // safely analyze the code. But, it is more complicated, because we need to
+ // start by lexing the spelling loc for the first token and then switch to the
+ // expansion loc.
+ bool TerminatedByMacro = false;
+ Lexer->LexFromRawLexer(Tok);
+ if (Terminators.empty() || contains(Terminators, Tok))
+ Terminated = true;
+ else if (EntityLast.isMacroID()) {
+ Terminated = true;
+ TerminatedByMacro = true;
+ }
+
+ // We save the most recent candidate for the exclusive end location.
+ SourceLocation End = Tok.getEndLoc();
+
+ while (!Terminated) {
+ // Lex the next token we want to possibly expand the range with.
+ Lexer->LexFromRawLexer(Tok);
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Unexpected separators.
+ case tok::l_brace:
+ case tok::r_brace:
+ case tok::comma:
+ return End;
+ // Whitespace pseudo-tokens.
+ case tok::unknown:
+ if (startsWithNewline(SM, Tok))
+ // Include at least until the end of the line.
+ End = Tok.getEndLoc();
+ break;
+ default:
+ if (contains(Terminators, Tok))
+ Terminated = true;
+ End = Tok.getEndLoc();
+ break;
+ }
+ }
+
+ do {
+ // Lex the next token we want to possibly expand the range with.
+ Lexer->LexFromRawLexer(Tok);
+
+ switch (Tok.getKind()) {
+ case tok::unknown:
+ if (startsWithNewline(SM, Tok))
+ // We're done, but include this newline.
+ return Tok.getEndLoc();
+ break;
+ case tok::comment:
+ // Include any comments we find on the way.
+ End = Tok.getEndLoc();
+ break;
+ case tok::semi:
+ case tok::comma:
+ if (TerminatedByMacro && contains(Terminators, Tok)) {
+ End = Tok.getEndLoc();
+ // We've found a real terminator.
+ TerminatedByMacro = false;
+ break;
+ }
+ // Found an unrelated token; stop and don't include it.
+ return End;
+ default:
+ // Found an unrelated token; stop and don't include it.
+ return End;
+ }
+ } while (true);
+}
+
+// Returns the expected terminator tokens for the given declaration.
+//
+// If we do not know the correct terminator token, returns an empty set.
+//
+// There are cases where we have more than one possible terminator (for example,
+// we find either a comma or a semicolon after a VarDecl).
+static std::set<tok::TokenKind> getTerminators(const Decl &D) {
+ if (llvm::isa<RecordDecl>(D) || llvm::isa<UsingDecl>(D))
+ return {tok::semi};
+
+ if (llvm::isa<FunctionDecl>(D) || llvm::isa<LinkageSpecDecl>(D))
+ return {tok::r_brace, tok::semi};
+
+ if (llvm::isa<VarDecl>(D) || llvm::isa<FieldDecl>(D))
+ return {tok::comma, tok::semi};
+
+ return {};
+}
+
+// Starting from `Loc`, skips whitespace up to, and including, a single
+// newline. Returns the (exclusive) end of any skipped whitespace (that is, the
+// location immediately after the whitespace).
+static SourceLocation skipWhitespaceAndNewline(const SourceManager &SM,
+ SourceLocation Loc,
+ const LangOptions &LangOpts) {
+ const char *LocChars = SM.getCharacterData(Loc);
+ int i = 0;
+ while (isHorizontalWhitespace(LocChars[i]))
+ ++i;
+ if (isVerticalWhitespace(LocChars[i]))
+ ++i;
+ return Loc.getLocWithOffset(i);
+}
+
+// Is `Loc` separated from any following decl by something meaningful (e.g. an
+// empty line, a comment), ignoring horizontal whitespace? Since this is a
+// heuristic, we return false when in doubt. `Loc` cannot be the first location
+// in the file.
+static bool atOrBeforeSeparation(const SourceManager &SM, SourceLocation Loc,
+ const LangOptions &LangOpts) {
+ // If the preceding character is a newline, we'll check for an empty line as a
+ // separator. However, we can't identify an empty line using tokens, so we
+ // analyse the characters. If we try to use tokens, we'll just end up with a
+ // whitespace token, whose characters we'd have to analyse anyhow.
+ bool Invalid = false;
+ const char *LocChars =
+ SM.getCharacterData(Loc.getLocWithOffset(-1), &Invalid);
+ assert(!Invalid &&
+ "Loc must be a valid character and not the first of the source file.");
+ if (isVerticalWhitespace(LocChars[0])) {
+ for (int i = 1; isWhitespace(LocChars[i]); ++i)
+ if (isVerticalWhitespace(LocChars[i]))
+ return true;
+ }
+ // We didn't find an empty line, so lex the next token, skipping past any
+ // whitespace we just scanned.
+ Token Tok;
+ bool Failed = Lexer::getRawToken(Loc, Tok, SM, LangOpts,
+ /*IgnoreWhiteSpace=*/true);
+ if (Failed)
+ // Any text that confuses the lexer seems fair to consider a separation.
+ return true;
+
+ switch (Tok.getKind()) {
+ case tok::comment:
+ case tok::l_brace:
+ case tok::r_brace:
+ case tok::eof:
+ return true;
+ default:
+ return false;
+ }
+}
+
+CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
+ ASTContext &Context) {
+ const SourceManager &SM = Context.getSourceManager();
+ const LangOptions &LangOpts = Context.getLangOpts();
+ CharSourceRange Range = CharSourceRange::getTokenRange(Decl.getSourceRange());
+
+ // First, expand to the start of the template<> declaration if necessary.
+ if (const auto *Record = llvm::dyn_cast<CXXRecordDecl>(&Decl)) {
+ if (const auto *T = Record->getDescribedClassTemplate())
+ if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
+ Range.setBegin(T->getBeginLoc());
+ } else if (const auto *F = llvm::dyn_cast<FunctionDecl>(&Decl)) {
+ if (const auto *T = F->getDescribedFunctionTemplate())
+ if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
+ Range.setBegin(T->getBeginLoc());
+ }
+
+ // Next, expand the end location past trailing comments to include a potential
+ // newline at the end of the decl's line.
+ Range.setEnd(
+ getEntityEndLoc(SM, Decl.getEndLoc(), getTerminators(Decl), LangOpts));
+ Range.setTokenRange(false);
+
+ // Expand to include preceding associated comments. We ignore any comments
+ // that are not preceding the decl, since we've already skipped trailing
+ // comments with getEntityEndLoc.
+ if (const RawComment *Comment =
+ Decl.getASTContext().getRawCommentForDeclNoCache(&Decl))
+ // Only include a preceding comment if:
+ // * it is *not* separate from the declaration (not including any newline
+ // that immediately follows the comment),
+ // * the decl *is* separate from any following entity (so, there are no
+ // other entities the comment could refer to), and
+ // * it is not an IfThisThenThat lint check.
+ if (SM.isBeforeInTranslationUnit(Comment->getBeginLoc(),
+ Range.getBegin()) &&
+ !atOrBeforeSeparation(
+ SM, skipWhitespaceAndNewline(SM, Comment->getEndLoc(), LangOpts),
+ LangOpts) &&
+ atOrBeforeSeparation(SM, Range.getEnd(), LangOpts)) {
+ const StringRef CommentText = Comment->getRawText(SM);
+ if (!CommentText.contains("LINT.IfChange") &&
+ !CommentText.contains("LINT.ThenChange"))
+ Range.setBegin(Comment->getBeginLoc());
+ }
+ // Add leading attributes.
+ for (auto *Attr : Decl.attrs()) {
+ if (Attr->getLocation().isInvalid() ||
+ !SM.isBeforeInTranslationUnit(Attr->getLocation(), Range.getBegin()))
+ continue;
+ Range.setBegin(Attr->getLocation());
+
+ // Extend to the left '[[' or '__attribute__((' if we saw the attribute,
+ // unless it is not a valid location.
+ bool Invalid;
+ StringRef Source =
+ SM.getBufferData(SM.getFileID(Range.getBegin()), &Invalid);
+ if (Invalid)
+ continue;
+ llvm::StringRef BeforeAttr =
+ Source.substr(0, SM.getFileOffset(Range.getBegin()));
+ llvm::StringRef BeforeAttrStripped = BeforeAttr.rtrim();
+
+ for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
+ // Handle whitespace between attribute prefix and attribute value.
+ if (BeforeAttrStripped.endswith(Prefix)) {
+ // Move start to start position of prefix, which is
+ // length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
+ // positions to the left.
+ Range.setBegin(Range.getBegin().getLocWithOffset(static_cast<int>(
+ -BeforeAttr.size() + BeforeAttrStripped.size() - Prefix.size())));
+ break;
+ // If we didn't see '[[' or '__attribute' it's probably coming from a
+ // macro expansion which is already handled by makeFileCharRange(),
+ // below.
+ }
+ }
+ }
+
+ // Range.getEnd() is already fully un-expanded by getEntityEndLoc. But,
+ // Range.getBegin() may be inside an expansion.
+ return Lexer::makeFileCharRange(Range, SM, LangOpts);
}
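
A hedged sketch (not part of the patch) of consuming getAssociatedRange's result; the helper rangeToDelete is an illustrative assumption.

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Tooling/Transformer/SourceCode.h"

// Illustrative only: the returned file char range covers the decl plus its
// leading comment/attributes and trailing terminator and newline, so it can
// back a single deletion edit.
clang::CharSourceRange rangeToDelete(const clang::Decl &D,
                                     clang::ASTContext &Context) {
  clang::CharSourceRange R = clang::tooling::getAssociatedRange(D, Context);
  llvm::StringRef Text = clang::tooling::getText(R, Context);
  (void)Text; // e.g. log what would be removed
  return R;
}
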
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index 8710e3cdf60f..2670bf7adabf 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -12,12 +12,14 @@
#include "clang/AST/Expr.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Transformer/SourceCode.h"
#include "clang/Tooling/Transformer/SourceCodeBuilders.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
#include <atomic>
#include <memory>
#include <string>
@@ -26,7 +28,6 @@ using namespace clang;
using namespace transformer;
using ast_matchers::MatchFinder;
-using ast_type_traits::DynTypedNode;
using llvm::errc;
using llvm::Error;
using llvm::Expected;
@@ -81,14 +82,14 @@ struct SelectorData {
// A stencil operation to build a member access `e.m` or `e->m`, as appropriate.
struct AccessData {
AccessData(StringRef BaseId, Stencil Member)
- : BaseId(BaseId), Member(std::move(Member)) {}
+ : BaseId(std::string(BaseId)), Member(std::move(Member)) {}
std::string BaseId;
Stencil Member;
};
struct IfBoundData {
IfBoundData(StringRef Id, Stencil TrueStencil, Stencil FalseStencil)
- : Id(Id), TrueStencil(std::move(TrueStencil)),
+ : Id(std::string(Id)), TrueStencil(std::move(TrueStencil)),
FalseStencil(std::move(FalseStencil)) {}
std::string Id;
Stencil TrueStencil;
@@ -227,10 +228,37 @@ Error evalData(const UnaryOperationData &Data,
Error evalData(const SelectorData &Data, const MatchFinder::MatchResult &Match,
std::string *Result) {
- auto Range = Data.Selector(Match);
- if (!Range)
- return Range.takeError();
- *Result += tooling::getText(*Range, *Match.Context);
+ auto RawRange = Data.Selector(Match);
+ if (!RawRange)
+ return RawRange.takeError();
+ CharSourceRange Range = Lexer::makeFileCharRange(
+ *RawRange, *Match.SourceManager, Match.Context->getLangOpts());
+ if (Range.isInvalid()) {
+ // Validate the original range to attempt to get a meaningful error message.
+ // If it's valid, then something else is the cause and we just return the
+ // generic failure message.
+ if (auto Err = tooling::validateEditRange(*RawRange, *Match.SourceManager))
+ return handleErrors(std::move(Err), [](std::unique_ptr<StringError> E) {
+ assert(E->convertToErrorCode() ==
+ llvm::make_error_code(errc::invalid_argument) &&
+ "Validation errors must carry the invalid_argument code");
+ return llvm::createStringError(
+ errc::invalid_argument,
+ "selected range could not be resolved to a valid source range; " +
+ E->getMessage());
+ });
+ return llvm::createStringError(
+ errc::invalid_argument,
+ "selected range could not be resolved to a valid source range");
+ }
+ // Validate `Range`, because `makeFileCharRange` accepts some ranges that
+ // `validateEditRange` rejects.
+ if (auto Err = tooling::validateEditRange(Range, *Match.SourceManager))
+ return joinErrors(
+ llvm::createStringError(errc::invalid_argument,
+ "selected range is not valid for editing"),
+ std::move(Err));
+ *Result += tooling::getText(Range, *Match.Context);
return Error::success();
}
@@ -294,47 +322,41 @@ public:
};
} // namespace
-Stencil transformer::detail::makeStencil(StringRef Text) { return text(Text); }
-
-Stencil transformer::detail::makeStencil(RangeSelector Selector) {
- return selection(std::move(Selector));
+Stencil transformer::detail::makeStencil(StringRef Text) {
+ return std::make_shared<StencilImpl<RawTextData>>(std::string(Text));
}
-Stencil transformer::text(StringRef Text) {
- return std::make_shared<StencilImpl<RawTextData>>(Text);
-}
-
-Stencil transformer::selection(RangeSelector Selector) {
+Stencil transformer::detail::makeStencil(RangeSelector Selector) {
return std::make_shared<StencilImpl<SelectorData>>(std::move(Selector));
}
Stencil transformer::dPrint(StringRef Id) {
- return std::make_shared<StencilImpl<DebugPrintNodeData>>(Id);
+ return std::make_shared<StencilImpl<DebugPrintNodeData>>(std::string(Id));
}
Stencil transformer::expression(llvm::StringRef Id) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Parens, Id);
+ UnaryNodeOperator::Parens, std::string(Id));
}
Stencil transformer::deref(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Deref, ExprId);
+ UnaryNodeOperator::Deref, std::string(ExprId));
}
Stencil transformer::maybeDeref(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::MaybeDeref, ExprId);
+ UnaryNodeOperator::MaybeDeref, std::string(ExprId));
}
Stencil transformer::addressOf(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::AddressOf, ExprId);
+ UnaryNodeOperator::AddressOf, std::string(ExprId));
}
Stencil transformer::maybeAddressOf(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::MaybeAddressOf, ExprId);
+ UnaryNodeOperator::MaybeAddressOf, std::string(ExprId));
}
Stencil transformer::access(StringRef BaseId, Stencil Member) {
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
index 71f0646f4c0e..e8fc00c4e953 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
@@ -12,6 +12,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
#include "llvm/Support/Error.h"
+#include <map>
#include <utility>
#include <vector>
@@ -31,7 +32,7 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
transformer::RewriteRule::Case Case =
transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = transformer::detail::translateEdits(Result, Case.Edits);
+ auto Transformations = Case.Edits(Result);
if (!Transformations) {
Consumer(Transformations.takeError());
return;
@@ -45,28 +46,39 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
return;
}
- // Record the results in the AtomicChange, anchored at the location of the
- // first change.
- AtomicChange AC(*Result.SourceManager,
- (*Transformations)[0].Range.getBegin());
+ // Group the transformations, by file, into AtomicChanges, each anchored by
+ // the location of the first change in that file.
+ std::map<FileID, AtomicChange> ChangesByFileID;
for (const auto &T : *Transformations) {
+ auto ID = Result.SourceManager->getFileID(T.Range.getBegin());
+ auto Iter = ChangesByFileID
+ .emplace(ID, AtomicChange(*Result.SourceManager,
+ T.Range.getBegin(), T.Metadata))
+ .first;
+ auto &AC = Iter->second;
if (auto Err = AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
Consumer(std::move(Err));
return;
}
}
- for (const auto &I : Case.AddedIncludes) {
- auto &Header = I.first;
- switch (I.second) {
- case transformer::IncludeFormat::Quoted:
- AC.addHeader(Header);
- break;
- case transformer::IncludeFormat::Angled:
- AC.addHeader((llvm::Twine("<") + Header + ">").str());
- break;
+ for (auto &IDChangePair : ChangesByFileID) {
+ auto &AC = IDChangePair.second;
+ // FIXME: this will add includes to *all* changed files, which may not be
+ // the intent. We should upgrade the representation to allow associating
+ // headers with specific edits.
+ for (const auto &I : Case.AddedIncludes) {
+ auto &Header = I.first;
+ switch (I.second) {
+ case transformer::IncludeFormat::Quoted:
+ AC.addHeader(Header);
+ break;
+ case transformer::IncludeFormat::Angled:
+ AC.addHeader((llvm::Twine("<") + Header + ">").str());
+ break;
+ }
}
- }
- Consumer(std::move(AC));
+ Consumer(std::move(AC));
+ }
}
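
A hedged sketch (not part of the patch) of a change consumer under the new per-file grouping; the function name consumeChange and the log messages are illustrative assumptions.

#include "clang/Tooling/Refactoring/AtomicChange.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative only: with the grouping above, a single match can invoke this
// once for every file its edits touch, rather than exactly once per match.
void consumeChange(llvm::Expected<clang::tooling::AtomicChange> Change) {
  if (!Change) {
    llvm::logAllUnhandledErrors(Change.takeError(), llvm::errs(),
                                "transformer failed: ");
    return;
  }
  llvm::errs() << "change anchored in " << Change->getFilePath() << "\n";
}
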
diff --git a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
index 6d1a67f2a4fa..0872015e0ab0 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
@@ -177,7 +177,7 @@ static int PrintSupportedCPUs(std::string TargetStr) {
// the target machine will handle the mcpu printing
llvm::TargetOptions Options;
std::unique_ptr<llvm::TargetMachine> TheTargetMachine(
- TheTarget->createTargetMachine(TargetStr, "", "+cpuHelp", Options, None));
+ TheTarget->createTargetMachine(TargetStr, "", "+cpuhelp", Options, None));
return 0;
}
@@ -203,8 +203,8 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
- bool Success =
- CompilerInvocation::CreateFromArgs(Clang->getInvocation(), Argv, Diags);
+ bool Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
+ Argv, Diags, Argv0);
if (Clang->getFrontendOpts().TimeTrace) {
llvm::timeTraceProfilerInitialize(
@@ -259,6 +259,7 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
// FIXME(ibiryukov): make profilerOutput flush in destructor instead.
profilerOutput->flush();
llvm::timeTraceProfilerCleanup();
+ Clang->clearOutputFiles(false);
}
}
diff --git a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
index e1041f91bfd5..77b99b201364 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
@@ -207,7 +207,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Target Options
Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
- Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
+ Opts.CPU = std::string(Args.getLastArgValue(OPT_target_cpu));
Opts.Features = Args.getAllArgValues(OPT_target_feature);
// Use the default target triple if unspecified.
@@ -238,13 +238,19 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 2, Diags);
- Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
- Opts.DwarfDebugProducer = Args.getLastArgValue(OPT_dwarf_debug_producer);
- Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
- Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
-
- for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
- Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
+ Opts.DwarfDebugFlags =
+ std::string(Args.getLastArgValue(OPT_dwarf_debug_flags));
+ Opts.DwarfDebugProducer =
+ std::string(Args.getLastArgValue(OPT_dwarf_debug_producer));
+ Opts.DebugCompilationDir =
+ std::string(Args.getLastArgValue(OPT_fdebug_compilation_dir));
+ Opts.MainFileName = std::string(Args.getLastArgValue(OPT_main_file_name));
+
+ for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
+ auto Split = StringRef(Arg).split('=');
+ Opts.DebugPrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
// Frontend Options
if (Args.hasArg(OPT_INPUT)) {
@@ -260,8 +266,9 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
}
}
Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
- Opts.OutputPath = Args.getLastArgValue(OPT_o);
- Opts.SplitDwarfOutput = Args.getLastArgValue(OPT_split_dwarf_output);
+ Opts.OutputPath = std::string(Args.getLastArgValue(OPT_o));
+ Opts.SplitDwarfOutput =
+ std::string(Args.getLastArgValue(OPT_split_dwarf_output));
if (Arg *A = Args.getLastArg(OPT_filetype)) {
StringRef Name = A->getValue();
unsigned OutputType = StringSwitch<unsigned>(Name)
@@ -289,8 +296,9 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.NoWarn = Args.hasArg(OPT_massembler_no_warn);
- Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
- Opts.TargetABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.RelocationModel =
+ std::string(Args.getLastArgValue(OPT_mrelocation_model, "pic"));
+ Opts.TargetABI = std::string(Args.getLastArgValue(OPT_target_abi));
Opts.IncrementalLinkerCompatible =
Args.hasArg(OPT_mincremental_linker_compatible);
Opts.SymbolDefs = Args.getAllArgValues(OPT_defsym);
@@ -421,12 +429,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
SrcMgr.getMemoryBuffer(BufferIndex)->getBuffer());
// Build up the feature string from the target feature list.
- std::string FS;
- if (!Opts.Features.empty()) {
- FS = Opts.Features[0];
- for (unsigned i = 1, e = Opts.Features.size(); i != e; ++i)
- FS += "," + Opts.Features[i];
- }
+ std::string FS = llvm::join(Opts.Features, ",");
std::unique_ptr<MCStreamer> Str;
@@ -490,7 +493,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
MCSection *AsmLabel = Ctx.getMachOSection(
"__LLVM", "__asm", MachO::S_REGULAR, 4, SectionKind::getReadOnly());
Str.get()->SwitchSection(AsmLabel);
- Str.get()->EmitZeros(1);
+ Str.get()->emitZeros(1);
}
// Assembly to object compilation should leverage assembly info.
diff --git a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
index 4aadab7301bc..472055ee2170 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
diff --git a/contrib/llvm-project/clang/tools/driver/driver.cpp b/contrib/llvm-project/clang/tools/driver/driver.cpp
index 7b3968341cc7..f24fd61e61a5 100644
--- a/contrib/llvm-project/clang/tools/driver/driver.cpp
+++ b/contrib/llvm-project/clang/tools/driver/driver.cpp
@@ -38,6 +38,7 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Regex.h"
@@ -61,7 +62,7 @@ std::string GetExecutablePath(const char *Argv0, bool CanonicalPrefixes) {
if (llvm::ErrorOr<std::string> P =
llvm::sys::findProgramByName(ExecutablePath))
ExecutablePath = *P;
- return ExecutablePath.str();
+ return std::string(ExecutablePath.str());
}
// This just needs to be some symbol in the binary; C++ doesn't
@@ -72,7 +73,7 @@ std::string GetExecutablePath(const char *Argv0, bool CanonicalPrefixes) {
static const char *GetStableCStr(std::set<std::string> &SavedStrings,
StringRef S) {
- return SavedStrings.insert(S).first->c_str();
+ return SavedStrings.insert(std::string(S)).first->c_str();
}
/// ApplyQAOverride - Apply a list of edits to the input argument lists.
@@ -266,7 +267,7 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
StringRef ExeBasename(llvm::sys::path::stem(Path));
if (ExeBasename.equals_lower("cl"))
ExeBasename = "clang-cl";
- DiagClient->setPrefix(ExeBasename);
+ DiagClient->setPrefix(std::string(ExeBasename));
}
// This lets us create the DiagnosticsEngine with a properly-filled-out
@@ -326,7 +327,7 @@ static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
StringRef Tool = ArgV[1];
void *GetExecutablePathVP = (void *)(intptr_t)GetExecutablePath;
if (Tool == "-cc1")
- return cc1_main(makeArrayRef(ArgV).slice(2), ArgV[0], GetExecutablePathVP);
+ return cc1_main(makeArrayRef(ArgV).slice(1), ArgV[0], GetExecutablePathVP);
if (Tool == "-cc1as")
return cc1as_main(makeArrayRef(ArgV).slice(2), ArgV[0],
GetExecutablePathVP);
@@ -342,6 +343,9 @@ static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
int main(int argc_, const char **argv_) {
noteBottomOfStack();
llvm::InitLLVM X(argc_, argv_);
+ llvm::setBugReportMsg("PLEASE submit a bug report to " BUG_REPORT_URL
+ " and include the crash backtrace, preprocessed "
+ "source, and associated run script.\n");
SmallVector<const char *, 256> argv(argv_, argv_ + argc_);
if (llvm::sys::Process::FixupStandardFileDescriptors())
@@ -507,6 +511,11 @@ int main(int argc_, const char **argv_) {
for (const auto &J : C->getJobs())
if (const Command *C = dyn_cast<Command>(&J))
FailingCommands.push_back(std::make_pair(-1, C));
+
+ // Print the bug report message that would be printed if we did actually
+ // crash, but only if we're crashing due to FORCE_CLANG_DIAGNOSTICS_CRASH.
+ if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
+ llvm::dbgs() << llvm::getBugReportMsg();
}
for (const auto &P : FailingCommands) {
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
index 1cc46cb06570..2b8d7a9efdf1 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -51,7 +51,7 @@ class ClangASTNodesEmitter {
const std::string &macroHierarchyName() {
assert(Root && "root node not yet derived!");
if (MacroHierarchyName.empty())
- MacroHierarchyName = macroName(Root.getName());
+ MacroHierarchyName = macroName(std::string(Root.getName()));
return MacroHierarchyName;
}
@@ -86,7 +86,7 @@ public:
// Called recursively to ensure that nodes remain contiguous
std::pair<ASTNode, ASTNode> ClangASTNodesEmitter::EmitNode(raw_ostream &OS,
ASTNode Base) {
- std::string BaseName = macroName(Base.getName());
+ std::string BaseName = macroName(std::string(Base.getName()));
ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
bool HasChildren = (i != e);
@@ -98,7 +98,7 @@ std::pair<ASTNode, ASTNode> ClangASTNodesEmitter::EmitNode(raw_ostream &OS,
for (; i != e; ++i) {
ASTNode Child = i->second;
bool Abstract = Child.isAbstract();
- std::string NodeName = macroName(Child.getName());
+ std::string NodeName = macroName(std::string(Child.getName()));
OS << "#ifndef " << NodeName << "\n";
OS << "# define " << NodeName << "(Type, Base) "
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
index 2fce9d428137..bd20e447a950 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -48,22 +48,19 @@ namespace {
class FlattenedSpelling {
std::string V, N, NS;
- bool K;
+ bool K = false;
public:
FlattenedSpelling(const std::string &Variety, const std::string &Name,
const std::string &Namespace, bool KnownToGCC) :
V(Variety), N(Name), NS(Namespace), K(KnownToGCC) {}
- explicit FlattenedSpelling(const Record &Spelling) :
- V(Spelling.getValueAsString("Variety")),
- N(Spelling.getValueAsString("Name")) {
-
+ explicit FlattenedSpelling(const Record &Spelling)
+ : V(std::string(Spelling.getValueAsString("Variety"))),
+ N(std::string(Spelling.getValueAsString("Name"))) {
assert(V != "GCC" && V != "Clang" &&
"Given a GCC spelling, which means this hasn't been flattened!");
if (V == "CXX11" || V == "C2x" || V == "Pragma")
- NS = Spelling.getValueAsString("Namespace");
- bool Unset;
- K = Spelling.getValueAsBitOrUnset("KnownToGCC", Unset);
+ NS = std::string(Spelling.getValueAsString("Namespace"));
}
const std::string &variety() const { return V; }
@@ -83,14 +80,15 @@ GetFlattenedSpellings(const Record &Attr) {
StringRef Variety = Spelling->getValueAsString("Variety");
StringRef Name = Spelling->getValueAsString("Name");
if (Variety == "GCC") {
- // Gin up two new spelling objects to add into the list.
- Ret.emplace_back("GNU", Name, "", true);
- Ret.emplace_back("CXX11", Name, "gnu", true);
+ Ret.emplace_back("GNU", std::string(Name), "", true);
+ Ret.emplace_back("CXX11", std::string(Name), "gnu", true);
+ if (Spelling->getValueAsBit("AllowInC"))
+ Ret.emplace_back("C2x", std::string(Name), "gnu", true);
} else if (Variety == "Clang") {
- Ret.emplace_back("GNU", Name, "", false);
- Ret.emplace_back("CXX11", Name, "clang", false);
+ Ret.emplace_back("GNU", std::string(Name), "", false);
+ Ret.emplace_back("CXX11", std::string(Name), "clang", false);
if (Spelling->getValueAsBit("AllowInC"))
- Ret.emplace_back("C2x", Name, "clang", false);
+ Ret.emplace_back("C2x", std::string(Name), "clang", false);
} else
Ret.push_back(FlattenedSpelling(*Spelling));
}
@@ -100,14 +98,16 @@ GetFlattenedSpellings(const Record &Attr) {
static std::string ReadPCHRecord(StringRef type) {
return StringSwitch<std::string>(type)
- .EndsWith("Decl *", "Record.GetLocalDeclAs<"
- + std::string(type, 0, type.size()-1) + ">(Record.readInt())")
- .Case("TypeSourceInfo *", "Record.readTypeSourceInfo()")
- .Case("Expr *", "Record.readExpr()")
- .Case("IdentifierInfo *", "Record.readIdentifier()")
- .Case("StringRef", "Record.readString()")
- .Case("ParamIdx", "ParamIdx::deserialize(Record.readInt())")
- .Default("Record.readInt()");
+ .EndsWith("Decl *", "Record.GetLocalDeclAs<" +
+ std::string(type.data(), 0, type.size() - 1) +
+ ">(Record.readInt())")
+ .Case("TypeSourceInfo *", "Record.readTypeSourceInfo()")
+ .Case("Expr *", "Record.readExpr()")
+ .Case("IdentifierInfo *", "Record.readIdentifier()")
+ .Case("StringRef", "Record.readString()")
+ .Case("ParamIdx", "ParamIdx::deserialize(Record.readInt())")
+ .Case("OMPTraitInfo *", "Record.readOMPTraitInfo()")
+ .Default("Record.readInt()");
}
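ReadPCHRecord here (and WritePCHRecord just below) map a TableGen argument type string to the serialization expression the emitter should print, using llvm::StringSwitch; both hunks also add a case for "OMPTraitInfo *". A small standalone sketch of that dispatch, with only a few illustrative cases kept:

  // Sketch of the StringSwitch dispatch used by ReadPCHRecord (cases trimmed).
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/StringSwitch.h"
  #include <string>

  static std::string readerFor(llvm::StringRef Type) {
    return llvm::StringSwitch<std::string>(Type)
        .Case("Expr *", "Record.readExpr()")
        .Case("OMPTraitInfo *", "Record.readOMPTraitInfo()") // added in this hunk
        .Default("Record.readInt()");
  }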
// Get a type that is suitable for storing an object of the specified type.
@@ -119,14 +119,20 @@ static StringRef getStorageType(StringRef type) {
// Assumes that the way to get the value is SA->getname()
static std::string WritePCHRecord(StringRef type, StringRef name) {
- return "Record." + StringSwitch<std::string>(type)
- .EndsWith("Decl *", "AddDeclRef(" + std::string(name) + ");\n")
- .Case("TypeSourceInfo *", "AddTypeSourceInfo(" + std::string(name) + ");\n")
- .Case("Expr *", "AddStmt(" + std::string(name) + ");\n")
- .Case("IdentifierInfo *", "AddIdentifierRef(" + std::string(name) + ");\n")
- .Case("StringRef", "AddString(" + std::string(name) + ");\n")
- .Case("ParamIdx", "push_back(" + std::string(name) + ".serialize());\n")
- .Default("push_back(" + std::string(name) + ");\n");
+ return "Record." +
+ StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "AddDeclRef(" + std::string(name) + ");\n")
+ .Case("TypeSourceInfo *",
+ "AddTypeSourceInfo(" + std::string(name) + ");\n")
+ .Case("Expr *", "AddStmt(" + std::string(name) + ");\n")
+ .Case("IdentifierInfo *",
+ "AddIdentifierRef(" + std::string(name) + ");\n")
+ .Case("StringRef", "AddString(" + std::string(name) + ");\n")
+ .Case("ParamIdx",
+ "push_back(" + std::string(name) + ".serialize());\n")
+ .Case("OMPTraitInfo *",
+ "writeOMPTraitInfo(" + std::string(name) + ");\n")
+ .Default("push_back(" + std::string(name) + ");\n");
}
// Normalize attribute name by removing leading and trailing
@@ -167,7 +173,7 @@ static ParsedAttrMap getParsedAttrList(const RecordKeeper &Records,
std::string AN;
if (Attr->isSubClassOf("TargetSpecificAttr") &&
!Attr->isValueUnset("ParseKind")) {
- AN = Attr->getValueAsString("ParseKind");
+ AN = std::string(Attr->getValueAsString("ParseKind"));
// If this attribute has already been handled, it does not need to be
// handled again.
@@ -196,8 +202,8 @@ namespace {
public:
Argument(const Record &Arg, StringRef Attr)
- : lowerName(Arg.getValueAsString("Name")), upperName(lowerName),
- attrName(Attr), isOpt(false), Fake(false) {
+ : lowerName(std::string(Arg.getValueAsString("Name"))),
+ upperName(lowerName), attrName(Attr), isOpt(false), Fake(false) {
if (!lowerName.empty()) {
lowerName[0] = std::tolower(lowerName[0]);
upperName[0] = std::toupper(upperName[0]);
@@ -299,8 +305,9 @@ namespace {
}
void writePCHWrite(raw_ostream &OS) const override {
- OS << " " << WritePCHRecord(type, "SA->get" +
- std::string(getUpperName()) + "()");
+ OS << " "
+ << WritePCHRecord(type,
+ "SA->get" + std::string(getUpperName()) + "()");
}
std::string getIsOmitted() const override {
@@ -331,9 +338,9 @@ namespace {
}
void writeDump(raw_ostream &OS) const override {
- if (type == "FunctionDecl *" || type == "NamedDecl *") {
+ if (StringRef(type).endswith("Decl *")) {
OS << " OS << \" \";\n";
- OS << " dumpBareDeclRef(SA->get" << getUpperName() << "());\n";
+ OS << " dumpBareDeclRef(SA->get" << getUpperName() << "());\n";
} else if (type == "IdentifierInfo *") {
// Some non-optional (comma required) identifier arguments can be the
// empty string but are then recorded as a nullptr.
@@ -355,6 +362,8 @@ namespace {
OS << " if (SA->get" << getUpperName() << "().isValid())\n ";
OS << " OS << \" \" << SA->get" << getUpperName()
<< "().getSourceIndex();\n";
+ } else if (type == "OMPTraitInfo *") {
+ OS << " OS << \" \" << SA->get" << getUpperName() << "();\n";
} else {
llvm_unreachable("Unknown SimpleArgument type!");
}
@@ -416,8 +425,8 @@ namespace {
}
void writeCtorBody(raw_ostream &OS) const override {
- OS << " if (!" << getUpperName() << ".empty())\n";
- OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ OS << " if (!" << getUpperName() << ".empty())\n";
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
<< ".data(), " << getLowerName() << "Length);\n";
}
@@ -471,6 +480,7 @@ namespace {
void writeAccessors(raw_ostream &OS) const override {
OS << " bool is" << getUpperName() << "Dependent() const;\n";
+ OS << " bool is" << getUpperName() << "ErrorDependent() const;\n";
OS << " unsigned get" << getUpperName() << "(ASTContext &Ctx) const;\n";
@@ -495,12 +505,21 @@ namespace {
OS << " if (is" << getLowerName() << "Expr)\n";
OS << " return " << getLowerName() << "Expr && (" << getLowerName()
<< "Expr->isValueDependent() || " << getLowerName()
- << "Expr->isTypeDependent());\n";
+ << "Expr->isTypeDependent());\n";
OS << " else\n";
OS << " return " << getLowerName()
<< "Type->getType()->isDependentType();\n";
OS << "}\n";
+ OS << "bool " << getAttrName() << "Attr::is" << getUpperName()
+ << "ErrorDependent() const {\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return " << getLowerName() << "Expr && " << getLowerName()
+ << "Expr->containsErrors();\n";
+ OS << " return " << getLowerName()
+ << "Type->getType()->containsErrors();\n";
+ OS << "}\n";
+
// FIXME: Do not do the calculation here
// FIXME: Handle types correctly
// A null pointer means maximum alignment
@@ -520,11 +539,11 @@ namespace {
void writeASTVisitorTraversal(raw_ostream &OS) const override {
StringRef Name = getUpperName();
OS << " if (A->is" << Name << "Expr()) {\n"
- << " if (!getDerived().TraverseStmt(A->get" << Name << "Expr()))\n"
- << " return false;\n"
+ << " if (!getDerived().TraverseStmt(A->get" << Name << "Expr()))\n"
+ << " return false;\n"
<< " } else if (auto *TSI = A->get" << Name << "Type()) {\n"
<< " if (!getDerived().TraverseTypeLoc(TSI->getTypeLoc()))\n"
- << " return false;\n"
+ << " return false;\n"
<< " }\n";
}
@@ -642,7 +661,7 @@ namespace {
VariadicArgument(const Record &Arg, StringRef Attr, std::string T)
: Argument(Arg, Attr), Type(std::move(T)),
ArgName(getLowerName().str() + "_"), ArgSizeName(ArgName + "Size"),
- RangeName(getLowerName()) {}
+ RangeName(std::string(getLowerName())) {}
const std::string &getType() const { return Type; }
const std::string &getArgName() const { return ArgName; }
@@ -653,7 +672,7 @@ namespace {
std::string IteratorType = getLowerName().str() + "_iterator";
std::string BeginFn = getLowerName().str() + "_begin()";
std::string EndFn = getLowerName().str() + "_end()";
-
+
OS << " typedef " << Type << "* " << IteratorType << ";\n";
OS << " " << IteratorType << " " << BeginFn << " const {"
<< " return " << ArgName << "; }\n";
@@ -681,8 +700,8 @@ namespace {
}
void writeCtorBody(raw_ostream &OS) const override {
- OS << " std::copy(" << getUpperName() << ", " << getUpperName()
- << " + " << ArgSizeName << ", " << ArgName << ");\n";
+ OS << " std::copy(" << getUpperName() << ", " << getUpperName() << " + "
+ << ArgSizeName << ", " << ArgName << ");\n";
}
void writeCtorInitializers(raw_ostream &OS) const override {
@@ -719,8 +738,8 @@ namespace {
// If we can't store the values in the current type (if it's something
// like StringRef), store them in a different type and convert the
// container afterwards.
- std::string StorageType = getStorageType(getType());
- std::string StorageName = getLowerName();
+ std::string StorageType = std::string(getStorageType(getType()));
+ std::string StorageName = std::string(getLowerName());
if (StorageType != getType()) {
StorageName += "Storage";
OS << " SmallVector<" << StorageType << ", 4> "
@@ -805,11 +824,10 @@ namespace {
public:
EnumArgument(const Record &Arg, StringRef Attr)
- : Argument(Arg, Attr), type(Arg.getValueAsString("Type")),
- values(Arg.getValueAsListOfStrings("Values")),
- enums(Arg.getValueAsListOfStrings("Enums")),
- uniques(uniqueEnumsInOrder(enums))
- {
+ : Argument(Arg, Attr), type(std::string(Arg.getValueAsString("Type"))),
+ values(Arg.getValueAsListOfStrings("Values")),
+ enums(Arg.getValueAsListOfStrings("Enums")),
+ uniques(uniqueEnumsInOrder(enums)) {
// FIXME: Emit a proper error
assert(!uniques.empty());
}
@@ -885,40 +903,48 @@ namespace {
OS << " }\n";
}
- void writeConversion(raw_ostream &OS) const {
- OS << " static bool ConvertStrTo" << type << "(StringRef Val, ";
- OS << type << " &Out) {\n";
- OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
+ void writeConversion(raw_ostream &OS, bool Header) const {
+ if (Header) {
+ OS << " static bool ConvertStrTo" << type << "(StringRef Val, " << type
+ << " &Out);\n";
+ OS << " static const char *Convert" << type << "ToStr(" << type
+ << " Val);\n";
+ return;
+ }
+
+ OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << type
+ << "(StringRef Val, " << type << " &Out) {\n";
+ OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
OS << type << ">>(Val)\n";
for (size_t I = 0; I < enums.size(); ++I) {
- OS << " .Case(\"" << values[I] << "\", ";
+ OS << " .Case(\"" << values[I] << "\", ";
OS << getAttrName() << "Attr::" << enums[I] << ")\n";
}
- OS << " .Default(Optional<" << type << ">());\n";
- OS << " if (R) {\n";
- OS << " Out = *R;\n return true;\n }\n";
- OS << " return false;\n";
- OS << " }\n\n";
+ OS << " .Default(Optional<" << type << ">());\n";
+ OS << " if (R) {\n";
+ OS << " Out = *R;\n return true;\n }\n";
+ OS << " return false;\n";
+ OS << "}\n\n";
// Mapping from enumeration values back to enumeration strings isn't
// trivial because some enumeration values have multiple named
// enumerators, such as type_visibility(internal) and
// type_visibility(hidden) both mapping to TypeVisibilityAttr::Hidden.
- OS << " static const char *Convert" << type << "ToStr("
- << type << " Val) {\n"
- << " switch(Val) {\n";
+ OS << "const char *" << getAttrName() << "Attr::Convert" << type
+ << "ToStr(" << type << " Val) {\n"
+ << " switch(Val) {\n";
SmallDenseSet<StringRef, 8> Uniques;
for (size_t I = 0; I < enums.size(); ++I) {
if (Uniques.insert(enums[I]).second)
- OS << " case " << getAttrName() << "Attr::" << enums[I]
- << ": return \"" << values[I] << "\";\n";
+ OS << " case " << getAttrName() << "Attr::" << enums[I]
+ << ": return \"" << values[I] << "\";\n";
}
- OS << " }\n"
- << " llvm_unreachable(\"No enumerator with that value\");\n"
- << " }\n";
+ OS << " }\n"
+ << " llvm_unreachable(\"No enumerator with that value\");\n"
+ << "}\n";
}
};
-
+
class VariadicEnumArgument: public VariadicArgument {
std::string type, QualifiedTypeName;
std::vector<StringRef> values, enums, uniques;
@@ -934,20 +960,20 @@ namespace {
public:
VariadicEnumArgument(const Record &Arg, StringRef Attr)
- : VariadicArgument(Arg, Attr, Arg.getValueAsString("Type")),
- type(Arg.getValueAsString("Type")),
- values(Arg.getValueAsListOfStrings("Values")),
- enums(Arg.getValueAsListOfStrings("Enums")),
- uniques(uniqueEnumsInOrder(enums))
- {
+ : VariadicArgument(Arg, Attr,
+ std::string(Arg.getValueAsString("Type"))),
+ type(std::string(Arg.getValueAsString("Type"))),
+ values(Arg.getValueAsListOfStrings("Values")),
+ enums(Arg.getValueAsListOfStrings("Enums")),
+ uniques(uniqueEnumsInOrder(enums)) {
QualifiedTypeName = getAttrName().str() + "Attr::" + type;
-
+
// FIXME: Emit a proper error
assert(!uniques.empty());
}
bool isVariadicEnumArg() const override { return true; }
-
+
void writeDeclarations(raw_ostream &OS) const override {
auto i = uniques.cbegin(), e = uniques.cend();
// The last one needs to not have a comma.
@@ -960,7 +986,7 @@ namespace {
OS << " " << *e << "\n";
OS << " };\n";
OS << "private:\n";
-
+
VariadicArgument::writeDeclarations(OS);
}
@@ -997,33 +1023,42 @@ namespace {
OS << " " << WritePCHRecord(QualifiedTypeName, "(*i)");
}
- void writeConversion(raw_ostream &OS) const {
- OS << " static bool ConvertStrTo" << type << "(StringRef Val, ";
+ void writeConversion(raw_ostream &OS, bool Header) const {
+ if (Header) {
+ OS << " static bool ConvertStrTo" << type << "(StringRef Val, " << type
+ << " &Out);\n";
+ OS << " static const char *Convert" << type << "ToStr(" << type
+ << " Val);\n";
+ return;
+ }
+
+ OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << type
+ << "(StringRef Val, ";
OS << type << " &Out) {\n";
- OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
+ OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
OS << type << ">>(Val)\n";
for (size_t I = 0; I < enums.size(); ++I) {
- OS << " .Case(\"" << values[I] << "\", ";
+ OS << " .Case(\"" << values[I] << "\", ";
OS << getAttrName() << "Attr::" << enums[I] << ")\n";
}
- OS << " .Default(Optional<" << type << ">());\n";
- OS << " if (R) {\n";
- OS << " Out = *R;\n return true;\n }\n";
- OS << " return false;\n";
- OS << " }\n\n";
-
- OS << " static const char *Convert" << type << "ToStr("
- << type << " Val) {\n"
- << " switch(Val) {\n";
+ OS << " .Default(Optional<" << type << ">());\n";
+ OS << " if (R) {\n";
+ OS << " Out = *R;\n return true;\n }\n";
+ OS << " return false;\n";
+ OS << "}\n\n";
+
+ OS << "const char *" << getAttrName() << "Attr::Convert" << type
+ << "ToStr(" << type << " Val) {\n"
+ << " switch(Val) {\n";
SmallDenseSet<StringRef, 8> Uniques;
for (size_t I = 0; I < enums.size(); ++I) {
if (Uniques.insert(enums[I]).second)
- OS << " case " << getAttrName() << "Attr::" << enums[I]
- << ": return \"" << values[I] << "\";\n";
+ OS << " case " << getAttrName() << "Attr::" << enums[I]
+ << ": return \"" << values[I] << "\";\n";
}
- OS << " }\n"
- << " llvm_unreachable(\"No enumerator with that value\");\n"
- << " }\n";
+ OS << " }\n"
+ << " llvm_unreachable(\"No enumerator with that value\");\n"
+ << "}\n";
}
};
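Both EnumArgument::writeConversion and VariadicEnumArgument::writeConversion now take a Header flag, so one routine can emit either the in-class declarations (header mode) or the out-of-line definitions (implementation mode). A minimal sketch of that split, assuming the same scheme; FooAttr, Kind, and the helper name are made up:

  // Sketch of the Header/implementation split (FooAttr and Kind are made up).
  #include "llvm/Support/raw_ostream.h"

  static void writeConversion(llvm::raw_ostream &OS, bool Header) {
    if (Header) {
      // Declaration only; it lands inside the generated class body.
      OS << "  static bool ConvertStrToKind(StringRef Val, Kind &Out);\n";
      return;
    }
    // Out-of-line definition; it lands in the implementation .inc file.
    OS << "bool FooAttr::ConvertStrToKind(StringRef Val, Kind &Out) {\n"
       << "  // conversion body emitted here\n"
       << "}\n";
  }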
@@ -1037,7 +1072,7 @@ namespace {
OS << " VersionTuple get" << getUpperName() << "() const {\n";
OS << " return " << getLowerName() << ";\n";
OS << " }\n";
- OS << " void set" << getUpperName()
+ OS << " void set" << getUpperName()
<< "(ASTContext &C, VersionTuple V) {\n";
OS << " " << getLowerName() << " = V;\n";
OS << " }";
@@ -1199,15 +1234,15 @@ namespace {
{}
void writeCtorBody(raw_ostream &OS) const override {
- OS << " for (size_t I = 0, E = " << getArgSizeName() << "; I != E;\n"
- " ++I) {\n"
- " StringRef Ref = " << getUpperName() << "[I];\n"
- " if (!Ref.empty()) {\n"
- " char *Mem = new (Ctx, 1) char[Ref.size()];\n"
- " std::memcpy(Mem, Ref.data(), Ref.size());\n"
- " " << getArgName() << "[I] = StringRef(Mem, Ref.size());\n"
- " }\n"
- " }\n";
+ OS << " for (size_t I = 0, E = " << getArgSizeName() << "; I != E;\n"
+ " ++I) {\n"
+ " StringRef Ref = " << getUpperName() << "[I];\n"
+ " if (!Ref.empty()) {\n"
+ " char *Mem = new (Ctx, 1) char[Ref.size()];\n"
+ " std::memcpy(Mem, Ref.data(), Ref.size());\n"
+ " " << getArgName() << "[I] = StringRef(Mem, Ref.size());\n"
+ " }\n"
+ " }\n";
}
void writeValueImpl(raw_ostream &OS) const override {
@@ -1241,8 +1276,9 @@ namespace {
}
void writePCHWrite(raw_ostream &OS) const override {
- OS << " " << WritePCHRecord(
- getType(), "SA->get" + std::string(getUpperName()) + "Loc()");
+ OS << " "
+ << WritePCHRecord(getType(),
+ "SA->get" + std::string(getUpperName()) + "Loc()");
}
};
@@ -1263,10 +1299,9 @@ createArgument(const Record &Arg, StringRef Attr,
Ptr = std::make_unique<EnumArgument>(Arg, Attr);
else if (ArgName == "ExprArgument")
Ptr = std::make_unique<ExprArgument>(Arg, Attr);
- else if (ArgName == "FunctionArgument")
- Ptr = std::make_unique<SimpleArgument>(Arg, Attr, "FunctionDecl *");
- else if (ArgName == "NamedArgument")
- Ptr = std::make_unique<SimpleArgument>(Arg, Attr, "NamedDecl *");
+ else if (ArgName == "DeclArgument")
+ Ptr = std::make_unique<SimpleArgument>(
+ Arg, Attr, (Arg.getValueAsDef("Kind")->getName() + "Decl *").str());
else if (ArgName == "IdentifierArgument")
Ptr = std::make_unique<SimpleArgument>(Arg, Attr, "IdentifierInfo *");
else if (ArgName == "DefaultBoolArgument")
@@ -1303,6 +1338,8 @@ createArgument(const Record &Arg, StringRef Attr,
Ptr = std::make_unique<VariadicIdentifierArgument>(Arg, Attr);
else if (ArgName == "VersionArgument")
Ptr = std::make_unique<VersionArgument>(Arg, Attr);
+ else if (ArgName == "OMPTraitInfoArgument")
+ Ptr = std::make_unique<SimpleArgument>(Arg, Attr, "OMPTraitInfo *");
if (!Ptr) {
// Search in reverse order so that the most-derived type is handled first.
@@ -1341,7 +1378,7 @@ static void writeDeprecatedAttrValue(raw_ostream &OS, std::string &Variety) {
OS << " OS << \"";
}
-static void writeGetSpellingFunction(Record &R, raw_ostream &OS) {
+static void writeGetSpellingFunction(const Record &R, raw_ostream &OS) {
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(R);
OS << "const char *" << R.getName() << "Attr::getSpelling() const {\n";
@@ -1365,7 +1402,7 @@ static void writeGetSpellingFunction(Record &R, raw_ostream &OS) {
}
static void
-writePrettyPrintFunction(Record &R,
+writePrettyPrintFunction(const Record &R,
const std::vector<std::unique_ptr<Argument>> &Args,
raw_ostream &OS) {
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(R);
@@ -1577,11 +1614,12 @@ static void writeAttrAccessorDefinition(const Record &R, raw_ostream &OS) {
static bool
SpellingNamesAreCommon(const std::vector<FlattenedSpelling>& Spellings) {
assert(!Spellings.empty() && "An empty list of spellings was provided");
- std::string FirstName = NormalizeNameForSpellingComparison(
- Spellings.front().name());
+ std::string FirstName =
+ std::string(NormalizeNameForSpellingComparison(Spellings.front().name()));
for (const auto &Spelling :
llvm::make_range(std::next(Spellings.begin()), Spellings.end())) {
- std::string Name = NormalizeNameForSpellingComparison(Spelling.name());
+ std::string Name =
+ std::string(NormalizeNameForSpellingComparison(Spelling.name()));
if (Name != FirstName)
return false;
}
@@ -1727,7 +1765,7 @@ struct AttributeSubjectMatchRule {
}
std::string getSpelling() const {
- std::string Result = MetaSubject->getValueAsString("Name");
+ std::string Result = std::string(MetaSubject->getValueAsString("Name"));
if (isSubRule()) {
Result += '(';
if (isNegatedSubRule())
@@ -1752,7 +1790,7 @@ struct AttributeSubjectMatchRule {
}
if (isAbstractRule())
Result += "_abstract";
- return Result.str();
+ return std::string(Result.str());
}
std::string getEnumValue() const { return "attr::" + getEnumValueName(); }
@@ -1801,7 +1839,7 @@ struct PragmaClangAttributeSupport {
void emitMatchRuleList(raw_ostream &OS);
- std::string generateStrictConformsTo(const Record &Attr, raw_ostream &OS);
+ void generateStrictConformsTo(const Record &Attr, raw_ostream &OS);
void generateParsingHelpers(raw_ostream &OS);
};
@@ -1950,6 +1988,11 @@ static std::string GenerateTestExpression(ArrayRef<Record *> LangOpts) {
Test += "(";
Test += Code;
Test += ")";
+ if (!E->getValueAsString("Name").empty()) {
+ PrintWarning(
+ E->getLoc(),
+ "non-empty 'Name' field ignored because 'CustomCode' was supplied");
+ }
} else {
Test += "LangOpts.";
Test += E->getValueAsString("Name");
@@ -1962,21 +2005,17 @@ static std::string GenerateTestExpression(ArrayRef<Record *> LangOpts) {
return Test;
}
-std::string
+void
PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
raw_ostream &OS) {
- if (!isAttributedSupported(Attr))
- return "nullptr";
+ if (!isAttributedSupported(Attr) || Attr.isValueUnset("Subjects"))
+ return;
// Generate a function that constructs a set of matching rules that describe
// to which declarations the attribute should apply to.
- std::string FnName = "matchRulesFor" + Attr.getName().str();
- OS << "static void " << FnName << "(llvm::SmallVectorImpl<std::pair<"
+ OS << "void getPragmaAttributeMatchRules("
+ << "llvm::SmallVectorImpl<std::pair<"
<< AttributeSubjectMatchRule::EnumName
- << ", bool>> &MatchRules, const LangOptions &LangOpts) {\n";
- if (Attr.isValueUnset("Subjects")) {
- OS << "}\n\n";
- return FnName;
- }
+ << ", bool>> &MatchRules, const LangOptions &LangOpts) const override {\n";
const Record *SubjectObj = Attr.getValueAsDef("Subjects");
std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
for (const auto *Subject : Subjects) {
@@ -1993,7 +2032,6 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
}
}
OS << "}\n\n";
- return FnName;
}
void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
@@ -2223,13 +2261,8 @@ static void emitClangAttrThisIsaIdentifierArgList(RecordKeeper &Records,
OS << "#endif // CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST\n\n";
}
-// Emits the class definitions for attributes.
-void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute classes' definitions", OS);
-
- OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
- OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
-
+static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
+ bool Header) {
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
ParsedAttrMap AttrMap = getParsedAttrList(Records);
@@ -2246,10 +2279,10 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
// When attribute documentation can be generated as part of the build
// itself, this code can be removed.
(void)R.getValueAsListOfDefs("Documentation");
-
+
if (!R.getValueAsBit("ASTNode"))
continue;
-
+
ArrayRef<std::pair<Record *, SMRange>> Supers = R.getSuperClasses();
assert(!Supers.empty() && "Forgot to specify a superclass for the attr");
std::string SuperName;
@@ -2258,12 +2291,15 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
const Record *R = Super.first;
if (R->getName() != "TargetSpecificAttr" &&
R->getName() != "DeclOrTypeAttr" && SuperName.empty())
- SuperName = R->getName();
+ SuperName = std::string(R->getName());
if (R->getName() == "InheritableAttr")
Inheritable = true;
}
- OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
+ if (Header)
+ OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
+ else
+ OS << "\n// " << R.getName() << "Attr implementation\n\n";
std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
std::vector<std::unique_ptr<Argument>> Args;
@@ -2273,8 +2309,10 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
bool HasFakeArg = false;
for (const auto *ArgRecord : ArgRecords) {
Args.emplace_back(createArgument(*ArgRecord, R.getName()));
- Args.back()->writeDeclarations(OS);
- OS << "\n\n";
+ if (Header) {
+ Args.back()->writeDeclarations(OS);
+ OS << "\n\n";
+ }
// For these purposes, fake takes priority over optional.
if (Args.back()->isFake()) {
@@ -2284,7 +2322,8 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
}
}
- OS << "public:\n";
+ if (Header)
+ OS << "public:\n";
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(R);
@@ -2297,8 +2336,11 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
// This maps spelling index values to semantic Spelling enumerants.
SemanticSpellingMap SemanticToSyntacticMap;
- if (!ElideSpelling)
- OS << CreateSemanticSpellings(Spellings, SemanticToSyntacticMap);
+ std::string SpellingEnum;
+ if (Spellings.size() > 1)
+ SpellingEnum = CreateSemanticSpellings(Spellings, SemanticToSyntacticMap);
+ if (Header)
+ OS << SpellingEnum;
const auto &ParsedAttrSpellingItr = llvm::find_if(
AttrMap, [R](const std::pair<std::string, const Record *> &P) {
@@ -2307,9 +2349,14 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
// Emit CreateImplicit factory methods.
auto emitCreate = [&](bool Implicit, bool emitFake) {
- OS << " static " << R.getName() << "Attr *Create";
- if (Implicit)
- OS << "Implicit";
+ if (Header)
+ OS << " static ";
+ OS << R.getName() << "Attr *";
+ if (!Header)
+ OS << R.getName() << "Attr::";
+ OS << "Create";
+ if (Implicit)
+ OS << "Implicit";
OS << "(";
OS << "ASTContext &Ctx";
for (auto const &ai : Args) {
@@ -2317,8 +2364,17 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
OS << ", ";
ai->writeCtorParameters(OS);
}
- OS << ", const AttributeCommonInfo &CommonInfo = {SourceRange{}}) {\n";
- OS << " auto *A = new (Ctx) " << R.getName();
+ OS << ", const AttributeCommonInfo &CommonInfo";
+ if (Header)
+ OS << " = {SourceRange{}}";
+ OS << ")";
+ if (Header) {
+ OS << ";\n";
+ return;
+ }
+
+ OS << " {\n";
+ OS << " auto *A = new (Ctx) " << R.getName();
OS << "Attr(Ctx, CommonInfo";
for (auto const &ai : Args) {
if (ai->isFake() && !emitFake) continue;
@@ -2327,18 +2383,23 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
}
OS << ");\n";
if (Implicit) {
- OS << " A->setImplicit(true);\n";
+ OS << " A->setImplicit(true);\n";
}
if (Implicit || ElideSpelling) {
- OS << " if (!A->isAttributeSpellingListCalculated() && "
+ OS << " if (!A->isAttributeSpellingListCalculated() && "
"!A->getAttrName())\n";
- OS << " A->setAttributeSpellingListIndex(0);\n";
+ OS << " A->setAttributeSpellingListIndex(0);\n";
}
- OS << " return A;\n }\n\n";
+ OS << " return A;\n}\n\n";
};
auto emitCreateNoCI = [&](bool Implicit, bool emitFake) {
- OS <<" static " << R.getName() << "Attr *Create";
+ if (Header)
+ OS << " static ";
+ OS << R.getName() << "Attr *";
+ if (!Header)
+ OS << R.getName() << "Attr::";
+ OS << "Create";
if (Implicit)
OS << "Implicit";
OS << "(";
@@ -2349,12 +2410,19 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
ai->writeCtorParameters(OS);
}
OS << ", SourceRange Range, AttributeCommonInfo::Syntax Syntax";
- if (!ElideSpelling)
- OS << ", " << R.getName()
- << "Attr::Spelling S = "
- "static_cast<Spelling>(SpellingNotCalculated)";
- OS << ") {\n";
- OS << " AttributeCommonInfo I(Range, ";
+ if (!ElideSpelling) {
+ OS << ", " << R.getName() << "Attr::Spelling S";
+ if (Header)
+ OS << " = static_cast<Spelling>(SpellingNotCalculated)";
+ }
+ OS << ")";
+ if (Header) {
+ OS << ";\n";
+ return;
+ }
+
+ OS << " {\n";
+ OS << " AttributeCommonInfo I(Range, ";
if (ParsedAttrSpellingItr != std::end(AttrMap))
OS << "AT_" << ParsedAttrSpellingItr->first;
@@ -2365,7 +2433,7 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
if (!ElideSpelling)
OS << ", S";
OS << ");\n";
- OS << " return Create";
+ OS << " return Create";
if (Implicit)
OS << "Implicit";
OS << "(Ctx";
@@ -2375,7 +2443,7 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
ai->writeImplicitCtorArgs(OS);
}
OS << ", I);\n";
- OS << " }\n";
+ OS << "}\n\n";
};
auto emitCreates = [&](bool emitFake) {
@@ -2385,6 +2453,9 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
emitCreateNoCI(false, emitFake);
};
+ if (Header)
+ OS << " // Factory methods\n";
+
// Emit a CreateImplicit that takes all the arguments.
emitCreates(true);
@@ -2399,7 +2470,11 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
if (arg->isOptional()) return emitOpt;
return true;
};
- OS << " " << R.getName()
+ if (Header)
+ OS << " ";
+ else
+ OS << R.getName() << "Attr::";
+ OS << R.getName()
<< "Attr(ASTContext &Ctx, const AttributeCommonInfo &CommonInfo";
OS << '\n';
for (auto const &ai : Args) {
@@ -2409,8 +2484,12 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
OS << "\n";
}
- OS << " )\n";
- OS << " : " << SuperName << "(Ctx, CommonInfo, ";
+ OS << " )";
+ if (Header) {
+ OS << ";\n";
+ return;
+ }
+ OS << "\n : " << SuperName << "(Ctx, CommonInfo, ";
OS << "attr::" << R.getName() << ", "
<< (R.getValueAsBit("LateParsed") ? "true" : "false");
if (Inheritable) {
@@ -2431,14 +2510,17 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
}
OS << " {\n";
-
+
for (auto const &ai : Args) {
if (!shouldEmitArg(ai)) continue;
ai->writeCtorBody(OS);
}
- OS << " }\n\n";
+ OS << "}\n\n";
};
+ if (Header)
+ OS << "\n // Constructors\n";
+
// Emit a constructor that includes all the arguments.
// This is necessary for cloning.
emitCtor(true, true);
@@ -2446,48 +2528,89 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
// Emit a constructor that takes all the non-fake arguments.
if (HasFakeArg)
emitCtor(true, false);
-
+
// Emit a constructor that takes all the non-fake, non-optional arguments.
if (HasOptArg)
emitCtor(false, false);
- OS << " " << R.getName() << "Attr *clone(ASTContext &C) const;\n";
- OS << " void printPretty(raw_ostream &OS,\n"
- << " const PrintingPolicy &Policy) const;\n";
- OS << " const char *getSpelling() const;\n";
-
+ if (Header) {
+ OS << '\n';
+ OS << " " << R.getName() << "Attr *clone(ASTContext &C) const;\n";
+ OS << " void printPretty(raw_ostream &OS,\n"
+ << " const PrintingPolicy &Policy) const;\n";
+ OS << " const char *getSpelling() const;\n";
+ }
+
if (!ElideSpelling) {
assert(!SemanticToSyntacticMap.empty() && "Empty semantic mapping list");
- OS << " Spelling getSemanticSpelling() const {\n";
- WriteSemanticSpellingSwitch("getAttributeSpellingListIndex()",
- SemanticToSyntacticMap, OS);
- OS << " }\n";
+ if (Header)
+ OS << " Spelling getSemanticSpelling() const;\n";
+ else {
+ OS << R.getName() << "Attr::Spelling " << R.getName()
+ << "Attr::getSemanticSpelling() const {\n";
+ WriteSemanticSpellingSwitch("getAttributeSpellingListIndex()",
+ SemanticToSyntacticMap, OS);
+ OS << "}\n";
+ }
}
- writeAttrAccessorDefinition(R, OS);
+ if (Header)
+ writeAttrAccessorDefinition(R, OS);
for (auto const &ai : Args) {
- ai->writeAccessors(OS);
+ if (Header) {
+ ai->writeAccessors(OS);
+ } else {
+ ai->writeAccessorDefinitions(OS);
+ }
OS << "\n\n";
// Don't write conversion routines for fake arguments.
if (ai->isFake()) continue;
if (ai->isEnumArg())
- static_cast<const EnumArgument *>(ai.get())->writeConversion(OS);
+ static_cast<const EnumArgument *>(ai.get())->writeConversion(OS,
+ Header);
else if (ai->isVariadicEnumArg())
- static_cast<const VariadicEnumArgument *>(ai.get())
- ->writeConversion(OS);
+ static_cast<const VariadicEnumArgument *>(ai.get())->writeConversion(
+ OS, Header);
}
- OS << R.getValueAsString("AdditionalMembers");
- OS << "\n\n";
+ if (Header) {
+ OS << R.getValueAsString("AdditionalMembers");
+ OS << "\n\n";
- OS << " static bool classof(const Attr *A) { return A->getKind() == "
- << "attr::" << R.getName() << "; }\n";
+ OS << " static bool classof(const Attr *A) { return A->getKind() == "
+ << "attr::" << R.getName() << "; }\n";
- OS << "};\n\n";
+ OS << "};\n\n";
+ } else {
+ OS << R.getName() << "Attr *" << R.getName()
+ << "Attr::clone(ASTContext &C) const {\n";
+ OS << " auto *A = new (C) " << R.getName() << "Attr(C, *this";
+ for (auto const &ai : Args) {
+ OS << ", ";
+ ai->writeCloneArgs(OS);
+ }
+ OS << ");\n";
+ OS << " A->Inherited = Inherited;\n";
+ OS << " A->IsPackExpansion = IsPackExpansion;\n";
+ OS << " A->setImplicit(Implicit);\n";
+ OS << " return A;\n}\n\n";
+
+ writePrettyPrintFunction(R, Args, OS);
+ writeGetSpellingFunction(R, OS);
+ }
}
+}
+// Emits the class definitions for attributes.
+void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
+ emitSourceFileHeader("Attribute classes' definitions", OS);
+
+ OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
+ OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
+
+ emitAttributes(Records, OS, true);
OS << "#endif // LLVM_CLANG_ATTR_CLASSES_INC\n";
}
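With the emitter split on a Header flag, EmitClangAttrClass and EmitClangAttrImpl now differ only in the mode they request: the class emitter above calls emitAttributes(Records, OS, true) to get the declarations, and EmitClangAttrImpl in the next hunk calls emitAttributes(Records, OS, false) to get the matching out-of-line definitions.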
@@ -2496,38 +2619,9 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
void clang::EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader("Attribute classes' member function definitions", OS);
- std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
-
- for (auto *Attr : Attrs) {
- Record &R = *Attr;
-
- if (!R.getValueAsBit("ASTNode"))
- continue;
-
- std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
- std::vector<std::unique_ptr<Argument>> Args;
- for (const auto *Arg : ArgRecords)
- Args.emplace_back(createArgument(*Arg, R.getName()));
-
- for (auto const &ai : Args)
- ai->writeAccessorDefinitions(OS);
-
- OS << R.getName() << "Attr *" << R.getName()
- << "Attr::clone(ASTContext &C) const {\n";
- OS << " auto *A = new (C) " << R.getName() << "Attr(C, *this";
- for (auto const &ai : Args) {
- OS << ", ";
- ai->writeCloneArgs(OS);
- }
- OS << ");\n";
- OS << " A->Inherited = Inherited;\n";
- OS << " A->IsPackExpansion = IsPackExpansion;\n";
- OS << " A->setImplicit(Implicit);\n";
- OS << " return A;\n}\n\n";
+ emitAttributes(Records, OS, false);
- writePrettyPrintFunction(R, Args, OS);
- writeGetSpellingFunction(R, OS);
- }
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
// Instead of relying on virtual dispatch we just create a huge dispatch
// switch. This is both smaller and faster than virtual functions.
@@ -2968,7 +3062,7 @@ static void GenerateHasAttrSpellingStringSwitch(
// them. If the attribute has no scope, the version information must not
// have the default value (1), as that's incorrect. Instead, the unscoped
// attribute version information should be taken from the SD-6 standing
- // document, which can be found at:
+ // document, which can be found at:
// https://isocpp.org/std/standing-documents/sd-6-sg10-feature-test-recommendations
int Version = 1;
@@ -3260,7 +3354,7 @@ void EmitClangAttrParsedAttrList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#ifndef PARSED_ATTR\n";
OS << "#define PARSED_ATTR(NAME) NAME\n";
OS << "#endif\n\n";
-
+
ParsedAttrMap Names = getParsedAttrList(Records);
for (const auto &I : Names) {
OS << "PARSED_ATTR(" << I.first << ")\n";
@@ -3290,18 +3384,12 @@ static void emitArgInfo(const Record &R, raw_ostream &OS) {
// If there is a variadic argument, we will set the optional argument count
// to its largest value. Since it's currently a 4-bit number, we set it to 15.
- OS << ArgCount << ", " << (HasVariadic ? 15 : OptCount);
-}
-
-static void GenerateDefaultAppertainsTo(raw_ostream &OS) {
- OS << "static bool defaultAppertainsTo(Sema &, const ParsedAttr &,";
- OS << "const Decl *) {\n";
- OS << " return true;\n";
- OS << "}\n\n";
+ OS << " NumArgs = " << ArgCount << ";\n";
+ OS << " OptArgs = " << (HasVariadic ? 15 : OptCount) << ";\n";
}
static std::string GetDiagnosticSpelling(const Record &R) {
- std::string Ret = R.getValueAsString("DiagSpelling");
+ std::string Ret = std::string(R.getValueAsString("DiagSpelling"));
if (!Ret.empty())
return Ret;
@@ -3337,7 +3425,7 @@ static std::string CalculateDiagnostic(const Record &S) {
SmallVector<StringRef, 2> Frags;
llvm::SplitString(V, Frags, ",");
for (auto Str : Frags) {
- DiagList.push_back(Str.trim());
+ DiagList.push_back(std::string(Str.trim()));
}
}
}
@@ -3368,7 +3456,7 @@ static std::string CalculateDiagnostic(const Record &S) {
}
static std::string GetSubjectWithSuffix(const Record *R) {
- const std::string &B = R->getName();
+ const std::string &B = std::string(R->getName());
if (B == "DeclBase")
return "Decl";
return B + "Decl";
@@ -3378,16 +3466,14 @@ static std::string functionNameForCustomAppertainsTo(const Record &Subject) {
return "is" + Subject.getName().str();
}
-static std::string GenerateCustomAppertainsTo(const Record &Subject,
- raw_ostream &OS) {
+static void GenerateCustomAppertainsTo(const Record &Subject, raw_ostream &OS) {
std::string FnName = functionNameForCustomAppertainsTo(Subject);
- // If this code has already been generated, simply return the previous
- // instance of it.
+ // If this code has already been generated, we don't need to do anything.
static std::set<std::string> CustomSubjectSet;
auto I = CustomSubjectSet.find(FnName);
if (I != CustomSubjectSet.end())
- return *I;
+ return;
// This only works with non-root Decls.
Record *Base = Subject.getValueAsDef(BaseFieldName);
@@ -3396,7 +3482,7 @@ static std::string GenerateCustomAppertainsTo(const Record &Subject,
if (Base->isSubClassOf("SubsetSubject")) {
PrintFatalError(Subject.getLoc(),
"SubsetSubjects within SubsetSubjects is not supported");
- return "";
+ return;
}
OS << "static bool " << FnName << "(const Decl *D) {\n";
@@ -3408,14 +3494,13 @@ static std::string GenerateCustomAppertainsTo(const Record &Subject,
OS << "}\n\n";
CustomSubjectSet.insert(FnName);
- return FnName;
}
-static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
+static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
// If the attribute does not contain a Subjects definition, then use the
// default appertainsTo logic.
if (Attr.isValueUnset("Subjects"))
- return "defaultAppertainsTo";
+ return;
const Record *SubjectObj = Attr.getValueAsDef("Subjects");
std::vector<Record*> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
@@ -3423,52 +3508,46 @@ static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
// If the list of subjects is empty, it is assumed that the attribute
// appertains to everything.
if (Subjects.empty())
- return "defaultAppertainsTo";
+ return;
bool Warn = SubjectObj->getValueAsDef("Diag")->getValueAsBit("Warn");
// Otherwise, generate an appertainsTo check specific to this attribute which
- // checks all of the given subjects against the Decl passed in. Return the
- // name of that check to the caller.
+ // checks all of the given subjects against the Decl passed in.
//
// If D is null, that means the attribute was not applied to a declaration
// at all (for instance because it was applied to a type), or that the caller
// has determined that the check should fail (perhaps prior to the creation
// of the declaration).
- std::string FnName = "check" + Attr.getName().str() + "AppertainsTo";
- std::stringstream SS;
- SS << "static bool " << FnName << "(Sema &S, const ParsedAttr &Attr, ";
- SS << "const Decl *D) {\n";
- SS << " if (!D || (";
+ OS << "bool diagAppertainsToDecl(Sema &S, ";
+ OS << "const ParsedAttr &Attr, const Decl *D) const override {\n";
+ OS << " if (";
for (auto I = Subjects.begin(), E = Subjects.end(); I != E; ++I) {
- // If the subject has custom code associated with it, generate a function
- // for it. The function cannot be inlined into this check (yet) because it
- // requires the subject to be of a specific type, and were that information
- // inlined here, it would not support an attribute with multiple custom
- // subjects.
+ // If the subject has custom code associated with it, use the generated
+ // function for it. The function cannot be inlined into this check (yet)
+ // because it requires the subject to be of a specific type, and were that
+ // information inlined here, it would not support an attribute with multiple
+ // custom subjects.
if ((*I)->isSubClassOf("SubsetSubject")) {
- SS << "!" << GenerateCustomAppertainsTo(**I, OS) << "(D)";
+ OS << "!" << functionNameForCustomAppertainsTo(**I) << "(D)";
} else {
- SS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)";
+ OS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)";
}
if (I + 1 != E)
- SS << " && ";
+ OS << " && ";
}
- SS << ")) {\n";
- SS << " S.Diag(Attr.getLoc(), diag::";
- SS << (Warn ? "warn_attribute_wrong_decl_type_str" :
+ OS << ") {\n";
+ OS << " S.Diag(Attr.getLoc(), diag::";
+ OS << (Warn ? "warn_attribute_wrong_decl_type_str" :
"err_attribute_wrong_decl_type_str");
- SS << ")\n";
- SS << " << Attr << ";
- SS << CalculateDiagnostic(*SubjectObj) << ";\n";
- SS << " return false;\n";
- SS << " }\n";
- SS << " return true;\n";
- SS << "}\n\n";
-
- OS << SS.str();
- return FnName;
+ OS << ")\n";
+ OS << " << Attr << ";
+ OS << CalculateDiagnostic(*SubjectObj) << ";\n";
+ OS << " return false;\n";
+ OS << " }\n";
+ OS << " return true;\n";
+ OS << "}\n\n";
}
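These hunks change the Generate* helpers from emitting free functions and returning their names for a lookup table to printing override methods directly into each generated ParsedAttrInfo subclass; when an attribute has nothing special to check, the helper now simply returns and the inherited default behaviour applies. A small emitter-side sketch of that shape, under the assumption that the base class supplies the defaults; the method body and target test below are purely illustrative:

  // Sketch: print an override into the ParsedAttrInfo subclass, or emit
  // nothing and fall back to the base-class behaviour (illustrative only).
  #include "llvm/Support/raw_ostream.h"

  static void generateExistsInTarget(llvm::raw_ostream &OS, bool IsTargetSpecific) {
    if (!IsTargetSpecific)
      return; // nothing emitted; the inherited default applies
    OS << "bool existsInTarget(const TargetInfo &Target) const override {\n"
       << "  return Target.getTriple().getArch() == llvm::Triple::x86_64;\n"
       << "}\n\n";
  }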
static void
@@ -3507,37 +3586,16 @@ emitAttributeMatchRules(PragmaClangAttributeSupport &PragmaAttributeSupport,
OS << "}\n\n";
}
-static void GenerateDefaultLangOptRequirements(raw_ostream &OS) {
- OS << "static bool defaultDiagnoseLangOpts(Sema &, ";
- OS << "const ParsedAttr &) {\n";
- OS << " return true;\n";
- OS << "}\n\n";
-}
-
-static std::string GenerateLangOptRequirements(const Record &R,
- raw_ostream &OS) {
+static void GenerateLangOptRequirements(const Record &R,
+ raw_ostream &OS) {
// If the attribute has an empty or unset list of language requirements,
- // return the default handler.
+ // use the default handler.
std::vector<Record *> LangOpts = R.getValueAsListOfDefs("LangOpts");
if (LangOpts.empty())
- return "defaultDiagnoseLangOpts";
-
- // Generate a unique function name for the diagnostic test. The list of
- // options should usually be short (one or two options), and the
- // uniqueness isn't strictly necessary (it is just for codegen efficiency).
- std::string FnName = "check";
- for (auto I = LangOpts.begin(), E = LangOpts.end(); I != E; ++I)
- FnName += (*I)->getValueAsString("Name");
- FnName += "LangOpts";
-
- // If this code has already been generated, simply return the previous
- // instance of it.
- static std::set<std::string> CustomLangOptsSet;
- auto I = CustomLangOptsSet.find(FnName);
- if (I != CustomLangOptsSet.end())
- return *I;
-
- OS << "static bool " << FnName << "(Sema &S, const ParsedAttr &Attr) {\n";
+ return;
+
+ OS << "bool diagLangOpts(Sema &S, const ParsedAttr &Attr) ";
+ OS << "const override {\n";
OS << " auto &LangOpts = S.LangOpts;\n";
OS << " if (" << GenerateTestExpression(LangOpts) << ")\n";
OS << " return true;\n\n";
@@ -3545,24 +3603,15 @@ static std::string GenerateLangOptRequirements(const Record &R,
OS << "<< Attr;\n";
OS << " return false;\n";
OS << "}\n\n";
-
- CustomLangOptsSet.insert(FnName);
- return FnName;
-}
-
-static void GenerateDefaultTargetRequirements(raw_ostream &OS) {
- OS << "static bool defaultTargetRequirements(const TargetInfo &) {\n";
- OS << " return true;\n";
- OS << "}\n\n";
}
-static std::string GenerateTargetRequirements(const Record &Attr,
- const ParsedAttrMap &Dupes,
- raw_ostream &OS) {
- // If the attribute is not a target specific attribute, return the default
+static void GenerateTargetRequirements(const Record &Attr,
+ const ParsedAttrMap &Dupes,
+ raw_ostream &OS) {
+ // If the attribute is not a target specific attribute, use the default
// target handler.
if (!Attr.isSubClassOf("TargetSpecificAttr"))
- return "defaultTargetRequirements";
+ return;
// Get the list of architectures to be tested for.
const Record *R = Attr.getValueAsDef("Target");
@@ -3590,55 +3639,51 @@ static std::string GenerateTargetRequirements(const Record &Attr,
std::string Test;
bool UsesT = GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName);
- // If this code has already been generated, simply return the previous
- // instance of it.
- static std::set<std::string> CustomTargetSet;
- auto I = CustomTargetSet.find(FnName);
- if (I != CustomTargetSet.end())
- return *I;
-
- OS << "static bool " << FnName << "(const TargetInfo &Target) {\n";
+ OS << "bool existsInTarget(const TargetInfo &Target) const override {\n";
if (UsesT)
OS << " const llvm::Triple &T = Target.getTriple(); (void)T;\n";
OS << " return " << Test << ";\n";
OS << "}\n\n";
-
- CustomTargetSet.insert(FnName);
- return FnName;
-}
-
-static void GenerateDefaultSpellingIndexToSemanticSpelling(raw_ostream &OS) {
- OS << "static unsigned defaultSpellingIndexToSemanticSpelling("
- << "const ParsedAttr &Attr) {\n";
- OS << " return UINT_MAX;\n";
- OS << "}\n\n";
}
-static std::string GenerateSpellingIndexToSemanticSpelling(const Record &Attr,
- raw_ostream &OS) {
+static void GenerateSpellingIndexToSemanticSpelling(const Record &Attr,
+ raw_ostream &OS) {
// If the attribute does not have a semantic form, we can bail out early.
if (!Attr.getValueAsBit("ASTNode"))
- return "defaultSpellingIndexToSemanticSpelling";
+ return;
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(Attr);
// If there are zero or one spellings, or all of the spellings share the same
// name, we can also bail out early.
if (Spellings.size() <= 1 || SpellingNamesAreCommon(Spellings))
- return "defaultSpellingIndexToSemanticSpelling";
+ return;
// Generate the enumeration we will use for the mapping.
SemanticSpellingMap SemanticToSyntacticMap;
std::string Enum = CreateSemanticSpellings(Spellings, SemanticToSyntacticMap);
std::string Name = Attr.getName().str() + "AttrSpellingMap";
- OS << "static unsigned " << Name << "(const ParsedAttr &Attr) {\n";
+ OS << "unsigned spellingIndexToSemanticSpelling(";
+ OS << "const ParsedAttr &Attr) const override {\n";
OS << Enum;
OS << " unsigned Idx = Attr.getAttributeSpellingListIndex();\n";
WriteSemanticSpellingSwitch("Idx", SemanticToSyntacticMap, OS);
OS << "}\n\n";
+}
+
+static void GenerateHandleDeclAttribute(const Record &Attr, raw_ostream &OS) {
+ // Only generate if Attr can be handled simply.
+ if (!Attr.getValueAsBit("SimpleHandler"))
+ return;
- return Name;
+ // Generate a function which just converts from ParsedAttr to the Attr type.
+ OS << "AttrHandling handleDeclAttribute(Sema &S, Decl *D,";
+ OS << "const ParsedAttr &Attr) const override {\n";
+ OS << " D->addAttr(::new (S.Context) " << Attr.getName();
+ OS << "Attr(S.Context, Attr));\n";
+ OS << " return AttributeApplied;\n";
+ OS << "}\n\n";
}
static bool IsKnownToGCC(const Record &Attr) {
@@ -3661,19 +3706,19 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
ParsedAttrMap Dupes;
ParsedAttrMap Attrs = getParsedAttrList(Records, &Dupes);
- // Generate the default appertainsTo, target and language option diagnostic,
- // and spelling list index mapping methods.
- GenerateDefaultAppertainsTo(OS);
- GenerateDefaultLangOptRequirements(OS);
- GenerateDefaultTargetRequirements(OS);
- GenerateDefaultSpellingIndexToSemanticSpelling(OS);
-
- // Generate the appertainsTo diagnostic methods and write their names into
- // another mapping. At the same time, generate the AttrInfoMap object
- // contents. Due to the reliance on generated code, use separate streams so
- // that code will not be interleaved.
- std::string Buffer;
- raw_string_ostream SS {Buffer};
+ // Generate all of the custom appertainsTo functions that the attributes
+ // will be using.
+ for (auto I : Attrs) {
+ const Record &Attr = *I.second;
+ if (Attr.isValueUnset("Subjects"))
+ continue;
+ const Record *SubjectObj = Attr.getValueAsDef("Subjects");
+ for (auto Subject : SubjectObj->getValueAsListOfDefs("Subjects"))
+ if (Subject->isSubClassOf("SubsetSubject"))
+ GenerateCustomAppertainsTo(*Subject, OS);
+ }
+
+ // Generate a ParsedAttrInfo struct for each of the attributes.
for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
// TODO: If the attribute's kind appears in the list of duplicates, that is
// because it is a target-specific attribute that appears multiple times.
@@ -3683,33 +3728,63 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// We need to generate struct instances based off ParsedAttrInfo from
// ParsedAttr.cpp.
- SS << " { ";
- emitArgInfo(*I->second, SS);
- SS << ", " << I->second->getValueAsBit("HasCustomParsing");
- SS << ", " << I->second->isSubClassOf("TargetSpecificAttr");
- SS << ", "
- << (I->second->isSubClassOf("TypeAttr") ||
- I->second->isSubClassOf("DeclOrTypeAttr"));
- SS << ", " << I->second->isSubClassOf("StmtAttr");
- SS << ", " << IsKnownToGCC(*I->second);
- SS << ", " << PragmaAttributeSupport.isAttributedSupported(*I->second);
- SS << ", " << GenerateAppertainsTo(*I->second, OS);
- SS << ", " << GenerateLangOptRequirements(*I->second, OS);
- SS << ", " << GenerateTargetRequirements(*I->second, Dupes, OS);
- SS << ", " << GenerateSpellingIndexToSemanticSpelling(*I->second, OS);
- SS << ", "
- << PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS);
- SS << " }";
-
- if (I + 1 != E)
- SS << ",";
-
- SS << " // AT_" << I->first << "\n";
+ const std::string &AttrName = I->first;
+ const Record &Attr = *I->second;
+ auto Spellings = GetFlattenedSpellings(Attr);
+ if (!Spellings.empty()) {
+ OS << "static constexpr ParsedAttrInfo::Spelling " << I->first
+ << "Spellings[] = {\n";
+ for (const auto &S : Spellings) {
+ const std::string &RawSpelling = S.name();
+ std::string Spelling;
+ if (!S.nameSpace().empty())
+ Spelling += S.nameSpace() + "::";
+ if (S.variety() == "GNU")
+ Spelling += NormalizeGNUAttrSpelling(RawSpelling);
+ else
+ Spelling += RawSpelling;
+ OS << " {AttributeCommonInfo::AS_" << S.variety();
+ OS << ", \"" << Spelling << "\"},\n";
+ }
+ OS << "};\n";
+ }
+ OS << "struct ParsedAttrInfo" << I->first
+ << " final : public ParsedAttrInfo {\n";
+ OS << " ParsedAttrInfo" << I->first << "() {\n";
+ OS << " AttrKind = ParsedAttr::AT_" << AttrName << ";\n";
+ emitArgInfo(Attr, OS);
+ OS << " HasCustomParsing = ";
+ OS << Attr.getValueAsBit("HasCustomParsing") << ";\n";
+ OS << " IsTargetSpecific = ";
+ OS << Attr.isSubClassOf("TargetSpecificAttr") << ";\n";
+ OS << " IsType = ";
+ OS << (Attr.isSubClassOf("TypeAttr") ||
+ Attr.isSubClassOf("DeclOrTypeAttr")) << ";\n";
+ OS << " IsStmt = ";
+ OS << Attr.isSubClassOf("StmtAttr") << ";\n";
+ OS << " IsKnownToGCC = ";
+ OS << IsKnownToGCC(Attr) << ";\n";
+ OS << " IsSupportedByPragmaAttribute = ";
+ OS << PragmaAttributeSupport.isAttributedSupported(*I->second) << ";\n";
+ if (!Spellings.empty())
+ OS << " Spellings = " << I->first << "Spellings;\n";
+ OS << " }\n";
+ GenerateAppertainsTo(Attr, OS);
+ GenerateLangOptRequirements(Attr, OS);
+ GenerateTargetRequirements(Attr, Dupes, OS);
+ GenerateSpellingIndexToSemanticSpelling(Attr, OS);
+ PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS);
+ GenerateHandleDeclAttribute(Attr, OS);
+ OS << "static const ParsedAttrInfo" << I->first << " Instance;\n";
+ OS << "};\n";
+ OS << "const ParsedAttrInfo" << I->first << " ParsedAttrInfo" << I->first
+ << "::Instance;\n";
}
- OS << "static const ParsedAttrInfo AttrInfoMap[ParsedAttr::UnknownAttribute "
- "+ 1] = {\n";
- OS << SS.str();
+ OS << "static const ParsedAttrInfo *AttrInfoMap[] = {\n";
+ for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
+ OS << "&ParsedAttrInfo" << I->first << "::Instance,\n";
+ }
OS << "};\n\n";
// Generate the attribute match rules.
@@ -3743,7 +3818,7 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
std::string AttrName;
if (Attr.isSubClassOf("TargetSpecificAttr") &&
!Attr.isValueUnset("ParseKind")) {
- AttrName = Attr.getValueAsString("ParseKind");
+ AttrName = std::string(Attr.getValueAsString("ParseKind"));
if (Seen.find(AttrName) != Seen.end())
continue;
Seen.insert(AttrName);
@@ -3758,12 +3833,12 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
const std::string &Variety = S.variety();
if (Variety == "CXX11") {
Matches = &CXX11;
- Spelling += S.nameSpace();
- Spelling += "::";
+ if (!S.nameSpace().empty())
+ Spelling += S.nameSpace() + "::";
} else if (Variety == "C2x") {
Matches = &C2x;
- Spelling += S.nameSpace();
- Spelling += "::";
+ if (!S.nameSpace().empty())
+ Spelling += S.nameSpace() + "::";
} else if (Variety == "GNU")
Matches = &GNU;
else if (Variety == "Declspec")
@@ -3983,7 +4058,7 @@ GetAttributeHeadingAndSpellings(const Record &Documentation,
"documented");
// Determine the heading to be used for this attribute.
- std::string Heading = Documentation.getValueAsString("Heading");
+ std::string Heading = std::string(Documentation.getValueAsString("Heading"));
if (Heading.empty()) {
// If there's only one spelling, we can simply use that.
if (Spellings.size() == 1)
@@ -3992,7 +4067,8 @@ GetAttributeHeadingAndSpellings(const Record &Documentation,
std::set<std::string> Uniques;
for (auto I = Spellings.begin(), E = Spellings.end();
I != E && Uniques.size() <= 1; ++I) {
- std::string Spelling = NormalizeNameForSpellingComparison(I->name());
+ std::string Spelling =
+ std::string(NormalizeNameForSpellingComparison(I->name()));
Uniques.insert(Spelling);
}
// If the semantic map has only one spelling, that is sufficient for our
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
index fc79d59713d6..eb2f23191c55 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
@@ -63,7 +63,7 @@ void clang::EmitClangCommentCommandInfo(RecordKeeper &Records, raw_ostream &OS)
std::vector<StringMatcher::StringPair> Matches;
for (size_t i = 0, e = Tags.size(); i != e; ++i) {
Record &Tag = *Tags[i];
- std::string Name = Tag.getValueAsString("Name");
+ std::string Name = std::string(Tag.getValueAsString("Name"));
std::string Return;
raw_string_ostream(Return) << "return &Commands[" << i << "];";
Matches.emplace_back(std::move(Name), std::move(Return));
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
index ed3f4bd6ef6c..15671a99a3fc 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
@@ -54,7 +54,7 @@ void clang::EmitClangCommentHTMLNamedCharacterReferences(RecordKeeper &Records,
for (std::vector<Record *>::iterator I = Tags.begin(), E = Tags.end();
I != E; ++I) {
Record &Tag = **I;
- std::string Spelling = Tag.getValueAsString("Spelling");
+ std::string Spelling = std::string(Tag.getValueAsString("Spelling"));
uint64_t CodePoint = Tag.getValueAsInt("CodePoint");
CLiteral.clear();
CLiteral.append("return ");
@@ -66,7 +66,7 @@ void clang::EmitClangCommentHTMLNamedCharacterReferences(RecordKeeper &Records,
}
CLiteral.append(";");
- StringMatcher::StringPair Match(Spelling, CLiteral.str());
+ StringMatcher::StringPair Match(Spelling, std::string(CLiteral.str()));
NameToUTF8.push_back(Match);
}
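The comment TableGen emitters feed llvm::StringMatcher, whose StringPair holds plain std::strings, so the StringRef values read from the records (and the SmallString literal buffer) are converted explicitly. A minimal sketch, assuming StringPair is a pair of std::strings; the helper name is made up:

  // Sketch: building StringMatcher input with explicit std::string conversions.
  #include "llvm/ADT/StringRef.h"
  #include "llvm/TableGen/StringMatcher.h"
  #include <string>
  #include <vector>

  static void addTagMatch(std::vector<llvm::StringMatcher::StringPair> &Matches,
                          llvm::StringRef Spelling) {
    Matches.emplace_back(std::string(Spelling), "return true;");
  }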
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
index 7b9fdfcb3f20..78bbbd1cba57 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
@@ -23,7 +23,8 @@ void clang::EmitClangCommentHTMLTags(RecordKeeper &Records, raw_ostream &OS) {
std::vector<Record *> Tags = Records.getAllDerivedDefinitions("Tag");
std::vector<StringMatcher::StringPair> Matches;
for (Record *Tag : Tags) {
- Matches.emplace_back(Tag->getValueAsString("Spelling"), "return true;");
+ Matches.emplace_back(std::string(Tag->getValueAsString("Spelling")),
+ "return true;");
}
emitSourceFileHeader("HTML tag name matcher", OS);
@@ -40,7 +41,7 @@ void clang::EmitClangCommentHTMLTagsProperties(RecordKeeper &Records,
std::vector<StringMatcher::StringPair> MatchesEndTagOptional;
std::vector<StringMatcher::StringPair> MatchesEndTagForbidden;
for (Record *Tag : Tags) {
- std::string Spelling = Tag->getValueAsString("Spelling");
+ std::string Spelling = std::string(Tag->getValueAsString("Spelling"));
StringMatcher::StringPair Match(Spelling, "return true;");
if (Tag->getValueAsBit("EndTagOptional"))
MatchesEndTagOptional.push_back(Match);
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index f694c3e4380a..76d412203009 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
#include "llvm/TableGen/Error.h"
@@ -62,7 +63,7 @@ static std::string
getCategoryFromDiagGroup(const Record *Group,
DiagGroupParentMap &DiagGroupParents) {
// If the DiagGroup has a category, return it.
- std::string CatName = Group->getValueAsString("CategoryName");
+ std::string CatName = std::string(Group->getValueAsString("CategoryName"));
if (!CatName.empty()) return CatName;
// The diag group may be the subgroup of one or more other diagnostic groups,
@@ -88,7 +89,7 @@ static std::string getDiagnosticCategory(const Record *R,
}
// If the diagnostic itself has a category, get it.
- return R->getValueAsString("CategoryName");
+ return std::string(R->getValueAsString("CategoryName"));
}
namespace {
@@ -168,7 +169,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
continue;
assert(R->getValueAsDef("Class")->getName() != "CLASS_NOTE" &&
"Note can't be in a DiagGroup");
- std::string GroupName = DI->getDef()->getValueAsString("GroupName");
+ std::string GroupName =
+ std::string(DI->getDef()->getValueAsString("GroupName"));
DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
}
@@ -179,7 +181,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
// groups (these are warnings that GCC supports that clang never produces).
for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
Record *Group = DiagGroups[i];
- GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+ GroupInfo &GI =
+ DiagsInGroup[std::string(Group->getValueAsString("GroupName"))];
if (Group->isAnonymous()) {
if (GI.DiagsInGroup.size() > 1)
ImplicitGroups.insert(&GI);
@@ -192,7 +195,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
- GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
+ GI.SubGroups.push_back(
+ std::string(SubGroups[j]->getValueAsString("GroupName")));
}
// Assign unique ID numbers to the groups.
@@ -219,7 +223,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
ArrayRef<const Record *> GroupDiags = (*I)->DiagsInGroup;
if ((*I)->ExplicitDef) {
- std::string Name = (*I)->ExplicitDef->getValueAsString("GroupName");
+ std::string Name =
+ std::string((*I)->ExplicitDef->getValueAsString("GroupName"));
for (ArrayRef<const Record *>::const_iterator DI = GroupDiags.begin(),
DE = GroupDiags.end();
DI != DE; ++DI) {
@@ -244,7 +249,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
const DefInit *GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
const Record *NextDiagGroup = GroupInit->getDef();
- std::string Name = NextDiagGroup->getValueAsString("GroupName");
+ std::string Name =
+ std::string(NextDiagGroup->getValueAsString("GroupName"));
SrcMgr.PrintMessage((*DI)->getLoc().front(),
SourceMgr::DK_Error,
@@ -315,8 +321,8 @@ private:
bool InferPedantic::isSubGroupOfGroup(const Record *Group,
llvm::StringRef GName) {
-
- const std::string &GroupName = Group->getValueAsString("GroupName");
+ const std::string &GroupName =
+ std::string(Group->getValueAsString("GroupName"));
if (GName == GroupName)
return true;
@@ -330,13 +336,14 @@ bool InferPedantic::isSubGroupOfGroup(const Record *Group,
/// Determine if the diagnostic is an extension.
bool InferPedantic::isExtension(const Record *Diag) {
- const std::string &ClsName = Diag->getValueAsDef("Class")->getName();
+ const std::string &ClsName =
+ std::string(Diag->getValueAsDef("Class")->getName());
return ClsName == "CLASS_EXTENSION";
}
bool InferPedantic::isOffByDefault(const Record *Diag) {
- const std::string &DefSeverity =
- Diag->getValueAsDef("DefaultSeverity")->getValueAsString("Name");
+ const std::string &DefSeverity = std::string(
+ Diag->getValueAsDef("DefaultSeverity")->getValueAsString("Name"));
return DefSeverity == "Ignored";
}
@@ -344,7 +351,8 @@ bool InferPedantic::groupInPedantic(const Record *Group, bool increment) {
GMap::mapped_type &V = GroupCount[Group];
// Lazily compute the threshold value for the group count.
if (!V.second.hasValue()) {
- const GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+ const GroupInfo &GI =
+ DiagsInGroup[std::string(Group->getValueAsString("GroupName"))];
V.second = GI.SubGroups.size() + GI.DiagsInGroup.size();
}
@@ -1176,12 +1184,14 @@ std::string DiagnosticTextBuilder::buildForDefinition(const Record *R) {
//===----------------------------------------------------------------------===//
static bool isError(const Record &Diag) {
- const std::string &ClsName = Diag.getValueAsDef("Class")->getName();
+ const std::string &ClsName =
+ std::string(Diag.getValueAsDef("Class")->getName());
return ClsName == "CLASS_ERROR";
}
static bool isRemark(const Record &Diag) {
- const std::string &ClsName = Diag.getValueAsDef("Class")->getName();
+ const std::string &ClsName =
+ std::string(Diag.getValueAsDef("Class")->getName());
return ClsName == "CLASS_REMARK";
}
@@ -1226,7 +1236,8 @@ void clang::EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
if (isError(R)) {
if (DefInit *Group = dyn_cast<DefInit>(R.getValueInit("Group"))) {
const Record *GroupRec = Group->getDef();
- const std::string &GroupName = GroupRec->getValueAsString("GroupName");
+ const std::string &GroupName =
+ std::string(GroupRec->getValueAsString("GroupName"));
PrintFatalError(R.getLoc(), "Error " + R.getName() +
" cannot be in a warning group [" + GroupName + "]");
}
@@ -1256,8 +1267,8 @@ void clang::EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
// Warning associated with the diagnostic. This is stored as an index into
// the alphabetically sorted warning table.
if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
- std::map<std::string, GroupInfo>::iterator I =
- DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
+ std::map<std::string, GroupInfo>::iterator I = DiagsInGroup.find(
+ std::string(DI->getDef()->getValueAsString("GroupName")));
assert(I != DiagsInGroup.end());
OS << ", " << I->second.IDNo;
} else if (DiagsInPedantic.count(&R)) {
@@ -1299,7 +1310,7 @@ static std::string getDiagCategoryEnum(llvm::StringRef name) {
SmallString<256> enumName = llvm::StringRef("DiagCat_");
for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
enumName += isalnum(*I) ? *I : '_';
- return enumName.str();
+ return std::string(enumName.str());
}
/// Emit the array of diagnostic subgroups.
@@ -1335,7 +1346,8 @@ static void emitDiagSubGroups(std::map<std::string, GroupInfo> &DiagsInGroup,
// Emit the groups implicitly in "pedantic".
if (IsPedantic) {
for (auto const &Group : GroupsInPedantic) {
- const std::string &GroupName = Group->getValueAsString("GroupName");
+ const std::string &GroupName =
+ std::string(Group->getValueAsString("GroupName"));
std::map<std::string, GroupInfo>::const_iterator RI =
DiagsInGroup.find(GroupName);
assert(RI != DiagsInGroup.end() && "Referenced without existing?");
@@ -1572,8 +1584,8 @@ namespace {
struct RecordIndexElement
{
RecordIndexElement() {}
- explicit RecordIndexElement(Record const &R):
- Name(R.getName()) {}
+ explicit RecordIndexElement(Record const &R)
+ : Name(std::string(R.getName())) {}
std::string Name;
};
@@ -1614,7 +1626,7 @@ bool isRemarkGroup(const Record *DiagGroup,
bool AnyRemarks = false, AnyNonRemarks = false;
std::function<void(StringRef)> Visit = [&](StringRef GroupName) {
- auto &GroupInfo = DiagsInGroup.find(GroupName)->second;
+ auto &GroupInfo = DiagsInGroup.find(std::string(GroupName))->second;
for (const Record *Diag : GroupInfo.DiagsInGroup)
(isRemark(*Diag) ? AnyRemarks : AnyNonRemarks) = true;
for (const auto &Name : GroupInfo.SubGroups)
@@ -1630,7 +1642,8 @@ bool isRemarkGroup(const Record *DiagGroup,
}
std::string getDefaultSeverity(const Record *Diag) {
- return Diag->getValueAsDef("DefaultSeverity")->getValueAsString("Name");
+ return std::string(
+ Diag->getValueAsDef("DefaultSeverity")->getValueAsString("Name"));
}
std::set<std::string>
@@ -1639,7 +1652,7 @@ getDefaultSeverities(const Record *DiagGroup,
std::set<std::string> States;
std::function<void(StringRef)> Visit = [&](StringRef GroupName) {
- auto &GroupInfo = DiagsInGroup.find(GroupName)->second;
+ auto &GroupInfo = DiagsInGroup.find(std::string(GroupName))->second;
for (const Record *Diag : GroupInfo.DiagsInGroup)
States.insert(getDefaultSeverity(Diag));
for (const auto &Name : GroupInfo.SubGroups)
@@ -1714,7 +1727,8 @@ void clang::EmitClangDiagDocs(RecordKeeper &Records, raw_ostream &OS) {
DiagsInPedantic.begin(),
DiagsInPedantic.end());
for (auto *Group : GroupsInPedantic)
- PedDiags.SubGroups.push_back(Group->getValueAsString("GroupName"));
+ PedDiags.SubGroups.push_back(
+ std::string(Group->getValueAsString("GroupName")));
}
// FIXME: Write diagnostic categories and link to diagnostic groups in each.
@@ -1722,7 +1736,8 @@ void clang::EmitClangDiagDocs(RecordKeeper &Records, raw_ostream &OS) {
// Write out the diagnostic groups.
for (const Record *G : DiagGroups) {
bool IsRemarkGroup = isRemarkGroup(G, DiagsInGroup);
- auto &GroupInfo = DiagsInGroup[G->getValueAsString("GroupName")];
+ auto &GroupInfo =
+ DiagsInGroup[std::string(G->getValueAsString("GroupName"))];
bool IsSynonym = GroupInfo.DiagsInGroup.empty() &&
GroupInfo.SubGroups.size() == 1;
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
index 41d33b550680..7c63cf51ecfa 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
@@ -313,11 +313,11 @@ struct OpenCLTypeStruct {
// Vector size (if applicable; 0 for scalars and generic types).
const unsigned VectorWidth;
// 0 if the type is not a pointer.
- const bool IsPointer;
+ const bool IsPointer : 1;
// 0 if the type is not const.
- const bool IsConst;
+ const bool IsConst : 1;
// 0 if the type is not volatile.
- const bool IsVolatile;
+ const bool IsVolatile : 1;
// Access qualifier.
const OpenCLAccessQual AccessQualifier;
// Address space of the pointer (if applicable).
@@ -333,11 +333,11 @@ struct OpenCLBuiltinStruct {
// index SigTableIndex is the return type.
const unsigned NumTypes;
// Function attribute __attribute__((pure))
- const bool IsPure;
+ const bool IsPure : 1;
// Function attribute __attribute__((const))
- const bool IsConst;
+ const bool IsConst : 1;
// Function attribute __attribute__((convergent))
- const bool IsConv;
+ const bool IsConv : 1;
// OpenCL extension(s) required for this overload.
const unsigned short Extension;
// First OpenCL version in which this overload was introduced (e.g. CL20).
@@ -473,11 +473,18 @@ void BuiltinNameEmitter::EmitSignatureTable() {
// Store a type (e.g. int, float, int2, ...). The type is stored as an index
// of a struct OpenCLType table. Multiple entries following each other form a
// signature.
- OS << "static const unsigned SignatureTable[] = {\n";
+ OS << "static const unsigned short SignatureTable[] = {\n";
for (const auto &P : SignaturesList) {
OS << " // " << P.second << "\n ";
for (const Record *R : P.first) {
- OS << TypeMap.find(R)->second << ", ";
+ unsigned Entry = TypeMap.find(R)->second;
+ if (Entry > USHRT_MAX) {
+ // Report an error when seeing an entry that is too large for the
+ // current index type (unsigned short). When hitting this, the type
+ // of SignatureTable will need to be changed.
+ PrintFatalError("Entry in SignatureTable exceeds limit.");
+ }
+ OS << Entry << ", ";
}
OS << "\n";
}
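The hunk above narrows the emitted SignatureTable element type from unsigned to
unsigned short and makes the generator fail loudly if an index no longer fits.
A minimal standalone sketch of the same guard-before-narrowing pattern, with
illustrative names only (this is not the emitter's own code):

#include <climits>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Refuse to emit a value that the narrower element type of the generated
// table cannot represent, so future growth of the type table becomes a
// hard generation-time error instead of silent truncation.
void emitIndexTable(const std::vector<unsigned> &Entries) {
  std::printf("static const unsigned short SignatureTable[] = {\n");
  for (unsigned Entry : Entries) {
    if (Entry > USHRT_MAX) {
      std::fprintf(stderr, "entry %u exceeds unsigned short range\n", Entry);
      std::exit(1);
    }
    std::printf("  %u,\n", Entry);
  }
  std::printf("};\n");
}

int main() { emitIndexTable({0, 1, 42}); }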
@@ -553,7 +560,7 @@ void BuiltinNameEmitter::GroupBySignature() {
CurSignatureList->push_back(Signature.second);
}
// Sort the list to facilitate future comparisons.
- std::sort(CurSignatureList->begin(), CurSignatureList->end());
+ llvm::sort(*CurSignatureList);
// Check if we have already seen another function with the same list of
// signatures. If so, just add the name of the function.
@@ -597,7 +604,8 @@ void BuiltinNameEmitter::EmitStringMatcher() {
SS << "return std::make_pair(" << CumulativeIndex << ", " << Ovl.size()
<< ");";
SS.flush();
- ValidBuiltins.push_back(StringMatcher::StringPair(FctName, RetStmt));
+ ValidBuiltins.push_back(
+ StringMatcher::StringPair(std::string(FctName), RetStmt));
}
CumulativeIndex += Ovl.size();
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index b944ad9608f5..23aa31cc732f 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -48,7 +48,7 @@ Documentation extractDocumentation(RecordKeeper &Records) {
std::map<std::string, Record*> OptionsByName;
for (Record *R : Records.getAllDerivedDefinitions("Option"))
- OptionsByName[R->getValueAsString("Name")] = R;
+ OptionsByName[std::string(R->getValueAsString("Name"))] = R;
auto Flatten = [](Record *R) {
return R->getValue("DocFlatten") && R->getValueAsBit("DocFlatten");
@@ -81,7 +81,7 @@ Documentation extractDocumentation(RecordKeeper &Records) {
}
// Pretend no-X and Xno-Y options are aliases of X and XY.
- std::string Name = R->getValueAsString("Name");
+ std::string Name = std::string(R->getValueAsString("Name"));
if (Name.size() >= 4) {
if (Name.substr(0, 3) == "no-" && OptionsByName[Name.substr(3)]) {
Aliases[OptionsByName[Name.substr(3)]].push_back(R);
@@ -223,7 +223,7 @@ std::string getRSTStringWithTextFallback(const Record *R, StringRef Primary,
return Field == Primary ? Value.str() : escapeRST(Value);
}
}
- return StringRef();
+ return std::string(StringRef());
}
void emitOptionWithArgs(StringRef Prefix, const Record *Option,
@@ -247,7 +247,7 @@ void emitOptionName(StringRef Prefix, const Record *Option, raw_ostream &OS) {
std::vector<std::string> Args;
if (HasMetaVarName)
- Args.push_back(Option->getValueAsString("MetaVarName"));
+ Args.push_back(std::string(Option->getValueAsString("MetaVarName")));
else if (NumArgs == 1)
Args.push_back("<arg>");
@@ -316,8 +316,8 @@ void emitOption(const DocumentedOption &Option, const Record *DocInfo,
std::vector<std::string> SphinxOptionIDs;
forEachOptionName(Option, DocInfo, [&](const Record *Option) {
for (auto &Prefix : Option->getValueAsListOfStrings("Prefixes"))
- SphinxOptionIDs.push_back(
- getSphinxOptionID((Prefix + Option->getValueAsString("Name")).str()));
+ SphinxOptionIDs.push_back(std::string(getSphinxOptionID(
+ (Prefix + Option->getValueAsString("Name")).str())));
});
assert(!SphinxOptionIDs.empty() && "no flags for option");
static std::map<std::string, int> NextSuffix;
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
index feefbeb41138..00d88274fc38 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -53,7 +53,7 @@ static std::string getCheckerFullName(const Record *R) {
static std::string getStringValue(const Record &R, StringRef field) {
if (StringInit *SI = dyn_cast<StringInit>(R.getValueInit(field)))
- return SI->getValue();
+ return std::string(SI->getValue());
return std::string();
}
@@ -282,6 +282,31 @@ void clang::EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
OS << "\n"
"#endif // GET_CHECKER_DEPENDENCIES\n";
+ // Emit weak dependencies.
+ //
+ // CHECKER_WEAK_DEPENDENCY(FULLNAME, DEPENDENCY)
+ // - FULLNAME: The full name of the checker that is supposed to be
+ // registered first.
+ // - DEPENDENCY: The full name of the checker FULLNAME weakly depends on.
+ OS << "\n"
+ "#ifdef GET_CHECKER_WEAK_DEPENDENCIES\n";
+ for (const Record *Checker : checkers) {
+ if (Checker->isValueUnset("WeakDependencies"))
+ continue;
+
+ for (const Record *Dependency :
+ Checker->getValueAsListOfDefs("WeakDependencies")) {
+ OS << "CHECKER_WEAK_DEPENDENCY(";
+ OS << '\"';
+ OS.write_escaped(getCheckerFullName(Checker)) << "\", ";
+ OS << '\"';
+ OS.write_escaped(getCheckerFullName(Dependency)) << '\"';
+ OS << ")\n";
+ }
+ }
+ OS << "\n"
+ "#endif // GET_CHECKER_WEAK_DEPENDENCIES\n";
+
// Emit a package option.
//
// CHECKER_OPTION(OPTIONTYPE, CHECKERNAME, OPTIONNAME, DESCRIPTION, DEFAULT)
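The GET_CHECKER_WEAK_DEPENDENCIES block added above follows the usual TableGen
.inc protocol: the consumer defines the macro, includes the generated file, and
undefines it again. A hedged sketch of such a consumer; collecting the pairs
into a vector and the function name are illustrative, and the include path is
assumed to match the one used for the other checker tables:

#include <string>
#include <utility>
#include <vector>

// Hypothetical consumer: gather the emitted weak-dependency pairs. The real
// registry code registers each pair through its own API rather than storing
// them; this only illustrates how the generated macro calls are consumed.
std::vector<std::pair<std::string, std::string>> collectWeakDependencies() {
  std::vector<std::pair<std::string, std::string>> Deps;
#define GET_CHECKER_WEAK_DEPENDENCIES
#define CHECKER_WEAK_DEPENDENCY(FULLNAME, DEPENDENCY)                          \
  Deps.emplace_back(FULLNAME, DEPENDENCY);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER_WEAK_DEPENDENCY
#undef GET_CHECKER_WEAK_DEPENDENCIES
  return Deps;
}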
diff --git a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
index 431e5c477c2b..e9ae08ac4c05 100644
--- a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
@@ -60,10 +60,12 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringToOffsetTable.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -78,7 +80,7 @@ using namespace llvm;
namespace {
-class MveEmitter;
+class EmitterBase;
class Result;
// -----------------------------------------------------------------------------
@@ -138,6 +140,7 @@ public:
TypeKind typeKind() const { return TKind; }
virtual ~Type() = default;
virtual bool requiresFloat() const = 0;
+ virtual bool requiresMVE() const = 0;
virtual unsigned sizeInBits() const = 0;
virtual std::string cName() const = 0;
virtual std::string llvmName() const {
@@ -177,6 +180,7 @@ public:
VoidType() : Type(TypeKind::Void) {}
unsigned sizeInBits() const override { return 0; }
bool requiresFloat() const override { return false; }
+ bool requiresMVE() const override { return false; }
std::string cName() const override { return "void"; }
static bool classof(const Type *T) { return T->typeKind() == TypeKind::Void; }
@@ -192,6 +196,7 @@ public:
: Type(TypeKind::Pointer), Pointee(Pointee), Const(Const) {}
unsigned sizeInBits() const override { return 32; }
bool requiresFloat() const override { return Pointee->requiresFloat(); }
+ bool requiresMVE() const override { return Pointee->requiresMVE(); }
std::string cName() const override {
std::string Name = Pointee->cName();
@@ -241,7 +246,7 @@ public:
.Case("u", ScalarTypeKind::UnsignedInt)
.Case("f", ScalarTypeKind::Float);
Bits = Record->getValueAsInt("size");
- NameOverride = Record->getValueAsString("nameOverride");
+ NameOverride = std::string(Record->getValueAsString("nameOverride"));
}
unsigned sizeInBits() const override { return Bits; }
ScalarTypeKind kind() const { return Kind; }
@@ -272,6 +277,7 @@ public:
}
bool isInteger() const { return Kind != ScalarTypeKind::Float; }
bool requiresFloat() const override { return !isInteger(); }
+ bool requiresMVE() const override { return false; }
bool hasNonstandardName() const { return !NameOverride.empty(); }
static bool classof(const Type *T) {
@@ -289,11 +295,12 @@ public:
unsigned sizeInBits() const override { return Lanes * Element->sizeInBits(); }
unsigned lanes() const { return Lanes; }
bool requiresFloat() const override { return Element->requiresFloat(); }
+ bool requiresMVE() const override { return true; }
std::string cNameBase() const override {
return Element->cNameBase() + "x" + utostr(Lanes);
}
std::string llvmName() const override {
- return "llvm::VectorType::get(" + Element->llvmName() + ", " +
+ return "llvm::FixedVectorType::get(" + Element->llvmName() + ", " +
utostr(Lanes) + ")";
}
@@ -315,6 +322,7 @@ public:
}
unsigned registers() const { return Registers; }
bool requiresFloat() const override { return Element->requiresFloat(); }
+ bool requiresMVE() const override { return true; }
std::string cNameBase() const override {
return Element->cNameBase() + "x" + utostr(Registers);
}
@@ -339,13 +347,14 @@ public:
unsigned sizeInBits() const override { return 16; }
std::string cNameBase() const override { return "mve_pred16"; }
bool requiresFloat() const override { return false; };
+ bool requiresMVE() const override { return true; }
std::string llvmName() const override {
// Use <4 x i1> instead of <2 x i1> for two-lane vector types. See
// the comment in llvm/lib/Target/ARM/ARMInstrMVE.td for further
// explanation.
unsigned ModifiedLanes = (Lanes == 2 ? 4 : Lanes);
- return "llvm::VectorType::get(Builder.getInt1Ty(), " +
+ return "llvm::FixedVectorType::get(Builder.getInt1Ty(), " +
utostr(ModifiedLanes) + ")";
}
@@ -403,7 +412,7 @@ struct CodeGenParamAllocator {
// We rely on the recursive code generation working identically in passes 1
// and 2, so that the same list of calls to allocParam happen in the same
// order. That guarantees that the parameter numbers recorded in pass 1 will
- // match the entries in this vector that store what MveEmitter::EmitBuiltinCG
+ // match the entries in this vector that store what EmitterBase::EmitBuiltinCG
// decided to do about each one in pass 2.
std::vector<int> *ParamNumberMap = nullptr;
@@ -422,16 +431,16 @@ struct CodeGenParamAllocator {
// variable we should be keeping things in.
int MapValue = (*ParamNumberMap)[nparams++];
if (MapValue < 0)
- return Value;
+ return std::string(Value);
ParamNumber = MapValue;
}
// If we've allocated a new parameter variable for the first time, store
// its type and value to be retrieved after codegen.
if (ParamTypes && ParamTypes->size() == ParamNumber)
- ParamTypes->push_back(Type);
+ ParamTypes->push_back(std::string(Type));
if (ParamValues && ParamValues->size() == ParamNumber)
- ParamValues->push_back(Value);
+ ParamValues->push_back(std::string(Value));
// Unimaginative naming scheme for parameter variables.
return "Param" + utostr(ParamNumber);
@@ -500,8 +509,17 @@ public:
}
void setPredecessor(Ptr p) {
- assert(!Predecessor);
- Predecessor = p;
+ // If the user has nested one 'seq' node inside another, and this
+ // method is called on the return value of the inner 'seq' (i.e.
+ // the final item inside it), then we can't link _this_ node to p,
+ // because it already has a predecessor. Instead, walk the chain
+ // until we find the first item in the inner seq, and link that to
+ // p, so that nesting seqs has the obvious effect of linking
+ // everything together into one long sequential chain.
+ Result *r = this;
+ while (r->Predecessor)
+ r = r->Predecessor.get();
+ r->Predecessor = p;
}
// Each Result will be assigned a variable name in the output code, but not
@@ -514,7 +532,7 @@ public:
VarNameUsed = true;
return VarName;
}
- void setVarname(const StringRef s) { VarName = s; }
+ void setVarname(const StringRef s) { VarName = std::string(s); }
bool varnameUsed() const { return VarNameUsed; }
// Emit code to generate this result as a Value *.
@@ -713,14 +731,15 @@ public:
std::vector<Ptr> Args;
IRIntrinsicResult(StringRef IntrinsicID, std::vector<const Type *> ParamTypes,
std::vector<Ptr> Args)
- : IntrinsicID(IntrinsicID), ParamTypes(ParamTypes), Args(Args) {}
+ : IntrinsicID(std::string(IntrinsicID)), ParamTypes(ParamTypes),
+ Args(Args) {}
void genCode(raw_ostream &OS,
CodeGenParamAllocator &ParamAlloc) const override {
std::string IntNo = ParamAlloc.allocParam(
"Intrinsic::ID", "Intrinsic::" + IntrinsicID);
OS << "Builder.CreateCall(CGM.getIntrinsic(" << IntNo;
if (!ParamTypes.empty()) {
- OS << ", llvm::SmallVector<llvm::Type *, " << ParamTypes.size() << "> {";
+ OS << ", {";
const char *Sep = "";
for (auto T : ParamTypes) {
OS << Sep << ParamAlloc.allocParam("llvm::Type *", T->llvmName());
@@ -728,7 +747,7 @@ public:
}
OS << "}";
}
- OS << "), llvm::SmallVector<Value *, " << Args.size() << "> {";
+ OS << "), {";
const char *Sep = "";
for (auto Arg : Args) {
OS << Sep << Arg->asValue();
@@ -782,6 +801,9 @@ class ACLEIntrinsic {
// shares with at least one other intrinsic.
std::string ShortName, FullName;
+ // Name of the architecture extension, used in the Clang builtin name
+ StringRef BuiltinExtension;
+
// A very small number of intrinsics _only_ have a polymorphic
// variant (vuninitializedq taking an unevaluated argument).
bool PolymorphicOnly;
@@ -790,6 +812,10 @@ class ACLEIntrinsic {
// evaluate its argument(s) at all.
bool NonEvaluating;
+ // True if the intrinsic needs only the C header part (no codegen, semantic
+ // checks, etc.). Used for redeclaring MVE intrinsics in the arm_cde.h header.
+ bool HeaderOnly;
+
const Type *ReturnType;
std::vector<const Type *> ArgTypes;
std::map<unsigned, ImmediateArg> ImmediateArgs;
@@ -812,6 +838,7 @@ class ACLEIntrinsic {
public:
const std::string &shortName() const { return ShortName; }
const std::string &fullName() const { return FullName; }
+ StringRef builtinExtension() const { return BuiltinExtension; }
const Type *returnType() const { return ReturnType; }
const std::vector<const Type *> &argTypes() const { return ArgTypes; }
bool requiresFloat() const {
@@ -822,13 +849,19 @@ public:
return true;
return false;
}
+ bool requiresMVE() const {
+ return ReturnType->requiresMVE() ||
+ any_of(ArgTypes, [](const Type *T) { return T->requiresMVE(); });
+ }
bool polymorphic() const { return ShortName != FullName; }
bool polymorphicOnly() const { return PolymorphicOnly; }
bool nonEvaluating() const { return NonEvaluating; }
+ bool headerOnly() const { return HeaderOnly; }
- // External entry point for code generation, called from MveEmitter.
+ // External entry point for code generation, called from EmitterBase.
void genCode(raw_ostream &OS, CodeGenParamAllocator &ParamAlloc,
unsigned Pass) const {
+ assert(!headerOnly() && "Called genCode for header-only intrinsic");
if (!hasCode()) {
for (auto kv : CustomCodeGenArgs)
OS << " " << kv.first << " = " << kv.second << ";\n";
@@ -865,10 +898,11 @@ public:
llvm::APInt i = iOrig.trunc(64);
SmallString<40> s;
i.toString(s, 16, true, true);
- return s.str();
+ return std::string(s.str());
}
std::string genSema() const {
+ assert(!headerOnly() && "Called genSema for header-only intrinsic");
std::vector<std::string> SemaChecks;
for (const auto &kv : ImmediateArgs) {
@@ -882,57 +916,59 @@ public:
break;
case ImmediateArg::BoundsType::UInt:
lo = 0;
- hi = IA.i1;
+ hi = llvm::APInt::getMaxValue(IA.i1).zext(128);
break;
}
- llvm::APInt typelo, typehi;
- unsigned Bits = IA.ArgType->sizeInBits();
- if (cast<ScalarType>(IA.ArgType)->kind() == ScalarTypeKind::SignedInt) {
- typelo = llvm::APInt::getSignedMinValue(Bits).sext(128);
- typehi = llvm::APInt::getSignedMaxValue(Bits).sext(128);
- } else {
- typelo = llvm::APInt::getMinValue(Bits).zext(128);
- typehi = llvm::APInt::getMaxValue(Bits).zext(128);
- }
-
std::string Index = utostr(kv.first);
- if (lo.sle(typelo) && hi.sge(typehi))
- SemaChecks.push_back("SemaBuiltinConstantArg(TheCall, " + Index + ")");
- else
+ // Emit a range check if the legal range of values for the
+ // immediate is smaller than the _possible_ range of values for
+ // its type.
+ unsigned ArgTypeBits = IA.ArgType->sizeInBits();
+ llvm::APInt ArgTypeRange = llvm::APInt::getMaxValue(ArgTypeBits).zext(128);
+ llvm::APInt ActualRange = (hi-lo).trunc(64).sext(128);
+ if (ActualRange.ult(ArgTypeRange))
SemaChecks.push_back("SemaBuiltinConstantArgRange(TheCall, " + Index +
", " + signedHexLiteral(lo) + ", " +
signedHexLiteral(hi) + ")");
if (!IA.ExtraCheckType.empty()) {
std::string Suffix;
- if (!IA.ExtraCheckArgs.empty())
- Suffix = (Twine(", ") + IA.ExtraCheckArgs).str();
+ if (!IA.ExtraCheckArgs.empty()) {
+ std::string tmp;
+ StringRef Arg = IA.ExtraCheckArgs;
+ if (Arg == "!lanesize") {
+ tmp = utostr(IA.ArgType->sizeInBits());
+ Arg = tmp;
+ }
+ Suffix = (Twine(", ") + Arg).str();
+ }
SemaChecks.push_back((Twine("SemaBuiltinConstantArg") +
IA.ExtraCheckType + "(TheCall, " + Index +
Suffix + ")")
.str());
}
+
+ assert(!SemaChecks.empty());
}
if (SemaChecks.empty())
return "";
- return (Twine(" return ") +
- join(std::begin(SemaChecks), std::end(SemaChecks),
- " ||\n ") +
- ";\n")
- .str();
+ return join(std::begin(SemaChecks), std::end(SemaChecks),
+ " ||\n ") +
+ ";\n";
}
- ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param);
+ ACLEIntrinsic(EmitterBase &ME, Record *R, const Type *Param);
};
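genSema above now emits SemaBuiltinConstantArgRange only when the legal range
of the immediate is strictly narrower than what its argument type can represent
anyway. A simplified, runnable mirror of that comparison on plain 64-bit
integers (the real code works on 128-bit llvm::APInt and also derives the
bounds from the signed/unsigned/explicit-range kinds):

#include <cstdint>
#include <cstdio>

// Emit a range check only if [Lo, Hi] covers fewer values than an
// ArgTypeBits-wide integer can hold (mirrors ActualRange.ult(ArgTypeRange)).
bool needsRangeCheck(int64_t Lo, int64_t Hi, unsigned ArgTypeBits) {
  if (ArgTypeBits >= 64)
    return false; // assume no immediate range needs more than 64 bits here
  uint64_t TypeRange = (uint64_t{1} << ArgTypeBits) - 1; // max - min
  uint64_t ActualRange = static_cast<uint64_t>(Hi - Lo);
  return ActualRange < TypeRange;
}

int main() {
  std::printf("%d\n", needsRangeCheck(0, 15, 32)); // 1: 0..15 in a 32-bit arg
  std::printf("%d\n", needsRangeCheck(0, 255, 8)); // 0: full 8-bit range
}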
// -----------------------------------------------------------------------------
// The top-level class that holds all the state from analyzing the entire
// Tablegen input.
-class MveEmitter {
- // MveEmitter holds a collection of all the types we've instantiated.
+class EmitterBase {
+protected:
+ // EmitterBase holds a collection of all the types we've instantiated.
VoidType Void;
std::map<std::string, std::unique_ptr<ScalarType>> ScalarTypes;
std::map<std::tuple<ScalarTypeKind, unsigned, unsigned>,
@@ -951,7 +987,7 @@ public:
// maps stored in this object.
const VoidType *getVoidType() { return &Void; }
const ScalarType *getScalarType(StringRef Name) {
- return ScalarTypes[Name].get();
+ return ScalarTypes[std::string(Name)].get();
}
const ScalarType *getScalarType(Record *R) {
return getScalarType(R->getName());
@@ -1007,18 +1043,21 @@ public:
Result::Ptr getCodeForArg(unsigned ArgNum, const Type *ArgType, bool Promote,
bool Immediate);
+ void GroupSemaChecks(std::map<std::string, std::set<std::string>> &Checks);
+
// Constructor and top-level functions.
- MveEmitter(RecordKeeper &Records);
+ EmitterBase(RecordKeeper &Records);
+ virtual ~EmitterBase() = default;
- void EmitHeader(raw_ostream &OS);
- void EmitBuiltinDef(raw_ostream &OS);
- void EmitBuiltinSema(raw_ostream &OS);
+ virtual void EmitHeader(raw_ostream &OS) = 0;
+ virtual void EmitBuiltinDef(raw_ostream &OS) = 0;
+ virtual void EmitBuiltinSema(raw_ostream &OS) = 0;
void EmitBuiltinCG(raw_ostream &OS);
void EmitBuiltinAliases(raw_ostream &OS);
};
-const Type *MveEmitter::getType(Init *I, const Type *Param) {
+const Type *EmitterBase::getType(Init *I, const Type *Param) {
if (auto Dag = dyn_cast<DagInit>(I))
return getType(Dag, Param);
if (auto Def = dyn_cast<DefInit>(I))
@@ -1027,7 +1066,7 @@ const Type *MveEmitter::getType(Init *I, const Type *Param) {
PrintFatalError("Could not convert this value into a type");
}
-const Type *MveEmitter::getType(Record *R, const Type *Param) {
+const Type *EmitterBase::getType(Record *R, const Type *Param) {
// Pass to a subfield of any wrapper records. We don't expect more than one
// of these: immediate operands are used as plain numbers rather than as
// llvm::Value, so it's meaningless to promote their type anyway.
@@ -1046,7 +1085,7 @@ const Type *MveEmitter::getType(Record *R, const Type *Param) {
PrintFatalError(R->getLoc(), "Could not convert this record into a type");
}
-const Type *MveEmitter::getType(DagInit *D, const Type *Param) {
+const Type *EmitterBase::getType(DagInit *D, const Type *Param) {
// The meat of the getType system: types in the Tablegen are represented by a
// dag whose operators select sub-cases of this function.
@@ -1114,8 +1153,8 @@ const Type *MveEmitter::getType(DagInit *D, const Type *Param) {
PrintFatalError("Bad operator in type dag expression");
}
-Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
- const Type *Param) {
+Result::Ptr EmitterBase::getCodeForDag(DagInit *D, const Result::Scope &Scope,
+ const Type *Param) {
Record *Op = cast<DefInit>(D->getOperator())->getDef();
if (Op->getName() == "seq") {
@@ -1128,7 +1167,7 @@ Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
getCodeForDag(cast<DagInit>(D->getArg(i)), SubScope, Param);
StringRef ArgName = D->getArgNameStr(i);
if (!ArgName.empty())
- SubScope[ArgName] = V;
+ SubScope[std::string(ArgName)] = V;
if (PrevV)
V->setPredecessor(PrevV);
PrevV = V;
@@ -1174,6 +1213,18 @@ Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
} else {
PrintFatalError("unsignedflag's argument should be a scalar type");
}
+ } else if (Op->getName() == "bitsize") {
+ if (D->getNumArgs() != 1)
+ PrintFatalError("bitsize should have exactly one argument");
+ Record *TypeRec = cast<DefInit>(D->getArg(0))->getDef();
+ if (!TypeRec->isSubClassOf("Type"))
+ PrintFatalError("bitsize's argument should be a type");
+ if (const auto *ST = dyn_cast<ScalarType>(getType(TypeRec, Param))) {
+ return std::make_shared<IntLiteralResult>(getScalarType("u32"),
+ ST->sizeInBits());
+ } else {
+ PrintFatalError("bitsize's argument should be a scalar type");
+ }
} else {
std::vector<Result::Ptr> Args;
for (unsigned i = 0, e = D->getNumArgs(); i < e; ++i)
@@ -1186,7 +1237,7 @@ Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
if (sp->isSubClassOf("IRBuilderAddrParam")) {
AddressArgs.insert(Index);
} else if (sp->isSubClassOf("IRBuilderIntParam")) {
- IntegerArgs[Index] = sp->getValueAsString("type");
+ IntegerArgs[Index] = std::string(sp->getValueAsString("type"));
}
}
return std::make_shared<IRBuilderResult>(Op->getValueAsString("prefix"),
@@ -1195,7 +1246,7 @@ Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
std::vector<const Type *> ParamTypes;
for (Record *RParam : Op->getValueAsListOfDefs("params"))
ParamTypes.push_back(getType(RParam, Param));
- std::string IntName = Op->getValueAsString("intname");
+ std::string IntName = std::string(Op->getValueAsString("intname"));
if (Op->getValueAsBit("appendKind"))
IntName += "_" + toLetter(cast<ScalarType>(Param)->kind());
return std::make_shared<IRIntrinsicResult>(IntName, ParamTypes, Args);
@@ -1205,9 +1256,9 @@ Result::Ptr MveEmitter::getCodeForDag(DagInit *D, const Result::Scope &Scope,
}
}
-Result::Ptr MveEmitter::getCodeForDagArg(DagInit *D, unsigned ArgNum,
- const Result::Scope &Scope,
- const Type *Param) {
+Result::Ptr EmitterBase::getCodeForDagArg(DagInit *D, unsigned ArgNum,
+ const Result::Scope &Scope,
+ const Type *Param) {
Init *Arg = D->getArg(ArgNum);
StringRef Name = D->getArgNameStr(ArgNum);
@@ -1215,7 +1266,7 @@ Result::Ptr MveEmitter::getCodeForDagArg(DagInit *D, unsigned ArgNum,
if (!isa<UnsetInit>(Arg))
PrintFatalError(
"dag operator argument should not have both a value and a name");
- auto it = Scope.find(Name);
+ auto it = Scope.find(std::string(Name));
if (it == Scope.end())
PrintFatalError("unrecognized variable name '" + Name + "'");
return it->second;
@@ -1239,8 +1290,8 @@ Result::Ptr MveEmitter::getCodeForDagArg(DagInit *D, unsigned ArgNum,
PrintFatalError("bad dag argument type for code generation");
}
-Result::Ptr MveEmitter::getCodeForArg(unsigned ArgNum, const Type *ArgType,
- bool Promote, bool Immediate) {
+Result::Ptr EmitterBase::getCodeForArg(unsigned ArgNum, const Type *ArgType,
+ bool Promote, bool Immediate) {
Result::Ptr V = std::make_shared<BuiltinArgResult>(
ArgNum, isa<PointerType>(ArgType), Immediate);
@@ -1259,7 +1310,7 @@ Result::Ptr MveEmitter::getCodeForArg(unsigned ArgNum, const Type *ArgType,
return V;
}
-ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
+ACLEIntrinsic::ACLEIntrinsic(EmitterBase &ME, Record *R, const Type *Param)
: ReturnType(ME.getType(R->getValueAsDef("ret"), Param)) {
// Derive the intrinsic's full name, by taking the name of the
// Tablegen record (or override) and appending the suffix from its
@@ -1270,7 +1321,8 @@ ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
(R->isSubClassOf("NameOverride") ? R->getValueAsString("basename")
: R->getName());
StringRef overrideLetter = R->getValueAsString("overrideKindLetter");
- FullName = (Twine(BaseName) + Param->acleSuffix(overrideLetter)).str();
+ FullName =
+ (Twine(BaseName) + Param->acleSuffix(std::string(overrideLetter))).str();
// Derive the intrinsic's polymorphic name, by removing components from the
// full name as specified by its 'pnt' member ('polymorphic name type'),
@@ -1297,8 +1349,11 @@ ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
}
ShortName = join(std::begin(NameParts), std::end(NameParts), "_");
+ BuiltinExtension = R->getValueAsString("builtinExtension");
+
PolymorphicOnly = R->getValueAsBit("polymorphicOnly");
NonEvaluating = R->getValueAsBit("nonEvaluating");
+ HeaderOnly = R->getValueAsBit("headerOnly");
// Process the intrinsic's argument list.
DagInit *ArgsDag = R->getValueAsDag("args");
@@ -1360,7 +1415,8 @@ ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
// into the variable-name scope that the code gen will refer to.
StringRef ArgName = ArgsDag->getArgNameStr(i);
if (!ArgName.empty())
- Scope[ArgName] = ME.getCodeForArg(i, ArgType, Promote, Immediate);
+ Scope[std::string(ArgName)] =
+ ME.getCodeForArg(i, ArgType, Promote, Immediate);
}
// Finally, go through the codegen dag and translate it into a Result object
@@ -1378,9 +1434,9 @@ ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
if (Name.empty()) {
PrintFatalError("Operands to CustomCodegen should have names");
} else if (auto *II = dyn_cast<IntInit>(CodeDag->getArg(i))) {
- CustomCodeGenArgs[Name] = itostr(II->getValue());
+ CustomCodeGenArgs[std::string(Name)] = itostr(II->getValue());
} else if (auto *SI = dyn_cast<StringInit>(CodeDag->getArg(i))) {
- CustomCodeGenArgs[Name] = SI->getValue();
+ CustomCodeGenArgs[std::string(Name)] = std::string(SI->getValue());
} else {
PrintFatalError("Operands to CustomCodegen should be integers");
}
@@ -1390,8 +1446,8 @@ ACLEIntrinsic::ACLEIntrinsic(MveEmitter &ME, Record *R, const Type *Param)
}
}
-MveEmitter::MveEmitter(RecordKeeper &Records) {
- // Construct the whole MveEmitter.
+EmitterBase::EmitterBase(RecordKeeper &Records) {
+ // Construct the whole EmitterBase.
// First, look up all the instances of PrimitiveType. This gives us the list
// of vector typedefs we have to put in arm_mve.h, and also allows us to
@@ -1399,7 +1455,7 @@ MveEmitter::MveEmitter(RecordKeeper &Records) {
// use it for operations such as 'find the unsigned version of this signed
// integer type'.
for (Record *R : Records.getAllDerivedDefinitions("PrimitiveType"))
- ScalarTypes[R->getName()] = std::make_unique<ScalarType>(R);
+ ScalarTypes[std::string(R->getName())] = std::make_unique<ScalarType>(R);
// Now go through the instances of Intrinsic, and for each one, iterate
// through its list of type parameters making an ACLEIntrinsic for each one.
@@ -1431,6 +1487,260 @@ public:
: string_holder(), raw_string_ostream(S) {}
};
+const char LLVMLicenseHeader[] =
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM"
+ " Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n"
+ "\n";
+
+// Machinery for the grouping of intrinsics by similar codegen.
+//
+// The general setup is that 'MergeableGroup' stores the things that a set of
+// similarly shaped intrinsics have in common: the text of their code
+// generation, and the number and type of their parameter variables.
+// MergeableGroup is the key in a std::map whose value is a set of
+// OutputIntrinsic, which stores the ways in which a particular intrinsic
+// specializes the MergeableGroup's generic description: the function name and
+// the _values_ of the parameter variables.
+
+struct ComparableStringVector : std::vector<std::string> {
+ // Infrastructure: a derived class of vector<string> which comes with an
+ // ordering, so that it can be used as a key in maps and an element in sets.
+ // There's no requirement on the ordering beyond being deterministic.
+ bool operator<(const ComparableStringVector &rhs) const {
+ if (size() != rhs.size())
+ return size() < rhs.size();
+ for (size_t i = 0, e = size(); i < e; ++i)
+ if ((*this)[i] != rhs[i])
+ return (*this)[i] < rhs[i];
+ return false;
+ }
+};
+
+struct OutputIntrinsic {
+ const ACLEIntrinsic *Int;
+ std::string Name;
+ ComparableStringVector ParamValues;
+ bool operator<(const OutputIntrinsic &rhs) const {
+ if (Name != rhs.Name)
+ return Name < rhs.Name;
+ return ParamValues < rhs.ParamValues;
+ }
+};
+struct MergeableGroup {
+ std::string Code;
+ ComparableStringVector ParamTypes;
+ bool operator<(const MergeableGroup &rhs) const {
+ if (Code != rhs.Code)
+ return Code < rhs.Code;
+ return ParamTypes < rhs.ParamTypes;
+ }
+};
+
+void EmitterBase::EmitBuiltinCG(raw_ostream &OS) {
+ // Pass 1: generate code for all the intrinsics as if every type or constant
+ // that can possibly be abstracted out into a parameter variable will be.
+ // This identifies the sets of intrinsics we'll group together into a single
+ // piece of code generation.
+
+ std::map<MergeableGroup, std::set<OutputIntrinsic>> MergeableGroupsPrelim;
+
+ for (const auto &kv : ACLEIntrinsics) {
+ const ACLEIntrinsic &Int = *kv.second;
+ if (Int.headerOnly())
+ continue;
+
+ MergeableGroup MG;
+ OutputIntrinsic OI;
+
+ OI.Int = &Int;
+ OI.Name = Int.fullName();
+ CodeGenParamAllocator ParamAllocPrelim{&MG.ParamTypes, &OI.ParamValues};
+ raw_string_ostream OS(MG.Code);
+ Int.genCode(OS, ParamAllocPrelim, 1);
+ OS.flush();
+
+ MergeableGroupsPrelim[MG].insert(OI);
+ }
+
+ // Pass 2: for each of those groups, optimize the parameter variable set by
+ // eliminating 'parameters' that are the same for all intrinsics in the
+ // group, and merging together pairs of parameter variables that take the
+ // same values as each other for all intrinsics in the group.
+
+ std::map<MergeableGroup, std::set<OutputIntrinsic>> MergeableGroups;
+
+ for (const auto &kv : MergeableGroupsPrelim) {
+ const MergeableGroup &MG = kv.first;
+ std::vector<int> ParamNumbers;
+ std::map<ComparableStringVector, int> ParamNumberMap;
+
+ // Loop over the parameters for this group.
+ for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i) {
+ // Is this parameter the same for all intrinsics in the group?
+ const OutputIntrinsic &OI_first = *kv.second.begin();
+ bool Constant = all_of(kv.second, [&](const OutputIntrinsic &OI) {
+ return OI.ParamValues[i] == OI_first.ParamValues[i];
+ });
+
+ // If so, record it as -1, meaning 'no parameter variable needed'. Then
+ // the corresponding call to allocParam in pass 2 will not generate a
+ // variable at all, and just use the value inline.
+ if (Constant) {
+ ParamNumbers.push_back(-1);
+ continue;
+ }
+
+ // Otherwise, make a list of the values this parameter takes for each
+ // intrinsic, and see if that value vector matches anything we already
+ // have. We also record the parameter type, so that we don't accidentally
+ // match up two parameter variables with different types. (Not that
+ // there's much chance of them having textually equivalent values, but in
+ // _principle_ it could happen.)
+ ComparableStringVector key;
+ key.push_back(MG.ParamTypes[i]);
+ for (const auto &OI : kv.second)
+ key.push_back(OI.ParamValues[i]);
+
+ auto Found = ParamNumberMap.find(key);
+ if (Found != ParamNumberMap.end()) {
+ // Yes, an existing parameter variable can be reused for this.
+ ParamNumbers.push_back(Found->second);
+ continue;
+ }
+
+ // No, we need a new parameter variable.
+ int ExistingIndex = ParamNumberMap.size();
+ ParamNumberMap[key] = ExistingIndex;
+ ParamNumbers.push_back(ExistingIndex);
+ }
+
+ // Now we're ready to do the pass 2 code generation, which will emit the
+ // reduced set of parameter variables we've just worked out.
+
+ for (const auto &OI_prelim : kv.second) {
+ const ACLEIntrinsic *Int = OI_prelim.Int;
+
+ MergeableGroup MG;
+ OutputIntrinsic OI;
+
+ OI.Int = OI_prelim.Int;
+ OI.Name = OI_prelim.Name;
+ CodeGenParamAllocator ParamAlloc{&MG.ParamTypes, &OI.ParamValues,
+ &ParamNumbers};
+ raw_string_ostream OS(MG.Code);
+ Int->genCode(OS, ParamAlloc, 2);
+ OS.flush();
+
+ MergeableGroups[MG].insert(OI);
+ }
+ }
+
+ // Output the actual C++ code.
+
+ for (const auto &kv : MergeableGroups) {
+ const MergeableGroup &MG = kv.first;
+
+ // List of case statements in the main switch on BuiltinID, and an open
+ // brace.
+ const char *prefix = "";
+ for (const auto &OI : kv.second) {
+ OS << prefix << "case ARM::BI__builtin_arm_" << OI.Int->builtinExtension()
+ << "_" << OI.Name << ":";
+
+ prefix = "\n";
+ }
+ OS << " {\n";
+
+ if (!MG.ParamTypes.empty()) {
+ // If we've got some parameter variables, then emit their declarations...
+ for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i) {
+ StringRef Type = MG.ParamTypes[i];
+ OS << " " << Type;
+ if (!Type.endswith("*"))
+ OS << " ";
+ OS << " Param" << utostr(i) << ";\n";
+ }
+
+ // ... and an inner switch on BuiltinID that will fill them in with each
+ // individual intrinsic's values.
+ OS << " switch (BuiltinID) {\n";
+ for (const auto &OI : kv.second) {
+ OS << " case ARM::BI__builtin_arm_" << OI.Int->builtinExtension()
+ << "_" << OI.Name << ":\n";
+ for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i)
+ OS << " Param" << utostr(i) << " = " << OI.ParamValues[i] << ";\n";
+ OS << " break;\n";
+ }
+ OS << " }\n";
+ }
+
+ // And finally, output the code, and close the outer pair of braces. (The
+ // code will always end with a 'return' statement, so we need not insert a
+ // 'break' here.)
+ OS << MG.Code << "}\n";
+ }
+}
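Pass 2 above decides, per preliminary parameter, whether to inline a value that
is constant across the whole group (-1) or to allocate a parameter variable,
reusing one variable for any two parameters whose value columns coincide. A
self-contained sketch of that decision on toy data; the real emitter also keys
the reuse map on the parameter's type:

#include <cassert>
#include <map>
#include <string>
#include <vector>

// Rows are intrinsics in one mergeable group, columns are the preliminary
// parameters. Returns -1 for "use the value inline" or the index of the
// shared parameter variable to use.
std::vector<int>
assignParams(const std::vector<std::vector<std::string>> &Values) {
  std::vector<int> Assignment;
  std::map<std::vector<std::string>, int> Seen; // value column -> variable
  size_t NumParams = Values.empty() ? 0 : Values[0].size();
  for (size_t P = 0; P != NumParams; ++P) {
    std::vector<std::string> Column;
    for (const auto &Row : Values)
      Column.push_back(Row[P]);
    bool Constant = true;
    for (const auto &V : Column)
      Constant = Constant && (V == Column.front());
    if (Constant) {
      Assignment.push_back(-1);
      continue;
    }
    auto It = Seen.find(Column);
    if (It != Seen.end()) {
      Assignment.push_back(It->second);
    } else {
      int Idx = static_cast<int>(Seen.size());
      Seen.emplace(Column, Idx);
      Assignment.push_back(Idx);
    }
  }
  return Assignment;
}

int main() {
  // First parameter is constant; the other two always take the same pair of
  // values across the group, so they collapse onto a single variable.
  auto A = assignParams({{"Int32Ty", "s", "s"}, {"Int32Ty", "u", "u"}});
  assert(A == (std::vector<int>{-1, 0, 0}));
}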
+
+void EmitterBase::EmitBuiltinAliases(raw_ostream &OS) {
+ // Build a sorted table of:
+ // - intrinsic id number
+ // - full name
+ // - polymorphic name or -1
+ StringToOffsetTable StringTable;
+ OS << "static const IntrinToName MapData[] = {\n";
+ for (const auto &kv : ACLEIntrinsics) {
+ const ACLEIntrinsic &Int = *kv.second;
+ if (Int.headerOnly())
+ continue;
+ int32_t ShortNameOffset =
+ Int.polymorphic() ? StringTable.GetOrAddStringOffset(Int.shortName())
+ : -1;
+ OS << " { ARM::BI__builtin_arm_" << Int.builtinExtension() << "_"
+ << Int.fullName() << ", "
+ << StringTable.GetOrAddStringOffset(Int.fullName()) << ", "
+ << ShortNameOffset << "},\n";
+ }
+ OS << "};\n\n";
+
+ OS << "ArrayRef<IntrinToName> Map(MapData);\n\n";
+
+ OS << "static const char IntrinNames[] = {\n";
+ StringTable.EmitString(OS);
+ OS << "};\n\n";
+}
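EmitBuiltinAliases above writes a table of builtin IDs plus byte offsets into a
single string blob. A hypothetical consumer-side lookup over those tables; the
IntrinToName field layout shown here is an assumption for illustration, since
its real definition lives in the code that includes this generated output:

#include <cstdint>

struct IntrinToName {
  uint32_t Id;       // ARM::BI__builtin_arm_<extension>_<fullname>
  int32_t FullName;  // offset of the full name in IntrinNames
  int32_t ShortName; // offset of the polymorphic name, or -1 if none
};

// Map a builtin ID to its user-facing spelling, preferring the polymorphic
// short name when one was recorded.
const char *builtinAliasName(const IntrinToName *Map, unsigned MapSize,
                             const char *IntrinNames, uint32_t BuiltinID,
                             bool WantPolymorphicName) {
  for (unsigned I = 0; I != MapSize; ++I) {
    if (Map[I].Id != BuiltinID)
      continue;
    if (WantPolymorphicName && Map[I].ShortName >= 0)
      return IntrinNames + Map[I].ShortName;
    return IntrinNames + Map[I].FullName;
  }
  return nullptr; // not an aliased builtin
}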
+
+void EmitterBase::GroupSemaChecks(
+ std::map<std::string, std::set<std::string>> &Checks) {
+ for (const auto &kv : ACLEIntrinsics) {
+ const ACLEIntrinsic &Int = *kv.second;
+ if (Int.headerOnly())
+ continue;
+ std::string Check = Int.genSema();
+ if (!Check.empty())
+ Checks[Check].insert(Int.fullName());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// The class used for generating arm_mve.h and related Clang bits
+//
+
+class MveEmitter : public EmitterBase {
+public:
+ MveEmitter(RecordKeeper &Records) : EmitterBase(Records){};
+ void EmitHeader(raw_ostream &OS) override;
+ void EmitBuiltinDef(raw_ostream &OS) override;
+ void EmitBuiltinSema(raw_ostream &OS) override;
+};
+
void MveEmitter::EmitHeader(raw_ostream &OS) {
// Accumulate pieces of the header file that will be enabled under various
// different combinations of #ifdef. The index into parts[] is made up of
@@ -1454,8 +1764,9 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
raw_ostream &OS = parts[ST->requiresFloat() ? Float : 0];
const VectorType *VT = getVectorType(ST);
- OS << "typedef __attribute__((neon_vector_type(" << VT->lanes() << "))) "
- << ST->cName() << " " << VT->cName() << ";\n";
+ OS << "typedef __attribute__((__neon_vector_type__(" << VT->lanes()
+ << "), __clang_arm_mve_strict_polymorphism)) " << ST->cName() << " "
+ << VT->cName() << ";\n";
// Every vector type also comes with a pair of multi-vector types for
// the VLD2 and VLD4 instructions.
@@ -1524,7 +1835,7 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
// included to be part of the type signature of a builtin that
// was known to clang already.
//
- // The declarations use __attribute__(__clang_arm_mve_alias),
+ // The declarations use __attribute__(__clang_arm_builtin_alias),
// so that each function declared will be recognized as the
// appropriate MVE builtin in spite of its user-facing name.
//
@@ -1563,8 +1874,8 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
// match your call".
OS << "static __inline__ __attribute__(("
- << (Polymorphic ? "overloadable, " : "")
- << "__clang_arm_mve_alias(__builtin_arm_mve_" << Int.fullName()
+ << (Polymorphic ? "__overloadable__, " : "")
+ << "__clang_arm_builtin_alias(__builtin_arm_mve_" << Int.fullName()
<< ")))\n"
<< RetTypeName << FunctionName << "(" << ArgTypesString << ");\n";
}
@@ -1578,19 +1889,8 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
OS << "/*===---- arm_mve.h - ARM MVE intrinsics "
"-----------------------------------===\n"
- " *\n"
- " *\n"
- " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
- "Exceptions.\n"
- " * See https://llvm.org/LICENSE.txt for license information.\n"
- " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
- " *\n"
- " *===-------------------------------------------------------------"
- "----"
- "------===\n"
- " */\n"
- "\n"
- "#ifndef __ARM_MVE_H\n"
+ << LLVMLicenseHeader
+ << "#ifndef __ARM_MVE_H\n"
"#define __ARM_MVE_H\n"
"\n"
"#if !__ARM_FEATURE_MVE\n"
@@ -1598,6 +1898,10 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
"#endif\n"
"\n"
"#include <stdint.h>\n"
+ "\n"
+ "#ifdef __cplusplus\n"
+ "extern \"C\" {\n"
+ "#endif\n"
"\n";
for (size_t i = 0; i < NumParts; ++i) {
@@ -1616,7 +1920,11 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
OS << "#endif /* " << condition << " */\n\n";
}
- OS << "#endif /* __ARM_MVE_H */\n";
+ OS << "#ifdef __cplusplus\n"
+ "} /* extern \"C\" */\n"
+ "#endif\n"
+ "\n"
+ "#endif /* __ARM_MVE_H */\n";
}
void MveEmitter::EmitBuiltinDef(raw_ostream &OS) {
@@ -1632,12 +1940,12 @@ void MveEmitter::EmitBuiltinDef(raw_ostream &OS) {
const ACLEIntrinsic &Int = *kv.second;
if (Int.polymorphic()) {
StringRef Name = Int.shortName();
- if (ShortNamesSeen.find(Name) == ShortNamesSeen.end()) {
+ if (ShortNamesSeen.find(std::string(Name)) == ShortNamesSeen.end()) {
OS << "BUILTIN(__builtin_arm_mve_" << Name << ", \"vi.\", \"nt";
if (Int.nonEvaluating())
OS << "u"; // indicate that this builtin doesn't evaluate its args
OS << "\")\n";
- ShortNamesSeen.insert(Name);
+ ShortNamesSeen.insert(std::string(Name));
}
}
}
@@ -1645,213 +1953,206 @@ void MveEmitter::EmitBuiltinDef(raw_ostream &OS) {
void MveEmitter::EmitBuiltinSema(raw_ostream &OS) {
std::map<std::string, std::set<std::string>> Checks;
-
- for (const auto &kv : ACLEIntrinsics) {
- const ACLEIntrinsic &Int = *kv.second;
- std::string Check = Int.genSema();
- if (!Check.empty())
- Checks[Check].insert(Int.fullName());
- }
+ GroupSemaChecks(Checks);
for (const auto &kv : Checks) {
for (StringRef Name : kv.second)
OS << "case ARM::BI__builtin_arm_mve_" << Name << ":\n";
- OS << kv.first;
+ OS << " return " << kv.first;
}
}
-// Machinery for the grouping of intrinsics by similar codegen.
+// -----------------------------------------------------------------------------
+// Class that describes an ACLE intrinsic implemented as a macro.
//
-// The general setup is that 'MergeableGroup' stores the things that a set of
-// similarly shaped intrinsics have in common: the text of their code
-// generation, and the number and type of their parameter variables.
-// MergeableGroup is the key in a std::map whose value is a set of
-// OutputIntrinsic, which stores the ways in which a particular intrinsic
-// specializes the MergeableGroup's generic description: the function name and
-// the _values_ of the parameter variables.
+// This class is used when the intrinsic is polymorphic in 2 or 3 types, but we
+// want to avoid a combinatorial explosion by reinterpreting the arguments to
+// fixed types.
-struct ComparableStringVector : std::vector<std::string> {
- // Infrastructure: a derived class of vector<string> which comes with an
- // ordering, so that it can be used as a key in maps and an element in sets.
- // There's no requirement on the ordering beyond being deterministic.
- bool operator<(const ComparableStringVector &rhs) const {
- if (size() != rhs.size())
- return size() < rhs.size();
- for (size_t i = 0, e = size(); i < e; ++i)
- if ((*this)[i] != rhs[i])
- return (*this)[i] < rhs[i];
- return false;
- }
-};
+class FunctionMacro {
+ std::vector<StringRef> Params;
+ StringRef Definition;
-struct OutputIntrinsic {
- const ACLEIntrinsic *Int;
- std::string Name;
- ComparableStringVector ParamValues;
- bool operator<(const OutputIntrinsic &rhs) const {
- if (Name != rhs.Name)
- return Name < rhs.Name;
- return ParamValues < rhs.ParamValues;
- }
-};
-struct MergeableGroup {
- std::string Code;
- ComparableStringVector ParamTypes;
- bool operator<(const MergeableGroup &rhs) const {
- if (Code != rhs.Code)
- return Code < rhs.Code;
- return ParamTypes < rhs.ParamTypes;
- }
+public:
+ FunctionMacro(const Record &R);
+
+ const std::vector<StringRef> &getParams() const { return Params; }
+ StringRef getDefinition() const { return Definition; }
};
-void MveEmitter::EmitBuiltinCG(raw_ostream &OS) {
- // Pass 1: generate code for all the intrinsics as if every type or constant
- // that can possibly be abstracted out into a parameter variable will be.
- // This identifies the sets of intrinsics we'll group together into a single
- // piece of code generation.
+FunctionMacro::FunctionMacro(const Record &R) {
+ Params = R.getValueAsListOfStrings("params");
+ Definition = R.getValueAsString("definition");
+}
- std::map<MergeableGroup, std::set<OutputIntrinsic>> MergeableGroupsPrelim;
+// -----------------------------------------------------------------------------
+// The class used for generating arm_cde.h and related Clang bits
+//
- for (const auto &kv : ACLEIntrinsics) {
- const ACLEIntrinsic &Int = *kv.second;
+class CdeEmitter : public EmitterBase {
+ std::map<StringRef, FunctionMacro> FunctionMacros;
- MergeableGroup MG;
- OutputIntrinsic OI;
+public:
+ CdeEmitter(RecordKeeper &Records);
+ void EmitHeader(raw_ostream &OS) override;
+ void EmitBuiltinDef(raw_ostream &OS) override;
+ void EmitBuiltinSema(raw_ostream &OS) override;
+};
- OI.Int = &Int;
- OI.Name = Int.fullName();
- CodeGenParamAllocator ParamAllocPrelim{&MG.ParamTypes, &OI.ParamValues};
- raw_string_ostream OS(MG.Code);
- Int.genCode(OS, ParamAllocPrelim, 1);
- OS.flush();
+CdeEmitter::CdeEmitter(RecordKeeper &Records) : EmitterBase(Records) {
+ for (Record *R : Records.getAllDerivedDefinitions("FunctionMacro"))
+ FunctionMacros.emplace(R->getName(), FunctionMacro(*R));
+}
- MergeableGroupsPrelim[MG].insert(OI);
- }
+void CdeEmitter::EmitHeader(raw_ostream &OS) {
+ // Accumulate pieces of the header file that will be enabled under various
+ // different combinations of #ifdef. The index into parts[] is one of the
+ // following:
+ constexpr unsigned None = 0;
+ constexpr unsigned MVE = 1;
+ constexpr unsigned MVEFloat = 2;
- // Pass 2: for each of those groups, optimize the parameter variable set by
- // eliminating 'parameters' that are the same for all intrinsics in the
- // group, and merging together pairs of parameter variables that take the
- // same values as each other for all intrinsics in the group.
+ constexpr unsigned NumParts = 3;
+ raw_self_contained_string_ostream parts[NumParts];
- std::map<MergeableGroup, std::set<OutputIntrinsic>> MergeableGroups;
+ // Write typedefs for all the required vector types, and a few scalar
+ // types that don't already have the name we want them to have.
- for (const auto &kv : MergeableGroupsPrelim) {
- const MergeableGroup &MG = kv.first;
- std::vector<int> ParamNumbers;
- std::map<ComparableStringVector, int> ParamNumberMap;
+ parts[MVE] << "typedef uint16_t mve_pred16_t;\n";
+ parts[MVEFloat] << "typedef __fp16 float16_t;\n"
+ "typedef float float32_t;\n";
+ for (const auto &kv : ScalarTypes) {
+ const ScalarType *ST = kv.second.get();
+ if (ST->hasNonstandardName())
+ continue;
+ // We don't have float64x2_t
+ if (ST->kind() == ScalarTypeKind::Float && ST->sizeInBits() == 64)
+ continue;
+ raw_ostream &OS = parts[ST->requiresFloat() ? MVEFloat : MVE];
+ const VectorType *VT = getVectorType(ST);
- // Loop over the parameters for this group.
- for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i) {
- // Is this parameter the same for all intrinsics in the group?
- const OutputIntrinsic &OI_first = *kv.second.begin();
- bool Constant = all_of(kv.second, [&](const OutputIntrinsic &OI) {
- return OI.ParamValues[i] == OI_first.ParamValues[i];
- });
+ OS << "typedef __attribute__((__neon_vector_type__(" << VT->lanes()
+ << "), __clang_arm_mve_strict_polymorphism)) " << ST->cName() << " "
+ << VT->cName() << ";\n";
+ }
+ parts[MVE] << "\n";
+ parts[MVEFloat] << "\n";
- // If so, record it as -1, meaning 'no parameter variable needed'. Then
- // the corresponding call to allocParam in pass 2 will not generate a
- // variable at all, and just use the value inline.
- if (Constant) {
- ParamNumbers.push_back(-1);
- continue;
- }
+ // Write declarations for all the intrinsics.
- // Otherwise, make a list of the values this parameter takes for each
- // intrinsic, and see if that value vector matches anything we already
- // have. We also record the parameter type, so that we don't accidentally
- // match up two parameter variables with different types. (Not that
- // there's much chance of them having textually equivalent values, but in
- // _principle_ it could happen.)
- ComparableStringVector key;
- key.push_back(MG.ParamTypes[i]);
- for (const auto &OI : kv.second)
- key.push_back(OI.ParamValues[i]);
+ for (const auto &kv : ACLEIntrinsics) {
+ const ACLEIntrinsic &Int = *kv.second;
- auto Found = ParamNumberMap.find(key);
- if (Found != ParamNumberMap.end()) {
- // Yes, an existing parameter variable can be reused for this.
- ParamNumbers.push_back(Found->second);
+ // We generate each intrinsic twice, under its full unambiguous
+ // name and its shorter polymorphic name (if the latter exists).
+ for (bool Polymorphic : {false, true}) {
+ if (Polymorphic && !Int.polymorphic())
+ continue;
+ if (!Polymorphic && Int.polymorphicOnly())
continue;
- }
- // No, we need a new parameter variable.
- int ExistingIndex = ParamNumberMap.size();
- ParamNumberMap[key] = ExistingIndex;
- ParamNumbers.push_back(ExistingIndex);
+ raw_ostream &OS =
+ parts[Int.requiresFloat() ? MVEFloat
+ : Int.requiresMVE() ? MVE : None];
+
+ // Make the name of the function in this declaration.
+ std::string FunctionName =
+ "__arm_" + (Polymorphic ? Int.shortName() : Int.fullName());
+
+ // Make strings for the types involved in the function's
+ // prototype.
+ std::string RetTypeName = Int.returnType()->cName();
+ if (!StringRef(RetTypeName).endswith("*"))
+ RetTypeName += " ";
+
+ std::vector<std::string> ArgTypeNames;
+ for (const Type *ArgTypePtr : Int.argTypes())
+ ArgTypeNames.push_back(ArgTypePtr->cName());
+ std::string ArgTypesString =
+ join(std::begin(ArgTypeNames), std::end(ArgTypeNames), ", ");
+
+ // Emit the actual declaration. See MveEmitter::EmitHeader for detailed
+      // comments.
+ OS << "static __inline__ __attribute__(("
+ << (Polymorphic ? "__overloadable__, " : "")
+ << "__clang_arm_builtin_alias(__builtin_arm_" << Int.builtinExtension()
+ << "_" << Int.fullName() << ")))\n"
+ << RetTypeName << FunctionName << "(" << ArgTypesString << ");\n";
}
+ }
- // Now we're ready to do the pass 2 code generation, which will emit the
- // reduced set of parameter variables we've just worked out.
-
- for (const auto &OI_prelim : kv.second) {
- const ACLEIntrinsic *Int = OI_prelim.Int;
-
- MergeableGroup MG;
- OutputIntrinsic OI;
-
- OI.Int = OI_prelim.Int;
- OI.Name = OI_prelim.Name;
- CodeGenParamAllocator ParamAlloc{&MG.ParamTypes, &OI.ParamValues,
- &ParamNumbers};
- raw_string_ostream OS(MG.Code);
- Int->genCode(OS, ParamAlloc, 2);
- OS.flush();
+ for (const auto &kv : FunctionMacros) {
+ StringRef Name = kv.first;
+ const FunctionMacro &FM = kv.second;
- MergeableGroups[MG].insert(OI);
- }
+ raw_ostream &OS = parts[MVE];
+ OS << "#define "
+ << "__arm_" << Name << "(" << join(FM.getParams(), ", ") << ") "
+ << FM.getDefinition() << "\n";
}
- // Output the actual C++ code.
-
- for (const auto &kv : MergeableGroups) {
- const MergeableGroup &MG = kv.first;
+ for (auto &part : parts)
+ part << "\n";
- // List of case statements in the main switch on BuiltinID, and an open
- // brace.
- const char *prefix = "";
- for (const auto &OI : kv.second) {
- OS << prefix << "case ARM::BI__builtin_arm_mve_" << OI.Name << ":";
- prefix = "\n";
- }
- OS << " {\n";
+ // Now we've finished accumulating bits and pieces into the parts[] array.
+ // Put it all together to write the final output file.
- if (!MG.ParamTypes.empty()) {
- // If we've got some parameter variables, then emit their declarations...
- for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i) {
- StringRef Type = MG.ParamTypes[i];
- OS << " " << Type;
- if (!Type.endswith("*"))
- OS << " ";
- OS << " Param" << utostr(i) << ";\n";
- }
+ OS << "/*===---- arm_cde.h - ARM CDE intrinsics "
+ "-----------------------------------===\n"
+ << LLVMLicenseHeader
+ << "#ifndef __ARM_CDE_H\n"
+ "#define __ARM_CDE_H\n"
+ "\n"
+ "#if !__ARM_FEATURE_CDE\n"
+ "#error \"CDE support not enabled\"\n"
+ "#endif\n"
+ "\n"
+ "#include <stdint.h>\n"
+ "\n"
+ "#ifdef __cplusplus\n"
+ "extern \"C\" {\n"
+ "#endif\n"
+ "\n";
- // ... and an inner switch on BuiltinID that will fill them in with each
- // individual intrinsic's values.
- OS << " switch (BuiltinID) {\n";
- for (const auto &OI : kv.second) {
- OS << " case ARM::BI__builtin_arm_mve_" << OI.Name << ":\n";
- for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i)
- OS << " Param" << utostr(i) << " = " << OI.ParamValues[i] << ";\n";
- OS << " break;\n";
- }
- OS << " }\n";
- }
+ for (size_t i = 0; i < NumParts; ++i) {
+ std::string condition;
+ if (i == MVEFloat)
+ condition = "__ARM_FEATURE_MVE & 2";
+ else if (i == MVE)
+ condition = "__ARM_FEATURE_MVE";
- // And finally, output the code, and close the outer pair of braces. (The
- // code will always end with a 'return' statement, so we need not insert a
- // 'break' here.)
- OS << MG.Code << "}\n";
+ if (!condition.empty())
+ OS << "#if " << condition << "\n\n";
+ OS << parts[i].str();
+ if (!condition.empty())
+ OS << "#endif /* " << condition << " */\n\n";
}
+
+ OS << "#ifdef __cplusplus\n"
+ "} /* extern \"C\" */\n"
+ "#endif\n"
+ "\n"
+ "#endif /* __ARM_CDE_H */\n";
}
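
To make the output shape concrete: for each intrinsic, the loop above writes one declaration under its full name and, when a polymorphic form exists, a second overloadable declaration under the short name, both aliased to the same builtin. A hypothetical sketch follows; the name "foo_u32", the "cde" extension suffix and the prototype are invented placeholders, not entries from the real CDE records.

/* Hypothetical sketch of the declarations EmitHeader emits for one intrinsic. */
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_foo_u32)))
uint32_t __arm_foo_u32(uint32_t, uint32_t);   /* full, unambiguous name */

static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_foo_u32)))
uint32_t __arm_foo(uint32_t, uint32_t);       /* polymorphic short name */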
-void MveEmitter::EmitBuiltinAliases(raw_ostream &OS) {
+void CdeEmitter::EmitBuiltinDef(raw_ostream &OS) {
for (const auto &kv : ACLEIntrinsics) {
+ if (kv.second->headerOnly())
+ continue;
const ACLEIntrinsic &Int = *kv.second;
- OS << "case ARM::BI__builtin_arm_mve_" << Int.fullName() << ":\n"
- << " return AliasName == \"" << Int.fullName() << "\"";
- if (Int.polymorphic())
- OS << " || AliasName == \"" << Int.shortName() << "\"";
- OS << ";\n";
+ OS << "TARGET_HEADER_BUILTIN(__builtin_arm_cde_" << Int.fullName()
+ << ", \"\", \"ncU\", \"arm_cde.h\", ALL_LANGUAGES, \"\")\n";
+ }
+}
+
+void CdeEmitter::EmitBuiltinSema(raw_ostream &OS) {
+ std::map<std::string, std::set<std::string>> Checks;
+ GroupSemaChecks(Checks);
+
+ for (const auto &kv : Checks) {
+ for (StringRef Name : kv.second)
+ OS << "case ARM::BI__builtin_arm_cde_" << Name << ":\n";
+ OS << " Err = " << kv.first << " break;\n";
}
}
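
The Sema fragment this produces is just a run of case labels sharing one check. A hypothetical sketch (the builtin names and the CheckFooImmediate expression are placeholders; the real check strings come from the TableGen records via GroupSemaChecks):

case ARM::BI__builtin_arm_cde_foo_u32:
case ARM::BI__builtin_arm_cde_foo_s32:
  Err = CheckFooImmediate(TheCall); break;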
@@ -1859,6 +2160,8 @@ void MveEmitter::EmitBuiltinAliases(raw_ostream &OS) {
namespace clang {
+// MVE
+
void EmitMveHeader(RecordKeeper &Records, raw_ostream &OS) {
MveEmitter(Records).EmitHeader(OS);
}
@@ -1879,4 +2182,26 @@ void EmitMveBuiltinAliases(RecordKeeper &Records, raw_ostream &OS) {
MveEmitter(Records).EmitBuiltinAliases(OS);
}
+// CDE
+
+void EmitCdeHeader(RecordKeeper &Records, raw_ostream &OS) {
+ CdeEmitter(Records).EmitHeader(OS);
+}
+
+void EmitCdeBuiltinDef(RecordKeeper &Records, raw_ostream &OS) {
+ CdeEmitter(Records).EmitBuiltinDef(OS);
+}
+
+void EmitCdeBuiltinSema(RecordKeeper &Records, raw_ostream &OS) {
+ CdeEmitter(Records).EmitBuiltinSema(OS);
+}
+
+void EmitCdeBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+ CdeEmitter(Records).EmitBuiltinCG(OS);
+}
+
+void EmitCdeBuiltinAliases(RecordKeeper &Records, raw_ostream &OS) {
+ CdeEmitter(Records).EmitBuiltinAliases(OS);
+}
+
} // end namespace clang
diff --git a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
index a0f3fb2ddc08..d5bf59ef04ad 100644
--- a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
@@ -27,8 +27,9 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
@@ -98,7 +99,8 @@ enum EltType {
Poly128,
Float16,
Float32,
- Float64
+ Float64,
+ BFloat16
};
} // end namespace NeonTypeFlags
@@ -146,6 +148,7 @@ private:
SInt,
UInt,
Poly,
+ BFloat16,
};
TypeKind Kind;
bool Immediate, Constant, Pointer;
@@ -198,6 +201,7 @@ public:
bool isInt() const { return isInteger() && ElementBitwidth == 32; }
bool isLong() const { return isInteger() && ElementBitwidth == 64; }
bool isVoid() const { return Kind == Void; }
+ bool isBFloat16() const { return Kind == BFloat16; }
unsigned getNumElements() const { return Bitwidth / ElementBitwidth; }
unsigned getSizeInBits() const { return Bitwidth; }
unsigned getElementSizeInBits() const { return ElementBitwidth; }
@@ -238,6 +242,11 @@ public:
NumVectors = 1;
}
+ void make32BitElement() {
+ assert_with_loc(Bitwidth > 32, "Not enough bits to make it 32!");
+ ElementBitwidth = 32;
+ }
+
void doubleLanes() {
assert_with_loc(Bitwidth != 128, "Can't get bigger than 128!");
Bitwidth = 128;
@@ -297,14 +306,12 @@ public:
/// The main grunt class. This represents an instantiation of an intrinsic with
/// a particular typespec and prototype.
class Intrinsic {
- friend class DagEmitter;
-
/// The Record this intrinsic was created from.
Record *R;
/// The unmangled name.
std::string Name;
/// The input and output typespecs. InTS == OutTS except when
- /// CartesianProductOfTypes is 1 - this is the case for vreinterpret.
+ /// CartesianProductWith is non-empty - this is the case for vreinterpret.
TypeSpec OutTS, InTS;
/// The base class kind. Most intrinsics use ClassS, which has full type
/// info for integers (s32/u32). Some use ClassI, which doesn't care about
@@ -337,7 +344,7 @@ class Intrinsic {
/// The set of intrinsics that this intrinsic uses/requires.
std::set<Intrinsic *> Dependencies;
/// The "base type", which is Type('d', OutTS). InBaseType is only
- /// different if CartesianProductOfTypes = 1 (for vreinterpret).
+ /// different if CartesianProductWith is non-empty (for vreinterpret).
Type BaseType, InBaseType;
/// The return variable.
Variable RetVar;
@@ -518,7 +525,8 @@ private:
std::pair<Type, std::string> emitDagDupTyped(DagInit *DI);
std::pair<Type, std::string> emitDagShuffle(DagInit *DI);
std::pair<Type, std::string> emitDagCast(DagInit *DI, bool IsBitCast);
- std::pair<Type, std::string> emitDagCall(DagInit *DI);
+ std::pair<Type, std::string> emitDagCall(DagInit *DI,
+ bool MatchMangledName);
std::pair<Type, std::string> emitDagNameReplace(DagInit *DI);
std::pair<Type, std::string> emitDagLiteral(DagInit *DI);
std::pair<Type, std::string> emitDagOp(DagInit *DI);
@@ -546,7 +554,8 @@ class NeonEmitter {
public:
/// Called by Intrinsic - this attempts to get an intrinsic that takes
/// the given types as arguments.
- Intrinsic &getIntrinsic(StringRef Name, ArrayRef<Type> Types);
+ Intrinsic &getIntrinsic(StringRef Name, ArrayRef<Type> Types,
+ Optional<std::string> MangledName);
/// Called by Intrinsic - returns a globally-unique number.
unsigned getUniqueNumber() { return UniqueNumber++; }
@@ -577,8 +586,11 @@ public:
// runFP16 - Emit arm_fp16.h.inc
void runFP16(raw_ostream &o);
- // runHeader - Emit all the __builtin prototypes used in arm_neon.h
- // and arm_fp16.h
+ // runBF16 - Emit arm_bf16.h.inc
+ void runBF16(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h,
+ // arm_fp16.h and arm_bf16.h
void runHeader(raw_ostream &o);
// runTests - Emit tests for all the Neon intrinsics.
@@ -603,6 +615,8 @@ std::string Type::str() const {
S += "poly";
else if (isFloating())
S += "float";
+ else if (isBFloat16())
+ S += "bfloat";
else
S += "int";
@@ -642,7 +656,10 @@ std::string Type::builtin_str() const {
case 128: S += "LLLi"; break;
default: llvm_unreachable("Unhandled case!");
}
- else
+ else if (isBFloat16()) {
+ assert(ElementBitwidth == 16 && "BFloat16 can only be 16 bits");
+ S += "y";
+ } else
switch (ElementBitwidth) {
case 16: S += "h"; break;
case 32: S += "f"; break;
@@ -696,6 +713,11 @@ unsigned Type::getNeonEnum() const {
Base = (unsigned)NeonTypeFlags::Float16 + (Addend - 1);
}
+ if (isBFloat16()) {
+ assert(Addend == 1 && "BFloat16 is only 16 bit");
+ Base = (unsigned)NeonTypeFlags::BFloat16;
+ }
+
if (Bitwidth == 128)
Base |= (unsigned)NeonTypeFlags::QuadFlag;
if (isInteger() && !isSigned())
@@ -719,6 +741,9 @@ Type Type::fromTypedefName(StringRef Name) {
} else if (Name.startswith("poly")) {
T.Kind = Poly;
Name = Name.drop_front(4);
+ } else if (Name.startswith("bfloat")) {
+ T.Kind = BFloat16;
+ Name = Name.drop_front(6);
} else {
assert(Name.startswith("int"));
Name = Name.drop_front(3);
@@ -817,6 +842,10 @@ void Type::applyTypespec(bool &Quad) {
if (isPoly())
NumVectors = 0;
break;
+ case 'b':
+ Kind = BFloat16;
+ ElementBitwidth = 16;
+ break;
default:
llvm_unreachable("Unhandled type code!");
}
@@ -843,6 +872,10 @@ void Type::applyModifiers(StringRef Mods) {
case 'U':
Kind = UInt;
break;
+ case 'B':
+ Kind = BFloat16;
+ ElementBitwidth = 16;
+ break;
case 'F':
Kind = Float;
break;
@@ -924,6 +957,9 @@ std::string Intrinsic::getInstTypeCode(Type T, ClassKind CK) const {
if (CK == ClassB)
return "";
+ if (T.isBFloat16())
+ return "bf16";
+
if (T.isPoly())
typeCode = 'p';
else if (T.isInteger())
@@ -961,7 +997,7 @@ std::string Intrinsic::getBuiltinTypeStr() {
Type RetT = getReturnType();
if ((LocalCK == ClassI || LocalCK == ClassW) && RetT.isScalar() &&
- !RetT.isFloating())
+ !RetT.isFloating() && !RetT.isBFloat16())
RetT.makeInteger(RetT.getElementSizeInBits(), false);
// Since the return value must be one type, return a vector type of the
@@ -1026,7 +1062,8 @@ std::string Intrinsic::mangleName(std::string Name, ClassKind LocalCK) const {
std::string S = Name;
if (Name == "vcvt_f16_f32" || Name == "vcvt_f32_f16" ||
- Name == "vcvt_f32_f64" || Name == "vcvt_f64_f32")
+ Name == "vcvt_f32_f64" || Name == "vcvt_f64_f32" ||
+ Name == "vcvt_f32_bf16")
return Name;
if (!typeCode.empty()) {
@@ -1257,7 +1294,7 @@ void Intrinsic::emitBodyAsBuiltinCall() {
if (!getReturnType().isVoid() && !SRet)
S += "(" + RetVar.getType().str() + ") ";
- S += "__builtin_neon_" + mangleName(N, LocalCK) + "(";
+ S += "__builtin_neon_" + mangleName(std::string(N), LocalCK) + "(";
if (SRet)
S += "&" + RetVar.getName() + ", ";
@@ -1383,8 +1420,8 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDag(DagInit *DI) {
return emitDagSaveTemp(DI);
if (Op == "op")
return emitDagOp(DI);
- if (Op == "call")
- return emitDagCall(DI);
+ if (Op == "call" || Op == "call_mangled")
+ return emitDagCall(DI, Op == "call_mangled");
if (Op == "name_replace")
return emitDagNameReplace(DI);
if (Op == "literal")
@@ -1398,25 +1435,26 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagOp(DagInit *DI) {
if (DI->getNumArgs() == 2) {
// Unary op.
std::pair<Type, std::string> R =
- emitDagArg(DI->getArg(1), DI->getArgNameStr(1));
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
return std::make_pair(R.first, Op + R.second);
} else {
assert(DI->getNumArgs() == 3 && "Can only handle unary and binary ops!");
std::pair<Type, std::string> R1 =
- emitDagArg(DI->getArg(1), DI->getArgNameStr(1));
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
std::pair<Type, std::string> R2 =
- emitDagArg(DI->getArg(2), DI->getArgNameStr(2));
+ emitDagArg(DI->getArg(2), std::string(DI->getArgNameStr(2)));
assert_with_loc(R1.first == R2.first, "Argument type mismatch!");
return std::make_pair(R1.first, R1.second + " " + Op + " " + R2.second);
}
}
-std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCall(DagInit *DI) {
+std::pair<Type, std::string>
+Intrinsic::DagEmitter::emitDagCall(DagInit *DI, bool MatchMangledName) {
std::vector<Type> Types;
std::vector<std::string> Values;
for (unsigned I = 0; I < DI->getNumArgs() - 1; ++I) {
std::pair<Type, std::string> R =
- emitDagArg(DI->getArg(I + 1), DI->getArgNameStr(I + 1));
+ emitDagArg(DI->getArg(I + 1), std::string(DI->getArgNameStr(I + 1)));
Types.push_back(R.first);
Values.push_back(R.second);
}
@@ -1427,7 +1465,13 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCall(DagInit *DI) {
N = SI->getAsUnquotedString();
else
N = emitDagArg(DI->getArg(0), "").second;
- Intrinsic &Callee = Intr.Emitter.getIntrinsic(N, Types);
+ Optional<std::string> MangledName;
+ if (MatchMangledName) {
+ if (Intr.getRecord()->getValueAsBit("isLaneQ"))
+ N += "q";
+ MangledName = Intr.mangleName(N, ClassS);
+ }
+ Intrinsic &Callee = Intr.Emitter.getIntrinsic(N, Types, MangledName);
// Make sure the callee is known as an early def.
Callee.setNeededEarly();
@@ -1451,9 +1495,9 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCall(DagInit *DI) {
std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCast(DagInit *DI,
bool IsBitCast){
// (cast MOD* VAL) -> cast VAL to type given by MOD.
- std::pair<Type, std::string> R = emitDagArg(
- DI->getArg(DI->getNumArgs() - 1),
- DI->getArgNameStr(DI->getNumArgs() - 1));
+ std::pair<Type, std::string> R =
+ emitDagArg(DI->getArg(DI->getNumArgs() - 1),
+ std::string(DI->getArgNameStr(DI->getNumArgs() - 1)));
Type castToType = R.first;
for (unsigned ArgIdx = 0; ArgIdx < DI->getNumArgs() - 1; ++ArgIdx) {
@@ -1465,10 +1509,11 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCast(DagInit *DI,
// 5. The value "H" or "D" to half or double the bitwidth.
// 6. The value "8" to convert to 8-bit (signed) integer lanes.
if (!DI->getArgNameStr(ArgIdx).empty()) {
- assert_with_loc(Intr.Variables.find(DI->getArgNameStr(ArgIdx)) !=
- Intr.Variables.end(),
+ assert_with_loc(Intr.Variables.find(std::string(
+ DI->getArgNameStr(ArgIdx))) != Intr.Variables.end(),
"Variable not found");
- castToType = Intr.Variables[DI->getArgNameStr(ArgIdx)].getType();
+ castToType =
+ Intr.Variables[std::string(DI->getArgNameStr(ArgIdx))].getType();
} else {
StringInit *SI = dyn_cast<StringInit>(DI->getArg(ArgIdx));
assert_with_loc(SI, "Expected string type or $Name for cast type");
@@ -1485,6 +1530,8 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagCast(DagInit *DI,
castToType.doubleLanes();
} else if (SI->getAsUnquotedString() == "8") {
castToType.makeInteger(8, true);
+ } else if (SI->getAsUnquotedString() == "32") {
+ castToType.make32BitElement();
} else {
castToType = Type::fromTypedefName(SI->getAsUnquotedString());
assert_with_loc(!castToType.isVoid(), "Unknown typedef");
@@ -1583,9 +1630,9 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagShuffle(DagInit *DI){
// (shuffle arg1, arg2, sequence)
std::pair<Type, std::string> Arg1 =
- emitDagArg(DI->getArg(0), DI->getArgNameStr(0));
+ emitDagArg(DI->getArg(0), std::string(DI->getArgNameStr(0)));
std::pair<Type, std::string> Arg2 =
- emitDagArg(DI->getArg(1), DI->getArgNameStr(1));
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
assert_with_loc(Arg1.first == Arg2.first,
"Different types in arguments to shuffle!");
@@ -1627,8 +1674,8 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagShuffle(DagInit *DI){
std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagDup(DagInit *DI) {
assert_with_loc(DI->getNumArgs() == 1, "dup() expects one argument");
- std::pair<Type, std::string> A = emitDagArg(DI->getArg(0),
- DI->getArgNameStr(0));
+ std::pair<Type, std::string> A =
+ emitDagArg(DI->getArg(0), std::string(DI->getArgNameStr(0)));
assert_with_loc(A.first.isScalar(), "dup() expects a scalar argument");
Type T = Intr.getBaseType();
@@ -1646,10 +1693,10 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagDup(DagInit *DI) {
std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagDupTyped(DagInit *DI) {
assert_with_loc(DI->getNumArgs() == 2, "dup_typed() expects two arguments");
- std::pair<Type, std::string> A = emitDagArg(DI->getArg(0),
- DI->getArgNameStr(0));
- std::pair<Type, std::string> B = emitDagArg(DI->getArg(1),
- DI->getArgNameStr(1));
+ std::pair<Type, std::string> A =
+ emitDagArg(DI->getArg(0), std::string(DI->getArgNameStr(0)));
+ std::pair<Type, std::string> B =
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
assert_with_loc(B.first.isScalar(),
"dup_typed() requires a scalar as the second argument");
@@ -1668,10 +1715,10 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagDupTyped(DagInit *DI)
std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagSplat(DagInit *DI) {
assert_with_loc(DI->getNumArgs() == 2, "splat() expects two arguments");
- std::pair<Type, std::string> A = emitDagArg(DI->getArg(0),
- DI->getArgNameStr(0));
- std::pair<Type, std::string> B = emitDagArg(DI->getArg(1),
- DI->getArgNameStr(1));
+ std::pair<Type, std::string> A =
+ emitDagArg(DI->getArg(0), std::string(DI->getArgNameStr(0)));
+ std::pair<Type, std::string> B =
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
assert_with_loc(B.first.isScalar(),
"splat() requires a scalar int as the second argument");
@@ -1687,13 +1734,13 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagSplat(DagInit *DI) {
std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagSaveTemp(DagInit *DI) {
assert_with_loc(DI->getNumArgs() == 2, "save_temp() expects two arguments");
- std::pair<Type, std::string> A = emitDagArg(DI->getArg(1),
- DI->getArgNameStr(1));
+ std::pair<Type, std::string> A =
+ emitDagArg(DI->getArg(1), std::string(DI->getArgNameStr(1)));
assert_with_loc(!A.first.isVoid(),
"Argument to save_temp() must have non-void type!");
- std::string N = DI->getArgNameStr(0);
+ std::string N = std::string(DI->getArgNameStr(0));
assert_with_loc(!N.empty(),
"save_temp() expects a name as the first argument");
@@ -1831,7 +1878,8 @@ void Intrinsic::indexBody() {
// NeonEmitter implementation
//===----------------------------------------------------------------------===//
-Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types) {
+Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types,
+ Optional<std::string> MangledName) {
// First, look up the name in the intrinsic map.
assert_with_loc(IntrinsicMap.find(Name.str()) != IntrinsicMap.end(),
("Intrinsic '" + Name + "' not found!").str());
@@ -1860,17 +1908,19 @@ Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types) {
}
ErrMsg += ")\n";
+ if (MangledName && MangledName != I.getMangledName(true))
+ continue;
+
if (I.getNumParams() != Types.size())
continue;
- bool Good = true;
- for (unsigned Arg = 0; Arg < Types.size(); ++Arg) {
- if (I.getParamType(Arg) != Types[Arg]) {
- Good = false;
- break;
- }
- }
- if (Good)
+ unsigned ArgNum = 0;
+ bool MatchingArgumentTypes =
+ std::all_of(Types.begin(), Types.end(), [&](const auto &Type) {
+ return Type == I.getParamType(ArgNum++);
+ });
+
+ if (MatchingArgumentTypes)
GoodVec.push_back(&I);
}
@@ -1883,14 +1933,14 @@ Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types) {
void NeonEmitter::createIntrinsic(Record *R,
SmallVectorImpl<Intrinsic *> &Out) {
- std::string Name = R->getValueAsString("Name");
- std::string Proto = R->getValueAsString("Prototype");
- std::string Types = R->getValueAsString("Types");
+ std::string Name = std::string(R->getValueAsString("Name"));
+ std::string Proto = std::string(R->getValueAsString("Prototype"));
+ std::string Types = std::string(R->getValueAsString("Types"));
Record *OperationRec = R->getValueAsDef("Operation");
- bool CartesianProductOfTypes = R->getValueAsBit("CartesianProductOfTypes");
bool BigEndianSafe = R->getValueAsBit("BigEndianSafe");
- std::string Guard = R->getValueAsString("ArchGuard");
+ std::string Guard = std::string(R->getValueAsString("ArchGuard"));
bool IsUnavailable = OperationRec->getValueAsBit("Unavailable");
+ std::string CartesianProductWith = std::string(R->getValueAsString("CartesianProductWith"));
// Set the global current record. This allows assert_with_loc to produce
// decent location information even when highly nested.
@@ -1905,17 +1955,20 @@ void NeonEmitter::createIntrinsic(Record *R,
CK = ClassMap[R->getSuperClasses()[1].first];
std::vector<std::pair<TypeSpec, TypeSpec>> NewTypeSpecs;
- for (auto TS : TypeSpecs) {
- if (CartesianProductOfTypes) {
+ if (!CartesianProductWith.empty()) {
+ std::vector<TypeSpec> ProductTypeSpecs = TypeSpec::fromTypeSpecs(CartesianProductWith);
+ for (auto TS : TypeSpecs) {
Type DefaultT(TS, ".");
- for (auto SrcTS : TypeSpecs) {
+ for (auto SrcTS : ProductTypeSpecs) {
Type DefaultSrcT(SrcTS, ".");
if (TS == SrcTS ||
DefaultSrcT.getSizeInBits() != DefaultT.getSizeInBits())
continue;
NewTypeSpecs.push_back(std::make_pair(TS, SrcTS));
}
- } else {
+ }
+ } else {
+ for (auto TS : TypeSpecs) {
NewTypeSpecs.push_back(std::make_pair(TS, TS));
}
}
@@ -2143,6 +2196,74 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
genIntrinsicRangeCheckCode(OS, Defs);
}
+static void emitNeonTypeDefs(const std::string& types, raw_ostream &OS) {
+ std::string TypedefTypes(types);
+ std::vector<TypeSpec> TDTypeVec = TypeSpec::fromTypeSpecs(TypedefTypes);
+
+ // Emit vector typedefs.
+ bool InIfdef = false;
+ for (auto &TS : TDTypeVec) {
+ bool IsA64 = false;
+ Type T(TS, ".");
+ if (T.isDouble())
+ IsA64 = true;
+
+ if (InIfdef && !IsA64) {
+ OS << "#endif\n";
+ InIfdef = false;
+ }
+ if (!InIfdef && IsA64) {
+ OS << "#ifdef __aarch64__\n";
+ InIfdef = true;
+ }
+
+ if (T.isPoly())
+ OS << "typedef __attribute__((neon_polyvector_type(";
+ else
+ OS << "typedef __attribute__((neon_vector_type(";
+
+ Type T2 = T;
+ T2.makeScalar();
+ OS << T.getNumElements() << "))) ";
+ OS << T2.str();
+ OS << " " << T.str() << ";\n";
+ }
+ if (InIfdef)
+ OS << "#endif\n";
+ OS << "\n";
+
+ // Emit struct typedefs.
+ InIfdef = false;
+ for (unsigned NumMembers = 2; NumMembers <= 4; ++NumMembers) {
+ for (auto &TS : TDTypeVec) {
+ bool IsA64 = false;
+ Type T(TS, ".");
+ if (T.isDouble())
+ IsA64 = true;
+
+ if (InIfdef && !IsA64) {
+ OS << "#endif\n";
+ InIfdef = false;
+ }
+ if (!InIfdef && IsA64) {
+ OS << "#ifdef __aarch64__\n";
+ InIfdef = true;
+ }
+
+ const char Mods[] = { static_cast<char>('2' + (NumMembers - 2)), 0};
+ Type VT(TS, Mods);
+ OS << "typedef struct " << VT.str() << " {\n";
+ OS << " " << T.str() << " val";
+ OS << "[" << NumMembers << "]";
+ OS << ";\n} ";
+ OS << VT.str() << ";\n";
+ OS << "\n";
+ }
+ }
+ if (InIfdef)
+ OS << "#endif\n";
+}
+
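
For a single 64-bit int16 typespec, the two loops above produce typedefs of this shape (reconstructed from the code, not pasted from a generated arm_neon.h):

typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;

typedef struct int16x4x2_t {
  int16x4_t val[2];
} int16x4x2_t;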
/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
/// consists of type definitions and function declarations.
void NeonEmitter::run(raw_ostream &OS) {
@@ -2191,12 +2312,22 @@ void NeonEmitter::run(raw_ostream &OS) {
OS << "#ifndef __ARM_NEON_H\n";
OS << "#define __ARM_NEON_H\n\n";
+ OS << "#ifndef __ARM_FP\n";
+ OS << "#error \"NEON intrinsics not available with the soft-float ABI. "
+ "Please use -mfloat-abi=softfp or -mfloat-abi=hard\"\n";
+ OS << "#else\n\n";
+
OS << "#if !defined(__ARM_NEON)\n";
OS << "#error \"NEON support not enabled\"\n";
- OS << "#endif\n\n";
+ OS << "#else\n\n";
OS << "#include <stdint.h>\n\n";
+ OS << "#ifdef __ARM_FEATURE_BF16\n";
+ OS << "#include <arm_bf16.h>\n";
+ OS << "typedef __bf16 bfloat16_t;\n";
+ OS << "#endif\n\n";
+
// Emit NEON-specific scalar typedefs.
OS << "typedef float float32_t;\n";
OS << "typedef __fp16 float16_t;\n";
@@ -2214,76 +2345,14 @@ void NeonEmitter::run(raw_ostream &OS) {
OS << "#else\n";
OS << "typedef int8_t poly8_t;\n";
OS << "typedef int16_t poly16_t;\n";
+ OS << "typedef int64_t poly64_t;\n";
OS << "#endif\n";
- // Emit Neon vector typedefs.
- std::string TypedefTypes(
- "cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfdQdPcQPcPsQPsPlQPl");
- std::vector<TypeSpec> TDTypeVec = TypeSpec::fromTypeSpecs(TypedefTypes);
-
- // Emit vector typedefs.
- bool InIfdef = false;
- for (auto &TS : TDTypeVec) {
- bool IsA64 = false;
- Type T(TS, ".");
- if (T.isDouble() || (T.isPoly() && T.getElementSizeInBits() == 64))
- IsA64 = true;
-
- if (InIfdef && !IsA64) {
- OS << "#endif\n";
- InIfdef = false;
- }
- if (!InIfdef && IsA64) {
- OS << "#ifdef __aarch64__\n";
- InIfdef = true;
- }
+ emitNeonTypeDefs("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfdQdPcQPcPsQPsPlQPl", OS);
- if (T.isPoly())
- OS << "typedef __attribute__((neon_polyvector_type(";
- else
- OS << "typedef __attribute__((neon_vector_type(";
-
- Type T2 = T;
- T2.makeScalar();
- OS << T.getNumElements() << "))) ";
- OS << T2.str();
- OS << " " << T.str() << ";\n";
- }
- if (InIfdef)
- OS << "#endif\n";
- OS << "\n";
-
- // Emit struct typedefs.
- InIfdef = false;
- for (unsigned NumMembers = 2; NumMembers <= 4; ++NumMembers) {
- for (auto &TS : TDTypeVec) {
- bool IsA64 = false;
- Type T(TS, ".");
- if (T.isDouble() || (T.isPoly() && T.getElementSizeInBits() == 64))
- IsA64 = true;
-
- if (InIfdef && !IsA64) {
- OS << "#endif\n";
- InIfdef = false;
- }
- if (!InIfdef && IsA64) {
- OS << "#ifdef __aarch64__\n";
- InIfdef = true;
- }
-
- const char Mods[] = { static_cast<char>('2' + (NumMembers - 2)), 0};
- Type VT(TS, Mods);
- OS << "typedef struct " << VT.str() << " {\n";
- OS << " " << T.str() << " val";
- OS << "[" << NumMembers << "]";
- OS << ";\n} ";
- OS << VT.str() << ";\n";
- OS << "\n";
- }
- }
- if (InIfdef)
- OS << "#endif\n";
- OS << "\n";
+ OS << "#ifdef __ARM_FEATURE_BF16\n";
+ emitNeonTypeDefs("bQb", OS);
+ OS << "#endif\n\n";
OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
"__nodebug__))\n\n";
@@ -2340,6 +2409,8 @@ void NeonEmitter::run(raw_ostream &OS) {
OS << "\n";
OS << "#undef __ai\n\n";
+ OS << "#endif /* if !defined(__ARM_NEON) */\n";
+ OS << "#endif /* ifndef __ARM_FP */\n";
OS << "#endif /* __ARM_NEON_H */\n";
}
@@ -2450,6 +2521,84 @@ void NeonEmitter::runFP16(raw_ostream &OS) {
OS << "#endif /* __ARM_FP16_H */\n";
}
+void NeonEmitter::runBF16(raw_ostream &OS) {
+ OS << "/*===---- arm_bf16.h - ARM BF16 intrinsics "
+ "-----------------------------------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_BF16_H\n";
+ OS << "#define __ARM_BF16_H\n\n";
+
+ OS << "typedef __bf16 bfloat16_t;\n";
+
+ OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__))\n\n";
+
+ SmallVector<Intrinsic *, 128> Defs;
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ for (auto *I : Defs)
+ I->indexBody();
+
+ llvm::stable_sort(Defs, llvm::deref<std::less<>>());
+
+ // Only emit a def when its requirements have been met.
+ // FIXME: This loop could be made faster, but it's fast enough for now.
+ bool MadeProgress = true;
+ std::string InGuard;
+ while (!Defs.empty() && MadeProgress) {
+ MadeProgress = false;
+
+ for (SmallVector<Intrinsic *, 128>::iterator I = Defs.begin();
+ I != Defs.end(); /*No step*/) {
+ bool DependenciesSatisfied = true;
+ for (auto *II : (*I)->getDependencies()) {
+ if (llvm::is_contained(Defs, II))
+ DependenciesSatisfied = false;
+ }
+ if (!DependenciesSatisfied) {
+ // Try the next one.
+ ++I;
+ continue;
+ }
+
+ // Emit #endif/#if pair if needed.
+ if ((*I)->getGuard() != InGuard) {
+ if (!InGuard.empty())
+ OS << "#endif\n";
+ InGuard = (*I)->getGuard();
+ if (!InGuard.empty())
+ OS << "#if " << InGuard << "\n";
+ }
+
+ // Actually generate the intrinsic code.
+ OS << (*I)->generate();
+
+ MadeProgress = true;
+ I = Defs.erase(I);
+ }
+ }
+ assert(Defs.empty() && "Some requirements were not satisfied!");
+ if (!InGuard.empty())
+ OS << "#endif\n";
+
+ OS << "\n";
+ OS << "#undef __ai\n\n";
+
+ OS << "#endif\n";
+}
+
void clang::EmitNeon(RecordKeeper &Records, raw_ostream &OS) {
NeonEmitter(Records).run(OS);
}
@@ -2458,6 +2607,10 @@ void clang::EmitFP16(RecordKeeper &Records, raw_ostream &OS) {
NeonEmitter(Records).runFP16(OS);
}
+void clang::EmitBF16(RecordKeeper &Records, raw_ostream &OS) {
+ NeonEmitter(Records).runBF16(OS);
+}
+
void clang::EmitNeonSema(RecordKeeper &Records, raw_ostream &OS) {
NeonEmitter(Records).runHeader(OS);
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
new file mode 100644
index 000000000000..1d42edd8a94a
--- /dev/null
+++ b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
@@ -0,0 +1,1436 @@
+//===- SveEmitter.cpp - Generate arm_sve.h for use with clang -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_sve.h, which includes
+// a declaration and definition of each function specified by the ARM C/C++
+// Language Extensions (ACLE).
+//
+// For details, visit:
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+// Each SVE instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// See also the documentation in include/clang/Basic/arm_sve.td.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/Error.h"
+#include <string>
+#include <sstream>
+#include <set>
+#include <cctype>
+#include <tuple>
+
+using namespace llvm;
+
+enum ClassKind {
+ ClassNone,
+ ClassS, // signed/unsigned, e.g., "_s8", "_u8" suffix
+ ClassG, // Overloaded name without type suffix
+};
+
+using TypeSpec = std::string;
+
+namespace {
+
+class ImmCheck {
+ unsigned Arg;
+ unsigned Kind;
+ unsigned ElementSizeInBits;
+
+public:
+ ImmCheck(unsigned Arg, unsigned Kind, unsigned ElementSizeInBits = 0)
+ : Arg(Arg), Kind(Kind), ElementSizeInBits(ElementSizeInBits) {}
+ ImmCheck(const ImmCheck &Other) = default;
+ ~ImmCheck() = default;
+
+ unsigned getArg() const { return Arg; }
+ unsigned getKind() const { return Kind; }
+ unsigned getElementSizeInBits() const { return ElementSizeInBits; }
+};
+
+class SVEType {
+ TypeSpec TS;
+ bool Float, Signed, Immediate, Void, Constant, Pointer, BFloat;
+ bool DefaultType, IsScalable, Predicate, PredicatePattern, PrefetchOp;
+ unsigned Bitwidth, ElementBitwidth, NumVectors;
+
+public:
+ SVEType() : SVEType(TypeSpec(), 'v') {}
+
+ SVEType(TypeSpec TS, char CharMod)
+ : TS(TS), Float(false), Signed(true), Immediate(false), Void(false),
+ Constant(false), Pointer(false), BFloat(false), DefaultType(false),
+ IsScalable(true), Predicate(false), PredicatePattern(false),
+ PrefetchOp(false), Bitwidth(128), ElementBitwidth(~0U), NumVectors(1) {
+ if (!TS.empty())
+ applyTypespec();
+ applyModifier(CharMod);
+ }
+
+ bool isPointer() const { return Pointer; }
+ bool isVoidPointer() const { return Pointer && Void; }
+ bool isSigned() const { return Signed; }
+ bool isImmediate() const { return Immediate; }
+ bool isScalar() const { return NumVectors == 0; }
+ bool isVector() const { return NumVectors > 0; }
+ bool isScalableVector() const { return isVector() && IsScalable; }
+ bool isChar() const { return ElementBitwidth == 8; }
+ bool isVoid() const { return Void & !Pointer; }
+ bool isDefault() const { return DefaultType; }
+ bool isFloat() const { return Float && !BFloat; }
+ bool isBFloat() const { return BFloat && !Float; }
+ bool isFloatingPoint() const { return Float || BFloat; }
+ bool isInteger() const { return !isFloatingPoint() && !Predicate; }
+ bool isScalarPredicate() const {
+ return !isFloatingPoint() && Predicate && NumVectors == 0;
+ }
+ bool isPredicateVector() const { return Predicate; }
+ bool isPredicatePattern() const { return PredicatePattern; }
+ bool isPrefetchOp() const { return PrefetchOp; }
+ bool isConstant() const { return Constant; }
+ unsigned getElementSizeInBits() const { return ElementBitwidth; }
+ unsigned getNumVectors() const { return NumVectors; }
+
+ unsigned getNumElements() const {
+ assert(ElementBitwidth != ~0U);
+ return Bitwidth / ElementBitwidth;
+ }
+ unsigned getSizeInBits() const {
+ return Bitwidth;
+ }
+
+ /// Return the string representation of a type, which is an encoded
+ /// string for passing to the BUILTIN() macro in Builtins.def.
+ std::string builtin_str() const;
+
+ /// Return the C/C++ string representation of a type for use in the
+ /// arm_sve.h header file.
+ std::string str() const;
+
+private:
+ /// Creates the type based on the typespec string in TS.
+ void applyTypespec();
+
+ /// Applies a prototype modifier to the type.
+ void applyModifier(char Mod);
+};
+
+
+class SVEEmitter;
+
+/// The main grunt class. This represents an instantiation of an intrinsic with
+/// a particular typespec and prototype.
+class Intrinsic {
+ /// The unmangled name.
+ std::string Name;
+
+ /// The name of the corresponding LLVM IR intrinsic.
+ std::string LLVMName;
+
+ /// Intrinsic prototype.
+ std::string Proto;
+
+ /// The base type spec for this intrinsic.
+ TypeSpec BaseTypeSpec;
+
+ /// The base class kind. Most intrinsics use ClassS, which has full type
+ /// info for integers (_s32/_u32), or ClassG which is used for overloaded
+ /// intrinsics.
+ ClassKind Class;
+
+ /// The architectural #ifdef guard.
+ std::string Guard;
+
+  /// The merge suffix such as _m, _x or _z.
+ std::string MergeSuffix;
+
+ /// The types of return value [0] and parameters [1..].
+ std::vector<SVEType> Types;
+
+ /// The "base type", which is VarType('d', BaseTypeSpec).
+ SVEType BaseType;
+
+ uint64_t Flags;
+
+ SmallVector<ImmCheck, 2> ImmChecks;
+
+public:
+ Intrinsic(StringRef Name, StringRef Proto, uint64_t MergeTy,
+ StringRef MergeSuffix, uint64_t MemoryElementTy, StringRef LLVMName,
+ uint64_t Flags, ArrayRef<ImmCheck> ImmChecks, TypeSpec BT,
+ ClassKind Class, SVEEmitter &Emitter, StringRef Guard);
+
+  ~Intrinsic() = default;
+
+ std::string getName() const { return Name; }
+ std::string getLLVMName() const { return LLVMName; }
+ std::string getProto() const { return Proto; }
+ TypeSpec getBaseTypeSpec() const { return BaseTypeSpec; }
+ SVEType getBaseType() const { return BaseType; }
+
+ StringRef getGuard() const { return Guard; }
+ ClassKind getClassKind() const { return Class; }
+
+ SVEType getReturnType() const { return Types[0]; }
+ ArrayRef<SVEType> getTypes() const { return Types; }
+ SVEType getParamType(unsigned I) const { return Types[I + 1]; }
+ unsigned getNumParams() const { return Proto.size() - 1; }
+
+ uint64_t getFlags() const { return Flags; }
+ bool isFlagSet(uint64_t Flag) const { return Flags & Flag;}
+
+ ArrayRef<ImmCheck> getImmChecks() const { return ImmChecks; }
+
+ /// Return the type string for a BUILTIN() macro in Builtins.def.
+ std::string getBuiltinTypeStr();
+
+ /// Return the name, mangled with type information. The name is mangled for
+ /// ClassS, so will add type suffixes such as _u32/_s32.
+ std::string getMangledName() const { return mangleName(ClassS); }
+
+ /// Returns true if the intrinsic is overloaded, in that it should also generate
+ /// a short form without the type-specifiers, e.g. 'svld1(..)' instead of
+ /// 'svld1_u32(..)'.
+ static bool isOverloadedIntrinsic(StringRef Name) {
+ auto BrOpen = Name.find("[");
+ auto BrClose = Name.find(']');
+ return BrOpen != std::string::npos && BrClose != std::string::npos;
+ }
+
+ /// Return true if the intrinsic takes a splat operand.
+ bool hasSplat() const {
+ // These prototype modifiers are described in arm_sve.td.
+ return Proto.find_first_of("ajfrKLR@") != std::string::npos;
+ }
+
+ /// Return the parameter index of the splat operand.
+ unsigned getSplatIdx() const {
+ // These prototype modifiers are described in arm_sve.td.
+ auto Idx = Proto.find_first_of("ajfrKLR@");
+ assert(Idx != std::string::npos && Idx > 0 &&
+ "Prototype has no splat operand");
+ return Idx - 1;
+ }
+
+ /// Emits the intrinsic declaration to the ostream.
+ void emitIntrinsic(raw_ostream &OS) const;
+
+private:
+ std::string getMergeSuffix() const { return MergeSuffix; }
+ std::string mangleName(ClassKind LocalCK) const;
+ std::string replaceTemplatedArgs(std::string Name, TypeSpec TS,
+ std::string Proto) const;
+};
+
+class SVEEmitter {
+private:
+ // The reinterpret builtins are generated separately because they
+ // need the cross product of all types (121 functions in total),
+ // which is inconvenient to specify in the arm_sve.td file or
+ // generate in CGBuiltin.cpp.
+ struct ReinterpretTypeInfo {
+ const char *Suffix;
+ const char *Type;
+ const char *BuiltinType;
+ };
+ SmallVector<ReinterpretTypeInfo, 12> Reinterprets = {
+ {"s8", "svint8_t", "q16Sc"}, {"s16", "svint16_t", "q8Ss"},
+ {"s32", "svint32_t", "q4Si"}, {"s64", "svint64_t", "q2SWi"},
+ {"u8", "svuint8_t", "q16Uc"}, {"u16", "svuint16_t", "q8Us"},
+ {"u32", "svuint32_t", "q4Ui"}, {"u64", "svuint64_t", "q2UWi"},
+ {"f16", "svfloat16_t", "q8h"}, {"bf16", "svbfloat16_t", "q8y"},
+ {"f32", "svfloat32_t", "q4f"}, {"f64", "svfloat64_t", "q2d"}};
+
+ RecordKeeper &Records;
+ llvm::StringMap<uint64_t> EltTypes;
+ llvm::StringMap<uint64_t> MemEltTypes;
+ llvm::StringMap<uint64_t> FlagTypes;
+ llvm::StringMap<uint64_t> MergeTypes;
+ llvm::StringMap<uint64_t> ImmCheckTypes;
+
+public:
+ SVEEmitter(RecordKeeper &R) : Records(R) {
+ for (auto *RV : Records.getAllDerivedDefinitions("EltType"))
+ EltTypes[RV->getNameInitAsString()] = RV->getValueAsInt("Value");
+ for (auto *RV : Records.getAllDerivedDefinitions("MemEltType"))
+ MemEltTypes[RV->getNameInitAsString()] = RV->getValueAsInt("Value");
+ for (auto *RV : Records.getAllDerivedDefinitions("FlagType"))
+ FlagTypes[RV->getNameInitAsString()] = RV->getValueAsInt("Value");
+ for (auto *RV : Records.getAllDerivedDefinitions("MergeType"))
+ MergeTypes[RV->getNameInitAsString()] = RV->getValueAsInt("Value");
+ for (auto *RV : Records.getAllDerivedDefinitions("ImmCheckType"))
+ ImmCheckTypes[RV->getNameInitAsString()] = RV->getValueAsInt("Value");
+ }
+
+ /// Returns the enum value for the immcheck type
+ unsigned getEnumValueForImmCheck(StringRef C) const {
+ auto It = ImmCheckTypes.find(C);
+ if (It != ImmCheckTypes.end())
+ return It->getValue();
+ llvm_unreachable("Unsupported imm check");
+ }
+
+ /// Returns the enum value for the flag type
+ uint64_t getEnumValueForFlag(StringRef C) const {
+ auto Res = FlagTypes.find(C);
+ if (Res != FlagTypes.end())
+ return Res->getValue();
+ llvm_unreachable("Unsupported flag");
+ }
+
+ // Returns the SVETypeFlags for a given value and mask.
+ uint64_t encodeFlag(uint64_t V, StringRef MaskName) const {
+ auto It = FlagTypes.find(MaskName);
+ if (It != FlagTypes.end()) {
+ uint64_t Mask = It->getValue();
+ unsigned Shift = llvm::countTrailingZeros(Mask);
+ return (V << Shift) & Mask;
+ }
+ llvm_unreachable("Unsupported flag");
+ }
+
+ // Returns the SVETypeFlags for the given element type.
+ uint64_t encodeEltType(StringRef EltName) {
+ auto It = EltTypes.find(EltName);
+ if (It != EltTypes.end())
+ return encodeFlag(It->getValue(), "EltTypeMask");
+ llvm_unreachable("Unsupported EltType");
+ }
+
+ // Returns the SVETypeFlags for the given memory element type.
+ uint64_t encodeMemoryElementType(uint64_t MT) {
+ return encodeFlag(MT, "MemEltTypeMask");
+ }
+
+ // Returns the SVETypeFlags for the given merge type.
+ uint64_t encodeMergeType(uint64_t MT) {
+ return encodeFlag(MT, "MergeTypeMask");
+ }
+
+ // Returns the SVETypeFlags for the given splat operand.
+ unsigned encodeSplatOperand(unsigned SplatIdx) {
+ assert(SplatIdx < 7 && "SplatIdx out of encodable range");
+ return encodeFlag(SplatIdx + 1, "SplatOperandMask");
+ }
+
+ // Returns the SVETypeFlags value for the given SVEType.
+ uint64_t encodeTypeFlags(const SVEType &T);
+
+ /// Emit arm_sve.h.
+ void createHeader(raw_ostream &o);
+
+ /// Emit all the __builtin prototypes and code needed by Sema.
+ void createBuiltins(raw_ostream &o);
+
+ /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+ void createCodeGenMap(raw_ostream &o);
+
+ /// Emit all the range checks for the immediates.
+ void createRangeChecks(raw_ostream &o);
+
+ /// Create the SVETypeFlags used in CGBuiltins
+ void createTypeFlags(raw_ostream &o);
+
+ /// Create intrinsic and add it to \p Out
+ void createIntrinsic(Record *R, SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
+};
+
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+std::string SVEType::builtin_str() const {
+ std::string S;
+ if (isVoid())
+ return "v";
+
+ if (isVoidPointer())
+ S += "v";
+ else if (!isFloatingPoint())
+ switch (ElementBitwidth) {
+ case 1: S += "b"; break;
+ case 8: S += "c"; break;
+ case 16: S += "s"; break;
+ case 32: S += "i"; break;
+ case 64: S += "Wi"; break;
+ case 128: S += "LLLi"; break;
+ default: llvm_unreachable("Unhandled case!");
+ }
+ else if (isFloat())
+ switch (ElementBitwidth) {
+ case 16: S += "h"; break;
+ case 32: S += "f"; break;
+ case 64: S += "d"; break;
+ default: llvm_unreachable("Unhandled case!");
+ }
+ else if (isBFloat()) {
+ assert(ElementBitwidth == 16 && "Not a valid BFloat.");
+ S += "y";
+ }
+
+ if (!isFloatingPoint()) {
+ if ((isChar() || isPointer()) && !isVoidPointer()) {
+ // Make chars and typed pointers explicitly signed.
+ if (Signed)
+ S = "S" + S;
+ else if (!Signed)
+ S = "U" + S;
+ } else if (!isVoidPointer() && !Signed) {
+ S = "U" + S;
+ }
+ }
+
+ // Constant indices are "int", but have the "constant expression" modifier.
+ if (isImmediate()) {
+ assert(!isFloat() && "fp immediates are not supported");
+ S = "I" + S;
+ }
+
+ if (isScalar()) {
+ if (Constant) S += "C";
+ if (Pointer) S += "*";
+ return S;
+ }
+
+ assert(isScalableVector() && "Unsupported type");
+ return "q" + utostr(getNumElements() * NumVectors) + S;
+}
+
+std::string SVEType::str() const {
+ if (isPredicatePattern())
+ return "sv_pattern";
+
+ if (isPrefetchOp())
+ return "sv_prfop";
+
+ std::string S;
+ if (Void)
+ S += "void";
+ else {
+ if (isScalableVector())
+ S += "sv";
+ if (!Signed && !isFloatingPoint())
+ S += "u";
+
+ if (Float)
+ S += "float";
+ else if (isScalarPredicate() || isPredicateVector())
+ S += "bool";
+ else if (isBFloat())
+ S += "bfloat";
+ else
+ S += "int";
+
+ if (!isScalarPredicate() && !isPredicateVector())
+ S += utostr(ElementBitwidth);
+ if (!isScalableVector() && isVector())
+ S += "x" + utostr(getNumElements());
+ if (NumVectors > 1)
+ S += "x" + utostr(NumVectors);
+ if (!isScalarPredicate())
+ S += "_t";
+ }
+
+ if (Constant)
+ S += " const";
+ if (Pointer)
+ S += " *";
+
+ return S;
+}
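
Worked by hand from the two functions above (the unsigned and float pairs can be cross-checked against the Reinterprets table earlier in this file):

// unsigned 32-bit elements : str() -> "svuint32_t",  builtin_str() -> "q4Ui"
// 16-bit float elements    : str() -> "svfloat16_t", builtin_str() -> "q8h"
// predicate vector         : str() -> "svbool_t"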
+void SVEType::applyTypespec() {
+ for (char I : TS) {
+ switch (I) {
+ case 'P':
+ Predicate = true;
+ break;
+ case 'U':
+ Signed = false;
+ break;
+ case 'c':
+ ElementBitwidth = 8;
+ break;
+ case 's':
+ ElementBitwidth = 16;
+ break;
+ case 'i':
+ ElementBitwidth = 32;
+ break;
+ case 'l':
+ ElementBitwidth = 64;
+ break;
+ case 'h':
+ Float = true;
+ ElementBitwidth = 16;
+ break;
+ case 'f':
+ Float = true;
+ ElementBitwidth = 32;
+ break;
+ case 'd':
+ Float = true;
+ ElementBitwidth = 64;
+ break;
+ case 'b':
+ BFloat = true;
+ Float = false;
+ ElementBitwidth = 16;
+ break;
+ default:
+ llvm_unreachable("Unhandled type code!");
+ }
+ }
+ assert(ElementBitwidth != ~0U && "Bad element bitwidth!");
+}
+
+void SVEType::applyModifier(char Mod) {
+ switch (Mod) {
+ case '2':
+ NumVectors = 2;
+ break;
+ case '3':
+ NumVectors = 3;
+ break;
+ case '4':
+ NumVectors = 4;
+ break;
+ case 'v':
+ Void = true;
+ break;
+ case 'd':
+ DefaultType = true;
+ break;
+ case 'c':
+ Constant = true;
+ LLVM_FALLTHROUGH;
+ case 'p':
+ Pointer = true;
+ Bitwidth = ElementBitwidth;
+ NumVectors = 0;
+ break;
+ case 'e':
+ Signed = false;
+ ElementBitwidth /= 2;
+ break;
+ case 'h':
+ ElementBitwidth /= 2;
+ break;
+ case 'q':
+ ElementBitwidth /= 4;
+ break;
+ case 'b':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth /= 4;
+ break;
+ case 'o':
+ ElementBitwidth *= 4;
+ break;
+ case 'P':
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ Predicate = true;
+ Bitwidth = 16;
+ ElementBitwidth = 1;
+ break;
+ case 's':
+ case 'a':
+ Bitwidth = ElementBitwidth;
+ NumVectors = 0;
+ break;
+ case 'R':
+ ElementBitwidth /= 2;
+ NumVectors = 0;
+ break;
+ case 'r':
+ ElementBitwidth /= 4;
+ NumVectors = 0;
+ break;
+ case '@':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth /= 4;
+ NumVectors = 0;
+ break;
+ case 'K':
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ Bitwidth = ElementBitwidth;
+ NumVectors = 0;
+ break;
+ case 'L':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ Bitwidth = ElementBitwidth;
+ NumVectors = 0;
+ break;
+ case 'u':
+ Predicate = false;
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ break;
+ case 'x':
+ Predicate = false;
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ break;
+ case 'i':
+ Predicate = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ Signed = false;
+ Immediate = true;
+ break;
+ case 'I':
+ Predicate = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = true;
+ Immediate = true;
+ PredicatePattern = true;
+ break;
+ case 'J':
+ Predicate = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = true;
+ Immediate = true;
+ PrefetchOp = true;
+ break;
+ case 'k':
+ Predicate = false;
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ break;
+ case 'l':
+ Predicate = false;
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
+ case 'm':
+ Predicate = false;
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ break;
+ case 'n':
+ Predicate = false;
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
+ case 'w':
+ ElementBitwidth = 64;
+ break;
+ case 'j':
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
+ case 'f':
+ Signed = false;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
+ case 'g':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = 64;
+ break;
+ case 't':
+ Signed = true;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = 32;
+ break;
+ case 'z':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = 32;
+ break;
+ case 'O':
+ Predicate = false;
+ Float = true;
+ ElementBitwidth = 16;
+ break;
+ case 'M':
+ Predicate = false;
+ Float = true;
+ BFloat = false;
+ ElementBitwidth = 32;
+ break;
+ case 'N':
+ Predicate = false;
+ Float = true;
+ ElementBitwidth = 64;
+ break;
+ case 'Q':
+ Constant = true;
+ Pointer = true;
+ Void = true;
+ NumVectors = 0;
+ break;
+ case 'S':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 8;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'W':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 8;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ case 'T':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 16;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'X':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 16;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ case 'Y':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ case 'U':
+ Constant = true;
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'A':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 8;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'B':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 16;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'C':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'D':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ Signed = true;
+ break;
+ case 'E':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 8;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ case 'F':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 16;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ case 'G':
+ Pointer = true;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ Signed = false;
+ break;
+ default:
+ llvm_unreachable("Unhandled character!");
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Intrinsic implementation
+//===----------------------------------------------------------------------===//
+
+Intrinsic::Intrinsic(StringRef Name, StringRef Proto, uint64_t MergeTy,
+ StringRef MergeSuffix, uint64_t MemoryElementTy,
+ StringRef LLVMName, uint64_t Flags,
+ ArrayRef<ImmCheck> Checks, TypeSpec BT, ClassKind Class,
+ SVEEmitter &Emitter, StringRef Guard)
+ : Name(Name.str()), LLVMName(LLVMName), Proto(Proto.str()),
+ BaseTypeSpec(BT), Class(Class), Guard(Guard.str()),
+ MergeSuffix(MergeSuffix.str()), BaseType(BT, 'd'), Flags(Flags),
+ ImmChecks(Checks.begin(), Checks.end()) {
+ // Types[0] is the return value.
+ for (unsigned I = 0; I < Proto.size(); ++I) {
+ SVEType T(BaseTypeSpec, Proto[I]);
+ Types.push_back(T);
+
+ // Add range checks for immediates
+ if (I > 0) {
+ if (T.isPredicatePattern())
+ ImmChecks.emplace_back(
+ I - 1, Emitter.getEnumValueForImmCheck("ImmCheck0_31"));
+ else if (T.isPrefetchOp())
+ ImmChecks.emplace_back(
+ I - 1, Emitter.getEnumValueForImmCheck("ImmCheck0_13"));
+ }
+ }
+
+ // Set flags based on properties
+ this->Flags |= Emitter.encodeTypeFlags(BaseType);
+ this->Flags |= Emitter.encodeMemoryElementType(MemoryElementTy);
+ this->Flags |= Emitter.encodeMergeType(MergeTy);
+ if (hasSplat())
+ this->Flags |= Emitter.encodeSplatOperand(getSplatIdx());
+}
+
+std::string Intrinsic::getBuiltinTypeStr() {
+ std::string S = getReturnType().builtin_str();
+ for (unsigned I = 0; I < getNumParams(); ++I)
+ S += getParamType(I).builtin_str();
+
+ return S;
+}
+
+std::string Intrinsic::replaceTemplatedArgs(std::string Name, TypeSpec TS,
+ std::string Proto) const {
+ std::string Ret = Name;
+ while (Ret.find('{') != std::string::npos) {
+ size_t Pos = Ret.find('{');
+ size_t End = Ret.find('}');
+ unsigned NumChars = End - Pos + 1;
+ assert(NumChars == 3 && "Unexpected template argument");
+
+ SVEType T;
+ char C = Ret[Pos+1];
+ switch(C) {
+ default:
+ llvm_unreachable("Unknown predication specifier");
+ case 'd':
+ T = SVEType(TS, 'd');
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ T = SVEType(TS, Proto[C - '0']);
+ break;
+ }
+
+ // Replace templated arg with the right suffix (e.g. u32)
+ std::string TypeCode;
+ if (T.isInteger())
+ TypeCode = T.isSigned() ? 's' : 'u';
+ else if (T.isPredicateVector())
+ TypeCode = 'b';
+ else if (T.isBFloat())
+ TypeCode = "bf";
+ else
+ TypeCode = 'f';
+ Ret.replace(Pos, NumChars, TypeCode + utostr(T.getElementSizeInBits()));
+ }
+
+ return Ret;
+}
+
+std::string Intrinsic::mangleName(ClassKind LocalCK) const {
+ std::string S = getName();
+
+ if (LocalCK == ClassG) {
+ // Remove the square brackets and everything in between.
+ while (S.find("[") != std::string::npos) {
+ auto Start = S.find("[");
+ auto End = S.find(']');
+ S.erase(Start, (End-Start)+1);
+ }
+ } else {
+ // Remove the square brackets.
+ while (S.find("[") != std::string::npos) {
+ auto BrPos = S.find('[');
+ if (BrPos != std::string::npos)
+ S.erase(BrPos, 1);
+ BrPos = S.find(']');
+ if (BrPos != std::string::npos)
+ S.erase(BrPos, 1);
+ }
+ }
+
+ // Replace all {d} like expressions with e.g. 'u32'
+ return replaceTemplatedArgs(S, getBaseTypeSpec(), getProto()) +
+ getMergeSuffix();
+}
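
A hypothetical example of the mangling; the record name "svfoo[_{d}]", the u32 base type and the "_m" merge suffix are all invented for illustration:

// Record name "svfoo[_{d}]", base type u32, merge suffix "_m":
//   mangleName(ClassS) -> "svfoo_u32_m"  (brackets stripped, {d} -> u32)
//   mangleName(ClassG) -> "svfoo_m"      (bracketed section removed entirely)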
+
+void Intrinsic::emitIntrinsic(raw_ostream &OS) const {
+  // Use the preprocessor to forward the call straight to the type-suffixed
+  // builtin when there is no separate overloaded declaration to emit.
+ if (getClassKind() != ClassG || getProto().size() <= 1) {
+ OS << "#define " << mangleName(getClassKind())
+ << "(...) __builtin_sve_" << mangleName(ClassS)
+ << "(__VA_ARGS__)\n";
+ } else {
+ std::string FullName = mangleName(ClassS);
+ std::string ProtoName = mangleName(ClassG);
+
+ OS << "__aio __attribute__((__clang_arm_builtin_alias("
+ << "__builtin_sve_" << FullName << ")))\n";
+
+ OS << getTypes()[0].str() << " " << ProtoName << "(";
+ for (unsigned I = 0; I < getTypes().size() - 1; ++I) {
+ if (I != 0)
+ OS << ", ";
+ OS << getTypes()[I + 1].str();
+ }
+ OS << ");\n";
+ }
+}
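
Continuing the hypothetical "svfoo" example, emitIntrinsic writes either a forwarding macro or an overloaded alias declaration, roughly as below; the prototype is invented, and __aio is the always-inline helper macro defined elsewhere in the generated header.

/* ClassS instantiation, or nothing to overload on: */
#define svfoo_u32_m(...) __builtin_sve_svfoo_u32_m(__VA_ARGS__)

/* ClassG (overloaded) instantiation: */
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svfoo_u32_m)))
svuint32_t svfoo_m(svbool_t, svuint32_t, svuint32_t);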
+
+//===----------------------------------------------------------------------===//
+// SVEEmitter implementation
+//===----------------------------------------------------------------------===//
+uint64_t SVEEmitter::encodeTypeFlags(const SVEType &T) {
+ if (T.isFloat()) {
+ switch (T.getElementSizeInBits()) {
+ case 16:
+ return encodeEltType("EltTyFloat16");
+ case 32:
+ return encodeEltType("EltTyFloat32");
+ case 64:
+ return encodeEltType("EltTyFloat64");
+ default:
+ llvm_unreachable("Unhandled float element bitwidth!");
+ }
+ }
+
+ if (T.isBFloat()) {
+ assert(T.getElementSizeInBits() == 16 && "Not a valid BFloat.");
+ return encodeEltType("EltTyBFloat16");
+ }
+
+ if (T.isPredicateVector()) {
+ switch (T.getElementSizeInBits()) {
+ case 8:
+ return encodeEltType("EltTyBool8");
+ case 16:
+ return encodeEltType("EltTyBool16");
+ case 32:
+ return encodeEltType("EltTyBool32");
+ case 64:
+ return encodeEltType("EltTyBool64");
+ default:
+ llvm_unreachable("Unhandled predicate element bitwidth!");
+ }
+ }
+
+ switch (T.getElementSizeInBits()) {
+ case 8:
+ return encodeEltType("EltTyInt8");
+ case 16:
+ return encodeEltType("EltTyInt16");
+ case 32:
+ return encodeEltType("EltTyInt32");
+ case 64:
+ return encodeEltType("EltTyInt64");
+ default:
+ llvm_unreachable("Unhandled integer element bitwidth!");
+ }
+}
+
+void SVEEmitter::createIntrinsic(
+ Record *R, SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out) {
+ StringRef Name = R->getValueAsString("Name");
+ StringRef Proto = R->getValueAsString("Prototype");
+ StringRef Types = R->getValueAsString("Types");
+ StringRef Guard = R->getValueAsString("ArchGuard");
+ StringRef LLVMName = R->getValueAsString("LLVMIntrinsic");
+ uint64_t Merge = R->getValueAsInt("Merge");
+ StringRef MergeSuffix = R->getValueAsString("MergeSuffix");
+ uint64_t MemEltType = R->getValueAsInt("MemEltType");
+ std::vector<Record*> FlagsList = R->getValueAsListOfDefs("Flags");
+ std::vector<Record*> ImmCheckList = R->getValueAsListOfDefs("ImmChecks");
+
+ int64_t Flags = 0;
+ for (auto FlagRec : FlagsList)
+ Flags |= FlagRec->getValueAsInt("Value");
+
+ // Create a dummy TypeSpec for non-overloaded builtins.
+ if (Types.empty()) {
+ assert((Flags & getEnumValueForFlag("IsOverloadNone")) &&
+ "Expect TypeSpec for overloaded builtin!");
+ Types = "i";
+ }
+
+ // Extract type specs from string
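+ // (A lowercase character closes a spec, so an illustrative Types string
+ // such as "UcUs" would split into the specs "Uc" and "Us".)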
+ SmallVector<TypeSpec, 8> TypeSpecs;
+ TypeSpec Acc;
+ for (char I : Types) {
+ Acc.push_back(I);
+ if (islower(I)) {
+ TypeSpecs.push_back(TypeSpec(Acc));
+ Acc.clear();
+ }
+ }
+
+ // Remove duplicate type specs.
+ llvm::sort(TypeSpecs);
+ TypeSpecs.erase(std::unique(TypeSpecs.begin(), TypeSpecs.end()),
+ TypeSpecs.end());
+
+ // Create an Intrinsic for each type spec.
+ for (auto TS : TypeSpecs) {
+ // Collate a list of range/option checks for the immediates.
+ SmallVector<ImmCheck, 2> ImmChecks;
+ for (auto *R : ImmCheckList) {
+ int64_t Arg = R->getValueAsInt("Arg");
+ int64_t EltSizeArg = R->getValueAsInt("EltSizeArg");
+ int64_t Kind = R->getValueAsDef("Kind")->getValueAsInt("Value");
+ assert(Arg >= 0 && Kind >= 0 && "Arg and Kind must be nonnegative");
+
+ unsigned ElementSizeInBits = 0;
+ if (EltSizeArg >= 0)
+ ElementSizeInBits =
+ SVEType(TS, Proto[EltSizeArg + /* offset by return arg */ 1])
+ .getElementSizeInBits();
+ ImmChecks.push_back(ImmCheck(Arg, Kind, ElementSizeInBits));
+ }
+
+ Out.push_back(std::make_unique<Intrinsic>(
+ Name, Proto, Merge, MergeSuffix, MemEltType, LLVMName, Flags, ImmChecks,
+ TS, ClassS, *this, Guard));
+
+ // Also generate the short-form (e.g. svadd_m) for the given type-spec.
+ if (Intrinsic::isOverloadedIntrinsic(Name))
+ Out.push_back(std::make_unique<Intrinsic>(
+ Name, Proto, Merge, MergeSuffix, MemEltType, LLVMName, Flags,
+ ImmChecks, TS, ClassG, *this, Guard));
+ }
+}
+
+void SVEEmitter::createHeader(raw_ostream &OS) {
+ OS << "/*===---- arm_sve.h - ARM SVE intrinsics "
+ "-----------------------------------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_SVE_H\n";
+ OS << "#define __ARM_SVE_H\n\n";
+
+ OS << "#if !defined(__ARM_FEATURE_SVE)\n";
+ OS << "#error \"SVE support not enabled\"\n";
+ OS << "#else\n\n";
+
+ OS << "#if !defined(__LITTLE_ENDIAN__)\n";
+ OS << "#error \"Big endian is currently not supported for arm_sve.h\"\n";
+ OS << "#endif\n";
+
+ OS << "#include <stdint.h>\n\n";
+ OS << "#ifdef __cplusplus\n";
+ OS << "extern \"C\" {\n";
+ OS << "#else\n";
+ OS << "#include <stdbool.h>\n";
+ OS << "#endif\n\n";
+
+ OS << "typedef __fp16 float16_t;\n";
+ OS << "typedef float float32_t;\n";
+ OS << "typedef double float64_t;\n";
+
+ OS << "typedef __SVInt8_t svint8_t;\n";
+ OS << "typedef __SVInt16_t svint16_t;\n";
+ OS << "typedef __SVInt32_t svint32_t;\n";
+ OS << "typedef __SVInt64_t svint64_t;\n";
+ OS << "typedef __SVUint8_t svuint8_t;\n";
+ OS << "typedef __SVUint16_t svuint16_t;\n";
+ OS << "typedef __SVUint32_t svuint32_t;\n";
+ OS << "typedef __SVUint64_t svuint64_t;\n";
+ OS << "typedef __SVFloat16_t svfloat16_t;\n\n";
+
+ OS << "#if defined(__ARM_FEATURE_SVE_BF16) && "
+ "!defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)\n";
+ OS << "#error \"__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when "
+ "__ARM_FEATURE_SVE_BF16 is defined\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
+ OS << "typedef __SVBFloat16_t svbfloat16_t;\n";
+ OS << "#endif\n\n";
+
+ OS << "#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)\n";
+ OS << "#include <arm_bf16.h>\n";
+ OS << "typedef __bf16 bfloat16_t;\n";
+ OS << "#endif\n\n";
+
+ OS << "typedef __SVFloat32_t svfloat32_t;\n";
+ OS << "typedef __SVFloat64_t svfloat64_t;\n";
+ OS << "typedef __clang_svint8x2_t svint8x2_t;\n";
+ OS << "typedef __clang_svint16x2_t svint16x2_t;\n";
+ OS << "typedef __clang_svint32x2_t svint32x2_t;\n";
+ OS << "typedef __clang_svint64x2_t svint64x2_t;\n";
+ OS << "typedef __clang_svuint8x2_t svuint8x2_t;\n";
+ OS << "typedef __clang_svuint16x2_t svuint16x2_t;\n";
+ OS << "typedef __clang_svuint32x2_t svuint32x2_t;\n";
+ OS << "typedef __clang_svuint64x2_t svuint64x2_t;\n";
+ OS << "typedef __clang_svfloat16x2_t svfloat16x2_t;\n";
+ OS << "typedef __clang_svfloat32x2_t svfloat32x2_t;\n";
+ OS << "typedef __clang_svfloat64x2_t svfloat64x2_t;\n";
+ OS << "typedef __clang_svint8x3_t svint8x3_t;\n";
+ OS << "typedef __clang_svint16x3_t svint16x3_t;\n";
+ OS << "typedef __clang_svint32x3_t svint32x3_t;\n";
+ OS << "typedef __clang_svint64x3_t svint64x3_t;\n";
+ OS << "typedef __clang_svuint8x3_t svuint8x3_t;\n";
+ OS << "typedef __clang_svuint16x3_t svuint16x3_t;\n";
+ OS << "typedef __clang_svuint32x3_t svuint32x3_t;\n";
+ OS << "typedef __clang_svuint64x3_t svuint64x3_t;\n";
+ OS << "typedef __clang_svfloat16x3_t svfloat16x3_t;\n";
+ OS << "typedef __clang_svfloat32x3_t svfloat32x3_t;\n";
+ OS << "typedef __clang_svfloat64x3_t svfloat64x3_t;\n";
+ OS << "typedef __clang_svint8x4_t svint8x4_t;\n";
+ OS << "typedef __clang_svint16x4_t svint16x4_t;\n";
+ OS << "typedef __clang_svint32x4_t svint32x4_t;\n";
+ OS << "typedef __clang_svint64x4_t svint64x4_t;\n";
+ OS << "typedef __clang_svuint8x4_t svuint8x4_t;\n";
+ OS << "typedef __clang_svuint16x4_t svuint16x4_t;\n";
+ OS << "typedef __clang_svuint32x4_t svuint32x4_t;\n";
+ OS << "typedef __clang_svuint64x4_t svuint64x4_t;\n";
+ OS << "typedef __clang_svfloat16x4_t svfloat16x4_t;\n";
+ OS << "typedef __clang_svfloat32x4_t svfloat32x4_t;\n";
+ OS << "typedef __clang_svfloat64x4_t svfloat64x4_t;\n";
+ OS << "typedef __SVBool_t svbool_t;\n\n";
+
+ OS << "#ifdef __ARM_FEATURE_SVE_BF16\n";
+ OS << "typedef __clang_svbfloat16x2_t svbfloat16x2_t;\n";
+ OS << "typedef __clang_svbfloat16x3_t svbfloat16x3_t;\n";
+ OS << "typedef __clang_svbfloat16x4_t svbfloat16x4_t;\n";
+ OS << "#endif\n";
+
+ OS << "typedef enum\n";
+ OS << "{\n";
+ OS << " SV_POW2 = 0,\n";
+ OS << " SV_VL1 = 1,\n";
+ OS << " SV_VL2 = 2,\n";
+ OS << " SV_VL3 = 3,\n";
+ OS << " SV_VL4 = 4,\n";
+ OS << " SV_VL5 = 5,\n";
+ OS << " SV_VL6 = 6,\n";
+ OS << " SV_VL7 = 7,\n";
+ OS << " SV_VL8 = 8,\n";
+ OS << " SV_VL16 = 9,\n";
+ OS << " SV_VL32 = 10,\n";
+ OS << " SV_VL64 = 11,\n";
+ OS << " SV_VL128 = 12,\n";
+ OS << " SV_VL256 = 13,\n";
+ OS << " SV_MUL4 = 29,\n";
+ OS << " SV_MUL3 = 30,\n";
+ OS << " SV_ALL = 31\n";
+ OS << "} sv_pattern;\n\n";
+
+ OS << "typedef enum\n";
+ OS << "{\n";
+ OS << " SV_PLDL1KEEP = 0,\n";
+ OS << " SV_PLDL1STRM = 1,\n";
+ OS << " SV_PLDL2KEEP = 2,\n";
+ OS << " SV_PLDL2STRM = 3,\n";
+ OS << " SV_PLDL3KEEP = 4,\n";
+ OS << " SV_PLDL3STRM = 5,\n";
+ OS << " SV_PSTL1KEEP = 8,\n";
+ OS << " SV_PSTL1STRM = 9,\n";
+ OS << " SV_PSTL2KEEP = 10,\n";
+ OS << " SV_PSTL2STRM = 11,\n";
+ OS << " SV_PSTL3KEEP = 12,\n";
+ OS << " SV_PSTL3STRM = 13\n";
+ OS << "} sv_prfop;\n\n";
+
+ OS << "/* Function attributes */\n";
+ OS << "#define __aio static inline __attribute__((__always_inline__, "
+ "__nodebug__, __overloadable__))\n\n";
+
+ // Add reinterpret functions.
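+ // Schematically, the short form emits an overloadable inline wrapper
+ //   __aio <From.Type> svreinterpret_<From.Suffix>(<To.Type> op)
+ // that returns __builtin_sve_reinterpret_<From.Suffix>_<To.Suffix>(op),
+ // while the long form emits the equivalent #define wrapper.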
+ for (auto ShortForm : {false, true})
+ for (const ReinterpretTypeInfo &From : Reinterprets)
+ for (const ReinterpretTypeInfo &To : Reinterprets) {
+ const bool IsBFloat = StringRef(From.Suffix).equals("bf16") ||
+ StringRef(To.Suffix).equals("bf16");
+ if (IsBFloat)
+ OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
+ if (ShortForm) {
+ OS << "__aio " << From.Type << " svreinterpret_" << From.Suffix;
+ OS << "(" << To.Type << " op) {\n";
+ OS << " return __builtin_sve_reinterpret_" << From.Suffix << "_"
+ << To.Suffix << "(op);\n";
+ OS << "}\n\n";
+ } else
+ OS << "#define svreinterpret_" << From.Suffix << "_" << To.Suffix
+ << "(...) __builtin_sve_reinterpret_" << From.Suffix << "_"
+ << To.Suffix << "(__VA_ARGS__)\n";
+ if (IsBFloat)
+ OS << "#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */\n";
+ }
+
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // Sort the intrinsics in the header file by the following order/priority:
+ // - Architectural guard (i.e. does it require SVE2 or SVE2_AES)
+ // - Class (whether the intrinsic is overloaded or not)
+ // - Intrinsic name
+ std::stable_sort(
+ Defs.begin(), Defs.end(), [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ auto ToTuple = [](const std::unique_ptr<Intrinsic> &I) {
+ return std::make_tuple(I->getGuard(), (unsigned)I->getClassKind(), I->getName());
+ };
+ return ToTuple(A) < ToTuple(B);
+ });
+
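+ // Emit the intrinsics grouped by architectural guard, schematically:
+ //   #if <guard>
+ //   ... intrinsics requiring that guard ...
+ //   #endif //<guard>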
+ StringRef InGuard = "";
+ for (auto &I : Defs) {
+ // Emit #endif/#if pair if needed.
+ if (I->getGuard() != InGuard) {
+ if (!InGuard.empty())
+ OS << "#endif //" << InGuard << "\n";
+ InGuard = I->getGuard();
+ if (!InGuard.empty())
+ OS << "\n#if " << InGuard << "\n";
+ }
+
+ // Actually emit the intrinsic declaration.
+ I->emitIntrinsic(OS);
+ }
+
+ if (!InGuard.empty())
+ OS << "#endif //" << InGuard << "\n";
+
+ OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
+ OS << "#define svcvtnt_bf16_x svcvtnt_bf16_m\n";
+ OS << "#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m\n";
+ OS << "#endif /*__ARM_FEATURE_SVE_BF16 */\n\n";
+
+ OS << "#if defined(__ARM_FEATURE_SVE2)\n";
+ OS << "#define svcvtnt_f16_x svcvtnt_f16_m\n";
+ OS << "#define svcvtnt_f16_f32_x svcvtnt_f16_f32_m\n";
+ OS << "#define svcvtnt_f32_x svcvtnt_f32_m\n";
+ OS << "#define svcvtnt_f32_f64_x svcvtnt_f32_f64_m\n\n";
+
+ OS << "#define svcvtxnt_f32_x svcvtxnt_f32_m\n";
+ OS << "#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m\n\n";
+
+ OS << "#endif /*__ARM_FEATURE_SVE2 */\n\n";
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "} // extern \"C\"\n";
+ OS << "#endif\n\n";
+ OS << "#endif /*__ARM_FEATURE_SVE */\n\n";
+ OS << "#endif /* __ARM_SVE_H */\n";
+}
+
+void SVEEmitter::createBuiltins(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SVE_BUILTINS\n";
+ for (auto &Def : Defs) {
+ // Only create BUILTINs for non-overloaded intrinsics, as overloaded
+ // declarations only live in the header file.
+ if (Def->getClassKind() != ClassG)
+ OS << "BUILTIN(__builtin_sve_" << Def->getMangledName() << ", \""
+ << Def->getBuiltinTypeStr() << "\", \"n\")\n";
+ }
+
+ // Add reinterpret builtins
+ for (const ReinterpretTypeInfo &From : Reinterprets)
+ for (const ReinterpretTypeInfo &To : Reinterprets)
+ OS << "BUILTIN(__builtin_sve_reinterpret_" << From.Suffix << "_"
+ << To.Suffix << +", \"" << From.BuiltinType << To.BuiltinType
+ << "\", \"n\")\n";
+
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createCodeGenMap(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SVE_LLVM_INTRINSIC_MAP\n";
+ for (auto &Def : Defs) {
+ // Builtins only exist for non-overloaded intrinsics; overloaded
+ // declarations live only in the header file.
+ if (Def->getClassKind() == ClassG)
+ continue;
+
+ uint64_t Flags = Def->getFlags();
+ auto FlagString = std::to_string(Flags);
+
+ std::string LLVMName = Def->getLLVMName();
+ std::string Builtin = Def->getMangledName();
+ if (!LLVMName.empty())
+ OS << "SVEMAP1(" << Builtin << ", " << LLVMName << ", " << FlagString
+ << "),\n";
+ else
+ OS << "SVEMAP2(" << Builtin << ", " << FlagString << "),\n";
+ }
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createRangeChecks(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SVE_IMMEDIATE_CHECK\n";
+
+ // Ensure these are only emitted once.
+ std::set<std::string> Emitted;
+
+ for (auto &Def : Defs) {
+ if (Emitted.find(Def->getMangledName()) != Emitted.end() ||
+ Def->getImmChecks().empty())
+ continue;
+
+ OS << "case SVE::BI__builtin_sve_" << Def->getMangledName() << ":\n";
+ for (auto &Check : Def->getImmChecks())
+ OS << "ImmChecks.push_back(std::make_tuple(" << Check.getArg() << ", "
+ << Check.getKind() << ", " << Check.getElementSizeInBits() << "));\n";
+ OS << " break;\n";
+
+ Emitted.insert(Def->getMangledName());
+ }
+
+ OS << "#endif\n\n";
+}
+
+/// Create the SVETypeFlags used in CGBuiltins
+void SVEEmitter::createTypeFlags(raw_ostream &OS) {
+ OS << "#ifdef LLVM_GET_SVE_TYPEFLAGS\n";
+ for (auto &KV : FlagTypes)
+ OS << "const uint64_t " << KV.getKey() << " = " << KV.getValue() << ";\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifdef LLVM_GET_SVE_ELTTYPES\n";
+ for (auto &KV : EltTypes)
+ OS << " " << KV.getKey() << " = " << KV.getValue() << ",\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifdef LLVM_GET_SVE_MEMELTTYPES\n";
+ for (auto &KV : MemEltTypes)
+ OS << " " << KV.getKey() << " = " << KV.getValue() << ",\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifdef LLVM_GET_SVE_MERGETYPES\n";
+ for (auto &KV : MergeTypes)
+ OS << " " << KV.getKey() << " = " << KV.getValue() << ",\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifdef LLVM_GET_SVE_IMMCHECKTYPES\n";
+ for (auto &KV : ImmCheckTypes)
+ OS << " " << KV.getKey() << " = " << KV.getValue() << ",\n";
+ OS << "#endif\n\n";
+}
+
+namespace clang {
+void EmitSveHeader(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createHeader(OS);
+}
+
+void EmitSveBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createBuiltins(OS);
+}
+
+void EmitSveBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createCodeGenMap(OS);
+}
+
+void EmitSveRangeChecks(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createRangeChecks(OS);
+}
+
+void EmitSveTypeFlags(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createTypeFlags(OS);
+}
+
+} // End namespace clang
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
index 6ba90cee4aae..1d6ef8065bb8 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
@@ -63,6 +63,7 @@ enum ActionType {
GenClangOpenCLBuiltins,
GenArmNeon,
GenArmFP16,
+ GenArmBF16,
GenArmNeonSema,
GenArmNeonTest,
GenArmMveHeader,
@@ -70,6 +71,16 @@ enum ActionType {
GenArmMveBuiltinSema,
GenArmMveBuiltinCG,
GenArmMveBuiltinAliases,
+ GenArmSveHeader,
+ GenArmSveBuiltins,
+ GenArmSveBuiltinCG,
+ GenArmSveTypeFlags,
+ GenArmSveRangeChecks,
+ GenArmCdeHeader,
+ GenArmCdeBuiltinDef,
+ GenArmCdeBuiltinSema,
+ GenArmCdeBuiltinCG,
+ GenArmCdeBuiltinAliases,
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
@@ -176,10 +187,21 @@ cl::opt<ActionType> Action(
"Generate OpenCL builtin declaration handlers"),
clEnumValN(GenArmNeon, "gen-arm-neon", "Generate arm_neon.h for clang"),
clEnumValN(GenArmFP16, "gen-arm-fp16", "Generate arm_fp16.h for clang"),
+ clEnumValN(GenArmBF16, "gen-arm-bf16", "Generate arm_bf16.h for clang"),
clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
"Generate ARM NEON sema support for clang"),
clEnumValN(GenArmNeonTest, "gen-arm-neon-test",
"Generate ARM NEON tests for clang"),
+ clEnumValN(GenArmSveHeader, "gen-arm-sve-header",
+ "Generate arm_sve.h for clang"),
+ clEnumValN(GenArmSveBuiltins, "gen-arm-sve-builtins",
+ "Generate arm_sve_builtins.inc for clang"),
+ clEnumValN(GenArmSveBuiltinCG, "gen-arm-sve-builtin-codegen",
+ "Generate arm_sve_builtin_cg_map.inc for clang"),
+ clEnumValN(GenArmSveTypeFlags, "gen-arm-sve-typeflags",
+ "Generate arm_sve_typeflags.inc for clang"),
+ clEnumValN(GenArmSveRangeChecks, "gen-arm-sve-sema-rangechecks",
+ "Generate arm_sve_sema_rangechecks.inc for clang"),
clEnumValN(GenArmMveHeader, "gen-arm-mve-header",
"Generate arm_mve.h for clang"),
clEnumValN(GenArmMveBuiltinDef, "gen-arm-mve-builtin-def",
@@ -190,6 +212,16 @@ cl::opt<ActionType> Action(
"Generate ARM MVE builtin code-generator for clang"),
clEnumValN(GenArmMveBuiltinAliases, "gen-arm-mve-builtin-aliases",
"Generate list of valid ARM MVE builtin aliases for clang"),
+ clEnumValN(GenArmCdeHeader, "gen-arm-cde-header",
+ "Generate arm_cde.h for clang"),
+ clEnumValN(GenArmCdeBuiltinDef, "gen-arm-cde-builtin-def",
+ "Generate ARM CDE builtin definitions for clang"),
+ clEnumValN(GenArmCdeBuiltinSema, "gen-arm-cde-builtin-sema",
+ "Generate ARM CDE builtin sema checks for clang"),
+ clEnumValN(GenArmCdeBuiltinCG, "gen-arm-cde-builtin-codegen",
+ "Generate ARM CDE builtin code-generator for clang"),
+ clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
+ "Generate list of valid ARM CDE builtin aliases for clang"),
clEnumValN(GenAttrDocs, "gen-attr-docs",
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -330,6 +362,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmFP16:
EmitFP16(Records, OS);
break;
+ case GenArmBF16:
+ EmitBF16(Records, OS);
+ break;
case GenArmNeonSema:
EmitNeonSema(Records, OS);
break;
@@ -351,6 +386,36 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmMveBuiltinAliases:
EmitMveBuiltinAliases(Records, OS);
break;
+ case GenArmSveHeader:
+ EmitSveHeader(Records, OS);
+ break;
+ case GenArmSveBuiltins:
+ EmitSveBuiltins(Records, OS);
+ break;
+ case GenArmSveBuiltinCG:
+ EmitSveBuiltinCG(Records, OS);
+ break;
+ case GenArmSveTypeFlags:
+ EmitSveTypeFlags(Records, OS);
+ break;
+ case GenArmSveRangeChecks:
+ EmitSveRangeChecks(Records, OS);
+ break;
+ case GenArmCdeHeader:
+ EmitCdeHeader(Records, OS);
+ break;
+ case GenArmCdeBuiltinDef:
+ EmitCdeBuiltinDef(Records, OS);
+ break;
+ case GenArmCdeBuiltinSema:
+ EmitCdeBuiltinSema(Records, OS);
+ break;
+ case GenArmCdeBuiltinCG:
+ EmitCdeBuiltinCG(Records, OS);
+ break;
+ case GenArmCdeBuiltinAliases:
+ EmitCdeBuiltinAliases(Records, OS);
+ break;
case GenAttrDocs:
EmitClangAttrDocs(Records, OS);
break;
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
index 7ac2e0eeb1f3..9717903ba52c 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
@@ -85,18 +85,31 @@ void EmitClangOpcodes(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeon(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitFP16(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitBF16(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeon2(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonSema2(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonTest2(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveTypeFlags(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveRangeChecks(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
void EmitMveHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitCdeBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitCdeBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
void EmitClangAttrDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangDiagDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangOptDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);